//===-- TargetLowering.cpp - Implement the TargetLowering class ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

/// InitLibcallNames - Set default libcall names.
static void InitLibcallNames(const char **Names) {
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::NEG_F32] = "__negsf2";
  Names[RTLIB::NEG_F64] = "__negdf2";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
  Names[RTLIB::OEQ_F32] = "__eqsf2";
  Names[RTLIB::OEQ_F64] = "__eqdf2";
  Names[RTLIB::UNE_F32] = "__nesf2";
  Names[RTLIB::UNE_F64] = "__nedf2";
  Names[RTLIB::OGE_F32] = "__gesf2";
  Names[RTLIB::OGE_F64] = "__gedf2";
  Names[RTLIB::OLT_F32] = "__ltsf2";
  Names[RTLIB::OLT_F64] = "__ltdf2";
  Names[RTLIB::OLE_F32] = "__lesf2";
  Names[RTLIB::OLE_F64] = "__ledf2";
  Names[RTLIB::OGT_F32] = "__gtsf2";
  Names[RTLIB::OGT_F64] = "__gtdf2";
  Names[RTLIB::UO_F32] = "__unordsf2";
  Names[RTLIB::UO_F64] = "__unorddf2";
  Names[RTLIB::O_F32] = "__unordsf2";
  Names[RTLIB::O_F64] = "__unorddf2";
}
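
// Note: these are only defaults. A target can override an individual entry
// from its own TargetLowering subclass; an illustrative sketch, assuming the
// usual setLibcallName accessor declared alongside this table (the routine
// name here is made up):
//
//   setLibcallName(RTLIB::SREM_I32, "__foo_smod32");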

/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
}
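
// These condition codes describe how to interpret the integer result of the
// comparison libcalls named above when soft-float legalization replaces an FP
// setcc. A rough sketch of the resulting pseudo-DAG (hypothetical lowering of
// "setolt f32 a, b"; the CC comes from the OLT_F32 entry registered here):
//
//   tmp = call __ltsf2(a, b)    ; returns an int
//   res = setcc tmp, 0, setlt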

TargetLowering::TargetLowering(TargetMachine &tm)
  : TM(tm), TD(TM.getTargetData()) {
  assert(ISD::BUILTIN_OP_END <= 156 &&
         "Fixed size array in TargetLowering is not large enough!");
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadXActions, 0, sizeof(LoadXActions));
  memset(&StoreXActions, 0, sizeof(StoreXActions));
  // Initialize all indexed load / store to expand.
  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand);
      setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand);
    }
  }

  IsLittleEndian = TD->isLittleEndian();
  UsesGlobalOffsetTable = false;
  ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD->getIntPtrType());
  ShiftAmtHandling = Undefined;
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0,
         sizeof(TargetDAGCombineArray)/sizeof(TargetDAGCombineArray[0]));
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  allowUnalignedMemoryAccesses = false;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  StackPointerRegisterToSaveRestore = 0;
  ExceptionPointerRegister = 0;
  ExceptionSelectorRegister = 0;
  SchedPreferenceInfo = SchedulingForLatency;
  JumpBufSize = 0;
  JumpBufAlignment = 0;

  InitLibcallNames(LibcallRoutineNames);
  InitCmpLibcallCCs(CmpLibcallCCs);
}
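
// A target's own TargetLowering subclass refines these defaults in its
// constructor. A minimal sketch (hypothetical "Foo" target; the register
// class and libcall names are made up, the calls are the real configuration
// hooks used by in-tree targets):
//
//   FooTargetLowering::FooTargetLowering(TargetMachine &TM)
//     : TargetLowering(TM) {
//     addRegisterClass(MVT::i32, Foo::GPRRegisterClass);
//     setOperationAction(ISD::SDIV, MVT::i32, Expand);  // no divide unit
//     setLibcallName(RTLIB::SDIV_I32, "__foo_sdiv");
//     computeRegisterProperties();
//   }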

TargetLowering::~TargetLowering() {}

/// SetValueTypeAction - Set the action for a particular value type. This
/// assumes an action has not already been set for this value type.
static void SetValueTypeAction(MVT::ValueType VT,
                               TargetLowering::LegalizeAction Action,
                               TargetLowering &TLI,
                               MVT::ValueType *TransformToType,
                               TargetLowering::ValueTypeActionImpl &ValueTypeActions) {
  ValueTypeActions.setTypeAction(VT, Action);
  if (Action == TargetLowering::Promote) {
    MVT::ValueType PromoteTo;
    if (VT == MVT::f32)
      PromoteTo = MVT::f64;
    else {
      unsigned LargerReg = VT+1;
      while (!TLI.isTypeLegal((MVT::ValueType)LargerReg)) {
        ++LargerReg;
        assert(MVT::isInteger((MVT::ValueType)LargerReg) &&
               "Nothing to promote to??");
      }
      PromoteTo = (MVT::ValueType)LargerReg;
    }

    assert(MVT::isInteger(VT) == MVT::isInteger(PromoteTo) &&
           MVT::isFloatingPoint(VT) == MVT::isFloatingPoint(PromoteTo) &&
           "Can only promote from int->int or fp->fp!");
    assert(VT < PromoteTo && "Must promote to a larger type!");
    TransformToType[VT] = PromoteTo;
  } else if (Action == TargetLowering::Expand) {
    // f32 and f64 are each expanded to a corresponding integer type of the
    // same size.
    if (VT == MVT::f32)
      TransformToType[VT] = MVT::i32;
    else if (VT == MVT::f64)
      TransformToType[VT] = MVT::i64;
    else {
      assert((VT == MVT::Vector || MVT::isInteger(VT)) && VT > MVT::i8 &&
             "Cannot expand this type: target must support SOME integer reg!");
      // Expand to the next smaller integer type!
      TransformToType[VT] = (MVT::ValueType)(VT-1);
    }
  }
}
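
// Worked example: on a target whose only integer register class is i32,
// i16 is not legal, so it is Promoted and TransformToType[MVT::i16] becomes
// MVT::i32 (the next larger legal integer type found by the while loop
// above). i64 needs two registers, so it is Expanded and
// TransformToType[MVT::i64] becomes MVT::i32 (the next smaller integer type).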

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
  assert(MVT::LAST_VALUETYPE <= 32 &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to one.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i)
    NumElementsForVT[i] = 1;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::i128;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  unsigned ExpandedReg = LargestIntReg; ++LargestIntReg;
  for (++ExpandedReg; MVT::isInteger((MVT::ValueType)ExpandedReg);
       ++ExpandedReg)
    NumElementsForVT[ExpandedReg] = 2*NumElementsForVT[ExpandedReg-1];

  // Inspect all of the possible ValueTypes, deciding how to process them.
  for (unsigned IntReg = MVT::i1; IntReg <= MVT::i128; ++IntReg)
    // If we are expanding this type, expand it!
    if (getNumElements((MVT::ValueType)IntReg) != 1)
      SetValueTypeAction((MVT::ValueType)IntReg, Expand, *this, TransformToType,
                         ValueTypeActions);
    else if (!isTypeLegal((MVT::ValueType)IntReg))
      // Otherwise, if we don't have native support, we must promote to a
      // larger type.
      SetValueTypeAction((MVT::ValueType)IntReg, Promote, *this,
                         TransformToType, ValueTypeActions);
    else
      TransformToType[(MVT::ValueType)IntReg] = (MVT::ValueType)IntReg;

  // If the target does not have native F64 support, expand it to I64. We will
  // be generating soft float library calls. If the target does not have native
  // support for F32, promote it to F64 if it is legal. Otherwise, expand it to
  // I32.
  if (isTypeLegal(MVT::f64))
    TransformToType[MVT::f64] = MVT::f64;
  else {
    NumElementsForVT[MVT::f64] = NumElementsForVT[MVT::i64];
    SetValueTypeAction(MVT::f64, Expand, *this, TransformToType,
                       ValueTypeActions);
  }
  if (isTypeLegal(MVT::f32))
    TransformToType[MVT::f32] = MVT::f32;
  else if (isTypeLegal(MVT::f64))
    SetValueTypeAction(MVT::f32, Promote, *this, TransformToType,
                       ValueTypeActions);
  else {
    NumElementsForVT[MVT::f32] = NumElementsForVT[MVT::i32];
    SetValueTypeAction(MVT::f32, Expand, *this, TransformToType,
                       ValueTypeActions);
  }

  // Set MVT::Vector to always be Expanded.
  SetValueTypeAction(MVT::Vector, Expand, *this, TransformToType,
                     ValueTypeActions);

  // Loop over all of the legal vector value types, specifying an identity type
  // conversion.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
    if (isTypeLegal((MVT::ValueType)i))
      TransformToType[i] = (MVT::ValueType)i;
  }
}
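
// For instance, if the largest integer register a target defines is i32, the
// doubling loop above records NumElementsForVT[MVT::i64] = 2 and
// NumElementsForVT[MVT::i128] = 4: each doubling of the value type doubles
// the number of registers needed to hold it.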

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}

/// getVectorTypeBreakdown - Packed types are broken down into some number of
/// legal first class types. For example, <8 x float> maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
///
/// This method returns the number and type of the resultant breakdown.
///
unsigned TargetLowering::getVectorTypeBreakdown(const VectorType *PTy,
                                                MVT::ValueType &PTyElementVT,
                                      MVT::ValueType &PTyLegalElementVT) const {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = PTy->getNumElements();
  MVT::ValueType EltTy = getValueType(PTy->getElementType());

  unsigned NumVectorRegs = 1;

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(getVectorType(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  MVT::ValueType VT;
  if (NumElts == 1) {
    VT = EltTy;
  } else {
    VT = getVectorType(EltTy, NumElts);
  }
  PTyElementVT = VT;

  MVT::ValueType DestVT = getTypeToTransformTo(VT);
  PTyLegalElementVT = DestVT;
  if (DestVT < VT) {
    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(MVT::getSizeInBits(VT)/MVT::getSizeInBits(DestVT));
  } else {
    // Otherwise, promotion or legal types use the same number of registers as
    // the vector decimated to the appropriate level.
    return NumVectorRegs;
  }
}
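
// Worked example (mirroring the comment above): for PTy = <8 x float> on a
// target where v4f32 is the widest legal vector type, the loop halves NumElts
// from 8 to 4, so this returns 2 with PTyElementVT = v4f32. On a target with
// no vector registers at all, NumElts is divided down to 1 and the breakdown
// is 8 scalar f32 values, whose legal form (e.g. f64 on the X86 FP stack) is
// reported through PTyLegalElementVT.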

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer. If so, check to see if there
/// are any bits set in the constant that are not demanded. If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op,
                                                        uint64_t Demanded) {
  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Op.getOpcode()) {
  default: break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if ((~Demanded & C->getValue()) != 0) {
        MVT::ValueType VT = Op.getValueType();
        SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                    DAG.getConstant(Demanded & C->getValue(),
                                                    VT));
        return CombineTo(Op, New);
      }
    break;
  }
  return false;
}
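
// Worked example: if Op is (and X, 0xFFFF) but Demanded is 0xFF, the constant
// has set bits (0xFF00) that are never demanded, so the node is rewritten as
// (and X, 0xFF) and true is returned. A smaller mask like this can often be
// encoded more cheaply as an immediate.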

/// SimplifyDemandedBits - Look at Op. At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream. If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New. Otherwise,
/// analyze the expression and return a mask of KnownOne and KnownZero bits for
/// the expression (used to simplify the caller). The KnownZero/One bits may
/// only be accurate for those bits in the DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask,
                                          uint64_t &KnownZero,
                                          uint64_t &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  KnownZero = KnownOne = 0;   // Don't know anything.

  // Other users may use these bits.
  if (!Op.Val->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedMask to all bits.
    DemandedMask = MVT::getIntVTBitMask(Op.getValueType());
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (Op.getOpcode() != ISD::UNDEF)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::UNDEF, Op.getValueType()));
    return false;
  } else if (Depth == 6) {        // Limit search depth.
    return false;
  }

  uint64_t KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getValue() & DemandedMask;
    KnownZero = ~KnownOne & DemandedMask;
    return false;   // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      uint64_t LHSZero, LHSOne;
      ComputeMaskedBits(Op.getOperand(0), DemandedMask,
                        LHSZero, LHSOne, Depth+1);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSZero & DemandedMask) == (~RHSC->getValue() & DemandedMask))
        return TLO.CombineTo(Op, Op.getOperand(0));
      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & DemandedMask))
        return true;
    }

    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownZero,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((DemandedMask & ~KnownZero2 & KnownOne) == (DemandedMask & ~KnownZero2))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & ~KnownZero & KnownOne2) == (DemandedMask & ~KnownZero))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((DemandedMask & (KnownZero|KnownZero2)) == DemandedMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask & ~KnownZero2))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownOne,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if ((DemandedMask & ~KnownOne2 & KnownZero) == (DemandedMask & ~KnownOne2))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & ~KnownOne & KnownZero2) == (DemandedMask & ~KnownOne))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((DemandedMask & (~KnownZero) & KnownOne2) ==
        (DemandedMask & (~KnownZero)))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & (~KnownZero2) & KnownOne) ==
        (DemandedMask & (~KnownZero2)))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR:
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if ((DemandedMask & KnownZero) == DemandedMask)
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & KnownZero2) == DemandedMask)
      return TLO.CombineTo(Op, Op.getOperand(1));

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((DemandedMask & ~KnownZero & ~KnownZero2) == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
                                               Op.getOperand(0),
                                               Op.getOperand(1)));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask) { // all known
      if ((KnownOne & KnownOne2) == KnownOne) {
        MVT::ValueType VT = Op.getValueType();
        SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & DemandedMask, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
                                                 ANDC));
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    KnownZero = KnownZeroOut;
    KnownOne = KnownOneOut;
    break;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
      KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SHL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask >> SA->getValue(),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      KnownZero <<= SA->getValue();
      KnownOne  <<= SA->getValue();
      KnownZero |= (1ULL << SA->getValue())-1;  // low bits known zero.
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // Compute the new bits that are at the top now.
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);
      if (SimplifyDemandedBits(Op.getOperand(0),
                               (DemandedMask << ShAmt) & TypeMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;

      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
      KnownZero |= HighBits;  // High bits known zero.
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // Compute the new bits that are at the top now.
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);

      uint64_t InDemandedMask = (DemandedMask << ShAmt) & TypeMask;

      // If any of the demanded bits are produced by the sign extension, we also
      // demand the input sign bit.
      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
      if (HighBits & DemandedMask)
        InDemandedMask |= MVT::getIntVTSignBit(VT);

      if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;

      // Handle the sign bits.
      uint64_t SignBit = MVT::getIntVTSignBit(VT);
      SignBit >>= ShAmt;  // Adjust to where it is now in the mask.

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if ((KnownZero & SignBit) || (HighBits & ~DemandedMask) == HighBits) {
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0),
                                                 Op.getOperand(1)));
      } else if (KnownOne & SignBit) {   // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign extension. Compute the demanded bits in the result that are not
    // present in the input.
    uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & DemandedMask;

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (NewBits == 0)
      return TLO.CombineTo(Op, Op.getOperand(0));

    uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
    int64_t InputDemandedBits = DemandedMask & MVT::getIntVTBitMask(EVT);

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
    if (KnownZero & InSignBit)
      return TLO.CombineTo(Op,
                           TLO.DAG.getZeroExtendInReg(Op.getOperand(0), EVT));

    if (KnownOne & InSignBit) {    // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                       // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP: {
    MVT::ValueType VT = Op.getValueType();
    unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
    KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
    KnownOne = 0;
    break;
  }
  case ISD::LOAD: {
    if (ISD::isZEXTLoad(Op.Val)) {
      LoadSDNode *LD = cast<LoadSDNode>(Op);
      MVT::ValueType VT = LD->getLoadedVT();
      KnownZero |= ~MVT::getIntVTBitMask(VT) & DemandedMask;
    }
    break;
  }
  case ISD::ZERO_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());

    // If none of the top bits are demanded, convert this into an any_extend.
    uint64_t NewBits = (~InMask) & DemandedMask;
    if (NewBits == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    MVT::ValueType InVT = Op.getOperand(0).getValueType();
    uint64_t InMask = MVT::getIntVTBitMask(InVT);
    uint64_t InSignBit = MVT::getIntVTSignBit(InVT);
    uint64_t NewBits = (~InMask) & DemandedMask;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewBits == 0)
      return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND,Op.getValueType(),
                                              Op.getOperand(0)));

    // Since some of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    uint64_t InDemandedBits = DemandedMask & InMask;
    InDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;

    // If the sign bit is known zero, convert this to a zero extend.
    if (KnownZero & InSignBit)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // If the sign bit is known one, the top bits match.
    if (KnownOne & InSignBit) {
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {   // Otherwise, top bits aren't known.
      KnownOne &= ~NewBits;
      KnownZero &= ~NewBits;
    }
    break;
  }
  case ISD::ANY_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    break;
  }
  case ISD::TRUNCATE: {
    // Simplify the input, using demanded bit information, and compute the known
    // zero/one bits live out.
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;

    // If the input is only used by this truncate, see if we can shrink it based
    // on the known demanded bits.
    if (Op.getOperand(0).Val->hasOneUse()) {
      SDOperand In = Op.getOperand(0);
      switch (In.getOpcode()) {
      default: break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
        if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))) {
          uint64_t HighBits = MVT::getIntVTBitMask(In.getValueType());
          HighBits &= ~MVT::getIntVTBitMask(Op.getValueType());
          HighBits >>= ShAmt->getValue();

          if (ShAmt->getValue() < MVT::getSizeInBits(Op.getValueType()) &&
              (DemandedMask & HighBits) == 0) {
            // None of the shifted in bits are needed. Add a truncate of the
            // shift input, then shift it.
            SDOperand NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE,
                                                 Op.getValueType(),
                                                 In.getOperand(0));
            return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, Op.getValueType(),
                                                     NewTrunc, In.getOperand(1)));
          }
        }
        break;
      }
    }

    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    uint64_t OutMask = MVT::getIntVTBitMask(Op.getValueType());
    KnownZero &= OutMask;
    KnownOne &= OutMask;
    break;
  }
  case ISD::AssertZext: {
    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    uint64_t InMask = MVT::getIntVTBitMask(VT);
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= ~InMask & DemandedMask;
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Just use ComputeMaskedBits to compute output bits.
    ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
    return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));

  return false;
}
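
// Typical usage, from a DAG combine. A minimal sketch, assuming TLI is the
// target's TargetLowering and DCI a DAGCombinerInfo as in the callers of this
// method:
//
//   TargetLowering::TargetLoweringOpt TLO(DCI.DAG);
//   uint64_t KnownZero, KnownOne;
//   if (TLI.SimplifyDemandedBits(Op, DemandedMask, KnownZero, KnownOne, TLO))
//     DCI.CombineTo(TLO.Old.Val, TLO.New);   // replace the old node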

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
bool TargetLowering::MaskedValueIsZero(SDOperand Op, uint64_t Mask,
                                       unsigned Depth) const {
  uint64_t KnownZero, KnownOne;
  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
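
// For example, if Op is (zero_extend i8 X to i32), every bit above bit 7 is
// known zero, so MaskedValueIsZero(Op, 0xFFFFFF00) returns true, while
// MaskedValueIsZero(Op, 0xFF) holds only if the low bits of X are themselves
// known zero.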

/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bitsets. This code only analyzes bits in Mask, in order to short-circuit
/// processing.
void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
                                       uint64_t &KnownZero, uint64_t &KnownOne,
                                       unsigned Depth) const {
  KnownZero = KnownOne = 0;   // Don't know anything.
  if (Depth == 6 || Mask == 0)
    return;  // Limit search depth.

  uint64_t KnownZero2, KnownOne2;

  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getValue() & Mask;
    KnownZero = ~KnownOne & Mask;
    return;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    Mask &= ~KnownZero;
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  case ISD::OR:
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    Mask &= ~KnownOne;
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  case ISD::XOR: {
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    uint64_t KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case ISD::SELECT:
    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SELECT_CC:
    ComputeMaskedBits(Op.getOperand(3), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
      KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    return;
  case ISD::SHL:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      ComputeMaskedBits(Op.getOperand(0), Mask >> SA->getValue(),
                        KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= SA->getValue();
      KnownOne  <<= SA->getValue();
      KnownZero |= (1ULL << SA->getValue())-1;  // low bits known zero.
    }
    return;
  case ISD::SRL:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      uint64_t TypeMask = MVT::getIntVTBitMask(VT);
      ComputeMaskedBits(Op.getOperand(0), (Mask << ShAmt) & TypeMask,
                        KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;

      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT)-ShAmt;
      KnownZero |= HighBits;  // High bits known zero.
    }
    return;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // Compute the new bits that are at the top now.
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);

      uint64_t InDemandedMask = (Mask << ShAmt) & TypeMask;
      // If any of the demanded bits are produced by the sign extension, we also
      // demand the input sign bit.
      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
      if (HighBits & Mask)
        InDemandedMask |= MVT::getIntVTSignBit(VT);

      ComputeMaskedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;

      // Handle the sign bits.
      uint64_t SignBit = MVT::getIntVTSignBit(VT);
      SignBit >>= ShAmt;  // Adjust to where it is now in the mask.

      if (KnownZero & SignBit) {
        KnownZero |= HighBits;  // New bits are known zero.
      } else if (KnownOne & SignBit) {
        KnownOne |= HighBits;   // New bits are known one.
      }
    }
    return;
  case ISD::SIGN_EXTEND_INREG: {
    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign extension. Compute the demanded bits in the result that are not
    // present in the input.
    uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & Mask;

    uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
    int64_t InputDemandedBits = Mask & MVT::getIntVTBitMask(EVT);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if (NewBits)
      InputDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), InputDemandedBits,
                      KnownZero, KnownOne, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero & InSignBit) {          // Input sign bit known clear
      KnownZero |= NewBits;
      KnownOne &= ~NewBits;
    } else if (KnownOne & InSignBit) {    // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                              // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne &= ~NewBits;
    }
    return;
  }
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP: {
    MVT::ValueType VT = Op.getValueType();
    unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
    KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
    KnownOne = 0;
    return;
  }
  case ISD::LOAD: {
    if (ISD::isZEXTLoad(Op.Val)) {
      LoadSDNode *LD = cast<LoadSDNode>(Op);
      MVT::ValueType VT = LD->getLoadedVT();
      KnownZero |= ~MVT::getIntVTBitMask(VT) & Mask;
    }
    return;
  }
  case ISD::ZERO_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
    uint64_t NewBits = (~InMask) & Mask;
    ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
                      KnownOne, Depth+1);
    KnownZero |= NewBits & Mask;
    KnownOne &= ~NewBits;
    return;
  }
  case ISD::SIGN_EXTEND: {
    MVT::ValueType InVT = Op.getOperand(0).getValueType();
    unsigned InBits = MVT::getSizeInBits(InVT);
    uint64_t InMask = MVT::getIntVTBitMask(InVT);
    uint64_t InSignBit = 1ULL << (InBits-1);
    uint64_t NewBits = (~InMask) & Mask;
    uint64_t InDemandedBits = Mask & InMask;

    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if (NewBits)
      InDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                      KnownOne, Depth+1);
    // If the sign bit is known zero or one, the top bits match.
    if (KnownZero & InSignBit) {
      KnownZero |= NewBits;
      KnownOne &= ~NewBits;
    } else if (KnownOne & InSignBit) {
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {   // Otherwise, top bits aren't known.
      KnownOne &= ~NewBits;
      KnownZero &= ~NewBits;
    }
    return;
  }
  case ISD::ANY_EXTEND: {
    MVT::ValueType VT = Op.getOperand(0).getValueType();
    ComputeMaskedBits(Op.getOperand(0), Mask & MVT::getIntVTBitMask(VT),
                      KnownZero, KnownOne, Depth+1);
    return;
  }
  case ISD::TRUNCATE: {
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    uint64_t OutMask = MVT::getIntVTBitMask(Op.getValueType());
    KnownZero &= OutMask;
    KnownOne &= OutMask;
    break;
  }
  case ISD::AssertZext: {
    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    uint64_t InMask = MVT::getIntVTBitMask(VT);
    ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
                      KnownOne, Depth+1);
    KnownZero |= (~InMask) & Mask;
    return;
  }
  case ISD::ADD: {
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the low clear bits
    // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
    // low 3 bits clear.
    uint64_t KnownZeroOut = std::min(CountTrailingZeros_64(~KnownZero),
                                     CountTrailingZeros_64(~KnownZero2));

    KnownZero = (1ULL << KnownZeroOut) - 1;
    KnownOne = 0;
    return;
  }
  case ISD::SUB: {
    ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0));
    if (!CLHS) return;

    // We know that the top bits of C-X are clear if X contains less bits
    // than C (i.e. no wrap-around can happen). For example, 20-X is
    // positive if we can prove that X is >= 0 and < 16.
    MVT::ValueType VT = CLHS->getValueType(0);
    if ((CLHS->getValue() & MVT::getIntVTSignBit(VT)) == 0) {  // sign bit clear
      unsigned NLZ = CountLeadingZeros_64(CLHS->getValue()+1);
      uint64_t MaskV = (1ULL << (63-NLZ))-1; // NLZ can't be 64 with no sign bit
      MaskV = ~MaskV & MVT::getIntVTBitMask(VT);
      ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero, KnownOne, Depth+1);

      // If all of the MaskV bits are known to be zero, then we know the output
      // top bits are zero, because we now know that the output is from [0-C].
      if ((KnownZero & MaskV) == MaskV) {
        unsigned NLZ2 = CountLeadingZeros_64(CLHS->getValue());
        KnownZero = ~((1ULL << (64-NLZ2))-1) & Mask;  // Top bits known zero.
        KnownOne = 0;                                 // No one bits known.
      } else {
        KnownZero = KnownOne = 0;  // Otherwise, nothing known.
      }
    }
    return;
  }
  default:
    // Allow the target to implement this method for its nodes.
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
      computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne);
    }
    return;
  }
}
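
// Worked example: for Op = (or (and X, 0xF0), 0x0F) with Mask = ~0ULL, the
// AND arm reports KnownZero = ~0xF0 and the constant arm reports
// KnownOne = 0x0F, so the OR case above yields KnownOne = 0x0F and
// KnownZero = ~0xFFULL: every bit outside the low byte is known zero.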

/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                    uint64_t Mask,
                                                    uint64_t &KnownZero,
                                                    uint64_t &KnownOne,
                                                    unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = 0;
  KnownOne = 0;
}

/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned TargetLowering::ComputeNumSignBits(SDOperand Op, unsigned Depth) const {
  MVT::ValueType VT = Op.getValueType();
  assert(MVT::isInteger(VT) && "Invalid VT!");
  unsigned VTBits = MVT::getSizeInBits(VT);
  unsigned Tmp, Tmp2;

  if (Depth == 6)
    return 1;  // Limit search depth.

  switch (Op.getOpcode()) {
  default: break;
  case ISD::AssertSext:
    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
    return VTBits-Tmp+1;
  case ISD::AssertZext:
    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
    return VTBits-Tmp;

  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(Op)->getValue();
    // If negative, invert the bits, then look at it.
    if (Val & MVT::getIntVTSignBit(VT))
      Val = ~Val;

    // Shift the bits so they are the leading bits in the int64_t.
    Val <<= 64-VTBits;

    // Return # leading zeros. We use 'min' here in case Val was zero before
    // shifting. We don't want to return '64' as for an i32 "0".
    return std::min(VTBits, CountLeadingZeros_64(Val));
  }

  case ISD::SIGN_EXTEND:
    Tmp = VTBits-MVT::getSizeInBits(Op.getOperand(0).getValueType());
    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;

  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
    Tmp = VTBits-Tmp+1;

    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    return std::max(Tmp, Tmp2);

  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    // SRA X, C   -> adds C sign bits.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Tmp += C->getValue();
      if (Tmp > VTBits) Tmp = VTBits;
    }
    return Tmp;
  case ISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (C->getValue() >= VTBits ||      // Bad shift.
          C->getValue() >= Tmp) break;    // Shifted all sign bits out.
      return Tmp - C->getValue();
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    return std::min(Tmp, Tmp2);

  case ISD::SELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
    return std::min(Tmp, Tmp2);

  case ISD::SETCC:
    // If setcc returns 0/-1, all bits are sign bits.
    if (getSetCCResultContents() == ZeroOrNegativeOneSetCCResult)
      return VTBits;
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned RotAmt = C->getValue() & (VTBits-1);

      // Handle rotate right by N like a rotate left by 32-N.
      if (Op.getOpcode() == ISD::ROTR)
        RotAmt = (VTBits-RotAmt) & (VTBits-1);

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left. This handles rotl(sext(x), 1) for example.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (Tmp > RotAmt+1) return Tmp-RotAmt;
    }
    break;
  case ISD::ADD:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        uint64_t KnownZero, KnownOne;
        uint64_t Mask = MVT::getIntVTBitMask(VT);
        ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero|1) == Mask)
          return VTBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero & MVT::getIntVTSignBit(VT))
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;

  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
      if (CLHS->getValue() == 0) {
        uint64_t KnownZero, KnownOne;
        uint64_t Mask = MVT::getIntVTBitMask(VT);
        ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero|1) == Mask)
          return VTBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero & MVT::getIntVTSignBit(VT))
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
  case ISD::TRUNCATE:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }

  // Handle LOADX separately here. EXTLOAD case will fallthrough.
  if (Op.getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    unsigned ExtType = LD->getExtensionType();
    switch (ExtType) {
    default: break;
    case ISD::SEXTLOAD:    // e.g. i16 -> i32: '17' bits known.
      Tmp = MVT::getSizeInBits(LD->getLoadedVT());
      return VTBits-Tmp+1;
    case ISD::ZEXTLOAD:    // e.g. i16 -> i32: '16' bits known.
      Tmp = MVT::getSizeInBits(LD->getLoadedVT());
      return VTBits-Tmp;
    }
  }

  // Allow the target to implement this method for its nodes.
  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_VOID) {
    unsigned NumBits = ComputeNumSignBitsForTargetNode(Op, Depth);
    if (NumBits > 1) return NumBits;
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  uint64_t KnownZero, KnownOne;
  uint64_t Mask = MVT::getIntVTBitMask(VT);
  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);

  uint64_t SignBit = MVT::getIntVTSignBit(VT);
  if (KnownZero & SignBit) {        // SignBit is 0
    Mask = KnownZero;
  } else if (KnownOne & SignBit) {  // SignBit is 1;
    Mask = KnownOne;
  } else {
    // Nothing known.
    return 1;
  }

  // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= 64-VTBits;
  // Return # leading zeros. We use 'min' here in case Val was zero before
  // shifting. We don't want to return '64' as for an i32 "0".
  return std::min(VTBits, CountLeadingZeros_64(Mask));
}
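
// Worked example: for (sra i32 X, 24), the SRA case adds the shift amount to
// whatever is known about X, so the result has at least 25 sign bits, i.e.
// the value fits in 8 bits. A caller such as the DAG combiner can then drop
// a redundant sign_extend_inreg of that value.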

/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op,
                                                         unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");
  return 1;
}

/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc. If it is unable to simplify it, return a null SDOperand.
SDOperand
TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
                              ISD::CondCode Cond, bool foldBooleans,
                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return DAG.getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return DAG.getConstant(1, VT);
  }

  if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val)) {
    uint64_t C1 = N1C->getValue();
    if (isa<ConstantSDNode>(N0.Val)) {
      return DAG.FoldSetCC(VT, N0, N1, Cond);
    } else {
      // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
      // equality comparison, then we're just comparing whether X itself is
      // zero or not.
      if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
          N0.getOperand(0).getOpcode() == ISD::CTLZ &&
          N0.getOperand(1).getOpcode() == ISD::Constant) {
        unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
        if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
            ShAmt == Log2_32(MVT::getSizeInBits(N0.getValueType()))) {
          if ((C1 == 0) == (Cond == ISD::SETEQ)) {
            // (srl (ctlz x), 5) == 0  -> X != 0
            // (srl (ctlz x), 5) != 1  -> X != 0
            Cond = ISD::SETNE;
          } else {
            // (srl (ctlz x), 5) != 0  -> X == 0
            // (srl (ctlz x), 5) == 1  -> X == 0
            Cond = ISD::SETEQ;
          }
          SDOperand Zero = DAG.getConstant(0, N0.getValueType());
          return DAG.getSetCC(VT, N0.getOperand(0).getOperand(0),
                              Zero, Cond);
        }
      }

      // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
      if (N0.getOpcode() == ISD::ZERO_EXTEND) {
        unsigned InSize = MVT::getSizeInBits(N0.getOperand(0).getValueType());

        // If the comparison constant has bits in the upper part, the
        // zero-extended value could never match.
        if (C1 & (~0ULL << InSize)) {
          unsigned VSize = MVT::getSizeInBits(N0.getValueType());
          switch (Cond) {
          case ISD::SETUGT:
          case ISD::SETUGE:
          case ISD::SETEQ: return DAG.getConstant(0, VT);
          case ISD::SETULT:
          case ISD::SETULE:
          case ISD::SETNE: return DAG.getConstant(1, VT);
          case ISD::SETGT:
          case ISD::SETGE:
            // True if the sign bit of C1 is set.
            return DAG.getConstant((C1 & (1ULL << (VSize-1))) != 0, VT);
          case ISD::SETLT:
          case ISD::SETLE:
            // True if the sign bit of C1 isn't set.
            return DAG.getConstant((C1 & (1ULL << (VSize-1))) == 0, VT);
          default: break;
          }
        }

        // Otherwise, we can perform the comparison with the low bits.
        switch (Cond) {
        case ISD::SETEQ:
        case ISD::SETNE:
        case ISD::SETUGT:
        case ISD::SETUGE:
        case ISD::SETULT:
        case ISD::SETULE:
          return DAG.getSetCC(VT, N0.getOperand(0),
                          DAG.getConstant(C1, N0.getOperand(0).getValueType()),
                          Cond);
        default:
          break;   // todo, be more careful with signed comparisons
        }
      } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
        MVT::ValueType ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
        unsigned ExtSrcTyBits = MVT::getSizeInBits(ExtSrcTy);
        MVT::ValueType ExtDstTy = N0.getValueType();
        unsigned ExtDstTyBits = MVT::getSizeInBits(ExtDstTy);

        // If the extended part has any inconsistent bits, it cannot ever
        // compare equal. In other words, they have to be all ones or all
        // zeros.
        uint64_t ExtBits =
          (~0ULL >> (64-ExtSrcTyBits)) & (~0ULL << (ExtDstTyBits-1));
        if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
          return DAG.getConstant(Cond == ISD::SETNE, VT);

        SDOperand ZextOp;
        MVT::ValueType Op0Ty = N0.getOperand(0).getValueType();
        if (Op0Ty == ExtSrcTy) {
          ZextOp = N0.getOperand(0);
        } else {
          int64_t Imm = ~0ULL >> (64-ExtSrcTyBits);
          ZextOp = DAG.getNode(ISD::AND, Op0Ty, N0.getOperand(0),
                               DAG.getConstant(Imm, Op0Ty));
        }
        if (!DCI.isCalledByLegalizer())
          DCI.AddToWorklist(ZextOp.Val);
        // Otherwise, make this a use of a zext.
        return DAG.getSetCC(VT, ZextOp,
                            DAG.getConstant(C1 & (~0ULL>>(64-ExtSrcTyBits)),
                                            ExtDstTy),
                            Cond);
      } else if ((N1C->getValue() == 0 || N1C->getValue() == 1) &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {

        // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
        if (N0.getOpcode() == ISD::SETCC) {
          bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getValue() != 1);
          if (TrueWhenTrue)
            return N0;

          // Invert the condition.
          ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
          CC = ISD::getSetCCInverse(CC,
                               MVT::isInteger(N0.getOperand(0).getValueType()));
          return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC);
        }

        if ((N0.getOpcode() == ISD::XOR ||
             (N0.getOpcode() == ISD::AND &&
              N0.getOperand(0).getOpcode() == ISD::XOR &&
              N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
            isa<ConstantSDNode>(N0.getOperand(1)) &&
            cast<ConstantSDNode>(N0.getOperand(1))->getValue() == 1) {
          // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
          // can only do this if the top bits are known zero.
          if (MaskedValueIsZero(N0, MVT::getIntVTBitMask(N0.getValueType())-1)) {
            // Okay, get the un-inverted input value.
            SDOperand Val;
            if (N0.getOpcode() == ISD::XOR)
              Val = N0.getOperand(0);
            else {
              assert(N0.getOpcode() == ISD::AND &&
                     N0.getOperand(0).getOpcode() == ISD::XOR);
              // ((X^1)&1)^1  -> X & 1
              Val = DAG.getNode(ISD::AND, N0.getValueType(),
                                N0.getOperand(0).getOperand(0),
                                N0.getOperand(1));
            }
            return DAG.getSetCC(VT, Val, N1,
                                Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
          }
        }
      }

      uint64_t MinVal, MaxVal;
      unsigned OperandBitSize = MVT::getSizeInBits(N1C->getValueType(0));
      if (ISD::isSignedIntSetCC(Cond)) {
        MinVal = 1ULL << (OperandBitSize-1);
        if (OperandBitSize != 1)   // Avoid X >> 64, which is undefined.
          MaxVal = ~0ULL >> (65-OperandBitSize);
        else
          MaxVal = 0;
      } else {
        MinVal = 0;
        MaxVal = ~0ULL >> (64-OperandBitSize);
      }

      // Canonicalize GE/LE comparisons to use GT/LT comparisons.
      if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
        if (C1 == MinVal) return DAG.getConstant(1, VT);  // X >= MIN --> true
        --C1;                                         // X >= C0 --> X > (C0-1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(C1, N1.getValueType()),
                        (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
      }

      if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
        if (C1 == MaxVal) return DAG.getConstant(1, VT);  // X <= MAX --> true
        ++C1;                                         // X <= C0 --> X < (C0+1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(C1, N1.getValueType()),
                        (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
      }

      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
        return DAG.getConstant(0, VT);      // X < MIN --> false
      if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
        return DAG.getConstant(1, VT);      // X >= MIN --> true
      if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
        return DAG.getConstant(0, VT);      // X > MAX --> false
      if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
        return DAG.getConstant(1, VT);      // X <= MAX --> true

      // Canonicalize setgt X, Min --> setne X, Min
      if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
        return DAG.getSetCC(VT, N0, N1, ISD::SETNE);
      // Canonicalize setlt X, Max --> setne X, Max
      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
        return DAG.getSetCC(VT, N0, N1, ISD::SETNE);
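
      // Worked example of the canonicalizations above, for unsigned i8
      // (MinVal = 0, MaxVal = 255): "setuge X, 5" becomes "setugt X, 4",
      // "setugt X, 255" folds to false, and "setult X, 255" becomes
      // "setne X, 255".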
1604 // If we have setult X, 1, turn it into seteq X, 0
1605 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
1606 return DAG.getSetCC(VT, N0, DAG.getConstant(MinVal, N0.getValueType()),
1608 // If we have setugt X, Max-1, turn it into seteq X, Max
1609 else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
1610 return DAG.getSetCC(VT, N0, DAG.getConstant(MaxVal, N0.getValueType()),
    // If we have "setcc X, C0", check to see if we can shrink the immediate
    // by changing the condition code.

    // SETUGT X, SINTMAX  -> SETLT X, 0
    if (Cond == ISD::SETUGT && OperandBitSize != 1 &&
        C1 == (~0ULL >> (65-OperandBitSize)))
      return DAG.getSetCC(VT, N0, DAG.getConstant(0, N1.getValueType()),
                          ISD::SETLT);
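    // e.g. for i32, (setugt X, 0x7FFFFFFF) becomes (setlt X, 0), since
    // X >u INT32_MAX holds exactly when the sign bit of X is set.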
    // FIXME: Implement the rest of these.

    // Fold bit comparisons when we can.
    if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
        VT == N0.getValueType() && N0.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS =
                  dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
        if (Cond == ISD::SETNE && C1 == 0) {  // (X & 8) != 0  -->  (X & 8) >> 3
          // Perform the xform if the AND RHS is a single bit.
          if (isPowerOf2_64(AndRHS->getValue())) {
            return DAG.getNode(ISD::SRL, VT, N0,
                               DAG.getConstant(Log2_64(AndRHS->getValue()),
                                               getShiftAmountTy()));
          }
        } else if (Cond == ISD::SETEQ && C1 == AndRHS->getValue()) {
          // (X & 8) == 8  -->  (X & 8) >> 3
          // Perform the xform if C1 is a single bit.
          if (isPowerOf2_64(C1)) {
            return DAG.getNode(ISD::SRL, VT, N0,
                               DAG.getConstant(Log2_64(C1), getShiftAmountTy()));
          }
        }
      }
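    // Illustrative (example constants): (setne (and X, 32), 0) becomes
    // (srl (and X, 32), 5), which produces the required 0/1 result without
    // a comparison.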
  } else if (isa<ConstantSDNode>(N0.Val)) {
    // Ensure that the constant occurs on the RHS.
    return DAG.getSetCC(VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
  }
  if (isa<ConstantFPSDNode>(N0.Val)) {
    // Constant fold or commute setcc.
    SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond);
    if (O.Val) return O;
  }
  if (N0 == N1) {
    // We can always fold X == X for integer setcc's.
    if (MVT::isInteger(N0.getValueType()))
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    unsigned UOF = ISD::getUnorderedFlavor(Cond);
    if (UOF == 2)   // FP operators that are undefined on NaNs.
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
      return DAG.getConstant(UOF, VT);
    // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
    // if it is not already.
    ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
    if (NewCond != Cond)
      return DAG.getSetCC(VT, N0, N1, NewCond);
  }
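  // Illustrative: with N0 == N1, "setolt X, X" folds to 0 above, while
  // "setult X, X" is true exactly when X is NaN and therefore cannot fold;
  // it is rewritten as "setuo X, X" instead.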
  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      MVT::isInteger(N0.getValueType())) {
    if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
        N0.getOpcode() == ISD::XOR) {
      // Simplify (X+Y) == (X+Z) -->  Y == Z
      if (N0.getOpcode() == N1.getOpcode()) {
        if (N0.getOperand(0) == N1.getOperand(0))
          return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(1), Cond);
        if (N0.getOperand(1) == N1.getOperand(1))
          return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(0), Cond);
        if (DAG.isCommutativeBinOp(N0.getOpcode())) {
          // If X op Y == Y op X, try other combinations.
          if (N0.getOperand(0) == N1.getOperand(1))
            return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(0), Cond);
          if (N0.getOperand(1) == N1.getOperand(0))
            return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(1), Cond);
        }
      }
      if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
        if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          // Turn (X+C1) == C2 --> X == C2-C1
          if (N0.getOpcode() == ISD::ADD && N0.Val->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(RHSC->getValue()-LHSR->getValue(),
                                              N0.getValueType()), Cond);
          }

          // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
          if (N0.getOpcode() == ISD::XOR)
            // If we know that all of the inverted bits are zero, don't bother
            // performing the inversion.
            if (MaskedValueIsZero(N0.getOperand(0), ~LHSR->getValue()))
              return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(LHSR->getValue()^RHSC->getValue(),
                                              N0.getValueType()), Cond);
        }

        // Turn (C1-X) == C2 --> X == C1-C2
        if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
          if (N0.getOpcode() == ISD::SUB && N0.Val->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(1),
                              DAG.getConstant(SUBC->getValue()-RHSC->getValue(),
                                              N0.getValueType()), Cond);
          }
        }
      }
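      // Illustrative (example constants): (X+5) == 7 folds to X == 2, and
      // (8-X) == 3 folds to X == 5, provided the add/sub has no other uses.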
      // Simplify (X+Z) == X -->  Z == 0
      if (N0.getOperand(0) == N1)
        return DAG.getSetCC(VT, N0.getOperand(1),
                            DAG.getConstant(0, N0.getValueType()), Cond);
      if (N0.getOperand(1) == N1) {
        if (DAG.isCommutativeBinOp(N0.getOpcode()))
          return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(0, N0.getValueType()), Cond);
        else {
          assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
          // (Z-X) == X  --> Z == X<<1
          SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(),
                                     N1,
                                     DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.Val);
          return DAG.getSetCC(VT, N0.getOperand(0), SH, Cond);
        }
      }
    }
    if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
        N1.getOpcode() == ISD::XOR) {
      // Simplify X == (X+Z) -->  Z == 0
      if (N1.getOperand(0) == N0) {
        return DAG.getSetCC(VT, N1.getOperand(1),
                            DAG.getConstant(0, N1.getValueType()), Cond);
      } else if (N1.getOperand(1) == N0) {
        if (DAG.isCommutativeBinOp(N1.getOpcode())) {
          return DAG.getSetCC(VT, N1.getOperand(0),
                              DAG.getConstant(0, N1.getValueType()), Cond);
        } else {
          assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
          // X == (Z-X)  --> X<<1 == Z
          SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0,
                                     DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.Val);
          return DAG.getSetCC(VT, SH, N1.getOperand(0), Cond);
        }
      }
    }
  }
  // Fold away ALL boolean setcc's.
  SDOperand Temp;
  if (N0.getValueType() == MVT::i1 && foldBooleans) {
    switch (Cond) {
    default: assert(0 && "Unknown integer setcc!");
    case ISD::SETEQ:  // X == Y  -->  (X^Y)^1
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      N0 = DAG.getNode(ISD::XOR, MVT::i1, Temp, DAG.getConstant(1, MVT::i1));
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETNE:  // X != Y  -->  (X^Y)
      N0 = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      break;
    case ISD::SETGT:  // X >s Y   -->  X == 0 & Y == 1  -->  X^1 & Y
    case ISD::SETULT: // X <u Y   -->  X == 0 & Y == 1  -->  X^1 & Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETLT:  // X <s Y   -->  X == 1 & Y == 0  -->  Y^1 & X
    case ISD::SETUGT: // X >u Y   -->  X == 1 & Y == 0  -->  Y^1 & X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N0, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETULE: // X <=u Y  -->  X == 0 | Y == 1  -->  X^1 | Y
    case ISD::SETGE:  // X >=s Y  -->  X == 0 | Y == 1  -->  X^1 | Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETUGE: // X >=u Y  -->  X == 1 | Y == 0  -->  Y^1 | X
    case ISD::SETLE:  // X <=s Y  -->  X == 1 | Y == 0  -->  Y^1 | X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N0, Temp);
      break;
    }
    if (VT != MVT::i1) {
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(N0.Val);
      // FIXME: If running after legalize, we probably can't do this.
      N0 = DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
    }
    return N0;
  }

  // Could not fold it.
  return SDOperand();
}
SDOperand TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  // Default implementation: no optimization.
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//
TargetLowering::ConstraintType
TargetLowering::getConstraintType(char ConstraintLetter) const {
  // FIXME: lots more standard ones to handle.
  switch (ConstraintLetter) {
  default: return C_Unknown;
  case 'r': return C_RegisterClass;
  case 'm':    // memory
  case 'o':    // offsetable
  case 'V':    // not offsetable
    return C_Memory;
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's':    // Relocatable Constant
  case 'I':    // Target registers.
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P':
    return C_Other;
  }
}
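// Example (illustrative): for an inline asm operand written "=r"(x) the 'r'
// constraint is classified as C_RegisterClass, 'm' as C_Memory, and 'i' as
// C_Other; any letter this default implementation does not recognize comes
// back as C_Unknown until a target overrides this hook.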
/// isOperandValidForConstraint - Return the specified operand (possibly
/// modified) if the specified SDOperand is valid for the specified target
/// constraint letter, otherwise return a null SDOperand.
SDOperand TargetLowering::isOperandValidForConstraint(SDOperand Op,
                                                      char ConstraintLetter,
                                                      SelectionDAG &DAG) {
  switch (ConstraintLetter) {
  default: break;
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's':    // Relocatable Constant
    // These are okay if the operand is either a global variable address or a
    // simple immediate value.  If we have one of these, map to the TargetXXX
    // version so that the value itself doesn't get selected.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      // Simple constants are not allowed for 's'.
      if (ConstraintLetter != 's')
        return DAG.getTargetConstant(C->getValue(), Op.getValueType());
    }
    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      if (ConstraintLetter != 'n')
        return DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getValueType(),
                                          GA->getOffset());
    }
    break;
  }
  return SDOperand(0,0);
}
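// Example (illustrative): for constraint 'i', an operand that is
// ConstantSDNode 42 is rewritten to TargetConstant 42 so it is not selected
// again; for 's' the same constant is rejected (a null SDOperand comes
// back), while a GlobalAddressSDNode is accepted and mapped to its
// TargetGlobalAddress form.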
std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  return std::vector<unsigned>();
}
std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT::ValueType VT) const {
  if (Constraint[0] != '{')
    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  std::string RegName(Constraint.begin()+1, Constraint.end()-1);

  // Figure out which register class contains this reg.
  const MRegisterInfo *RI = TM.getRegisterInfo();
  for (MRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    bool isLegal = false;
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (isTypeLegal(*I)) {
        isLegal = true;
        break;
      }
    }

    if (!isLegal) continue;

    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
         I != E; ++I) {
      if (StringsEqualNoCase(RegName, RI->get(*I).Name))
        return std::make_pair(*I, RC);
    }
  }

  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
}
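// Example (illustrative, assuming an X86 target): the constraint "{eax}"
// has its braces stripped to "eax", which matches the register named EAX
// case-insensitively, so the expected result is EAX paired with a 32-bit
// register class that is legal for the requested VT.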
//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressImmediate - Return true if the integer value or
/// GlobalValue can be used as the offset of the target addressing mode.
bool TargetLowering::isLegalAddressImmediate(int64_t V) const {
  return false;
}

bool TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
  return false;
}
// Magic for divide replacement

struct ms {
  int64_t m;  // magic number
  int64_t s;  // shift amount
};

struct mu {
  uint64_t m; // magic number
  int64_t a;  // add indicator
  int64_t s;  // shift amount
};
/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
static ms magic32(int32_t d) {
  int32_t p;
  uint32_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint32_t two31 = 0x80000000U;
  struct ms mag;

  ad = d >= 0 ? d : -d;
  t = two31 + ((uint32_t)d >> 31);
  anc = t - 1 - t%ad;   // absolute value of nc
  p = 31;               // initialize p
  q1 = two31/anc;       // initialize q1 = 2p/abs(nc)
  r1 = two31 - q1*anc;  // initialize r1 = rem(2p,abs(nc))
  q2 = two31/ad;        // initialize q2 = 2p/abs(d)
  r2 = two31 - q2*ad;   // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = 2*q1;          // update q1 = 2p/abs(nc)
    r1 = 2*r1;          // update r1 = rem(2p/abs(nc))
    if (r1 >= anc) {    // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;          // update q2 = 2p/abs(d)
    r2 = 2*r2;          // update r2 = rem(2p/abs(d))
    if (r2 >= ad) {     // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = (int32_t)(q2 + 1); // make sure to sign extend
  if (d < 0) mag.m = -mag.m; // resulting magic number
  mag.s = p - 32;            // resulting shift
  return mag;
}
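// Worked example (illustrative): magic32(7) computes m = 0x92492493
// (-1840700269 when interpreted as a signed 32-bit value) and s = 2, the
// usual multiplier/shift pair for signed division by 7.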
/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
static mu magicu32(uint32_t d) {
  int32_t p;
  uint32_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;               // initialize "add" indicator
  nc = -1 - (-d)%d;         // largest value congruent to d-1 (mod d)
  p = 31;                   // initialize p
  q1 = 0x80000000/nc;       // initialize q1 = 2p/nc
  r1 = 0x80000000 - q1*nc;  // initialize r1 = rem(2p,nc)
  q2 = 0x7FFFFFFF/d;        // initialize q2 = (2p-1)/d
  r2 = 0x7FFFFFFF - q2*d;   // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1 >= nc - r1) {
      q1 = 2*q1 + 1;     // update q1
      r1 = 2*r1 - nc;    // update r1
    } else {
      q1 = 2*q1;         // update q1
      r1 = 2*r1;         // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFF) magu.a = 1;
      q2 = 2*q2 + 1;     // update q2
      r2 = 2*r2 + 1 - d; // update r2
    } else {
      if (q2 >= 0x80000000) magu.a = 1;
      q2 = 2*q2;         // update q2
      r2 = 2*r2 + 1;     // update r2
    }
    delta = d - 1 - r2;
  } while (p < 64 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1; // resulting magic number
  magu.s = p - 32; // resulting shift
  return magu;
}
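// Worked example (illustrative): magicu32(7) computes m = 0x24924925 with
// a = 1 and s = 3, so the add-and-double-shift sequence is required (see
// BuildUDIV below).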
/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
static ms magic64(int64_t d) {
  int64_t p;
  uint64_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint64_t two63 = 9223372036854775808ULL; // 2^63
  struct ms mag;

  ad = d >= 0 ? d : -d;
  t = two63 + ((uint64_t)d >> 63);
  anc = t - 1 - t%ad;   // absolute value of nc
  p = 63;               // initialize p
  q1 = two63/anc;       // initialize q1 = 2p/abs(nc)
  r1 = two63 - q1*anc;  // initialize r1 = rem(2p,abs(nc))
  q2 = two63/ad;        // initialize q2 = 2p/abs(d)
  r2 = two63 - q2*ad;   // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = 2*q1;          // update q1 = 2p/abs(nc)
    r1 = 2*r1;          // update r1 = rem(2p/abs(nc))
    if (r1 >= anc) {    // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;          // update q2 = 2p/abs(d)
    r2 = 2*r2;          // update r2 = rem(2p/abs(d))
    if (r2 >= ad) {     // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = q2 + 1;
  if (d < 0) mag.m = -mag.m; // resulting magic number
  mag.s = p - 64;            // resulting shift
  return mag;
}
/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
static mu magicu64(uint64_t d)
{
  int64_t p;
  uint64_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;               // initialize "add" indicator
  nc = -1 - (-d)%d;         // largest value congruent to d-1 (mod d)
  p = 63;                   // initialize p
  q1 = 0x8000000000000000ull/nc;       // initialize q1 = 2p/nc
  r1 = 0x8000000000000000ull - q1*nc;  // initialize r1 = rem(2p,nc)
  q2 = 0x7FFFFFFFFFFFFFFFull/d;        // initialize q2 = (2p-1)/d
  r2 = 0x7FFFFFFFFFFFFFFFull - q2*d;   // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1 >= nc - r1) {
      q1 = 2*q1 + 1;     // update q1
      r1 = 2*r1 - nc;    // update r1
    } else {
      q1 = 2*q1;         // update q1
      r1 = 2*r1;         // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFFFFFFFFFFull) magu.a = 1;
      q2 = 2*q2 + 1;     // update q2
      r2 = 2*r2 + 1 - d; // update r2
    } else {
      if (q2 >= 0x8000000000000000ull) magu.a = 1;
      q2 = 2*q2;         // update q2
      r2 = 2*r2 + 1;     // update r2
    }
    delta = d - 1 - r2;
  } while (p < 128 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1; // resulting magic number
  magu.s = p - 64; // resulting shift
  return magu;
}
/// BuildSDIV - Given an ISD::SDIV node expressing a divide by constant, return
/// a DAG expression that will generate the same value by multiplying by a
/// magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                    std::vector<SDNode*>* Created) const {
  MVT::ValueType VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDOperand();       // BuildSDIV only operates on i32 or i64
  if (!isOperationLegal(ISD::MULHS, VT))
    return SDOperand();       // Make sure the target supports MULHS.

  int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSignExtended();
  ms magics = (VT == MVT::i32) ? magic32(d) : magic64(d);

  // Multiply the numerator (operand 0) by the magic value.
  SDOperand Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
                            DAG.getConstant(magics.m, VT));
  // If d > 0 and m < 0, add the numerator.
  if (d > 0 && magics.m < 0) {
    Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.Val);
  }
  // If d < 0 and m > 0, subtract the numerator.
  if (d < 0 && magics.m > 0) {
    Q = DAG.getNode(ISD::SUB, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.Val);
  }
  // Shift right algebraic if shift value is nonzero.
  if (magics.s > 0) {
    Q = DAG.getNode(ISD::SRA, VT, Q,
                    DAG.getConstant(magics.s, getShiftAmountTy()));
    if (Created)
      Created->push_back(Q.Val);
  }
  // Extract the sign bit and add it to the quotient.
  SDOperand T =
    DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(MVT::getSizeInBits(VT)-1,
                                                 getShiftAmountTy()));
  if (Created)
    Created->push_back(T.Val);
  return DAG.getNode(ISD::ADD, VT, Q, T);
}
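// Illustrative result (example divisor): for "sdiv i32 X, 7", magic32
// returns m < 0 with d > 0, so the code above emits
//   Q = MULHS(X, 0x92492493); Q = ADD(Q, X); Q = SRA(Q, 2);
//   return ADD(Q, SRL(Q, 31));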
/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant, return
/// a DAG expression that will generate the same value by multiplying by a
/// magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                    std::vector<SDNode*>* Created) const {
  MVT::ValueType VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDOperand();       // BuildUDIV only operates on i32 or i64
  if (!isOperationLegal(ISD::MULHU, VT))
    return SDOperand();       // Make sure the target supports MULHU.

  uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getValue();
  mu magics = (VT == MVT::i32) ? magicu32(d) : magicu64(d);

  // Multiply the numerator (operand 0) by the magic value.
  SDOperand Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
                            DAG.getConstant(magics.m, VT));
  if (Created)
    Created->push_back(Q.Val);

  if (magics.a == 0) {
    return DAG.getNode(ISD::SRL, VT, Q,
                       DAG.getConstant(magics.s, getShiftAmountTy()));
  } else {
    SDOperand NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q);
    if (Created)
      Created->push_back(NPQ.Val);
    NPQ = DAG.getNode(ISD::SRL, VT, NPQ,
                      DAG.getConstant(1, getShiftAmountTy()));
    if (Created)
      Created->push_back(NPQ.Val);
    NPQ = DAG.getNode(ISD::ADD, VT, NPQ, Q);
    if (Created)
      Created->push_back(NPQ.Val);
    return DAG.getNode(ISD::SRL, VT, NPQ,
                       DAG.getConstant(magics.s-1, getShiftAmountTy()));
  }
}
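// Illustrative result (example divisor): for "udiv i32 X, 7", magicu32
// returns a = 1 and s = 3, so the code above emits
//   Q = MULHU(X, 0x24924925); NPQ = SUB(X, Q); NPQ = SRL(NPQ, 1);
//   NPQ = ADD(NPQ, Q); return SRL(NPQ, 2);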
MVT::ValueType TargetLowering::getValueType(const Type *Ty) const {
  switch (Ty->getTypeID()) {
  default: assert(0 && "Unknown type!");
  case Type::VoidTyID:    return MVT::isVoid;
  case Type::IntegerTyID:
    switch (cast<IntegerType>(Ty)->getBitWidth()) {
    default: assert(0 && "Invalid width for value type");
    case 1:   return MVT::i1;
    case 8:   return MVT::i8;
    case 16:  return MVT::i16;
    case 32:  return MVT::i32;
    case 64:  return MVT::i64;
    case 128: return MVT::i128;
    }
    break;
  case Type::FloatTyID:   return MVT::f32;
  case Type::DoubleTyID:  return MVT::f64;
  case Type::PointerTyID: return PointerTy;
  case Type::VectorTyID:  return MVT::Vector;
  }
}
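// Example (illustrative): getValueType(Type::Int32Ty) yields MVT::i32, a
// double yields MVT::f64, and any pointer type yields the target's
// PointerTy (e.g. MVT::i32 on a typical 32-bit target).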