1 //===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements the TargetLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Target/TargetLowering.h"
15 #include "llvm/Target/TargetData.h"
16 #include "llvm/Target/TargetMachine.h"
17 #include "llvm/Target/MRegisterInfo.h"
18 #include "llvm/DerivedTypes.h"
19 #include "llvm/CodeGen/SelectionDAG.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/Support/MathExtras.h"
24 /// InitLibcallNames - Set default libcall names.
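/// These follow the common libgcc naming convention; legalization falls back
/// to these routines when an operation is not natively supported (for
/// example, a 64-bit multiply on a 32-bit target typically becomes a call to
/// __muldi3).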
26 static void InitLibcallNames(const char **Names) {
27 Names[RTLIB::SHL_I32] = "__ashlsi3";
28 Names[RTLIB::SHL_I64] = "__ashldi3";
29 Names[RTLIB::SRL_I32] = "__lshrsi3";
30 Names[RTLIB::SRL_I64] = "__lshrdi3";
31 Names[RTLIB::SRA_I32] = "__ashrsi3";
32 Names[RTLIB::SRA_I64] = "__ashrdi3";
33 Names[RTLIB::MUL_I32] = "__mulsi3";
34 Names[RTLIB::MUL_I64] = "__muldi3";
35 Names[RTLIB::SDIV_I32] = "__divsi3";
36 Names[RTLIB::SDIV_I64] = "__divdi3";
37 Names[RTLIB::UDIV_I32] = "__udivsi3";
38 Names[RTLIB::UDIV_I64] = "__udivdi3";
39 Names[RTLIB::SREM_I32] = "__modsi3";
40 Names[RTLIB::SREM_I64] = "__moddi3";
41 Names[RTLIB::UREM_I32] = "__umodsi3";
42 Names[RTLIB::UREM_I64] = "__umoddi3";
43 Names[RTLIB::NEG_I32] = "__negsi2";
44 Names[RTLIB::NEG_I64] = "__negdi2";
45 Names[RTLIB::ADD_F32] = "__addsf3";
46 Names[RTLIB::ADD_F64] = "__adddf3";
47 Names[RTLIB::SUB_F32] = "__subsf3";
48 Names[RTLIB::SUB_F64] = "__subdf3";
49 Names[RTLIB::MUL_F32] = "__mulsf3";
50 Names[RTLIB::MUL_F64] = "__muldf3";
51 Names[RTLIB::DIV_F32] = "__divsf3";
52 Names[RTLIB::DIV_F64] = "__divdf3";
53 Names[RTLIB::REM_F32] = "fmodf";
54 Names[RTLIB::REM_F64] = "fmod";
55 Names[RTLIB::NEG_F32] = "__negsf2";
56 Names[RTLIB::NEG_F64] = "__negdf2";
57 Names[RTLIB::POWI_F32] = "__powisf2";
58 Names[RTLIB::POWI_F64] = "__powidf2";
59 Names[RTLIB::SQRT_F32] = "sqrtf";
60 Names[RTLIB::SQRT_F64] = "sqrt";
61 Names[RTLIB::SIN_F32] = "sinf";
62 Names[RTLIB::SIN_F64] = "sin";
63 Names[RTLIB::COS_F32] = "cosf";
64 Names[RTLIB::COS_F64] = "cos";
65 Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
66 Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
67 Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
68 Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
69 Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
70 Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
71 Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
72 Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
73 Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
74 Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
75 Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
76 Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
77 Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
78 Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
79 Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
80 Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
81 Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
82 Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
83 Names[RTLIB::OEQ_F32] = "__eqsf2";
84 Names[RTLIB::OEQ_F64] = "__eqdf2";
85 Names[RTLIB::UNE_F32] = "__nesf2";
86 Names[RTLIB::UNE_F64] = "__nedf2";
87 Names[RTLIB::OGE_F32] = "__gesf2";
88 Names[RTLIB::OGE_F64] = "__gedf2";
89 Names[RTLIB::OLT_F32] = "__ltsf2";
90 Names[RTLIB::OLT_F64] = "__ltdf2";
91 Names[RTLIB::OLE_F32] = "__lesf2";
92 Names[RTLIB::OLE_F64] = "__ledf2";
93 Names[RTLIB::OGT_F32] = "__gtsf2";
94 Names[RTLIB::OGT_F64] = "__gtdf2";
95 Names[RTLIB::UO_F32] = "__unordsf2";
96 Names[RTLIB::UO_F64] = "__unorddf2";
97 Names[RTLIB::O_F32] = "__unordsf2";
98 Names[RTLIB::O_F64] = "__unorddf2";
}
101 /// InitCmpLibcallCCs - Set default comparison libcall CC.
103 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
104 memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
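// Note (for clarity): the libgcc-style comparison routines named above return
// an integer that is then compared against zero with the condition code
// recorded here; e.g. __eqsf2 returns 0 when its operands compare equal, so
// OEQ_F32 is tested with SETEQ.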
105 CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
106 CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
107 CCs[RTLIB::UNE_F32] = ISD::SETNE;
108 CCs[RTLIB::UNE_F64] = ISD::SETNE;
109 CCs[RTLIB::OGE_F32] = ISD::SETGE;
110 CCs[RTLIB::OGE_F64] = ISD::SETGE;
111 CCs[RTLIB::OLT_F32] = ISD::SETLT;
112 CCs[RTLIB::OLT_F64] = ISD::SETLT;
113 CCs[RTLIB::OLE_F32] = ISD::SETLE;
114 CCs[RTLIB::OLE_F64] = ISD::SETLE;
115 CCs[RTLIB::OGT_F32] = ISD::SETGT;
116 CCs[RTLIB::OGT_F64] = ISD::SETGT;
117 CCs[RTLIB::UO_F32] = ISD::SETNE;
118 CCs[RTLIB::UO_F64] = ISD::SETNE;
119 CCs[RTLIB::O_F32] = ISD::SETEQ;
120 CCs[RTLIB::O_F64] = ISD::SETEQ;
}
123 TargetLowering::TargetLowering(TargetMachine &tm)
124 : TM(tm), TD(TM.getTargetData()) {
125 assert(ISD::BUILTIN_OP_END <= 156 &&
126 "Fixed size array in TargetLowering is not large enough!");
127 // All operations default to being supported.
128 memset(OpActions, 0, sizeof(OpActions));
129 memset(LoadXActions, 0, sizeof(LoadXActions));
130 memset(&StoreXActions, 0, sizeof(StoreXActions));
131 // Initialize all indexed load / store to expand.
132 for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
133 for (unsigned IM = (unsigned)ISD::PRE_INC;
134 IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
135 setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand);
136 setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand);
}
}
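// Targets that do support pre-/post-indexed loads and stores are expected to
// override these defaults with setIndexedLoadAction/setIndexedStoreAction in
// their own constructors.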
140 IsLittleEndian = TD->isLittleEndian();
141 UsesGlobalOffsetTable = false;
142 ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD->getIntPtrType());
143 ShiftAmtHandling = Undefined;
144 memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
145 memset(TargetDAGCombineArray, 0,
146 sizeof(TargetDAGCombineArray)/sizeof(TargetDAGCombineArray[0]));
147 maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
148 allowUnalignedMemoryAccesses = false;
149 UseUnderscoreSetJmp = false;
150 UseUnderscoreLongJmp = false;
151 SelectIsExpensive = false;
152 IntDivIsCheap = false;
153 Pow2DivIsCheap = false;
154 StackPointerRegisterToSaveRestore = 0;
155 ExceptionPointerRegister = 0;
156 ExceptionSelectorRegister = 0;
157 SchedPreferenceInfo = SchedulingForLatency;
159 JumpBufAlignment = 0;
161 InitLibcallNames(LibcallRoutineNames);
162 InitCmpLibcallCCs(CmpLibcallCCs);
}
165 TargetLowering::~TargetLowering() {}
167 /// SetValueTypeAction - Set the action for a particular value type. This
168 /// assumes an action has not already been set for this value type.
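/// For example (illustrative): on a target whose widest legal integer type is
/// i32, an Expand of i64 records TransformToType[i64] = i32, and a Promote of
/// i16 records TransformToType[i16] = i32.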
169 static void SetValueTypeAction(MVT::ValueType VT,
170 TargetLowering::LegalizeAction Action,
TargetLowering &TLI,
172 MVT::ValueType *TransformToType,
173 TargetLowering::ValueTypeActionImpl &ValueTypeActions) {
174 ValueTypeActions.setTypeAction(VT, Action);
175 if (Action == TargetLowering::Promote) {
176 MVT::ValueType PromoteTo;
if (VT == MVT::f32)
178 PromoteTo = MVT::f64;
else {
180 unsigned LargerReg = VT+1;
181 while (!TLI.isTypeLegal((MVT::ValueType)LargerReg)) {
++LargerReg;
183 assert(MVT::isInteger((MVT::ValueType)LargerReg) &&
184 "Nothing to promote to??");
}
186 PromoteTo = (MVT::ValueType)LargerReg;
}

189 assert(MVT::isInteger(VT) == MVT::isInteger(PromoteTo) &&
190 MVT::isFloatingPoint(VT) == MVT::isFloatingPoint(PromoteTo) &&
191 "Can only promote from int->int or fp->fp!");
192 assert(VT < PromoteTo && "Must promote to a larger type!");
193 TransformToType[VT] = PromoteTo;
194 } else if (Action == TargetLowering::Expand) {
195 // f32 and f64 are each expanded to the corresponding integer type of the same size.
if (VT == MVT::f32)
197 TransformToType[VT] = MVT::i32;
198 else if (VT == MVT::f64)
199 TransformToType[VT] = MVT::i64;
else {
201 assert((VT == MVT::Vector || MVT::isInteger(VT)) && VT > MVT::i8 &&
202 "Cannot expand this type: target must support SOME integer reg!");
203 // Expand to the next smaller integer type!
204 TransformToType[VT] = (MVT::ValueType)(VT-1);
}
}
}
210 /// computeRegisterProperties - Once all of the register classes are added,
211 /// this allows us to compute derived properties we expose.
212 void TargetLowering::computeRegisterProperties() {
213 assert(MVT::LAST_VALUETYPE <= 32 &&
214 "Too many value types for ValueTypeActions to hold!");
216 // Everything defaults to one.
217 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i)
218 NumElementsForVT[i] = 1;
220 // Find the largest integer register class.
221 unsigned LargestIntReg = MVT::i128;
222 for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
223 assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
225 // Every integer value type larger than this largest register takes twice as
226 // many registers to represent as the previous ValueType.
227 unsigned ExpandedReg = LargestIntReg; ++LargestIntReg;
228 for (++ExpandedReg; MVT::isInteger((MVT::ValueType)ExpandedReg);++ExpandedReg)
229 NumElementsForVT[ExpandedReg] = 2*NumElementsForVT[ExpandedReg-1];
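// For example (illustrative): if i32 is the largest legal integer type, i64
// is represented with 2 registers and i128 with 4.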
231 // Inspect all of the possible ValueTypes, deciding how to process them.
232 for (unsigned IntReg = MVT::i1; IntReg <= MVT::i128; ++IntReg)
233 // If we are expanding this type, expand it!
234 if (getNumElements((MVT::ValueType)IntReg) != 1)
235 SetValueTypeAction((MVT::ValueType)IntReg, Expand, *this, TransformToType,
ValueTypeActions);
237 else if (!isTypeLegal((MVT::ValueType)IntReg))
238 // Otherwise, if we don't have native support, we must promote to a
// larger type.
240 SetValueTypeAction((MVT::ValueType)IntReg, Promote, *this,
241 TransformToType, ValueTypeActions);
else
243 TransformToType[(MVT::ValueType)IntReg] = (MVT::ValueType)IntReg;
245 // If the target does not have native F64 support, expand it to I64. We will
246 // be generating soft float library calls. If the target does not have native
247 // support for F32, promote it to F64 if it is legal. Otherwise, expand it to
// i32.
249 if (isTypeLegal(MVT::f64))
250 TransformToType[MVT::f64] = MVT::f64;
else {
252 NumElementsForVT[MVT::f64] = NumElementsForVT[MVT::i64];
253 SetValueTypeAction(MVT::f64, Expand, *this, TransformToType,
ValueTypeActions);
}
256 if (isTypeLegal(MVT::f32))
257 TransformToType[MVT::f32] = MVT::f32;
258 else if (isTypeLegal(MVT::f64))
259 SetValueTypeAction(MVT::f32, Promote, *this, TransformToType,
ValueTypeActions);
else {
262 NumElementsForVT[MVT::f32] = NumElementsForVT[MVT::i32];
263 SetValueTypeAction(MVT::f32, Expand, *this, TransformToType,
ValueTypeActions);
}
267 // Set MVT::Vector to always be Expanded
268 SetValueTypeAction(MVT::Vector, Expand, *this, TransformToType,
ValueTypeActions);

271 // Loop over all of the legal vector value types, specifying an identity type
// transformation.
273 for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
274 i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
275 if (isTypeLegal((MVT::ValueType)i))
276 TransformToType[i] = (MVT::ValueType)i;
}
}
280 const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
return NULL;
}
284 /// getVectorTypeBreakdown - Packed types are broken down into some number of
285 /// legal first class types. For example, <8 x float> maps to 2 MVT::v4f32
286 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
288 /// This method returns the number and type of the resultant breakdown.
290 unsigned TargetLowering::getVectorTypeBreakdown(const VectorType *PTy,
291 MVT::ValueType &PTyElementVT,
292 MVT::ValueType &PTyLegalElementVT) const {
293 // Figure out the right, legal destination reg to copy into.
294 unsigned NumElts = PTy->getNumElements();
295 MVT::ValueType EltTy = getValueType(PTy->getElementType());
297 unsigned NumVectorRegs = 1;
299 // Divide the input until we get to a supported size. This will always
300 // end with a scalar if the target doesn't support vectors.
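// For example (illustrative): <8 x float> on a target whose widest legal
// vector type is v4f32 leaves the loop below with NumElts == 4 and
// NumVectorRegs == 2.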
301 while (NumElts > 1 && !isTypeLegal(getVectorType(EltTy, NumElts))) {
NumElts >>= 1;
NumVectorRegs <<= 1;
}
306 MVT::ValueType VT = getVectorType(EltTy, NumElts);
307 if (!isTypeLegal(VT))
VT = EltTy;
PTyElementVT = VT;

311 MVT::ValueType DestVT = getTypeToTransformTo(VT);
312 PTyLegalElementVT = DestVT;
if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(VT)) {
314 // Value is expanded, e.g. i64 -> i16.
315 return NumVectorRegs*(MVT::getSizeInBits(VT)/MVT::getSizeInBits(DestVT));
}

317 // Otherwise, promotion or legal types use the same number of registers as
318 // the vector decimated to the appropriate level.
319 return NumVectorRegs;
}
325 //===----------------------------------------------------------------------===//
326 // Optimization Methods
327 //===----------------------------------------------------------------------===//
329 /// ShrinkDemandedConstant - Check to see if the specified operand of the
330 /// specified instruction is a constant integer. If so, check to see if there
331 /// are any bits set in the constant that are not demanded. If so, shrink the
332 /// constant and return true.
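/// For example (illustrative): an AND with the constant 255 in a context
/// where only the low four bits are demanded is rewritten to an AND with 15.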
333 bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op,
335 // FIXME: ISD::SELECT, ISD::SELECT_CC
336 switch(Op.getOpcode()) {
341 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
342 if ((~Demanded & C->getValue()) != 0) {
343 MVT::ValueType VT = Op.getValueType();
344 SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
345 DAG.getConstant(Demanded & C->getValue(),
347 return CombineTo(Op, New);
354 /// SimplifyDemandedBits - Look at Op. At this point, we know that only the
355 /// DemandedMask bits of the result of Op are ever used downstream. If we can
356 /// use this information to simplify Op, create a new simplified DAG node and
357 /// return true, returning the original and new nodes in Old and New. Otherwise,
358 /// analyze the expression and return a mask of KnownOne and KnownZero bits for
359 /// the expression (used to simplify the caller). The KnownZero/One bits may
360 /// only be accurate for those bits in the DemandedMask.
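/// For example (illustrative): if only the low byte of (X & 0xFF00) is
/// demanded, every demanded result bit is known to be zero, so the node is
/// replaced with the constant 0.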
361 bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask,
364 TargetLoweringOpt &TLO,
365 unsigned Depth) const {
366 KnownZero = KnownOne = 0; // Don't know anything.
367 // Other users may use these bits.
368 if (!Op.Val->hasOneUse()) {
370 // If not at the root, just compute the KnownZero/KnownOne bits to
371 // simplify things downstream.
372 ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
375 // If this is the root being simplified, allow it to have multiple uses,
376 // just set the DemandedMask to all bits.
377 DemandedMask = MVT::getIntVTBitMask(Op.getValueType());
378 } else if (DemandedMask == 0) {
379 // Not demanding any bits from Op.
380 if (Op.getOpcode() != ISD::UNDEF)
381 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::UNDEF, Op.getValueType()));
383 } else if (Depth == 6) { // Limit search depth.
387 uint64_t KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
388 switch (Op.getOpcode()) {
390 // We know all of the bits for a constant!
391 KnownOne = cast<ConstantSDNode>(Op)->getValue() & DemandedMask;
392 KnownZero = ~KnownOne & DemandedMask;
393 return false; // Don't fall through, will infinitely loop.
395 // If the RHS is a constant, check to see if the LHS would be zero without
396 // using the bits from the RHS. Below, we use knowledge about the RHS to
397 // simplify the LHS, here we're using information from the LHS to simplify
399 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
400 uint64_t LHSZero, LHSOne;
401 ComputeMaskedBits(Op.getOperand(0), DemandedMask,
402 LHSZero, LHSOne, Depth+1);
403 // If the LHS already has zeros where RHSC does, this and is dead.
404 if ((LHSZero & DemandedMask) == (~RHSC->getValue() & DemandedMask))
405 return TLO.CombineTo(Op, Op.getOperand(0));
406 // If any of the set bits in the RHS are known zero on the LHS, shrink
// the constant.
408 if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & DemandedMask))
412 if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
413 KnownOne, TLO, Depth+1))
415 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
416 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownZero,
417 KnownZero2, KnownOne2, TLO, Depth+1))
419 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
421 // If all of the demanded bits are known one on one side, return the other.
422 // These bits cannot contribute to the result of the 'and'.
423 if ((DemandedMask & ~KnownZero2 & KnownOne)==(DemandedMask & ~KnownZero2))
424 return TLO.CombineTo(Op, Op.getOperand(0));
425 if ((DemandedMask & ~KnownZero & KnownOne2)==(DemandedMask & ~KnownZero))
426 return TLO.CombineTo(Op, Op.getOperand(1));
427 // If all of the demanded bits in the inputs are known zeros, return zero.
428 if ((DemandedMask & (KnownZero|KnownZero2)) == DemandedMask)
429 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
430 // If the RHS is a constant, see if we can simplify it.
431 if (TLO.ShrinkDemandedConstant(Op, DemandedMask & ~KnownZero2))
434 // Output known-1 bits are only known if set in both the LHS & RHS.
435 KnownOne &= KnownOne2;
436 // Output known-0 are known to be clear if zero in either the LHS | RHS.
437 KnownZero |= KnownZero2;
440 if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
441 KnownOne, TLO, Depth+1))
443 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
444 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownOne,
445 KnownZero2, KnownOne2, TLO, Depth+1))
447 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
449 // If all of the demanded bits are known zero on one side, return the other.
450 // These bits cannot contribute to the result of the 'or'.
451 if ((DemandedMask & ~KnownOne2 & KnownZero) == (DemandedMask & ~KnownOne2))
452 return TLO.CombineTo(Op, Op.getOperand(0));
453 if ((DemandedMask & ~KnownOne & KnownZero2) == (DemandedMask & ~KnownOne))
454 return TLO.CombineTo(Op, Op.getOperand(1));
455 // If all of the potentially set bits on one side are known to be set on
456 // the other side, just use the 'other' side.
457 if ((DemandedMask & (~KnownZero) & KnownOne2) ==
458 (DemandedMask & (~KnownZero)))
459 return TLO.CombineTo(Op, Op.getOperand(0));
460 if ((DemandedMask & (~KnownZero2) & KnownOne) ==
461 (DemandedMask & (~KnownZero2)))
462 return TLO.CombineTo(Op, Op.getOperand(1));
463 // If the RHS is a constant, see if we can simplify it.
464 if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
467 // Output known-0 bits are only known if clear in both the LHS & RHS.
468 KnownZero &= KnownZero2;
469 // Output known-1 are known to be set if set in either the LHS | RHS.
470 KnownOne |= KnownOne2;
473 if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
474 KnownOne, TLO, Depth+1))
476 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
477 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, KnownZero2,
478 KnownOne2, TLO, Depth+1))
480 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
482 // If all of the demanded bits are known zero on one side, return the other.
483 // These bits cannot contribute to the result of the 'xor'.
484 if ((DemandedMask & KnownZero) == DemandedMask)
485 return TLO.CombineTo(Op, Op.getOperand(0));
486 if ((DemandedMask & KnownZero2) == DemandedMask)
487 return TLO.CombineTo(Op, Op.getOperand(1));
489 // If all of the unknown bits are known to be zero on one side or the other
490 // (but not both) turn this into an *inclusive* or.
491 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
492 if ((DemandedMask & ~KnownZero & ~KnownZero2) == 0)
493 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
497 // Output known-0 bits are known if clear or set in both the LHS & RHS.
498 KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
499 // Output known-1 are known to be set if set in only one of the LHS, RHS.
500 KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
502 // If all of the demanded bits on one side are known, and all of the set
503 // bits on that side are also known to be set on the other side, turn this
504 // into an AND, as we know the bits will be cleared.
505 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
506 if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask) { // all known
507 if ((KnownOne & KnownOne2) == KnownOne) {
508 MVT::ValueType VT = Op.getValueType();
509 SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & DemandedMask, VT);
510 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
515 // If the RHS is a constant, see if we can simplify it.
516 // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
517 if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
520 KnownZero = KnownZeroOut;
521 KnownOne = KnownOneOut;
524 // If we know the result of a setcc has the top bits zero, use this info.
525 if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
526 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
529 if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero,
530 KnownOne, TLO, Depth+1))
532 if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero2,
533 KnownOne2, TLO, Depth+1))
535 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
536 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
538 // If the operands are constants, see if we can simplify them.
539 if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
542 // Only known if known in both the LHS and RHS.
543 KnownOne &= KnownOne2;
544 KnownZero &= KnownZero2;
547 if (SimplifyDemandedBits(Op.getOperand(3), DemandedMask, KnownZero,
548 KnownOne, TLO, Depth+1))
550 if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero2,
551 KnownOne2, TLO, Depth+1))
553 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
554 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
556 // If the operands are constants, see if we can simplify them.
557 if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
560 // Only known if known in both the LHS and RHS.
561 KnownOne &= KnownOne2;
562 KnownZero &= KnownZero2;
565 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
566 unsigned ShAmt = SA->getValue();
567 SDOperand InOp = Op.getOperand(0);
569 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
570 // single shift. We can do this if the bottom bits (which are shifted
571 // out) are never demanded.
572 if (InOp.getOpcode() == ISD::SRL &&
573 isa<ConstantSDNode>(InOp.getOperand(1))) {
574 if (ShAmt && (DemandedMask & ((1ULL << ShAmt)-1)) == 0) {
575 unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
576 unsigned Opc = ISD::SHL;
584 TLO.DAG.getConstant(ShAmt-C1, Op.getOperand(1).getValueType());
585 MVT::ValueType VT = Op.getValueType();
586 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
587 InOp.getOperand(0), NewSA));
591 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask >> ShAmt,
592 KnownZero, KnownOne, TLO, Depth+1))
594 KnownZero <<= SA->getValue();
595 KnownOne <<= SA->getValue();
596 KnownZero |= (1ULL << SA->getValue())-1; // low bits known zero.
600 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
601 MVT::ValueType VT = Op.getValueType();
602 unsigned ShAmt = SA->getValue();
603 uint64_t TypeMask = MVT::getIntVTBitMask(VT);
604 unsigned VTSize = MVT::getSizeInBits(VT);
605 SDOperand InOp = Op.getOperand(0);
607 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
608 // single shift. We can do this if the top bits (which are shifted out)
609 // are never demanded.
610 if (InOp.getOpcode() == ISD::SHL &&
611 isa<ConstantSDNode>(InOp.getOperand(1))) {
612 if (ShAmt && (DemandedMask & (~0ULL << (VTSize-ShAmt))) == 0) {
613 unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
614 unsigned Opc = ISD::SRL;
622 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
623 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
624 InOp.getOperand(0), NewSA));
628 // Compute the new bits that are at the top now.
629 if (SimplifyDemandedBits(InOp, (DemandedMask << ShAmt) & TypeMask,
630 KnownZero, KnownOne, TLO, Depth+1))
632 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
633 KnownZero &= TypeMask;
634 KnownOne &= TypeMask;
638 uint64_t HighBits = (1ULL << ShAmt)-1;
639 HighBits <<= VTSize - ShAmt;
640 KnownZero |= HighBits; // High bits known zero.
644 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
645 MVT::ValueType VT = Op.getValueType();
646 unsigned ShAmt = SA->getValue();
648 // Compute the new bits that are at the top now.
649 uint64_t TypeMask = MVT::getIntVTBitMask(VT);
651 uint64_t InDemandedMask = (DemandedMask << ShAmt) & TypeMask;
653 // If any of the demanded bits are produced by the sign extension, we also
654 // demand the input sign bit.
655 uint64_t HighBits = (1ULL << ShAmt)-1;
656 HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
657 if (HighBits & DemandedMask)
658 InDemandedMask |= MVT::getIntVTSignBit(VT);
660 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
661 KnownZero, KnownOne, TLO, Depth+1))
663 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
664 KnownZero &= TypeMask;
665 KnownOne &= TypeMask;
669 // Handle the sign bits.
670 uint64_t SignBit = MVT::getIntVTSignBit(VT);
671 SignBit >>= ShAmt; // Adjust to where it is now in the mask.
673 // If the input sign bit is known to be zero, or if none of the top bits
674 // are demanded, turn this into an unsigned shift right.
675 if ((KnownZero & SignBit) || (HighBits & ~DemandedMask) == HighBits) {
676 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0),
678 } else if (KnownOne & SignBit) { // New bits are known one.
679 KnownOne |= HighBits;
683 case ISD::SIGN_EXTEND_INREG: {
684 MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
686 // Sign extension. Compute the demanded bits in the result that are not
687 // present in the input.
688 uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & DemandedMask;
690 // If none of the extended bits are demanded, eliminate the sextinreg.
692 return TLO.CombineTo(Op, Op.getOperand(0));
694 uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
695 int64_t InputDemandedBits = DemandedMask & MVT::getIntVTBitMask(EVT);
697 // Since the sign extended bits are demanded, we know that the sign
// bit is demanded.
699 InputDemandedBits |= InSignBit;
701 if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
702 KnownZero, KnownOne, TLO, Depth+1))
704 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
706 // If the sign bit of the input is known set or clear, then we know the
707 // top bits of the result.
709 // If the input sign bit is known zero, convert this into a zero extension.
710 if (KnownZero & InSignBit)
711 return TLO.CombineTo(Op,
712 TLO.DAG.getZeroExtendInReg(Op.getOperand(0), EVT));
714 if (KnownOne & InSignBit) { // Input sign bit known set
716 KnownZero &= ~NewBits;
717 } else { // Input sign bit unknown
718 KnownZero &= ~NewBits;
719 KnownOne &= ~NewBits;
726 MVT::ValueType VT = Op.getValueType();
727 unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
728 KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
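// (A count of leading/trailing zeros or of set bits can be at most the bit
// width of the type, so only the low Log2_32(BitWidth)+1 result bits can ever
// be set; everything above them is known zero.)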
733 if (ISD::isZEXTLoad(Op.Val)) {
734 LoadSDNode *LD = cast<LoadSDNode>(Op);
735 MVT::ValueType VT = LD->getLoadedVT();
736 KnownZero |= ~MVT::getIntVTBitMask(VT) & DemandedMask;
740 case ISD::ZERO_EXTEND: {
741 uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
743 // If none of the top bits are demanded, convert this into an any_extend.
744 uint64_t NewBits = (~InMask) & DemandedMask;
746 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
750 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
751 KnownZero, KnownOne, TLO, Depth+1))
753 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
754 KnownZero |= NewBits;
757 case ISD::SIGN_EXTEND: {
758 MVT::ValueType InVT = Op.getOperand(0).getValueType();
759 uint64_t InMask = MVT::getIntVTBitMask(InVT);
760 uint64_t InSignBit = MVT::getIntVTSignBit(InVT);
761 uint64_t NewBits = (~InMask) & DemandedMask;
763 // If none of the top bits are demanded, convert this into an any_extend.
765 return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND,Op.getValueType(),
768 // Since some of the sign extended bits are demanded, we know that the sign
// bit is demanded.
770 uint64_t InDemandedBits = DemandedMask & InMask;
771 InDemandedBits |= InSignBit;
773 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
774 KnownOne, TLO, Depth+1))
777 // If the sign bit is known zero, convert this to a zero extend.
778 if (KnownZero & InSignBit)
779 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND,
783 // If the sign bit is known one, the top bits match.
784 if (KnownOne & InSignBit) {
786 KnownZero &= ~NewBits;
787 } else { // Otherwise, top bits aren't known.
788 KnownOne &= ~NewBits;
789 KnownZero &= ~NewBits;
793 case ISD::ANY_EXTEND: {
794 uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
795 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
796 KnownZero, KnownOne, TLO, Depth+1))
798 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
801 case ISD::TRUNCATE: {
802 // Simplify the input, using demanded bit information, and compute the known
803 // zero/one bits live out.
804 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
805 KnownZero, KnownOne, TLO, Depth+1))
808 // If the input is only used by this truncate, see if we can shrink it based
809 // on the known demanded bits.
810 if (Op.getOperand(0).Val->hasOneUse()) {
811 SDOperand In = Op.getOperand(0);
812 switch (In.getOpcode()) {
815 // Shrink SRL by a constant if none of the high bits shifted in are
// demanded.
817 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){
818 uint64_t HighBits = MVT::getIntVTBitMask(In.getValueType());
819 HighBits &= ~MVT::getIntVTBitMask(Op.getValueType());
820 HighBits >>= ShAmt->getValue();
822 if (ShAmt->getValue() < MVT::getSizeInBits(Op.getValueType()) &&
823 (DemandedMask & HighBits) == 0) {
824 // None of the shifted in bits are needed. Add a truncate of the
825 // shift input, then shift it.
826 SDOperand NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE,
829 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL,Op.getValueType(),
830 NewTrunc, In.getOperand(1)));
837 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
838 uint64_t OutMask = MVT::getIntVTBitMask(Op.getValueType());
839 KnownZero &= OutMask;
843 case ISD::AssertZext: {
844 MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
845 uint64_t InMask = MVT::getIntVTBitMask(VT);
846 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
847 KnownZero, KnownOne, TLO, Depth+1))
849 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
850 KnownZero |= ~InMask & DemandedMask;
855 case ISD::INTRINSIC_WO_CHAIN:
856 case ISD::INTRINSIC_W_CHAIN:
857 case ISD::INTRINSIC_VOID:
858 // Just use ComputeMaskedBits to compute output bits.
859 ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
863 // If we know the value of all of the demanded bits, return this as a
// constant.
865 if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
866 return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));
871 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
872 /// this predicate to simplify operations downstream. Mask is known to be zero
873 /// for bits that V cannot have.
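/// For example (illustrative): MaskedValueIsZero(X << 4, 0xF) returns true,
/// since shifting left by four always clears the low four bits.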
874 bool TargetLowering::MaskedValueIsZero(SDOperand Op, uint64_t Mask,
875 unsigned Depth) const {
876 uint64_t KnownZero, KnownOne;
877 ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
878 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
879 return (KnownZero & Mask) == Mask;
882 /// ComputeMaskedBits - Determine which of the bits specified in Mask are
883 /// known to be either zero or one and return them in the KnownZero/KnownOne
884 /// bitsets. This code only analyzes bits in Mask, in order to short-circuit
/// processing.
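/// For example (illustrative): for the node (X | 1) with Mask 0xFF, bit 0 of
/// KnownOne is set no matter what X is.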
886 void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
887 uint64_t &KnownZero, uint64_t &KnownOne,
888 unsigned Depth) const {
889 KnownZero = KnownOne = 0; // Don't know anything.
890 if (Depth == 6 || Mask == 0)
891 return; // Limit search depth.
893 uint64_t KnownZero2, KnownOne2;
895 switch (Op.getOpcode()) {
897 // We know all of the bits for a constant!
898 KnownOne = cast<ConstantSDNode>(Op)->getValue() & Mask;
899 KnownZero = ~KnownOne & Mask;
902 // If either the LHS or the RHS is zero, the result is zero.
903 ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
905 ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
906 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
907 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
909 // Output known-1 bits are only known if set in both the LHS & RHS.
910 KnownOne &= KnownOne2;
911 // Output known-0 are known to be clear if zero in either the LHS | RHS.
912 KnownZero |= KnownZero2;
915 ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
917 ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
918 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
919 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
921 // Output known-0 bits are only known if clear in both the LHS & RHS.
922 KnownZero &= KnownZero2;
923 // Output known-1 are known to be set if set in either the LHS | RHS.
924 KnownOne |= KnownOne2;
927 ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
928 ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
929 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
930 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
932 // Output known-0 bits are known if clear or set in both the LHS & RHS.
933 uint64_t KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
934 // Output known-1 are known to be set if set in only one of the LHS, RHS.
935 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
936 KnownZero = KnownZeroOut;
940 ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
941 ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
942 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
943 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
945 // Only known if known in both the LHS and RHS.
946 KnownOne &= KnownOne2;
947 KnownZero &= KnownZero2;
950 ComputeMaskedBits(Op.getOperand(3), Mask, KnownZero, KnownOne, Depth+1);
951 ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero2, KnownOne2, Depth+1);
952 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
953 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
955 // Only known if known in both the LHS and RHS.
956 KnownOne &= KnownOne2;
957 KnownZero &= KnownZero2;
960 // If we know the result of a setcc has the top bits zero, use this info.
961 if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
962 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
965 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
966 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
967 ComputeMaskedBits(Op.getOperand(0), Mask >> SA->getValue(),
968 KnownZero, KnownOne, Depth+1);
969 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
970 KnownZero <<= SA->getValue();
971 KnownOne <<= SA->getValue();
972 KnownZero |= (1ULL << SA->getValue())-1; // low bits known zero.
976 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
977 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
978 MVT::ValueType VT = Op.getValueType();
979 unsigned ShAmt = SA->getValue();
981 uint64_t TypeMask = MVT::getIntVTBitMask(VT);
982 ComputeMaskedBits(Op.getOperand(0), (Mask << ShAmt) & TypeMask,
983 KnownZero, KnownOne, Depth+1);
984 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
985 KnownZero &= TypeMask;
986 KnownOne &= TypeMask;
990 uint64_t HighBits = (1ULL << ShAmt)-1;
991 HighBits <<= MVT::getSizeInBits(VT)-ShAmt;
992 KnownZero |= HighBits; // High bits known zero.
996 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
997 MVT::ValueType VT = Op.getValueType();
998 unsigned ShAmt = SA->getValue();
1000 // Compute the new bits that are at the top now.
1001 uint64_t TypeMask = MVT::getIntVTBitMask(VT);
1003 uint64_t InDemandedMask = (Mask << ShAmt) & TypeMask;
1004 // If any of the demanded bits are produced by the sign extension, we also
1005 // demand the input sign bit.
1006 uint64_t HighBits = (1ULL << ShAmt)-1;
1007 HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
1008 if (HighBits & Mask)
1009 InDemandedMask |= MVT::getIntVTSignBit(VT);
1011 ComputeMaskedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne,
1013 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1014 KnownZero &= TypeMask;
1015 KnownOne &= TypeMask;
1016 KnownZero >>= ShAmt;
1019 // Handle the sign bits.
1020 uint64_t SignBit = MVT::getIntVTSignBit(VT);
1021 SignBit >>= ShAmt; // Adjust to where it is now in the mask.
1023 if (KnownZero & SignBit) {
1024 KnownZero |= HighBits; // New bits are known zero.
1025 } else if (KnownOne & SignBit) {
1026 KnownOne |= HighBits; // New bits are known one.
1030 case ISD::SIGN_EXTEND_INREG: {
1031 MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1033 // Sign extension. Compute the demanded bits in the result that are not
1034 // present in the input.
1035 uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & Mask;
1037 uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
1038 int64_t InputDemandedBits = Mask & MVT::getIntVTBitMask(EVT);
1040 // If the sign extended bits are demanded, we know that the sign
// bit is demanded.
1043 InputDemandedBits |= InSignBit;
1045 ComputeMaskedBits(Op.getOperand(0), InputDemandedBits,
1046 KnownZero, KnownOne, Depth+1);
1047 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1049 // If the sign bit of the input is known set or clear, then we know the
1050 // top bits of the result.
1051 if (KnownZero & InSignBit) { // Input sign bit known clear
1052 KnownZero |= NewBits;
1053 KnownOne &= ~NewBits;
1054 } else if (KnownOne & InSignBit) { // Input sign bit known set
1055 KnownOne |= NewBits;
1056 KnownZero &= ~NewBits;
1057 } else { // Input sign bit unknown
1058 KnownZero &= ~NewBits;
1059 KnownOne &= ~NewBits;
1066 MVT::ValueType VT = Op.getValueType();
1067 unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
1068 KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
1073 if (ISD::isZEXTLoad(Op.Val)) {
1074 LoadSDNode *LD = cast<LoadSDNode>(Op);
1075 MVT::ValueType VT = LD->getLoadedVT();
1076 KnownZero |= ~MVT::getIntVTBitMask(VT) & Mask;
1080 case ISD::ZERO_EXTEND: {
1081 uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
1082 uint64_t NewBits = (~InMask) & Mask;
1083 ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
1085 KnownZero |= NewBits & Mask;
1086 KnownOne &= ~NewBits;
1089 case ISD::SIGN_EXTEND: {
1090 MVT::ValueType InVT = Op.getOperand(0).getValueType();
1091 unsigned InBits = MVT::getSizeInBits(InVT);
1092 uint64_t InMask = MVT::getIntVTBitMask(InVT);
1093 uint64_t InSignBit = 1ULL << (InBits-1);
1094 uint64_t NewBits = (~InMask) & Mask;
1095 uint64_t InDemandedBits = Mask & InMask;
1097 // If any of the sign extended bits are demanded, we know that the sign
// bit is demanded.
1100 InDemandedBits |= InSignBit;
1102 ComputeMaskedBits(Op.getOperand(0), InDemandedBits, KnownZero,
1104 // If the sign bit is known zero or one, the top bits match.
1105 if (KnownZero & InSignBit) {
1106 KnownZero |= NewBits;
1107 KnownOne &= ~NewBits;
1108 } else if (KnownOne & InSignBit) {
1109 KnownOne |= NewBits;
1110 KnownZero &= ~NewBits;
1111 } else { // Otherwise, top bits aren't known.
1112 KnownOne &= ~NewBits;
1113 KnownZero &= ~NewBits;
1117 case ISD::ANY_EXTEND: {
1118 MVT::ValueType VT = Op.getOperand(0).getValueType();
1119 ComputeMaskedBits(Op.getOperand(0), Mask & MVT::getIntVTBitMask(VT),
1120 KnownZero, KnownOne, Depth+1);
1123 case ISD::TRUNCATE: {
1124 ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
1125 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1126 uint64_t OutMask = MVT::getIntVTBitMask(Op.getValueType());
1127 KnownZero &= OutMask;
1128 KnownOne &= OutMask;
1131 case ISD::AssertZext: {
1132 MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1133 uint64_t InMask = MVT::getIntVTBitMask(VT);
1134 ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
1136 KnownZero |= (~InMask) & Mask;
1140 // If either the LHS or the RHS is zero, the result is zero.
1141 ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
1142 ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
1143 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1144 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1146 // The low bits of the output are known to be zero out to the smaller count
1147 // of trailing zero bits known in the LHS and RHS. For example, 8+(X<<3) is
1148 // known to have the low 3 bits clear.
1149 uint64_t KnownZeroOut = std::min(CountTrailingZeros_64(~KnownZero),
1150 CountTrailingZeros_64(~KnownZero2));
1152 KnownZero = (1ULL << KnownZeroOut) - 1;
1157 ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0));
1160 // We know that the top bits of C-X are clear if X contains fewer bits
1161 // than C (i.e. no wrap-around can happen). For example, 20-X is
1162 // positive if we can prove that X is >= 0 and < 16.
1163 MVT::ValueType VT = CLHS->getValueType(0);
1164 if ((CLHS->getValue() & MVT::getIntVTSignBit(VT)) == 0) { // sign bit clear
1165 unsigned NLZ = CountLeadingZeros_64(CLHS->getValue()+1);
1166 uint64_t MaskV = (1ULL << (63-NLZ))-1; // NLZ can't be 64 with no sign bit
1167 MaskV = ~MaskV & MVT::getIntVTBitMask(VT);
1168 ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero, KnownOne, Depth+1);
1170 // If all of the MaskV bits are known to be zero, then we know the output
1171 // top bits are zero, because we now know that the output is from [0-C].
1172 if ((KnownZero & MaskV) == MaskV) {
1173 unsigned NLZ2 = CountLeadingZeros_64(CLHS->getValue());
1174 KnownZero = ~((1ULL << (64-NLZ2))-1) & Mask; // Top bits known zero.
1175 KnownOne = 0; // No one bits known.
1177 KnownZero = KnownOne = 0; // Otherwise, nothing known.
1183 // Allow the target to implement this method for its nodes.
1184 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
1185 case ISD::INTRINSIC_WO_CHAIN:
1186 case ISD::INTRINSIC_W_CHAIN:
1187 case ISD::INTRINSIC_VOID:
1188 computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne);
1194 /// computeMaskedBitsForTargetNode - Determine which of the bits specified
1195 /// in Mask are known to be either zero or one and return them in the
1196 /// KnownZero/KnownOne bitsets.
1197 void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
1199 uint64_t &KnownZero,
1201 unsigned Depth) const {
1202 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1203 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1204 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1205 Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1206 "Should use MaskedValueIsZero if you don't know whether Op"
1207 " is a target node!");
1212 /// ComputeNumSignBits - Return the number of times the sign bit of the
1213 /// register is replicated into the other bits. We know that at least 1 bit
1214 /// is always equal to the sign bit (itself), but other cases can give us
1215 /// information. For example, immediately after an "SRA X, 2", we know that
1216 /// the top 3 bits are all equal to each other, so we return 3.
1217 unsigned TargetLowering::ComputeNumSignBits(SDOperand Op, unsigned Depth) const{
1218 MVT::ValueType VT = Op.getValueType();
1219 assert(MVT::isInteger(VT) && "Invalid VT!");
1220 unsigned VTBits = MVT::getSizeInBits(VT);
1224 return 1; // Limit search depth.
1226 switch (Op.getOpcode()) {
1228 case ISD::AssertSext:
1229 Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
1230 return VTBits-Tmp+1;
1231 case ISD::AssertZext:
1232 Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
1235 case ISD::Constant: {
1236 uint64_t Val = cast<ConstantSDNode>(Op)->getValue();
1237 // If negative, invert the bits, then look at it.
1238 if (Val & MVT::getIntVTSignBit(VT))
1241 // Shift the bits so they are the leading bits in the int64_t.
1244 // Return # leading zeros. We use 'min' here in case Val was zero before
1245 // shifting. We don't want to return '64' as for an i32 "0".
1246 return std::min(VTBits, CountLeadingZeros_64(Val));
1249 case ISD::SIGN_EXTEND:
1250 Tmp = VTBits-MVT::getSizeInBits(Op.getOperand(0).getValueType());
1251 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
1253 case ISD::SIGN_EXTEND_INREG:
1254 // Max of the input and what this extends.
1255 Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
1258 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
1259 return std::max(Tmp, Tmp2);
1262 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
1263 // SRA X, C -> adds C sign bits.
1264 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1265 Tmp += C->getValue();
1266 if (Tmp > VTBits) Tmp = VTBits;
1270 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1271 // shl destroys sign bits.
1272 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
1273 if (C->getValue() >= VTBits || // Bad shift.
1274 C->getValue() >= Tmp) break; // Shifted all sign bits out.
1275 return Tmp - C->getValue();
1280 case ISD::XOR: // NOT is handled here.
1281 // Logical binary ops preserve the number of sign bits.
1282 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
1283 if (Tmp == 1) return 1; // Early out.
1284 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
1285 return std::min(Tmp, Tmp2);
1288 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
1289 if (Tmp == 1) return 1; // Early out.
1290 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
1291 return std::min(Tmp, Tmp2);
1294 // If setcc returns 0/-1, all bits are sign bits.
1295 if (getSetCCResultContents() == ZeroOrNegativeOneSetCCResult)
1300 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1301 unsigned RotAmt = C->getValue() & (VTBits-1);
1303 // Handle rotate right by N like a rotate left by 32-N.
1304 if (Op.getOpcode() == ISD::ROTR)
1305 RotAmt = (VTBits-RotAmt) & (VTBits-1);
1307 // If we aren't rotating out all of the known-in sign bits, return the
1308 // number that are left. This handles rotl(sext(x), 1) for example.
1309 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
1310 if (Tmp > RotAmt+1) return Tmp-RotAmt;
1314 // Add can have at most one carry bit. Thus we know that the output
1315 // is, at worst, one more bit than the inputs.
1316 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
1317 if (Tmp == 1) return 1; // Early out.
1319 // Special case decrementing a value (ADD X, -1):
1320 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
1321 if (CRHS->isAllOnesValue()) {
1322 uint64_t KnownZero, KnownOne;
1323 uint64_t Mask = MVT::getIntVTBitMask(VT);
1324 ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
1326 // If the input is known to be 0 or 1, the output is 0/-1, which is all
1328 if ((KnownZero|1) == Mask)
1331 // If we are subtracting one from a positive number, there is no carry
1332 // out of the result.
1333 if (KnownZero & MVT::getIntVTSignBit(VT))
1337 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
1338 if (Tmp2 == 1) return 1;
1339 return std::min(Tmp, Tmp2)-1;
1343 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
1344 if (Tmp2 == 1) return 1;
1347 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
1348 if (CLHS->getValue() == 0) {
1349 uint64_t KnownZero, KnownOne;
1350 uint64_t Mask = MVT::getIntVTBitMask(VT);
1351 ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
1352 // If the input is known to be 0 or 1, the output is 0/-1, which is all
1354 if ((KnownZero|1) == Mask)
1357 // If the input is known to be positive (the sign bit is known clear),
1358 // the output of the NEG has the same number of sign bits as the input.
1359 if (KnownZero & MVT::getIntVTSignBit(VT))
1362 // Otherwise, we treat this like a SUB.
1365 // Sub can have at most one carry bit. Thus we know that the output
1366 // is, at worst, one more bit than the inputs.
1367 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
1368 if (Tmp == 1) return 1; // Early out.
1369 return std::min(Tmp, Tmp2)-1;
1372 // FIXME: it's tricky to do anything useful for this, but it is an important
1373 // case for targets like X86.
1377 // Handle LOADX separately here. EXTLOAD case will fall through.
1378 if (Op.getOpcode() == ISD::LOAD) {
1379 LoadSDNode *LD = cast<LoadSDNode>(Op);
1380 unsigned ExtType = LD->getExtensionType();
1383 case ISD::SEXTLOAD: // '17' bits known
1384 Tmp = MVT::getSizeInBits(LD->getLoadedVT());
1385 return VTBits-Tmp+1;
1386 case ISD::ZEXTLOAD: // '16' bits known
1387 Tmp = MVT::getSizeInBits(LD->getLoadedVT());
1392 // Allow the target to implement this method for its nodes.
1393 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1394 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1395 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1396 Op.getOpcode() == ISD::INTRINSIC_VOID) {
1397 unsigned NumBits = ComputeNumSignBitsForTargetNode(Op, Depth);
1398 if (NumBits > 1) return NumBits;
1401 // Finally, if we can prove that the top bits of the result are 0's or 1's,
1402 // use this information.
1403 uint64_t KnownZero, KnownOne;
1404 uint64_t Mask = MVT::getIntVTBitMask(VT);
1405 ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
1407 uint64_t SignBit = MVT::getIntVTSignBit(VT);
1408 if (KnownZero & SignBit) { // SignBit is 0
1410 } else if (KnownOne & SignBit) { // SignBit is 1;
1417 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
1418 // the number of identical bits in the top of the input value.
1421 // Return # leading zeros. We use 'min' here in case Val was zero before
1422 // shifting. We don't want to return '64' as for an i32 "0".
1423 return std::min(VTBits, CountLeadingZeros_64(Mask));
1428 /// ComputeNumSignBitsForTargetNode - This method can be implemented by
1429 /// targets that want to expose additional information about sign bits to the
1431 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op,
1432 unsigned Depth) const {
1433 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1434 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1435 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1436 Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1437 "Should use ComputeNumSignBits if you don't know whether Op"
1438 " is a target node!");
1443 /// SimplifySetCC - Try to simplify a setcc built with the specified operands
1444 /// and cc. If it is unable to simplify it, return a null SDOperand.
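/// For example (illustrative): "setult X, 1" is rewritten to "seteq X, 0",
/// and an equality comparison of a zero-extended i8 value against a constant
/// wider than 8 bits folds directly to false.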
SDOperand
1446 TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
1447 ISD::CondCode Cond, bool foldBooleans,
1448 DAGCombinerInfo &DCI) const {
1449 SelectionDAG &DAG = DCI.DAG;
1451 // These setcc operations always fold.
1455 case ISD::SETFALSE2: return DAG.getConstant(0, VT);
1457 case ISD::SETTRUE2: return DAG.getConstant(1, VT);
1460 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val)) {
1461 uint64_t C1 = N1C->getValue();
1462 if (isa<ConstantSDNode>(N0.Val)) {
1463 return DAG.FoldSetCC(VT, N0, N1, Cond);
1465 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
1466 // equality comparison, then we're just comparing whether X itself is
1468 if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
1469 N0.getOperand(0).getOpcode() == ISD::CTLZ &&
1470 N0.getOperand(1).getOpcode() == ISD::Constant) {
1471 unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
1472 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1473 ShAmt == Log2_32(MVT::getSizeInBits(N0.getValueType()))) {
1474 if ((C1 == 0) == (Cond == ISD::SETEQ)) {
1475 // (srl (ctlz x), 5) == 0 -> X != 0
1476 // (srl (ctlz x), 5) != 1 -> X != 0
1479 // (srl (ctlz x), 5) != 0 -> X == 0
1480 // (srl (ctlz x), 5) == 1 -> X == 0
1483 SDOperand Zero = DAG.getConstant(0, N0.getValueType());
1484 return DAG.getSetCC(VT, N0.getOperand(0).getOperand(0),
1489 // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
1490 if (N0.getOpcode() == ISD::ZERO_EXTEND) {
1491 unsigned InSize = MVT::getSizeInBits(N0.getOperand(0).getValueType());
1493 // If the comparison constant has bits in the upper part, the
1494 // zero-extended value could never match.
1495 if (C1 & (~0ULL << InSize)) {
1496 unsigned VSize = MVT::getSizeInBits(N0.getValueType());
1500 case ISD::SETEQ: return DAG.getConstant(0, VT);
1503 case ISD::SETNE: return DAG.getConstant(1, VT);
1506 // True if the sign bit of C1 is set.
1507 return DAG.getConstant((C1 & (1ULL << (VSize-1))) != 0, VT);
1510 // True if the sign bit of C1 isn't set.
1511 return DAG.getConstant((C1 & (1ULL << (VSize-1))) == 0, VT);
1517 // Otherwise, we can perform the comparison with the low bits.
1525 return DAG.getSetCC(VT, N0.getOperand(0),
1526 DAG.getConstant(C1, N0.getOperand(0).getValueType()),
1529 break; // todo, be more careful with signed comparisons
1531 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1532 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1533 MVT::ValueType ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
1534 unsigned ExtSrcTyBits = MVT::getSizeInBits(ExtSrcTy);
1535 MVT::ValueType ExtDstTy = N0.getValueType();
1536 unsigned ExtDstTyBits = MVT::getSizeInBits(ExtDstTy);
1538 // If the extended part has any inconsistent bits, it cannot ever
1539 // compare equal. In other words, they have to be all ones or all
// zeros.
uint64_t ExtBits =
1542 (~0ULL >> (64-ExtSrcTyBits)) & (~0ULL << (ExtDstTyBits-1));
1543 if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
1544 return DAG.getConstant(Cond == ISD::SETNE, VT);
1547 MVT::ValueType Op0Ty = N0.getOperand(0).getValueType();
1548 if (Op0Ty == ExtSrcTy) {
1549 ZextOp = N0.getOperand(0);
1551 int64_t Imm = ~0ULL >> (64-ExtSrcTyBits);
1552 ZextOp = DAG.getNode(ISD::AND, Op0Ty, N0.getOperand(0),
1553 DAG.getConstant(Imm, Op0Ty));
1555 if (!DCI.isCalledByLegalizer())
1556 DCI.AddToWorklist(ZextOp.Val);
1557 // Otherwise, make this a use of a zext.
1558 return DAG.getSetCC(VT, ZextOp,
1559 DAG.getConstant(C1 & (~0ULL>>(64-ExtSrcTyBits)),
1562 } else if ((N1C->getValue() == 0 || N1C->getValue() == 1) &&
1563 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1565 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
1566 if (N0.getOpcode() == ISD::SETCC) {
1567 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getValue() != 1);
1571 // Invert the condition.
1572 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
1573 CC = ISD::getSetCCInverse(CC,
1574 MVT::isInteger(N0.getOperand(0).getValueType()));
1575 return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC);
1578 if ((N0.getOpcode() == ISD::XOR ||
1579 (N0.getOpcode() == ISD::AND &&
1580 N0.getOperand(0).getOpcode() == ISD::XOR &&
1581 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
1582 isa<ConstantSDNode>(N0.getOperand(1)) &&
1583 cast<ConstantSDNode>(N0.getOperand(1))->getValue() == 1) {
1584 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
1585 // can only do this if the top bits are known zero.
1586 if (MaskedValueIsZero(N0, MVT::getIntVTBitMask(N0.getValueType())-1)){
1587 // Okay, get the un-inverted input value.
1589 if (N0.getOpcode() == ISD::XOR)
1590 Val = N0.getOperand(0);
1592 assert(N0.getOpcode() == ISD::AND &&
1593 N0.getOperand(0).getOpcode() == ISD::XOR);
1594 // ((X^1)&1)^1 -> X & 1
1595 Val = DAG.getNode(ISD::AND, N0.getValueType(),
1596 N0.getOperand(0).getOperand(0),
1599 return DAG.getSetCC(VT, Val, N1,
1600 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1605 uint64_t MinVal, MaxVal;
1606 unsigned OperandBitSize = MVT::getSizeInBits(N1C->getValueType(0));
1607 if (ISD::isSignedIntSetCC(Cond)) {
1608 MinVal = 1ULL << (OperandBitSize-1);
1609 if (OperandBitSize != 1) // Avoid X >> 64, which is undefined.
1610 MaxVal = ~0ULL >> (65-OperandBitSize);
1615 MaxVal = ~0ULL >> (64-OperandBitSize);
    // Canonicalize GE/LE comparisons to use GT/LT comparisons.
    if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
      if (C1 == MinVal) return DAG.getConstant(1, VT);   // X >= MIN --> true
      --C1;                                          // X >= C0 --> X > (C0-1)
      return DAG.getSetCC(VT, N0, DAG.getConstant(C1, N1.getValueType()),
                          (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
    }

    if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
      if (C1 == MaxVal) return DAG.getConstant(1, VT);   // X <= MAX --> true
      ++C1;                                          // X <= C0 --> X < (C0+1)
      return DAG.getSetCC(VT, N0, DAG.getConstant(C1, N1.getValueType()),
                          (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
    }

    if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
      return DAG.getConstant(0, VT);      // X < MIN --> false
    if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
      return DAG.getConstant(1, VT);      // X >= MIN --> true
    if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
      return DAG.getConstant(0, VT);      // X > MAX --> false
    if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
      return DAG.getConstant(1, VT);      // X <= MAX --> true

    // Canonicalize setgt X, Min --> setne X, Min
    if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
      return DAG.getSetCC(VT, N0, N1, ISD::SETNE);
    // Canonicalize setlt X, Max --> setne X, Max
    if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
      return DAG.getSetCC(VT, N0, N1, ISD::SETNE);

    // If we have setult X, 1, turn it into seteq X, 0
    if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
      return DAG.getSetCC(VT, N0, DAG.getConstant(MinVal, N0.getValueType()),
                          ISD::SETEQ);
    // If we have setugt X, Max-1, turn it into seteq X, Max
    else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
      return DAG.getSetCC(VT, N0, DAG.getConstant(MaxVal, N0.getValueType()),
                          ISD::SETEQ);
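    // For example, the rules above rewrite (setge X, 100) as (setgt X, 99) and
    // (setult X, 1) as (seteq X, 0), so later code only has to reason about
    // the GT/LT and EQ/NE forms.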
    // If we have "setcc X, C0", check to see if we can shrink the immediate
    // by changing cc.

    // SETUGT X, SINTMAX  -> SETLT X, 0
    if (Cond == ISD::SETUGT && OperandBitSize != 1 &&
        C1 == (~0ULL >> (65-OperandBitSize)))
      return DAG.getSetCC(VT, N0, DAG.getConstant(0, N1.getValueType()),
                          ISD::SETLT);

    // FIXME: Implement the rest of these.

    // Fold bit comparisons when we can.
    if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
        VT == N0.getValueType() && N0.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS =
                  dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
        if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0  -->  (X & 8) >> 3
          // Perform the xform if the AND RHS is a single bit.
          if (isPowerOf2_64(AndRHS->getValue())) {
            return DAG.getNode(ISD::SRL, VT, N0,
                               DAG.getConstant(Log2_64(AndRHS->getValue()),
                                               getShiftAmountTy()));
          }
        } else if (Cond == ISD::SETEQ && C1 == AndRHS->getValue()) {
          // (X & 8) == 8  -->  (X & 8) >> 3
          // Perform the xform if C1 is a single bit.
          if (isPowerOf2_64(C1)) {
            return DAG.getNode(ISD::SRL, VT, N0,
                          DAG.getConstant(Log2_64(C1), getShiftAmountTy()));
          }
        }
      }
  } else if (isa<ConstantSDNode>(N0.Val)) {
    // Ensure that the constant occurs on the RHS.
    return DAG.getSetCC(VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
  }

  if (isa<ConstantFPSDNode>(N0.Val)) {
    // Constant fold or commute setcc.
    SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond);
    if (O.Val) return O;
  }

  if (N0 == N1) {
    // We can always fold X == X for integer setcc's.
    if (MVT::isInteger(N0.getValueType()))
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    unsigned UOF = ISD::getUnorderedFlavor(Cond);
    if (UOF == 2)   // FP operators that are undefined on NaNs.
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
      return DAG.getConstant(UOF, VT);
    // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
    // if it is not already.
    ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
    if (NewCond != Cond)
      return DAG.getSetCC(VT, N0, N1, NewCond);
  }
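  // Illustrating the N0 == N1 case above: an integer (seteq X, X) becomes the
  // constant 1, a floating-point (setolt X, X) becomes 0, and (setoeq X, X)
  // cannot be folded (X may be NaN), so it is relaxed to (seto X, X).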
  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      MVT::isInteger(N0.getValueType())) {
    if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
        N0.getOpcode() == ISD::XOR) {
      // Simplify (X+Y) == (X+Z) -->  Y == Z
      if (N0.getOpcode() == N1.getOpcode()) {
        if (N0.getOperand(0) == N1.getOperand(0))
          return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(1), Cond);
        if (N0.getOperand(1) == N1.getOperand(1))
          return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(0), Cond);
        if (DAG.isCommutativeBinOp(N0.getOpcode())) {
          // If X op Y == Y op X, try other combinations.
          if (N0.getOperand(0) == N1.getOperand(1))
            return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(0), Cond);
          if (N0.getOperand(1) == N1.getOperand(0))
            return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(1), Cond);
        }
      }
      if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
        if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          // Turn (X+C1) == C2 --> X == C2-C1
          if (N0.getOpcode() == ISD::ADD && N0.Val->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(0),
                            DAG.getConstant(RHSC->getValue()-LHSR->getValue(),
                                            N0.getValueType()), Cond);
          }

          // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
          if (N0.getOpcode() == ISD::XOR)
            // If we know that all of the inverted bits are zero, don't bother
            // performing the inversion.
            if (MaskedValueIsZero(N0.getOperand(0), ~LHSR->getValue()))
              return DAG.getSetCC(VT, N0.getOperand(0),
                          DAG.getConstant(LHSR->getValue()^RHSC->getValue(),
                                          N0.getValueType()), Cond);
        }

        // Turn (C1-X) == C2 --> X == C1-C2
        if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
          if (N0.getOpcode() == ISD::SUB && N0.Val->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(1),
                            DAG.getConstant(SUBC->getValue()-RHSC->getValue(),
                                            N0.getValueType()), Cond);
          }
        }
      }
      // Simplify (X+Z) == X -->  Z == 0
      if (N0.getOperand(0) == N1)
        return DAG.getSetCC(VT, N0.getOperand(1),
                            DAG.getConstant(0, N0.getValueType()), Cond);
      if (N0.getOperand(1) == N1) {
        if (DAG.isCommutativeBinOp(N0.getOpcode()))
          return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(0, N0.getValueType()), Cond);
        else {
          assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
          // (Z-X) == X  --> Z == X<<1
          SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(),
                                     N1,
                                     DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.Val);
          return DAG.getSetCC(VT, N0.getOperand(0), SH, Cond);
        }
      }
    }
    if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
        N1.getOpcode() == ISD::XOR) {
      // Simplify  X == (X+Z) -->  Z == 0
      if (N1.getOperand(0) == N0) {
        return DAG.getSetCC(VT, N1.getOperand(1),
                            DAG.getConstant(0, N1.getValueType()), Cond);
      } else if (N1.getOperand(1) == N0) {
        if (DAG.isCommutativeBinOp(N1.getOpcode())) {
          return DAG.getSetCC(VT, N1.getOperand(0),
                              DAG.getConstant(0, N1.getValueType()), Cond);
        } else {
          assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
          // X == (Z-X)  --> X<<1 == Z
          SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0,
                                     DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.Val);
          return DAG.getSetCC(VT, SH, N1.getOperand(0), Cond);
        }
      }
    }
  }
  // Fold away ALL boolean setcc's.
  SDOperand Temp;
  if (N0.getValueType() == MVT::i1 && foldBooleans) {
    switch (Cond) {
    default: assert(0 && "Unknown integer setcc!");
    case ISD::SETEQ:  // X == Y  -->  (X^Y)^1
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      N0 = DAG.getNode(ISD::XOR, MVT::i1, Temp, DAG.getConstant(1, MVT::i1));
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETNE:  // X != Y  -->  (X^Y)
      N0 = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      break;
    case ISD::SETGT:  // X >s Y  -->  X == 0 & Y == 1  -->  X^1 & Y
    case ISD::SETULT: // X <u Y  -->  X == 0 & Y == 1  -->  X^1 & Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETLT:  // X <s Y  -->  X == 1 & Y == 0  -->  Y^1 & X
    case ISD::SETUGT: // X >u Y  -->  X == 1 & Y == 0  -->  Y^1 & X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N0, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETULE: // X <=u Y  -->  X == 0 | Y == 1  -->  X^1 | Y
    case ISD::SETGE:  // X >=s Y  -->  X == 0 | Y == 1  -->  X^1 | Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETUGE: // X >=u Y  -->  X == 1 | Y == 0  -->  Y^1 | X
    case ISD::SETLE:  // X <=s Y  -->  X == 1 | Y == 0  -->  Y^1 | X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N0, Temp);
      break;
    }
    if (VT != MVT::i1) {
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(N0.Val);
      // FIXME: If running after legalize, we probably can't do this.
      N0 = DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
    }
    return N0;
  }

  // Could not fold it.
  return SDOperand();
}
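// A rough sketch of the i1 folding performed at the end of the routine above:
// with boolean operands, (seteq X, Y) is rewritten as (X^Y)^1 and
// (setne X, Y) as X^Y, while the remaining comparisons reduce to an AND or OR
// of one operand with the inverse of the other, zero-extended when VT is
// wider than i1.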
SDOperand TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  // Default implementation: no optimization.
  return SDOperand();
}
//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//
TargetLowering::ConstraintType
TargetLowering::getConstraintType(const std::string &Constraint) const {
  // FIXME: lots more standard ones to handle.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    case 'o': // offsetable
    case 'V': // not offsetable
      return C_Memory;
    case 'i': // Simple Integer or Relocatable Constant
    case 'n': // Simple Integer
    case 's': // Relocatable Constant
    case 'X': // Allow ANY value.
    case 'I': // Target registers.
      return C_Other;
    }
  }

  if (Constraint.size() > 1 && Constraint[0] == '{' &&
      Constraint[Constraint.size()-1] == '}')
    return C_Register;
  return C_Unknown;
}
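// For example, with the default table above "r" classifies as C_RegisterClass,
// "o" and "V" as C_Memory, "i"/"n"/"s"/"X" as C_Other, and a brace-enclosed
// name such as "{r0}" as C_Register (the register name here is purely
// illustrative); anything unrecognized is C_Unknown.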
/// isOperandValidForConstraint - Return the specified operand (possibly
/// modified) if the specified SDOperand is valid for the specified target
/// constraint letter, otherwise return null.
SDOperand TargetLowering::isOperandValidForConstraint(SDOperand Op,
                                                      char ConstraintLetter,
                                                      SelectionDAG &DAG) {
  switch (ConstraintLetter) {
  default: break;
  case 'i': // Simple Integer or Relocatable Constant
  case 'n': // Simple Integer
  case 's': // Relocatable Constant
  case 'X': // Allows any operand.
    // These are okay if the operand is either a global variable address or a
    // simple immediate value.  If we have one of these, map to the TargetXXX
    // version so that the value itself doesn't get selected.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      // Simple constants are not allowed for 's'.
      if (ConstraintLetter != 's')
        return DAG.getTargetConstant(C->getValue(), Op.getValueType());
    }
    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      if (ConstraintLetter != 'n')
        return DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getValueType(),
                                          GA->getOffset());
    }
    break;
  }
  return SDOperand(0,0);
}
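// Illustrative uses of the routine above: for the constraint 'i' a
// ConstantSDNode of 42 is rewritten to a TargetConstant of 42; for 's' a plain
// constant is rejected but a GlobalAddressSDNode becomes a TargetGlobalAddress;
// for 'n' the global is rejected instead.  Operands matching none of the cases
// yield a null SDOperand.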
std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  return std::vector<unsigned>();
}
std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT::ValueType VT) const {
  if (Constraint[0] != '{')
    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  std::string RegName(Constraint.begin()+1, Constraint.end()-1);

  // Figure out which register class contains this reg.
  const MRegisterInfo *RI = TM.getRegisterInfo();
  for (MRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    bool isLegal = false;
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (isTypeLegal(*I)) {
        isLegal = true;
        break;
      }
    }

    if (!isLegal) continue;

    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
         I != E; ++I) {
      if (StringsEqualNoCase(RegName, RI->get(*I).Name))
        return std::make_pair(*I, RC);
    }
  }

  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
}
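// Example (with a hypothetical register name): the constraint "{r3}" is
// stripped to "r3" and matched, case-insensitively, against the names in every
// register class whose value types are legal for this target; the first match
// is returned along with its register class.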
//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           const Type *Ty) const {
  // The default implementation of this implements a conservative RISCy
  // r+r and r+i addressing mode.

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r,
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:  // Don't allow n * r
    return false;
  }

  return true;
}
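// Under the default rules above, "reg", "reg + reg", "reg + imm16" and "2*reg"
// are accepted, while a global as the base address, "reg + reg + imm", and any
// scale other than 0, 1 or 2 are rejected.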
// Magic for divide replacement

struct ms {
  int64_t m;  // magic number
  int64_t s;  // shift amount
};

struct mu {
  uint64_t m; // magic number
  int64_t a;  // add indicator
  int64_t s;  // shift amount
};

/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
static ms magic32(int32_t d) {
  int32_t p;
  uint32_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint32_t two31 = 0x80000000U;
  struct ms mag;

  ad = abs(d);
  t = two31 + ((uint32_t)d >> 31);
  anc = t - 1 - t%ad;   // absolute value of nc
  p = 31;               // initialize p
  q1 = two31/anc;       // initialize q1 = 2p/abs(nc)
  r1 = two31 - q1*anc;  // initialize r1 = rem(2p,abs(nc))
  q2 = two31/ad;        // initialize q2 = 2p/abs(d)
  r2 = two31 - q2*ad;   // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = 2*q1;        // update q1 = 2p/abs(nc)
    r1 = 2*r1;        // update r1 = rem(2p/abs(nc))
    if (r1 >= anc) {  // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;        // update q2 = 2p/abs(d)
    r2 = 2*r2;        // update r2 = rem(2p/abs(d))
    if (r2 >= ad) {   // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = (int32_t)(q2 + 1); // make sure to sign extend
  if (d < 0) mag.m = -mag.m; // resulting magic number
  mag.s = p - 32;            // resulting shift
  return mag;
}
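// As a concrete data point (values as tabulated in Hacker's Delight, quoted
// here for illustration): magic32(7) should produce m = 0x92492493 (negative
// when interpreted as a signed 32-bit value) and s = 2.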
/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
static mu magicu32(uint32_t d) {
  int32_t p;
  uint32_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;               // initialize "add" indicator
  nc = - 1 - (-d)%d;
  p = 31;                   // initialize p
  q1 = 0x80000000/nc;       // initialize q1 = 2p/nc
  r1 = 0x80000000 - q1*nc;  // initialize r1 = rem(2p,nc)
  q2 = 0x7FFFFFFF/d;        // initialize q2 = (2p-1)/d
  r2 = 0x7FFFFFFF - q2*d;   // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1 >= nc - r1) {
      q1 = 2*q1 + 1;  // update q1
      r1 = 2*r1 - nc; // update r1
    } else {
      q1 = 2*q1;      // update q1
      r1 = 2*r1;      // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFF) magu.a = 1;
      q2 = 2*q2 + 1;     // update q2
      r2 = 2*r2 + 1 - d; // update r2
    } else {
      if (q2 >= 0x80000000) magu.a = 1;
      q2 = 2*q2;     // update q2
      r2 = 2*r2 + 1; // update r2
    }
    delta = d - 1 - r2;
  } while (p < 64 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1;  // resulting magic number
  magu.s = p - 32;  // resulting shift
  return magu;
}
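// Likewise for the unsigned case (again per the published Hacker's Delight
// tables, quoted for illustration): magicu32(7) should yield m = 0x24924925
// with a = 1 and s = 3.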
/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
static ms magic64(int64_t d) {
  int64_t p;
  uint64_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint64_t two63 = 9223372036854775808ULL; // 2^63
  struct ms mag;

  ad = d >= 0 ? d : -d;
  t = two63 + ((uint64_t)d >> 63);
  anc = t - 1 - t%ad;   // absolute value of nc
  p = 63;               // initialize p
  q1 = two63/anc;       // initialize q1 = 2p/abs(nc)
  r1 = two63 - q1*anc;  // initialize r1 = rem(2p,abs(nc))
  q2 = two63/ad;        // initialize q2 = 2p/abs(d)
  r2 = two63 - q2*ad;   // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = 2*q1;        // update q1 = 2p/abs(nc)
    r1 = 2*r1;        // update r1 = rem(2p/abs(nc))
    if (r1 >= anc) {  // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;        // update q2 = 2p/abs(d)
    r2 = 2*r2;        // update r2 = rem(2p/abs(d))
    if (r2 >= ad) {   // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = q2 + 1;
  if (d < 0) mag.m = -mag.m; // resulting magic number
  mag.s = p - 64;            // resulting shift
  return mag;
}
/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
static mu magicu64(uint64_t d)
{
  int64_t p;
  uint64_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;               // initialize "add" indicator
  nc = - 1 - (-d)%d;
  p = 63;                   // initialize p
  q1 = 0x8000000000000000ull/nc;       // initialize q1 = 2p/nc
  r1 = 0x8000000000000000ull - q1*nc;  // initialize r1 = rem(2p,nc)
  q2 = 0x7FFFFFFFFFFFFFFFull/d;        // initialize q2 = (2p-1)/d
  r2 = 0x7FFFFFFFFFFFFFFFull - q2*d;   // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1 >= nc - r1) {
      q1 = 2*q1 + 1;  // update q1
      r1 = 2*r1 - nc; // update r1
    } else {
      q1 = 2*q1;      // update q1
      r1 = 2*r1;      // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFFFFFFFFFFull) magu.a = 1;
      q2 = 2*q2 + 1;     // update q2
      r2 = 2*r2 + 1 - d; // update r2
    } else {
      if (q2 >= 0x8000000000000000ull) magu.a = 1;
      q2 = 2*q2;     // update q2
      r2 = 2*r2 + 1; // update r2
    }
    delta = d - 1 - r2;
  } while (p < 128 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1;  // resulting magic number
  magu.s = p - 64;  // resulting shift
  return magu;
}
/// BuildSDIV - Given an ISD::SDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                    std::vector<SDNode*>* Created) const {
  MVT::ValueType VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDOperand();       // BuildSDIV only operates on i32 or i64
  if (!isOperationLegal(ISD::MULHS, VT))
    return SDOperand();       // Make sure the target supports MULHS.

  int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSignExtended();
  ms magics = (VT == MVT::i32) ? magic32(d) : magic64(d);

  // Multiply the numerator (operand 0) by the magic value.
  SDOperand Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
                            DAG.getConstant(magics.m, VT));
  // If d > 0 and m < 0, add the numerator.
  if (d > 0 && magics.m < 0) {
    Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.Val);
  }
  // If d < 0 and m > 0, subtract the numerator.
  if (d < 0 && magics.m > 0) {
    Q = DAG.getNode(ISD::SUB, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.Val);
  }
  // Shift right algebraic if shift value is nonzero.
  if (magics.s > 0) {
    Q = DAG.getNode(ISD::SRA, VT, Q,
                    DAG.getConstant(magics.s, getShiftAmountTy()));
    if (Created)
      Created->push_back(Q.Val);
  }
  // Extract the sign bit and add it to the quotient.
  SDOperand T =
    DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(MVT::getSizeInBits(VT)-1,
                                                 getShiftAmountTy()));
  if (Created)
    Created->push_back(T.Val);
  return DAG.getNode(ISD::ADD, VT, Q, T);
}
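// Rough shape of the DAG built above for an i32 "sdiv X, 7", assuming the
// magic32(7) values quoted earlier: MULHS(X, 0x92492493), then an ADD of the
// numerator (d > 0, m < 0), an SRA by 2, and finally the sign bit (SRL by 31)
// added back into the quotient.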
/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                    std::vector<SDNode*>* Created) const {
  MVT::ValueType VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDOperand();       // BuildUDIV only operates on i32 or i64
  if (!isOperationLegal(ISD::MULHU, VT))
    return SDOperand();       // Make sure the target supports MULHU.

  uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getValue();
  mu magics = (VT == MVT::i32) ? magicu32(d) : magicu64(d);

  // Multiply the numerator (operand 0) by the magic value.
  SDOperand Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
                            DAG.getConstant(magics.m, VT));
  if (Created)
    Created->push_back(Q.Val);

  if (magics.a == 0) {
    return DAG.getNode(ISD::SRL, VT, Q,
                       DAG.getConstant(magics.s, getShiftAmountTy()));
  } else {
    SDOperand NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q);
    if (Created)
      Created->push_back(NPQ.Val);
    NPQ = DAG.getNode(ISD::SRL, VT, NPQ,
                      DAG.getConstant(1, getShiftAmountTy()));
    if (Created)
      Created->push_back(NPQ.Val);
    NPQ = DAG.getNode(ISD::ADD, VT, NPQ, Q);
    if (Created)
      Created->push_back(NPQ.Val);
    return DAG.getNode(ISD::SRL, VT, NPQ,
                       DAG.getConstant(magics.s-1, getShiftAmountTy()));
  }
}
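// Rough shape of the DAG built above for an i32 "udiv X, 7", assuming the
// magicu32(7) values quoted earlier (a = 1, s = 3): Q = MULHU(X, 0x24924925),
// NPQ = SRL(SUB(X, Q), 1), and the result is SRL(ADD(NPQ, Q), 2).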