//===-- TargetLowering.cpp - Implement the TargetLowering class ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
/// InitLibcallNames - Set default libcall names.
static void InitLibcallNames(const char **Names) {
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::NEG_F32] = "__negsf2";
  Names[RTLIB::NEG_F64] = "__negdf2";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
  Names[RTLIB::OEQ_F32] = "__eqsf2";
  Names[RTLIB::OEQ_F64] = "__eqdf2";
  Names[RTLIB::UNE_F32] = "__nesf2";
  Names[RTLIB::UNE_F64] = "__nedf2";
  Names[RTLIB::OGE_F32] = "__gesf2";
  Names[RTLIB::OGE_F64] = "__gedf2";
  Names[RTLIB::OLT_F32] = "__ltsf2";
  Names[RTLIB::OLT_F64] = "__ltdf2";
  Names[RTLIB::OLE_F32] = "__lesf2";
  Names[RTLIB::OLE_F64] = "__ledf2";
  Names[RTLIB::OGT_F32] = "__gtsf2";
  Names[RTLIB::OGT_F64] = "__gtdf2";
  Names[RTLIB::UO_F32] = "__unordsf2";
  Names[RTLIB::UO_F64] = "__unorddf2";
  Names[RTLIB::O_F32] = "__unordsf2";
  Names[RTLIB::O_F64] = "__unorddf2";
}
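
// A target overrides any of these defaults from its own TargetLowering
// subclass constructor. A minimal sketch, assuming the setLibcallName hook
// declared in TargetLowering.h (the target and routine name here are
// hypothetical):
//
//   MyTargetLowering::MyTargetLowering(TargetMachine &TM)
//     : TargetLowering(TM) {
//     // Use this platform's own routine for 32-bit signed remainder.
//     setLibcallName(RTLIB::SREM_I32, "__my_modsi3");
//   }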
/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  // Note: memset would store only the low byte of SETCC_INVALID into each
  // entry, so initialize the array with an explicit loop instead.
  for (unsigned i = 0; i != RTLIB::UNKNOWN_LIBCALL; ++i)
    CCs[i] = ISD::SETCC_INVALID;
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
}
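
// Illustrative reading of the two tables together (a sketch, not code in
// this file): a soft-float "a < b" on f32 lowers to a call to
// getLibcallName(RTLIB::OLT_F32), i.e. "__ltsf2", and the call's integer
// result R is then tested as (setcc R, 0, CmpLibcallCCs[RTLIB::OLT_F32]),
// which is (setcc R, 0, SETLT) with the defaults above.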
TargetLowering::TargetLowering(TargetMachine &tm)
  : TM(tm), TD(TM.getTargetData()) {
  assert(ISD::BUILTIN_OP_END <= 156 &&
         "Fixed size array in TargetLowering is not large enough!");
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadXActions, 0, sizeof(LoadXActions));
  memset(&StoreXActions, 0, sizeof(StoreXActions));
  // Initialize all indexed load / store to expand.
  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand);
      setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand);
    }
  }

  IsLittleEndian = TD->isLittleEndian();
  UsesGlobalOffsetTable = false;
  ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD->getIntPtrType());
  ShiftAmtHandling = Undefined;
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0,
         sizeof(TargetDAGCombineArray)/sizeof(TargetDAGCombineArray[0]));
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  allowUnalignedMemoryAccesses = false;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  SelectIsExpensive = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  StackPointerRegisterToSaveRestore = 0;
  ExceptionPointerRegister = 0;
  ExceptionSelectorRegister = 0;
  SchedPreferenceInfo = SchedulingForLatency;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  IfCvtBlockSizeLimit = 2;

  InitLibcallNames(LibcallRoutineNames);
  InitCmpLibcallCCs(CmpLibcallCCs);
}
TargetLowering::~TargetLowering() {}
/// SetValueTypeAction - Set the action for a particular value type. This
/// assumes an action has not already been set for this value type.
static void SetValueTypeAction(MVT::ValueType VT,
                               TargetLowering::LegalizeAction Action,
                               TargetLowering &TLI,
                               MVT::ValueType *TransformToType,
                               TargetLowering::ValueTypeActionImpl &ValueTypeActions) {
  ValueTypeActions.setTypeAction(VT, Action);
  if (Action == TargetLowering::Promote) {
    MVT::ValueType PromoteTo;
    if (VT == MVT::f32)
      PromoteTo = MVT::f64;
    else {
      unsigned LargerReg = VT+1;
      while (!TLI.isTypeLegal((MVT::ValueType)LargerReg)) {
        ++LargerReg;
        assert(MVT::isInteger((MVT::ValueType)LargerReg) &&
               "Nothing to promote to??");
      }
      PromoteTo = (MVT::ValueType)LargerReg;
    }

    assert(MVT::isInteger(VT) == MVT::isInteger(PromoteTo) &&
           MVT::isFloatingPoint(VT) == MVT::isFloatingPoint(PromoteTo) &&
           "Can only promote from int->int or fp->fp!");
    assert(VT < PromoteTo && "Must promote to a larger type!");
    TransformToType[VT] = PromoteTo;
  } else if (Action == TargetLowering::Expand) {
    // f32 and f64 are each expanded to a corresponding integer type of the
    // same size.
    if (VT == MVT::f32)
      TransformToType[VT] = MVT::i32;
    else if (VT == MVT::f64)
      TransformToType[VT] = MVT::i64;
    else {
      assert((VT == MVT::Vector || MVT::isInteger(VT)) && VT > MVT::i8 &&
             "Cannot expand this type: target must support SOME integer reg!");
      // Expand to the next smaller integer type!
      TransformToType[VT] = (MVT::ValueType)(VT-1);
    }
  }
}
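
// Example of the Promote path (assuming a target whose smallest legal
// integer type is i32): for VT == MVT::i16 the loop above walks the MVT enum
// upward from i16 to the first legal type and records
// TransformToType[MVT::i16] = MVT::i32, so i16 arithmetic is performed in an
// i32 register.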
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
  assert(MVT::LAST_VALUETYPE <= 32 &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to one.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i)
    NumElementsForVT[i] = 1;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::i128;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  unsigned ExpandedReg = LargestIntReg; ++LargestIntReg;
  for (++ExpandedReg; MVT::isInteger((MVT::ValueType)ExpandedReg); ++ExpandedReg)
    NumElementsForVT[ExpandedReg] = 2*NumElementsForVT[ExpandedReg-1];

  // Inspect all of the possible ValueTypes, deciding how to process each one.
  for (unsigned IntReg = MVT::i1; IntReg <= MVT::i128; ++IntReg)
    // If we are expanding this type, expand it!
    if (getNumElements((MVT::ValueType)IntReg) != 1)
      SetValueTypeAction((MVT::ValueType)IntReg, Expand, *this, TransformToType,
                         ValueTypeActions);
    else if (!isTypeLegal((MVT::ValueType)IntReg))
      // Otherwise, if we don't have native support, we must promote to a
      // larger type.
      SetValueTypeAction((MVT::ValueType)IntReg, Promote, *this,
                         TransformToType, ValueTypeActions);
    else
      TransformToType[(MVT::ValueType)IntReg] = (MVT::ValueType)IntReg;

  // If the target does not have native F64 support, expand it to I64. We will
  // be generating soft float library calls. If the target does not have native
  // support for F32, promote it to F64 if it is legal. Otherwise, expand it to
  // I32.
  if (isTypeLegal(MVT::f64))
    TransformToType[MVT::f64] = MVT::f64;
  else {
    NumElementsForVT[MVT::f64] = NumElementsForVT[MVT::i64];
    SetValueTypeAction(MVT::f64, Expand, *this, TransformToType,
                       ValueTypeActions);
  }
  if (isTypeLegal(MVT::f32))
    TransformToType[MVT::f32] = MVT::f32;
  else if (isTypeLegal(MVT::f64))
    SetValueTypeAction(MVT::f32, Promote, *this, TransformToType,
                       ValueTypeActions);
  else {
    NumElementsForVT[MVT::f32] = NumElementsForVT[MVT::i32];
    SetValueTypeAction(MVT::f32, Expand, *this, TransformToType,
                       ValueTypeActions);
  }

  // Set MVT::Vector to always be Expanded.
  SetValueTypeAction(MVT::Vector, Expand, *this, TransformToType,
                     ValueTypeActions);

  // Loop over all of the legal vector value types, specifying an identity type
  // transformation.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
    if (isTypeLegal((MVT::ValueType)i))
      TransformToType[i] = (MVT::ValueType)i;
  }
}
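
// Concrete reading of the derived tables (assuming i32 is the largest
// integer register class): NumElementsForVT[MVT::i64] == 2 and
// NumElementsForVT[MVT::i128] == 4, so an i64 value occupies two i32
// registers and the integer loop above marks i64 and i128 as Expand.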
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}
/// getVectorTypeBreakdown - Packed types are broken down into some number of
/// legal first class types. For example, <8 x float> maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
///
/// This method returns the number and type of the resultant breakdown.
///
unsigned TargetLowering::getVectorTypeBreakdown(const VectorType *PTy,
                                                MVT::ValueType &PTyElementVT,
                                      MVT::ValueType &PTyLegalElementVT) const {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = PTy->getNumElements();
  MVT::ValueType EltTy = getValueType(PTy->getElementType());

  unsigned NumVectorRegs = 1;

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(getVectorType(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  MVT::ValueType VT = getVectorType(EltTy, NumElts);
  if (!isTypeLegal(VT))
    VT = EltTy;
  PTyElementVT = VT;

  MVT::ValueType DestVT = getTypeToTransformTo(VT);
  PTyLegalElementVT = DestVT;
  if (DestVT < VT) {
    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(MVT::getSizeInBits(VT)/MVT::getSizeInBits(DestVT));
  } else {
    // Otherwise, promotion or legal types use the same number of registers as
    // the vector decimated to the appropriate level.
    return NumVectorRegs;
  }
}
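
// Worked example for the doc comment above (assuming v4f32 is legal but
// v8f32 is not): for <8 x float>, the while loop halves NumElts from 8 to 4
// and doubles NumVectorRegs to 2; v4f32 is legal, so PTyElementVT and
// PTyLegalElementVT are both MVT::v4f32 and the function returns 2.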
//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//
/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer. If so, check to see if there
/// are any bits set in the constant that are not demanded. If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op,
                                                        uint64_t Demanded) {
  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Op.getOpcode()) {
  default: break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if ((~Demanded & C->getValue()) != 0) {
        MVT::ValueType VT = Op.getValueType();
        SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                    DAG.getConstant(Demanded & C->getValue(),
                                                    VT));
        return CombineTo(Op, New);
      }
    break;
  }
  return false;
}
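
// For example (illustrative values): if Op is (or X, 0x00FF00FF) but
// Demanded is 0xFF, the constant has bits set outside the demanded mask, so
// the node is rebuilt as (or X, 0xFF) and CombineTo records the replacement.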
/// SimplifyDemandedBits - Look at Op. At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream. If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, recording the original and replacement nodes in TLO.
/// Otherwise, analyze the expression and return a mask of KnownOne and
/// KnownZero bits for the expression (used to simplify the caller). The
/// KnownZero/One bits may only be accurate for those bits in the DemandedMask.
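///
/// A small example (illustrative only): for Op = (and X, 0xFF00) with
/// DemandedMask = 0x00F0, every demanded result bit is known zero, so the
/// AND case below replaces Op with the constant 0.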
bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask,
                                          uint64_t &KnownZero,
                                          uint64_t &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  KnownZero = KnownOne = 0;   // Don't know anything.
  // Other users may use these bits.
  if (!Op.Val->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedMask to all bits.
    DemandedMask = MVT::getIntVTBitMask(Op.getValueType());
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (Op.getOpcode() != ISD::UNDEF)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::UNDEF, Op.getValueType()));
    return false;
  } else if (Depth == 6) {   // Limit search depth.
    return false;
  }
  uint64_t KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getValue() & DemandedMask;
    KnownZero = ~KnownOne & DemandedMask;
    return false;   // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      uint64_t LHSZero, LHSOne;
      ComputeMaskedBits(Op.getOperand(0), DemandedMask,
                        LHSZero, LHSOne, Depth+1);
      // If the LHS already has zeros where RHSC does, this and is dead.
      if ((LHSZero & DemandedMask) == (~RHSC->getValue() & DemandedMask))
        return TLO.CombineTo(Op, Op.getOperand(0));
      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & DemandedMask))
        return true;
    }

    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownZero,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((DemandedMask & ~KnownZero2 & KnownOne) == (DemandedMask & ~KnownZero2))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & ~KnownZero & KnownOne2) == (DemandedMask & ~KnownZero))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((DemandedMask & (KnownZero|KnownZero2)) == DemandedMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask & ~KnownZero2))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownOne,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if ((DemandedMask & ~KnownOne2 & KnownZero) == (DemandedMask & ~KnownOne2))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & ~KnownOne & KnownZero2) == (DemandedMask & ~KnownOne))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((DemandedMask & (~KnownZero) & KnownOne2) ==
        (DemandedMask & (~KnownZero)))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & (~KnownZero2) & KnownOne) ==
        (DemandedMask & (~KnownZero2)))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR:
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if ((DemandedMask & KnownZero) == DemandedMask)
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & KnownZero2) == DemandedMask)
      return TLO.CombineTo(Op, Op.getOperand(1));

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((DemandedMask & ~KnownZero & ~KnownZero2) == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
                                               Op.getOperand(0),
                                               Op.getOperand(1)));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask) { // all known
      if ((KnownOne & KnownOne2) == KnownOne) {
        MVT::ValueType VT = Op.getValueType();
        SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & DemandedMask, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
                                                 ANDC));
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    KnownZero = KnownZeroOut;
    KnownOne  = KnownOneOut;
    break;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
      KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SHL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getValue();
      SDOperand InOp = Op.getOperand(0);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      if (InOp.getOpcode() == ISD::SRL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (DemandedMask & ((1ULL << ShAmt)-1)) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
          unsigned Opc = ISD::SHL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SRL;
          }

          SDOperand NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          MVT::ValueType VT = Op.getValueType();
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask >> ShAmt,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      KnownZero <<= SA->getValue();
      KnownOne  <<= SA->getValue();
      KnownZero |= (1ULL << SA->getValue())-1;  // low bits known zero.
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);
      unsigned VTSize = MVT::getSizeInBits(VT);
      SDOperand InOp = Op.getOperand(0);

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift. We can do this if the top bits (which are shifted out)
      // are never demanded.
      if (InOp.getOpcode() == ISD::SHL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (DemandedMask & (~0ULL << (VTSize-ShAmt))) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
          unsigned Opc = ISD::SRL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SHL;
          }

          SDOperand NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(InOp, (DemandedMask << ShAmt) & TypeMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;

      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= VTSize - ShAmt;
      KnownZero |= HighBits;  // High bits known zero.
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // Compute the new bits that are at the top now.
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);

      uint64_t InDemandedMask = (DemandedMask << ShAmt) & TypeMask;

      // If any of the demanded bits are produced by the sign extension, we also
      // demand the input sign bit.
      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
      if (HighBits & DemandedMask)
        InDemandedMask |= MVT::getIntVTSignBit(VT);

      if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;

      // Handle the sign bits.
      uint64_t SignBit = MVT::getIntVTSignBit(VT);
      SignBit >>= ShAmt;  // Adjust to where it is now in the mask.

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if ((KnownZero & SignBit) || (HighBits & ~DemandedMask) == HighBits) {
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0),
                                                 Op.getOperand(1)));
      } else if (KnownOne & SignBit) {  // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign extension. Compute the demanded bits in the result that are not
    // present in the input.
    uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & DemandedMask;

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (NewBits == 0)
      return TLO.CombineTo(Op, Op.getOperand(0));

    uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
    int64_t InputDemandedBits = DemandedMask & MVT::getIntVTBitMask(EVT);

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
    if (KnownZero & InSignBit)
      return TLO.CombineTo(Op,
                           TLO.DAG.getZeroExtendInReg(Op.getOperand(0), EVT));

    if (KnownOne & InSignBit) {    // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                       // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP: {
    MVT::ValueType VT = Op.getValueType();
    unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
    KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
    KnownOne  = 0;
    break;
  }
  case ISD::LOAD: {
    if (ISD::isZEXTLoad(Op.Val)) {
      LoadSDNode *LD = cast<LoadSDNode>(Op);
      MVT::ValueType VT = LD->getLoadedVT();
      KnownZero |= ~MVT::getIntVTBitMask(VT) & DemandedMask;
    }
    break;
  }
  case ISD::ZERO_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());

    // If none of the top bits are demanded, convert this into an any_extend.
    uint64_t NewBits = (~InMask) & DemandedMask;
    if (NewBits == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    MVT::ValueType InVT = Op.getOperand(0).getValueType();
    uint64_t InMask = MVT::getIntVTBitMask(InVT);
    uint64_t InSignBit = MVT::getIntVTSignBit(InVT);
    uint64_t NewBits = (~InMask) & DemandedMask;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewBits == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // Since some of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    uint64_t InDemandedBits = DemandedMask & InMask;
    InDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;

    // If the sign bit is known zero, convert this to a zero extend.
    if (KnownZero & InSignBit)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // If the sign bit is known one, the top bits match.
    if (KnownOne & InSignBit) {
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {   // Otherwise, top bits aren't known.
      KnownOne  &= ~NewBits;
      KnownZero &= ~NewBits;
    }
    break;
  }
  case ISD::ANY_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    break;
  }
  case ISD::TRUNCATE: {
    // Simplify the input, using demanded bit information, and compute the known
    // zero/one bits live out.
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;

    // If the input is only used by this truncate, see if we can shrink it based
    // on the known demanded bits.
    if (Op.getOperand(0).Val->hasOneUse()) {
      SDOperand In = Op.getOperand(0);
      switch (In.getOpcode()) {
      default: break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
        if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))) {
          uint64_t HighBits = MVT::getIntVTBitMask(In.getValueType());
          HighBits &= ~MVT::getIntVTBitMask(Op.getValueType());
          HighBits >>= ShAmt->getValue();

          if (ShAmt->getValue() < MVT::getSizeInBits(Op.getValueType()) &&
              (DemandedMask & HighBits) == 0) {
            // None of the shifted in bits are needed. Add a truncate of the
            // shift input, then shift it.
            SDOperand NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE,
                                                 Op.getValueType(),
                                                 In.getOperand(0));
            return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, Op.getValueType(),
                                                     NewTrunc, In.getOperand(1)));
          }
        }
        break;
      }
    }

    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    uint64_t OutMask = MVT::getIntVTBitMask(Op.getValueType());
    KnownZero &= OutMask;
    KnownOne &= OutMask;
    break;
  }
  case ISD::AssertZext: {
    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    uint64_t InMask = MVT::getIntVTBitMask(VT);
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= ~InMask & DemandedMask;
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Just use ComputeMaskedBits to compute output bits.
    ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
    return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));

  return false;
}
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
bool TargetLowering::MaskedValueIsZero(SDOperand Op, uint64_t Mask,
                                       unsigned Depth) const {
  uint64_t KnownZero, KnownOne;
  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
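
// For example, MaskedValueIsZero((and X, 0xFF), 0xFF00) returns true: the
// AND clears bits 8-15, so KnownZero covers the entire 0xFF00 mask.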
/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bitsets. This code only analyzes bits in Mask, in order to short-circuit
/// the computation.
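///
/// For example, for Op = (and X, 0xF0) every bit outside 0xF0 lands in
/// KnownZero (the AND clears it), while KnownOne stays 0 unless more is
/// known about X.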
void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
                                       uint64_t &KnownZero, uint64_t &KnownOne,
                                       unsigned Depth) const {
  KnownZero = KnownOne = 0;   // Don't know anything.
  if (Depth == 6 || Mask == 0)
    return;  // Limit search depth.
  uint64_t KnownZero2, KnownOne2;

  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getValue() & Mask;
    KnownZero = ~KnownOne & Mask;
    return;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    Mask &= ~KnownZero;
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  case ISD::OR:
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    Mask &= ~KnownOne;
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  case ISD::XOR: {
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    uint64_t KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case ISD::SELECT:
    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SELECT_CC:
    ComputeMaskedBits(Op.getOperand(3), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
      KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    return;
  case ISD::SHL:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      ComputeMaskedBits(Op.getOperand(0), Mask >> SA->getValue(),
                        KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= SA->getValue();
      KnownOne  <<= SA->getValue();
      KnownZero |= (1ULL << SA->getValue())-1;  // low bits known zero.
    }
    return;
  case ISD::SRL:
    // (ushr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      uint64_t TypeMask = MVT::getIntVTBitMask(VT);
      ComputeMaskedBits(Op.getOperand(0), (Mask << ShAmt) & TypeMask,
                        KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;

      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT)-ShAmt;
      KnownZero |= HighBits;  // High bits known zero.
    }
    return;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // Compute the new bits that are at the top now.
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);

      uint64_t InDemandedMask = (Mask << ShAmt) & TypeMask;
      // If any of the demanded bits are produced by the sign extension, we also
      // demand the input sign bit.
      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
      if (HighBits & Mask)
        InDemandedMask |= MVT::getIntVTSignBit(VT);

      ComputeMaskedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;

      // Handle the sign bits.
      uint64_t SignBit = MVT::getIntVTSignBit(VT);
      SignBit >>= ShAmt;  // Adjust to where it is now in the mask.

      if (KnownZero & SignBit) {
        KnownZero |= HighBits;  // New bits are known zero.
      } else if (KnownOne & SignBit) {
        KnownOne |= HighBits;   // New bits are known one.
      }
    }
    return;
  case ISD::SIGN_EXTEND_INREG: {
    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign extension. Compute the demanded bits in the result that are not
    // present in the input.
    uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & Mask;

    uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
    int64_t InputDemandedBits = Mask & MVT::getIntVTBitMask(EVT);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if (NewBits)
      InputDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), InputDemandedBits,
                      KnownZero, KnownOne, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero & InSignBit) {          // Input sign bit known clear
      KnownZero |= NewBits;
      KnownOne &= ~NewBits;
    } else if (KnownOne & InSignBit) {    // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                              // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne &= ~NewBits;
    }
    return;
  }
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP: {
    MVT::ValueType VT = Op.getValueType();
    unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
    KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
    KnownOne  = 0;
    return;
  }
  case ISD::LOAD: {
    if (ISD::isZEXTLoad(Op.Val)) {
      LoadSDNode *LD = cast<LoadSDNode>(Op);
      MVT::ValueType VT = LD->getLoadedVT();
      KnownZero |= ~MVT::getIntVTBitMask(VT) & Mask;
    }
    return;
  }
  case ISD::ZERO_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
    uint64_t NewBits = (~InMask) & Mask;
    ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
                      KnownOne, Depth+1);
    KnownZero |= NewBits & Mask;
    KnownOne &= ~NewBits;
    return;
  }
  case ISD::SIGN_EXTEND: {
    MVT::ValueType InVT = Op.getOperand(0).getValueType();
    unsigned InBits = MVT::getSizeInBits(InVT);
    uint64_t InMask    = MVT::getIntVTBitMask(InVT);
    uint64_t InSignBit = 1ULL << (InBits-1);
    uint64_t NewBits   = (~InMask) & Mask;
    uint64_t InDemandedBits = Mask & InMask;

    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if (NewBits)
      InDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                      KnownOne, Depth+1);
    // If the sign bit is known zero or one, the top bits match.
    if (KnownZero & InSignBit) {
      KnownZero |= NewBits;
      KnownOne  &= ~NewBits;
    } else if (KnownOne & InSignBit) {
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {   // Otherwise, top bits aren't known.
      KnownOne  &= ~NewBits;
      KnownZero &= ~NewBits;
    }
    return;
  }
  case ISD::ANY_EXTEND: {
    MVT::ValueType VT = Op.getOperand(0).getValueType();
    ComputeMaskedBits(Op.getOperand(0), Mask & MVT::getIntVTBitMask(VT),
                      KnownZero, KnownOne, Depth+1);
    return;
  }
  case ISD::TRUNCATE: {
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    uint64_t OutMask = MVT::getIntVTBitMask(Op.getValueType());
    KnownZero &= OutMask;
    KnownOne &= OutMask;
    break;
  }
  case ISD::AssertZext: {
    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    uint64_t InMask = MVT::getIntVTBitMask(VT);
    ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
                      KnownOne, Depth+1);
    KnownZero |= (~InMask) & Mask;
    return;
  }
  case ISD::ADD: {
    // Compute the known bits of both operands; the low clear bits that the
    // operands have in common are also clear in the sum.
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the low clear bits
    // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
    // low 3 bits clear.
    uint64_t KnownZeroOut = std::min(CountTrailingZeros_64(~KnownZero),
                                     CountTrailingZeros_64(~KnownZero2));

    KnownZero = (1ULL << KnownZeroOut) - 1;
    KnownOne = 0;
    return;
  }
  case ISD::SUB: {
    ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0));
    if (!CLHS) return;

    // We know that the top bits of C-X are clear if X contains less bits
    // than C (i.e. no wrap-around can happen). For example, 20-X is
    // positive if we can prove that X is >= 0 and < 16.
    MVT::ValueType VT = CLHS->getValueType(0);
    if ((CLHS->getValue() & MVT::getIntVTSignBit(VT)) == 0) {  // sign bit clear
      unsigned NLZ = CountLeadingZeros_64(CLHS->getValue()+1);
      uint64_t MaskV = (1ULL << (63-NLZ))-1; // NLZ can't be 64 with no sign bit
      MaskV = ~MaskV & MVT::getIntVTBitMask(VT);
      ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero, KnownOne, Depth+1);

      // If all of the MaskV bits are known to be zero, then we know the output
      // top bits are zero, because we now know that the output is from [0-C].
      if ((KnownZero & MaskV) == MaskV) {
        unsigned NLZ2 = CountLeadingZeros_64(CLHS->getValue());
        KnownZero = ~((1ULL << (64-NLZ2))-1) & Mask;  // Top bits known zero.
        KnownOne = 0;   // No one bits known.
      } else {
        KnownZero = KnownOne = 0;  // Otherwise, nothing known.
      }
    }
    return;
  }
  default:
    // Allow the target to implement this method for its nodes.
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
      computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne);
    }
    return;
  }
}
/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                    uint64_t Mask,
                                                    uint64_t &KnownZero,
                                                    uint64_t &KnownOne,
                                                    unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = 0;
  KnownOne = 0;
}
/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned TargetLowering::ComputeNumSignBits(SDOperand Op, unsigned Depth) const {
  MVT::ValueType VT = Op.getValueType();
  assert(MVT::isInteger(VT) && "Invalid VT!");
  unsigned VTBits = MVT::getSizeInBits(VT);
  unsigned Tmp, Tmp2;

  if (Depth == 6)
    return 1;  // Limit search depth.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::AssertSext:
    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
    return VTBits-Tmp+1;
  case ISD::AssertZext:
    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
    return VTBits-Tmp;

  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(Op)->getValue();
    // If negative, invert the bits, then look at it.
    if (Val & MVT::getIntVTSignBit(VT))
      Val = ~Val;

    // Shift the bits so they are the leading bits in the int64_t.
    Val <<= 64-VTBits;

    // Return # leading zeros. We use 'min' here in case Val was zero before
    // shifting. We don't want to return '64' as for an i32 "0".
    return std::min(VTBits, CountLeadingZeros_64(Val));
  }
  case ISD::SIGN_EXTEND:
    Tmp = VTBits-MVT::getSizeInBits(Op.getOperand(0).getValueType());
    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;

  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
    Tmp = VTBits-Tmp+1;

    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    return std::max(Tmp, Tmp2);
  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    // SRA X, C -> adds C sign bits.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Tmp += C->getValue();
      if (Tmp > VTBits) Tmp = VTBits;
    }
    return Tmp;
  case ISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (C->getValue() >= VTBits ||   // Bad shift.
          C->getValue() >= Tmp) break; // Shifted all sign bits out.
      return Tmp - C->getValue();
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:   // NOT is handled here.
    // Logical binary ops preserve the number of sign bits.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    return std::min(Tmp, Tmp2);

  case ISD::SELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
    return std::min(Tmp, Tmp2);
  case ISD::SETCC:
    // If setcc returns 0/-1, all bits are sign bits.
    if (getSetCCResultContents() == ZeroOrNegativeOneSetCCResult)
      return VTBits;
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned RotAmt = C->getValue() & (VTBits-1);

      // Handle rotate right by N like a rotate left by 32-N.
      if (Op.getOpcode() == ISD::ROTR)
        RotAmt = (VTBits-RotAmt) & (VTBits-1);

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left. This handles rotl(sext(x), 1) for example.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (Tmp > RotAmt+1) return Tmp-RotAmt;
    }
    break;
  case ISD::ADD:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        uint64_t KnownZero, KnownOne;
        uint64_t Mask = MVT::getIntVTBitMask(VT);
        ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero|1) == Mask)
          return VTBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero & MVT::getIntVTSignBit(VT))
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;
  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
      if (CLHS->getValue() == 0) {
        uint64_t KnownZero, KnownOne;
        uint64_t Mask = MVT::getIntVTBitMask(VT);
        ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero|1) == Mask)
          return VTBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero & MVT::getIntVTSignBit(VT))
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
  case ISD::TRUNCATE:
    // FIXME: it's tricky to do anything useful for this, but it is an important
    // case for targets like X86.
    break;
  }

  // Handle LOADX separately here. EXTLOAD case will fallthrough.
  if (Op.getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    unsigned ExtType = LD->getExtensionType();
    switch (ExtType) {
    default: break;
    case ISD::SEXTLOAD:   // e.g. i16->i32 = '17' bits known.
      Tmp = MVT::getSizeInBits(LD->getLoadedVT());
      return VTBits-Tmp+1;
    case ISD::ZEXTLOAD:   // e.g. i16->i32 = '16' bits known.
      Tmp = MVT::getSizeInBits(LD->getLoadedVT());
      return VTBits-Tmp;
    }
  }
  // Allow the target to implement this method for its nodes.
  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_VOID) {
    unsigned NumBits = ComputeNumSignBitsForTargetNode(Op, Depth);
    if (NumBits > 1) return NumBits;
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  uint64_t KnownZero, KnownOne;
  uint64_t Mask = MVT::getIntVTBitMask(VT);
  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);

  uint64_t SignBit = MVT::getIntVTSignBit(VT);
  if (KnownZero & SignBit) {        // SignBit is 0
    Mask = KnownZero;
  } else if (KnownOne & SignBit) {  // SignBit is 1
    Mask = KnownOne;
  } else {
    // Nothing known.
    return 1;
  }

  // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= 1;
  // Return # leading zeros. We use 'min' here in case Val was zero before
  // shifting. We don't want to return '64' as for an i32 "0".
  return std::min(VTBits, CountLeadingZeros_64(Mask));
}
/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op,
                                                         unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");
  return 1;
}
/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc. If it is unable to simplify it, return a null SDOperand.
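///
/// For example (both cases handled below): integer (setcc X, X, SETEQ) folds
/// to the constant 1, and (setcc X, 1, SETULT) is canonicalized to
/// (setcc X, 0, SETEQ).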
SDOperand
TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
                              ISD::CondCode Cond, bool foldBooleans,
                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return DAG.getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return DAG.getConstant(1, VT);
  }
  if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val)) {
    uint64_t C1 = N1C->getValue();
    if (isa<ConstantSDNode>(N0.Val)) {
      return DAG.FoldSetCC(VT, N0, N1, Cond);
    } else {
      // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
      // equality comparison, then we're just comparing whether X itself is
      // zero.
      if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
          N0.getOperand(0).getOpcode() == ISD::CTLZ &&
          N0.getOperand(1).getOpcode() == ISD::Constant) {
        unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
        if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
            ShAmt == Log2_32(MVT::getSizeInBits(N0.getValueType()))) {
          if ((C1 == 0) == (Cond == ISD::SETEQ)) {
            // (srl (ctlz x), 5) == 0  -> X != 0
            // (srl (ctlz x), 5) != 1  -> X != 0
            Cond = ISD::SETNE;
          } else {
            // (srl (ctlz x), 5) != 0  -> X == 0
            // (srl (ctlz x), 5) == 1  -> X == 0
            Cond = ISD::SETEQ;
          }
          SDOperand Zero = DAG.getConstant(0, N0.getValueType());
          return DAG.getSetCC(VT, N0.getOperand(0).getOperand(0),
                              Zero, Cond);
        }
      }
      // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
      if (N0.getOpcode() == ISD::ZERO_EXTEND) {
        unsigned InSize = MVT::getSizeInBits(N0.getOperand(0).getValueType());

        // If the comparison constant has bits in the upper part, the
        // zero-extended value could never match.
        if (C1 & (~0ULL << InSize)) {
          unsigned VSize = MVT::getSizeInBits(N0.getValueType());
          switch (Cond) {
          case ISD::SETUGT:
          case ISD::SETUGE:
          case ISD::SETEQ: return DAG.getConstant(0, VT);
          case ISD::SETULT:
          case ISD::SETULE:
          case ISD::SETNE: return DAG.getConstant(1, VT);
          case ISD::SETGT:
          case ISD::SETGE:
            // True if the sign bit of C1 is set.
            return DAG.getConstant((C1 & (1ULL << (VSize-1))) != 0, VT);
          case ISD::SETLT:
          case ISD::SETLE:
            // True if the sign bit of C1 isn't set.
            return DAG.getConstant((C1 & (1ULL << (VSize-1))) == 0, VT);
          default:
            break;
          }
        }

        // Otherwise, we can perform the comparison with the low bits.
        switch (Cond) {
        case ISD::SETEQ:
        case ISD::SETNE:
        case ISD::SETUGT:
        case ISD::SETUGE:
        case ISD::SETULT:
        case ISD::SETULE:
          return DAG.getSetCC(VT, N0.getOperand(0),
                          DAG.getConstant(C1, N0.getOperand(0).getValueType()),
                              Cond);
        default:
          break;   // todo, be more careful with signed comparisons
        }
      } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
        MVT::ValueType ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
        unsigned ExtSrcTyBits = MVT::getSizeInBits(ExtSrcTy);
        MVT::ValueType ExtDstTy = N0.getValueType();
        unsigned ExtDstTyBits = MVT::getSizeInBits(ExtDstTy);

        // If the extended part has any inconsistent bits, it cannot ever
        // compare equal. In other words, they have to be all ones or all
        // zeros.
        uint64_t ExtBits =
          (~0ULL >> (64-ExtSrcTyBits)) & (~0ULL << (ExtDstTyBits-1));
        if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
          return DAG.getConstant(Cond == ISD::SETNE, VT);

        SDOperand ZextOp;
        MVT::ValueType Op0Ty = N0.getOperand(0).getValueType();
        if (Op0Ty == ExtSrcTy) {
          ZextOp = N0.getOperand(0);
        } else {
          int64_t Imm = ~0ULL >> (64-ExtSrcTyBits);
          ZextOp = DAG.getNode(ISD::AND, Op0Ty, N0.getOperand(0),
                               DAG.getConstant(Imm, Op0Ty));
        }
        if (!DCI.isCalledByLegalizer())
          DCI.AddToWorklist(ZextOp.Val);
        // Otherwise, make this a use of a zext.
        return DAG.getSetCC(VT, ZextOp,
                            DAG.getConstant(C1 & (~0ULL>>(64-ExtSrcTyBits)),
                                            ExtDstTy),
                            Cond);
      } else if ((N1C->getValue() == 0 || N1C->getValue() == 1) &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {

        // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
        if (N0.getOpcode() == ISD::SETCC) {
          bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getValue() != 1);
          if (TrueWhenTrue)
            return N0;

          // Invert the condition.
          ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
          CC = ISD::getSetCCInverse(CC,
                                MVT::isInteger(N0.getOperand(0).getValueType()));
          return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC);
        }

        if ((N0.getOpcode() == ISD::XOR ||
             (N0.getOpcode() == ISD::AND &&
              N0.getOperand(0).getOpcode() == ISD::XOR &&
              N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
            isa<ConstantSDNode>(N0.getOperand(1)) &&
            cast<ConstantSDNode>(N0.getOperand(1))->getValue() == 1) {
          // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
          // can only do this if the top bits are known zero.
          if (MaskedValueIsZero(N0, MVT::getIntVTBitMask(N0.getValueType())-1)) {
            // Okay, get the un-inverted input value.
            SDOperand Val;
            if (N0.getOpcode() == ISD::XOR)
              Val = N0.getOperand(0);
            else {
              assert(N0.getOpcode() == ISD::AND &&
                     N0.getOperand(0).getOpcode() == ISD::XOR);
              // ((X^1)&1)^1 -> X & 1
              Val = DAG.getNode(ISD::AND, N0.getValueType(),
                                N0.getOperand(0).getOperand(0),
                                N0.getOperand(1));
            }
            return DAG.getSetCC(VT, Val, N1,
                                Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
          }
        }
      }
1606 uint64_t MinVal, MaxVal;
1607 unsigned OperandBitSize = MVT::getSizeInBits(N1C->getValueType(0));
1608 if (ISD::isSignedIntSetCC(Cond)) {
1609 MinVal = 1ULL << (OperandBitSize-1);
1610 if (OperandBitSize != 1) // Avoid X >> 64, which is undefined.
1611 MaxVal = ~0ULL >> (65-OperandBitSize);
1616 MaxVal = ~0ULL >> (64-OperandBitSize);
    // Canonicalize GE/LE comparisons to use GT/LT comparisons.
    if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
      if (C1 == MinVal) return DAG.getConstant(1, VT);  // X >= MIN --> true
      --C1;                                         // X >= C0 --> X > (C0-1)
      return DAG.getSetCC(VT, N0, DAG.getConstant(C1, N1.getValueType()),
                          (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
    }

    if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
      if (C1 == MaxVal) return DAG.getConstant(1, VT);  // X <= MAX --> true
      ++C1;                                         // X <= C0 --> X < (C0+1)
      return DAG.getSetCC(VT, N0, DAG.getConstant(C1, N1.getValueType()),
                          (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
    }

    if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
      return DAG.getConstant(0, VT);      // X < MIN --> false
    if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
      return DAG.getConstant(1, VT);      // X >= MIN --> true
    if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
      return DAG.getConstant(0, VT);      // X > MAX --> false
    if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
      return DAG.getConstant(1, VT);      // X <= MAX --> true

    // Canonicalize setgt X, Min --> setne X, Min
    if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
      return DAG.getSetCC(VT, N0, N1, ISD::SETNE);
    // Canonicalize setlt X, Max --> setne X, Max
    if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
      return DAG.getSetCC(VT, N0, N1, ISD::SETNE);

    // If we have setult X, 1, turn it into seteq X, 0
    if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
      return DAG.getSetCC(VT, N0, DAG.getConstant(MinVal, N0.getValueType()),
                          ISD::SETEQ);
    // If we have setugt X, Max-1, turn it into seteq X, Max
    else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
      return DAG.getSetCC(VT, N0, DAG.getConstant(MaxVal, N0.getValueType()),
                          ISD::SETEQ);
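    // Taken together, these canonicalizations mean that, e.g., i8 "X <=s 4"
    // reaches the target as "X <s 5" and "X <u 1" as "X == 0".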
    // If we have "setcc X, C0", check to see if we can shrink the immediate
    // by changing cc.

    // SETUGT X, SINTMAX  -> SETLT X, 0
    if (Cond == ISD::SETUGT && OperandBitSize != 1 &&
        C1 == (~0ULL >> (65-OperandBitSize)))
      return DAG.getSetCC(VT, N0, DAG.getConstant(0, N1.getValueType()),
                          ISD::SETLT);

    // FIXME: Implement the rest of these.

    // Fold bit comparisons when we can.
    if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
        VT == N0.getValueType() && N0.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS =
                  dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
        if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0  -->  (X & 8) >> 3
          // Perform the xform if the AND RHS is a single bit.
          if (isPowerOf2_64(AndRHS->getValue())) {
            return DAG.getNode(ISD::SRL, VT, N0,
                               DAG.getConstant(Log2_64(AndRHS->getValue()),
                                               getShiftAmountTy()));
          }
        } else if (Cond == ISD::SETEQ && C1 == AndRHS->getValue()) {
          // (X & 8) == 8  -->  (X & 8) >> 3
          // Perform the xform if C1 is a single bit.
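          // E.g. for X = 12 (0b1100), (X & 8) == 8 becomes (12 & 8) >> 3 = 1.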
          if (isPowerOf2_64(C1)) {
            return DAG.getNode(ISD::SRL, VT, N0,
                               DAG.getConstant(Log2_64(C1), getShiftAmountTy()));
          }
        }
      }
  } else if (isa<ConstantSDNode>(N0.Val)) {
    // Ensure that the constant occurs on the RHS.
    return DAG.getSetCC(VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
  }

  if (isa<ConstantFPSDNode>(N0.Val)) {
    // Constant fold or commute setcc.
    SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond);
    if (O.Val) return O;
  }

  if (N0 == N1) {
    // We can always fold X == X for integer setcc's.
    if (MVT::isInteger(N0.getValueType()))
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    unsigned UOF = ISD::getUnorderedFlavor(Cond);
    if (UOF == 2)   // FP operators that are undefined on NaNs.
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
      return DAG.getConstant(UOF, VT);
    // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
    // if it is not already.
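    // E.g. "setoeq X, X" is true exactly when X is not NaN, so it becomes
    // "seto X, X"; likewise "setune X, X" becomes "setuo X, X".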
    ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
    if (NewCond != Cond)
      return DAG.getSetCC(VT, N0, N1, NewCond);
  }
  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      MVT::isInteger(N0.getValueType())) {
    if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
        N0.getOpcode() == ISD::XOR) {
      // Simplify (X+Y) == (X+Z) -->  Y == Z
      if (N0.getOpcode() == N1.getOpcode()) {
        if (N0.getOperand(0) == N1.getOperand(0))
          return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(1), Cond);
        if (N0.getOperand(1) == N1.getOperand(1))
          return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(0), Cond);
        if (DAG.isCommutativeBinOp(N0.getOpcode())) {
          // If X op Y == Y op X, try other combinations.
          if (N0.getOperand(0) == N1.getOperand(1))
            return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(0), Cond);
          if (N0.getOperand(1) == N1.getOperand(0))
            return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(1), Cond);
        }
      }

      if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
        if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          // Turn (X+C1) == C2 --> X == C2-C1
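          // (This is safe even if X+C1 wraps: subtracting C1 from both sides
          // of the equality is reversible modulo 2^N.)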
          if (N0.getOpcode() == ISD::ADD && N0.Val->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(0),
                                DAG.getConstant(RHSC->getValue()-LHSR->getValue(),
                                                N0.getValueType()), Cond);
          }
          // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
          if (N0.getOpcode() == ISD::XOR)
            // If we know that all of the inverted bits are zero, don't bother
            // performing the inversion.
            if (MaskedValueIsZero(N0.getOperand(0), ~LHSR->getValue()))
              return DAG.getSetCC(VT, N0.getOperand(0),
                                  DAG.getConstant(LHSR->getValue()^RHSC->getValue(),
                                                  N0.getValueType()), Cond);
        }

        // Turn (C1-X) == C2 --> X == C1-C2
        if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
          if (N0.getOpcode() == ISD::SUB && N0.Val->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(1),
                                DAG.getConstant(SUBC->getValue()-RHSC->getValue(),
                                                N0.getValueType()), Cond);
          }
        }
      }

      // Simplify (X+Z) == X -->  Z == 0
      if (N0.getOperand(0) == N1)
        return DAG.getSetCC(VT, N0.getOperand(1),
                            DAG.getConstant(0, N0.getValueType()), Cond);
      if (N0.getOperand(1) == N1) {
        if (DAG.isCommutativeBinOp(N0.getOpcode()))
          return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(0, N0.getValueType()), Cond);
        else {
          assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
          // (Z-X) == X  --> Z == X<<1
          SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(),
                                     N1,
                                     DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.Val);
          return DAG.getSetCC(VT, N0.getOperand(0), SH, Cond);
        }
      }
    }
    if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
        N1.getOpcode() == ISD::XOR) {
      // Simplify  X == (X+Z) -->  Z == 0
      if (N1.getOperand(0) == N0) {
        return DAG.getSetCC(VT, N1.getOperand(1),
                            DAG.getConstant(0, N1.getValueType()), Cond);
      } else if (N1.getOperand(1) == N0) {
        if (DAG.isCommutativeBinOp(N1.getOpcode())) {
          return DAG.getSetCC(VT, N1.getOperand(0),
                              DAG.getConstant(0, N1.getValueType()), Cond);
        } else {
          assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
          // X == (Z-X)  --> X<<1 == Z
          SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0,
                                     DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.Val);
          return DAG.getSetCC(VT, SH, N1.getOperand(0), Cond);
        }
      }
    }
  }
  // Fold away ALL boolean setcc's.
  SDOperand Temp;
  if (N0.getValueType() == MVT::i1 && foldBooleans) {
    switch (Cond) {
    default: assert(0 && "Unknown integer setcc!");
    case ISD::SETEQ:  // X == Y  ->  (X^Y)^1
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      N0 = DAG.getNode(ISD::XOR, MVT::i1, Temp, DAG.getConstant(1, MVT::i1));
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETNE:  // X != Y  -->  (X^Y)
      N0 = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      break;
    case ISD::SETGT:  // X >s Y   -->  X == 0 & Y == 1  -->  X^1 & Y
    case ISD::SETULT: // X <u Y   -->  X == 0 & Y == 1  -->  X^1 & Y
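      // Note that i1 is signed here: the bit pattern 1 is the value -1, so
      // "X >s Y" holds exactly when X is 0 and Y is 1.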
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETLT:  // X <s Y   -->  X == 1 & Y == 0  -->  Y^1 & X
    case ISD::SETUGT: // X >u Y   -->  X == 1 & Y == 0  -->  Y^1 & X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N0, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETULE: // X <=u Y  -->  X == 0 | Y == 1  -->  X^1 | Y
    case ISD::SETGE:  // X >=s Y  -->  X == 0 | Y == 1  -->  X^1 | Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETUGE: // X >=u Y  -->  X == 1 | Y == 0  -->  Y^1 | X
    case ISD::SETLE:  // X <=s Y  -->  X == 1 | Y == 0  -->  Y^1 | X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N0, Temp);
      break;
    }
    if (VT != MVT::i1) {
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(N0.Val);
      // FIXME: If running after legalize, we probably can't do this.
      N0 = DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
    }
    return N0;
  }

  // Could not fold it.
  return SDOperand();
}
SDOperand TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  // Default implementation: no optimization.
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//
TargetLowering::ConstraintType
TargetLowering::getConstraintType(const std::string &Constraint) const {
  // FIXME: lots more standard ones to handle.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    case 'm':    // memory
    case 'o':    // offsetable
    case 'V':    // not offsetable
      return C_Memory;
    case 'i':    // Simple Integer or Relocatable Constant
    case 'n':    // Simple Integer
    case 's':    // Relocatable Constant
    case 'X':    // Allow ANY value.
    case 'I':    // Target registers.
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      return C_Other;
    }
  }
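  // Physical registers are referenced as "{name}", e.g. "{r0}"; the names
  // themselves are target-specific.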
  if (Constraint.size() > 1 && Constraint[0] == '{' &&
      Constraint[Constraint.size()-1] == '}')
    return C_Register;
  return C_Unknown;
}
/// isOperandValidForConstraint - Return the specified operand (possibly
/// modified) if the specified SDOperand is valid for the specified target
/// constraint letter, otherwise return null.
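/// For example, for the 'i' constraint an operand of the form
/// (add (GlobalAddress GV), 4) is returned as a TargetGlobalAddress of GV
/// with offset 4, so the address computation itself is not selected.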
SDOperand TargetLowering::isOperandValidForConstraint(SDOperand Op,
                                                      char ConstraintLetter,
                                                      SelectionDAG &DAG) {
  switch (ConstraintLetter) {
  default: break;
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's':    // Relocatable Constant
  case 'X': {  // Allows any operand.
    // These operands are interested in values of the form (GV+C), where C may
    // be folded in as an offset of GV, or it may be explicitly added.  Also,
    // it is fine if either GV or C is missing.
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);

    // If we have "(add GV, C)", pull out GV/C.
    if (Op.getOpcode() == ISD::ADD) {
      C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C == 0 || GA == 0) {
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
      }
      if (C == 0 || GA == 0)
        C = 0, GA = 0;
    }

    // If we find a valid operand, map to the TargetXXX version so that the
    // value itself doesn't get selected.
    if (GA) {   // Either &GV or &GV+C
      if (ConstraintLetter != 'n') {
        int64_t Offs = GA->getOffset();
        if (C) Offs += C->getValue();
        return DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getValueType(),
                                          Offs);
      }
    }
    if (C) {   // just C, no GV.
      // Simple constants are not allowed for 's'.
      if (ConstraintLetter != 's')
        return DAG.getTargetConstant(C->getValue(), Op.getValueType());
    }
    break;
  }
  }
  return SDOperand(0,0);
}
std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  return std::vector<unsigned>();
}
std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT::ValueType VT) const {
  if (Constraint[0] != '{')
    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  std::string RegName(Constraint.begin()+1, Constraint.end()-1);

  // Figure out which register class contains this reg.
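  // E.g. for the constraint "{r5}" this returns the first register class with
  // a legal value type that contains a register named "r5" (matched
  // case-insensitively).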
  const MRegisterInfo *RI = TM.getRegisterInfo();
  for (MRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    bool isLegal = false;
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (isTypeLegal(*I)) {
        isLegal = true;
        break;
      }
    }

    if (!isLegal) continue;

    // Walk the register list looking for a register with a matching name.
    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
         I != E; ++I) {
      if (StringsEqualNoCase(RegName, RI->get(*I).Name))
        return std::make_pair(*I, RC);
    }
  }

  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
}
//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           const Type *Ty) const {
  // The default implementation supports a conservative RISCy addressing
  // mode: r+r and r+i.
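  //
  // Accepted here, e.g.:  [r1], [r1 + 42], [r1 + r2], [2*r1]
  // Rejected here, e.g.:  [r1 + r2 + 4], [2*r1 + r2], [4*r1], [@GV + r1]
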
  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r,
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:  // Don't allow n * r.
    return false;
  }

  return true;
}
// Magic for divide replacement

struct ms {
  int64_t m;   // magic number
  int64_t s;   // shift amount
};

struct mu {
  uint64_t m;  // magic number
  int64_t a;   // add indicator
  int64_t s;   // shift amount
};

/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
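/// E.g. magic32(7) returns m = 0x92492493 (negative as an int32) and s = 2.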
static ms magic32(int32_t d) {
  int32_t p;
  uint32_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint32_t two31 = 0x80000000U;
  struct ms mag;

  ad = d >= 0 ? d : -d;
  t = two31 + ((uint32_t)d >> 31);
  anc = t - 1 - t%ad;   // absolute value of nc
  p = 31;               // initialize p
  q1 = two31/anc;       // initialize q1 = 2p/abs(nc)
  r1 = two31 - q1*anc;  // initialize r1 = rem(2p,abs(nc))
  q2 = two31/ad;        // initialize q2 = 2p/abs(d)
  r2 = two31 - q2*ad;   // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = 2*q1;        // update q1 = 2p/abs(nc)
    r1 = 2*r1;        // update r1 = rem(2p/abs(nc))
    if (r1 >= anc) {  // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;        // update q2 = 2p/abs(d)
    r2 = 2*r2;        // update r2 = rem(2p/abs(d))
    if (r2 >= ad) {   // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = (int32_t)(q2 + 1);  // make sure to sign extend
  if (d < 0) mag.m = -mag.m;  // resulting magic number
  mag.s = p - 32;             // resulting shift
  return mag;
}
/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
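/// E.g. magicu32(7) returns m = 0x24924925 with a = 1 and s = 3.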
static mu magicu32(uint32_t d) {
  int32_t p;
  uint32_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;               // initialize "add" indicator
  nc = - 1 - (-d)%d;
  p = 31;                   // initialize p
  q1 = 0x80000000/nc;       // initialize q1 = 2p/nc
  r1 = 0x80000000 - q1*nc;  // initialize r1 = rem(2p,nc)
  q2 = 0x7FFFFFFF/d;        // initialize q2 = (2p-1)/d
  r2 = 0x7FFFFFFF - q2*d;   // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1 >= nc - r1 ) {
      q1 = 2*q1 + 1;   // update q1
      r1 = 2*r1 - nc;  // update r1
    } else {
      q1 = 2*q1;       // update q1
      r1 = 2*r1;       // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFF) magu.a = 1;
      q2 = 2*q2 + 1;      // update q2
      r2 = 2*r2 + 1 - d;  // update r2
    } else {
      if (q2 >= 0x80000000) magu.a = 1;
      q2 = 2*q2;       // update q2
      r2 = 2*r2 + 1;   // update r2
    }
    delta = d - 1 - r2;
  } while (p < 64 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1;  // resulting magic number
  magu.s = p - 32;  // resulting shift
  return magu;
}
/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
static ms magic64(int64_t d) {
  int64_t p;
  uint64_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint64_t two63 = 9223372036854775808ULL; // 2^63
  struct ms mag;

  ad = d >= 0 ? d : -d;
  t = two63 + ((uint64_t)d >> 63);
  anc = t - 1 - t%ad;   // absolute value of nc
  p = 63;               // initialize p
  q1 = two63/anc;       // initialize q1 = 2p/abs(nc)
  r1 = two63 - q1*anc;  // initialize r1 = rem(2p,abs(nc))
  q2 = two63/ad;        // initialize q2 = 2p/abs(d)
  r2 = two63 - q2*ad;   // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = 2*q1;        // update q1 = 2p/abs(nc)
    r1 = 2*r1;        // update r1 = rem(2p/abs(nc))
    if (r1 >= anc) {  // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;        // update q2 = 2p/abs(d)
    r2 = 2*r2;        // update r2 = rem(2p/abs(d))
    if (r2 >= ad) {   // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = (int64_t)(q2 + 1);  // make sure to sign extend
  if (d < 0) mag.m = -mag.m;  // resulting magic number
  mag.s = p - 64;             // resulting shift
  return mag;
}
/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
static mu magicu64(uint64_t d) {
  int64_t p;
  uint64_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;               // initialize "add" indicator
  nc = - 1 - (-d)%d;
  p = 63;                   // initialize p
  q1 = 0x8000000000000000ull/nc;       // initialize q1 = 2p/nc
  r1 = 0x8000000000000000ull - q1*nc;  // initialize r1 = rem(2p,nc)
  q2 = 0x7FFFFFFFFFFFFFFFull/d;        // initialize q2 = (2p-1)/d
  r2 = 0x7FFFFFFFFFFFFFFFull - q2*d;   // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1 >= nc - r1 ) {
      q1 = 2*q1 + 1;   // update q1
      r1 = 2*r1 - nc;  // update r1
    } else {
      q1 = 2*q1;       // update q1
      r1 = 2*r1;       // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFFFFFFFFFFull) magu.a = 1;
      q2 = 2*q2 + 1;      // update q2
      r2 = 2*r2 + 1 - d;  // update r2
    } else {
      if (q2 >= 0x8000000000000000ull) magu.a = 1;
      q2 = 2*q2;       // update q2
      r2 = 2*r2 + 1;   // update r2
    }
    delta = d - 1 - r2;
  } while (p < 128 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1;  // resulting magic number
  magu.s = p - 64;  // resulting shift
  return magu;
}
/// BuildSDIV - Given an ISD::SDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
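/// For example, i32 N/7 (where magic32 gives m = 0x92492493, s = 2) is
/// emitted roughly as:
///   Q = mulhs N, 0x92492493;  Q = add Q, N;  Q = sra Q, 2
///   result = add Q, (srl Q, 31)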
SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                    std::vector<SDNode*>* Created) const {
  MVT::ValueType VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDOperand();       // BuildSDIV only operates on i32 or i64
  if (!isOperationLegal(ISD::MULHS, VT))
    return SDOperand();       // Make sure the target supports MULHS.

  int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSignExtended();
  ms magics = (VT == MVT::i32) ? magic32(d) : magic64(d);

  // Multiply the numerator (operand 0) by the magic value.
  SDOperand Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
                            DAG.getConstant(magics.m, VT));
  // If d > 0 and m < 0, add the numerator.
  if (d > 0 && magics.m < 0) {
    Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.Val);
  }
  // If d < 0 and m > 0, subtract the numerator.
  if (d < 0 && magics.m > 0) {
    Q = DAG.getNode(ISD::SUB, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.Val);
  }
  // Shift right algebraic if shift value is nonzero.
  if (magics.s > 0) {
    Q = DAG.getNode(ISD::SRA, VT, Q,
                    DAG.getConstant(magics.s, getShiftAmountTy()));
    if (Created)
      Created->push_back(Q.Val);
  }
  // Extract the sign bit and add it to the quotient.
  SDOperand T =
    DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(MVT::getSizeInBits(VT)-1,
                                                 getShiftAmountTy()));
  if (Created)
    Created->push_back(T.Val);
  return DAG.getNode(ISD::ADD, VT, Q, T);
}
/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
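/// For example, i32 N/7 (where magicu32 gives m = 0x24924925, a = 1, s = 3)
/// is emitted roughly as:
///   Q = mulhu N, 0x24924925
///   T = srl (sub N, Q), 1;  result = srl (add T, Q), 2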
SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                    std::vector<SDNode*>* Created) const {
  MVT::ValueType VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDOperand();       // BuildUDIV only operates on i32 or i64
  if (!isOperationLegal(ISD::MULHU, VT))
    return SDOperand();       // Make sure the target supports MULHU.

  uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getValue();
  mu magics = (VT == MVT::i32) ? magicu32(d) : magicu64(d);

  // Multiply the numerator (operand 0) by the magic value.
  SDOperand Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
                            DAG.getConstant(magics.m, VT));
  if (Created)
    Created->push_back(Q.Val);

  if (magics.a == 0) {
    return DAG.getNode(ISD::SRL, VT, Q,
                       DAG.getConstant(magics.s, getShiftAmountTy()));
  } else {
    SDOperand NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q);
    if (Created)
      Created->push_back(NPQ.Val);
    NPQ = DAG.getNode(ISD::SRL, VT, NPQ,
                      DAG.getConstant(1, getShiftAmountTy()));
    if (Created)
      Created->push_back(NPQ.Val);
    NPQ = DAG.getNode(ISD::ADD, VT, NPQ, Q);
    if (Created)
      Created->push_back(NPQ.Val);
    return DAG.getNode(ISD::SRL, VT, NPQ,
                       DAG.getConstant(magics.s-1, getShiftAmountTy()));
  }
}