//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // bits are all ones.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

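// Example: after type legalization, an all-ones v4i16 vector may be built
// from i32 operands. An operand value of 0x0000FFFF has 16 trailing ones,
// enough to cover the 16-bit element, so the build_vector is still
// recognized as all-ones even though the operand is not ~0 as an i32.
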
/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
    if (!CN->isNullValue())
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
    if (!CFPN->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

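// Worked example: SETULT has the U and L bits set; swapping the L and G
// bits yields the U and G bits, i.e. SETUGT, which is exactly (Y < X)
// rewritten as (X > Y). The E bit is untouched, so SETULE maps to SETUGE
// the same way.
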
/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

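// Worked example: for an integer compare, inverting SETLT flips the L, G
// and E bits to give SETGE. For a floating point compare, SETOLT ^ 15 also
// flips the unordered bit, giving SETUGE: !(a < b) must hold when the
// operands are unordered (e.g. a NaN input).
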
/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if the result is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

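// Worked example: SETOGT | SETOEQ ORs the G and E bits together, producing
// SETOGE. For integers, SETUGT | SETULT ORs into SETUNE, which is
// canonicalized to SETNE above.
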
/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns SETCC_INVALID if it is not possible to represent the
/// resultant comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

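// Worked example: SETUGE & SETULE keeps only the bits common to both,
// giving SETUEQ; the integer canonicalization above rewrites that to plain
// SETEQ, since (X uge Y) && (X ule Y) holds exactly when X == Y.
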
//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}

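// The encoding above packs the fields as:
//   bits 1-0: ConvType (extension/truncation kind)
//   bits 4-2: AM (indexing mode)
//   bit 5:    isVolatile
//   bit 6:    isNonTemporal
//   bit 7:    isInvariant
// For example, a volatile node with ConvType 0 and an indexing mode of 4
// (ISD::POST_DEC) encodes as (4 << 2) | (1 << 5) = 0x30.
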
//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, 0);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list.  This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it.  This is useful when we're about to delete or repurpose
/// the node.  We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops, unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

#ifndef NDEBUG
/// VerifyNodeCommon - Sanity check the given node.  Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}

/// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that are
  // not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}

/// VerifyMachineNode - Sanity check the given MachineNode.  Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}
#endif // NDEBUG

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TSI(*tm.getSelectionDAGInfo()), TTI(0), OptLevel(OL),
    EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
    Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
    UpdateListeners(0) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf, const TargetTransformInfo *tti) {
  MF = &mf;
  TTI = tti;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(0));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(0));

  EntryNode.UseList = 0;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}

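// Example: zero-extending the low 8 bits of an i32 value in-register
// produces (and x, 0x000000FF): BitWidth is 32, VT.getSizeInBits() is 8,
// and APInt::getLowBitsSet(32, 8) is the mask 0xFF.
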
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
         (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT) {
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  const TargetLowering *TLI = TM.getTargetLowering();

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
           TargetLowering::TypeExpandInteger) {
    APInt NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                           .trunc(ViaEltSizeInBits),
                                     ViaEltVT, isT));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (TLI->isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
                             getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
                                     &Ops[0], Ops.size()));
    return Result;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(Elt);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

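// Worked example of the expansion path above: on a little-endian MIPS32
// target, a v2i64 splat of 0x0000000100000002 is split into the i32 parts
// {0x2, 0x1}, replicated per element into a v4i32 BUILD_VECTOR
// <0x2, 0x1, 0x2, 0x1>, and then BITCAST back to v2i64.
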
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&V);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME SDLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TM.getTargetLowering()->getPointerTy().getSizeInBits();
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(FI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), 0, 0);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MBB);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (CondCodeNodes[Cond] == 0) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}

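// Example: commuting a shuffle of (A, B) with mask <0, 5, 2, 7> swaps the
// operands to (B, A) and rewrites the mask to <4, 1, 6, 3>, so the same
// elements are selected from the swapped inputs.
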
SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If Identity shuffle return that node.
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
  }
  if (Identity && NElts)
    return N1;

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it.  This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
                                            dl.getDebugLoc(), N1, N2,
                                            MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

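// Example of the canonicalizations above: getVectorShuffle(v4i32, dl, V, V,
// <0, 5, 2, 7>) first replaces the repeated RHS with undef and reduces the
// mask to <0, 1, 2, 3>, which is then recognized as the identity shuffle
// and simply returns V.
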
SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
                                                           dl.getDebugLoc(),
                                                           Ops, 5, Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
  ID.AddInteger(RegNo);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
  ID.AddPointer(RegMask);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
                                                dl.getDebugLoc(), Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(V);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MD);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), &Ops[0], 1);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
                                                      dl.getDebugLoc(),
                                                      VT, Ptr, SrcAS, DestAS);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getShiftAmountOperand - Return the specified value cast to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, SDLoc(Op), ShTy, Op);
}

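// Example: if the target reports an i32 shift amount type for i64 shifts,
// an i64 amount operand is truncated to i32 here, and an i8 amount is
// zero-extended; both are safe for in-range shift amounts.
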
/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned StackAlign =
    std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  const DataLayout *TD = TLI->getDataLayout();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, SDLoc dl) {
  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: {
    const TargetLowering *TLI = TM.getTargetLowering();
    TargetLowering::BooleanContent Cnt = TLI->getBooleanContents(VT.isVector());
    return getConstant(
        Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
  }

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
      }
    }
  }
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
      MVT CompVT = N1.getValueType().getSimpleVT();
      if (!TM.getTargetLowering()->isCondCodeLegal(SwappedCond, CompVT))
        return SDValue();

      return getSetCC(dl, VT, N2, N1, SwappedCond);
    }
  }

  // Could not fold it.
  return SDValue();
}

1730 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
1731 /// use this predicate to simplify operations downstream.
1732 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
1733 // This predicate is not safe for vector operations.
1734 if (Op.getValueType().isVector())
1737 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1738 return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
1741 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
1742 /// this predicate to simplify operations downstream. Mask is known to be zero
1743 /// for bits that V cannot have.
1744 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
1745 unsigned Depth) const {
1746 APInt KnownZero, KnownOne;
1747 ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
1748 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1749 return (KnownZero & Mask) == Mask;
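// For illustration (hypothetical i8 value): if Op is (X & 0xF0), the bit
// analysis below proves the low four bits zero, so a query with
// Mask == 0x0F returns true, while a query covering any high bit fails
// unless more is known about X.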
1752 /// ComputeMaskedBits - Determine which bits of Op are known to be either
1753 /// zero or one and return them in the KnownZero/KnownOne bitsets. The
1754 /// analysis recurses into the operands and is cut off at a maximum search
1755 /// depth to keep compile time bounded.
1756 void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
1757 APInt &KnownOne, unsigned Depth) const {
1758 const TargetLowering *TLI = TM.getTargetLowering();
1759 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1761 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
1763 return; // Limit search depth.
1765 APInt KnownZero2, KnownOne2;
1767 switch (Op.getOpcode()) {
1769 // We know all of the bits for a constant!
1770 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
1771 KnownZero = ~KnownOne;
1774 // If either the LHS or the RHS is zero, the result is zero.
1775 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1776 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1777 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1778 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1780 // Output known-1 bits are only known if set in both the LHS & RHS.
1781 KnownOne &= KnownOne2;
1782 // Output known-0 bits are known to be clear if zero in either the LHS | RHS.
1783 KnownZero |= KnownZero2;
1786 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1787 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1788 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1789 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1791 // Output known-0 bits are only known if clear in both the LHS & RHS.
1792 KnownZero &= KnownZero2;
1793 // Output known-1 bits are known to be set if set in either the LHS | RHS.
1794 KnownOne |= KnownOne2;
1797 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1798 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1799 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1800 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1802 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1803 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
1804 // Output known-1 bits are known to be set if set in only one of the LHS, RHS.
1805 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
1806 KnownZero = KnownZeroOut;
1810 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1811 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1812 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1813 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1815 // If low bits are zero in either operand, output low known-0 bits.
1816 // Also compute a conservative estimate for high known-0 bits.
1817 // More trickiness is possible, but this is sufficient for the
1818 // interesting case of alignment computation.
1819 KnownOne.clearAllBits();
1820 unsigned TrailZ = KnownZero.countTrailingOnes() +
1821 KnownZero2.countTrailingOnes();
1822 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
1823 KnownZero2.countLeadingOnes(),
1824 BitWidth) - BitWidth;
1826 TrailZ = std::min(TrailZ, BitWidth);
1827 LeadZ = std::min(LeadZ, BitWidth);
1828 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
1829 APInt::getHighBitsSet(BitWidth, LeadZ);
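// Worked example: if both multiplicands are known multiples of 4 (two
// trailing zeros each), TrailZ is 2 + 2 == 4, so the product is known to be
// a multiple of 16 and its low four bits are reported as zero.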
1833 // For the purposes of computing leading zeros we can conservatively
1834 // treat a udiv as a logical right shift by the power of 2 known to
1835 // be less than the denominator.
1836 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1837 unsigned LeadZ = KnownZero2.countLeadingOnes();
1839 KnownOne2.clearAllBits();
1840 KnownZero2.clearAllBits();
1841 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1842 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
1843 if (RHSUnknownLeadingOnes != BitWidth)
1844 LeadZ = std::min(BitWidth,
1845 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
1847 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
1851 ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
1852 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1853 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1854 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1856 // Only known if known in both the LHS and RHS.
1857 KnownOne &= KnownOne2;
1858 KnownZero &= KnownZero2;
1860 case ISD::SELECT_CC:
1861 ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
1862 ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
1863 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1864 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1866 // Only known if known in both the LHS and RHS.
1867 KnownOne &= KnownOne2;
1868 KnownZero &= KnownZero2;
1876 if (Op.getResNo() != 1)
1878 // The boolean result conforms to getBooleanContents. Fall through.
1880 // If we know the result of a setcc has the top bits zero, use this info.
1881 if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
1882 TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
1883 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
1886 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
1887 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1888 unsigned ShAmt = SA->getZExtValue();
1890 // If the shift count is an invalid immediate, don't do anything.
1891 if (ShAmt >= BitWidth)
1894 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1895 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1896 KnownZero <<= ShAmt;
1898 // low bits known zero.
1899 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
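// Worked example: for (shl X, 3) where X has its low four bits known zero,
// the propagated bits plus the shifted-in zeros make the low seven bits of
// the result known zero.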
1903 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1904 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1905 unsigned ShAmt = SA->getZExtValue();
1907 // If the shift count is an invalid immediate, don't do anything.
1908 if (ShAmt >= BitWidth)
1911 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1912 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1913 KnownZero = KnownZero.lshr(ShAmt);
1914 KnownOne = KnownOne.lshr(ShAmt);
1916 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
1917 KnownZero |= HighBits; // High bits known zero.
1921 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1922 unsigned ShAmt = SA->getZExtValue();
1924 // If the shift count is an invalid immediate, don't do anything.
1925 if (ShAmt >= BitWidth)
1928 // If any of the demanded bits are produced by the sign extension, we also
1929 // demand the input sign bit.
1930 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
1932 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1933 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1934 KnownZero = KnownZero.lshr(ShAmt);
1935 KnownOne = KnownOne.lshr(ShAmt);
1937 // Handle the sign bits.
1938 APInt SignBit = APInt::getSignBit(BitWidth);
1939 SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask.
1941 if (KnownZero.intersects(SignBit)) {
1942 KnownZero |= HighBits; // New bits are known zero.
1943 } else if (KnownOne.intersects(SignBit)) {
1944 KnownOne |= HighBits; // New bits are known one.
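// Worked example (i8): for (sra X, 3) where bit 7 of X is known one, the
// shifted copy of the sign bit is known one, so the three vacated high bits
// are reported as known one as well.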
1948 case ISD::SIGN_EXTEND_INREG: {
1949 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1950 unsigned EBits = EVT.getScalarType().getSizeInBits();
1952 // Sign extension. Compute the demanded bits in the result that are not
1953 // present in the input.
1954 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
1956 APInt InSignBit = APInt::getSignBit(EBits);
1957 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
1959 // If the sign extended bits are demanded, we know that the sign
1961 InSignBit = InSignBit.zext(BitWidth);
1962 if (NewBits.getBoolValue())
1963 InputDemandedBits |= InSignBit;
1965 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1966 KnownOne &= InputDemandedBits;
1967 KnownZero &= InputDemandedBits;
1968 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1970 // If the sign bit of the input is known set or clear, then we know the
1971 // top bits of the result.
1972 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
1973 KnownZero |= NewBits;
1974 KnownOne &= ~NewBits;
1975 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
1976 KnownOne |= NewBits;
1977 KnownZero &= ~NewBits;
1978 } else { // Input sign bit unknown
1979 KnownZero &= ~NewBits;
1980 KnownOne &= ~NewBits;
1985 case ISD::CTTZ_ZERO_UNDEF:
1987 case ISD::CTLZ_ZERO_UNDEF:
1989 unsigned LowBits = Log2_32(BitWidth)+1;
1990 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1991 KnownOne.clearAllBits();
1995 LoadSDNode *LD = cast<LoadSDNode>(Op);
1996 // If this is a ZEXTLoad and we are looking at the loaded value.
1997 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
1998 EVT VT = LD->getMemoryVT();
1999 unsigned MemBits = VT.getScalarType().getSizeInBits();
2000 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
2001 } else if (const MDNode *Ranges = LD->getRanges()) {
2002 computeMaskedBitsLoad(*Ranges, KnownZero);
2006 case ISD::ZERO_EXTEND: {
2007 EVT InVT = Op.getOperand(0).getValueType();
2008 unsigned InBits = InVT.getScalarType().getSizeInBits();
2009 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2010 KnownZero = KnownZero.trunc(InBits);
2011 KnownOne = KnownOne.trunc(InBits);
2012 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2013 KnownZero = KnownZero.zext(BitWidth);
2014 KnownOne = KnownOne.zext(BitWidth);
2015 KnownZero |= NewBits;
2018 case ISD::SIGN_EXTEND: {
2019 EVT InVT = Op.getOperand(0).getValueType();
2020 unsigned InBits = InVT.getScalarType().getSizeInBits();
2021 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2023 KnownZero = KnownZero.trunc(InBits);
2024 KnownOne = KnownOne.trunc(InBits);
2025 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2027 // Note if the sign bit is known to be zero or one.
2028 bool SignBitKnownZero = KnownZero.isNegative();
2029 bool SignBitKnownOne = KnownOne.isNegative();
2030 assert(!(SignBitKnownZero && SignBitKnownOne) &&
2031 "Sign bit can't be known to be both zero and one!");
2033 KnownZero = KnownZero.zext(BitWidth);
2034 KnownOne = KnownOne.zext(BitWidth);
2036 // If the sign bit is known zero or one, the top bits match.
2037 if (SignBitKnownZero)
2038 KnownZero |= NewBits;
2039 else if (SignBitKnownOne)
2040 KnownOne |= NewBits;
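// Worked example: sign-extending an i8 whose sign bit is known zero into an
// i32 makes the top 24 bits known zero; were the sign bit known one, those
// 24 bits would instead become known one.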
2043 case ISD::ANY_EXTEND: {
2044 EVT InVT = Op.getOperand(0).getValueType();
2045 unsigned InBits = InVT.getScalarType().getSizeInBits();
2046 KnownZero = KnownZero.trunc(InBits);
2047 KnownOne = KnownOne.trunc(InBits);
2048 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2049 KnownZero = KnownZero.zext(BitWidth);
2050 KnownOne = KnownOne.zext(BitWidth);
2053 case ISD::TRUNCATE: {
2054 EVT InVT = Op.getOperand(0).getValueType();
2055 unsigned InBits = InVT.getScalarType().getSizeInBits();
2056 KnownZero = KnownZero.zext(InBits);
2057 KnownOne = KnownOne.zext(InBits);
2058 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2059 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2060 KnownZero = KnownZero.trunc(BitWidth);
2061 KnownOne = KnownOne.trunc(BitWidth);
2064 case ISD::AssertZext: {
2065 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2066 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2067 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2068 KnownZero |= (~InMask);
2069 KnownOne &= (~KnownZero);
2073 // All bits are zero except the low bit.
2074 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2078 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
2079 // We know that the top bits of C-X are clear if X contains fewer bits
2080 // than C (i.e. no wrap-around can happen). For example, 20-X is
2081 // positive if we can prove that X is >= 0 and < 16.
2082 if (CLHS->getAPIntValue().isNonNegative()) {
2083 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2084 // NLZ can't be BitWidth with no sign bit
2085 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2086 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2088 // If all of the MaskV bits are known to be zero, then we know the
2089 // output top bits are zero, because we now know that the output is
2091 if ((KnownZero2 & MaskV) == MaskV) {
2092 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2093 // Top bits known zero.
2094 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
2102 // Output low known-0 bits are clear up to the number of low clear bits
2103 // common to both the LHS & RHS. For example, 8+(X<<3) is known to have the
2104 // low 3 bits clear.
2105 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2106 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
2107 unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
2109 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2110 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
2111 KnownZeroOut = std::min(KnownZeroOut,
2112 KnownZero2.countTrailingOnes());
2114 if (Op.getOpcode() == ISD::ADD) {
2115 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
2119 // With ADDE, a carry bit may be added in, so we can only use this
2120 // information if we know (at least) that the low two bits are clear. We
2121 // then return to the caller that the low bit is unknown but that other bits
2123 if (KnownZeroOut >= 2) // ADDE
2124 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
2128 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2129 const APInt &RA = Rem->getAPIntValue().abs();
2130 if (RA.isPowerOf2()) {
2131 APInt LowBits = RA - 1;
2132 ComputeMaskedBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);
2134 // The low bits of the first operand are unchanged by the srem.
2135 KnownZero = KnownZero2 & LowBits;
2136 KnownOne = KnownOne2 & LowBits;
2138 // If the first operand is non-negative or has all low bits zero, then
2139 // the upper bits are all zero.
2140 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2141 KnownZero |= ~LowBits;
2143 // If the first operand is negative and not all low bits are zero, then
2144 // the upper bits are all one.
2145 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2146 KnownOne |= ~LowBits;
2147 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
2152 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2153 const APInt &RA = Rem->getAPIntValue();
2154 if (RA.isPowerOf2()) {
2155 APInt LowBits = (RA - 1);
2156 KnownZero |= ~LowBits;
2157 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne,Depth+1);
2158 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
2163 // Since the result is less than or equal to either operand, any leading
2164 // zero bits in either operand must also exist in the result.
2165 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2166 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2168 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2169 KnownZero2.countLeadingOnes());
2170 KnownOne.clearAllBits();
2171 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2174 case ISD::FrameIndex:
2175 case ISD::TargetFrameIndex:
2176 if (unsigned Align = InferPtrAlignment(Op)) {
2177 // The low bits are known zero if the pointer is aligned.
2178 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
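// Worked example: a frame index with 16-byte alignment gives
// Log2_32(16) == 4, so the low four bits of the pointer are known zero.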
2184 if (Op.getOpcode() < ISD::BUILTIN_OP_END)
2187 case ISD::INTRINSIC_WO_CHAIN:
2188 case ISD::INTRINSIC_W_CHAIN:
2189 case ISD::INTRINSIC_VOID:
2190 // Allow the target to implement this method for its nodes.
2191 TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2196 /// ComputeNumSignBits - Return the number of times the sign bit of the
2197 /// register is replicated into the other bits. We know that at least 1 bit
2198 /// is always equal to the sign bit (itself), but other cases can give us
2199 /// information. For example, immediately after an "SRA X, 2", we know that
2200 /// the top 3 bits are all equal to each other, so we return 3.
2201 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
2202 const TargetLowering *TLI = TM.getTargetLowering();
2203 EVT VT = Op.getValueType();
2204 assert(VT.isInteger() && "Invalid VT!");
2205 unsigned VTBits = VT.getScalarType().getSizeInBits();
2207 unsigned FirstAnswer = 1;
2210 return 1; // Limit search depth.
2212 switch (Op.getOpcode()) {
2214 case ISD::AssertSext:
2215 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2216 return VTBits-Tmp+1;
2217 case ISD::AssertZext:
2218 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2221 case ISD::Constant: {
2222 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2223 return Val.getNumSignBits();
2226 case ISD::SIGN_EXTEND:
2228 VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
2229 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2231 case ISD::SIGN_EXTEND_INREG:
2232 // Max of the input and what this extends.
2234 cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
2237 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2238 return std::max(Tmp, Tmp2);
2241 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2242 // SRA X, C -> adds C sign bits.
2243 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2244 Tmp += C->getZExtValue();
2245 if (Tmp > VTBits) Tmp = VTBits;
2249 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2250 // shl destroys sign bits.
2251 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2252 if (C->getZExtValue() >= VTBits || // Bad shift.
2253 C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
2254 return Tmp - C->getZExtValue();
2259 case ISD::XOR: // NOT is handled here.
2260 // Logical binary ops preserve the number of sign bits at the worst.
2261 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2263 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2264 FirstAnswer = std::min(Tmp, Tmp2);
2265 // We computed what we know about the sign bits as our first
2266 // answer. Now proceed to the generic code that uses
2267 // ComputeMaskedBits, and pick whichever answer is better.
2272 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2273 if (Tmp == 1) return 1; // Early out.
2274 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2275 return std::min(Tmp, Tmp2);
2283 if (Op.getResNo() != 1)
2285 // The boolean result conforms to getBooleanContents. Fall through.
2287 // If setcc returns 0/-1, all bits are sign bits.
2288 if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
2289 TargetLowering::ZeroOrNegativeOneBooleanContent)
2294 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2295 unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2297 // Handle rotate right by N like a rotate left by 32-N.
2298 if (Op.getOpcode() == ISD::ROTR)
2299 RotAmt = (VTBits-RotAmt) & (VTBits-1);
2301 // If we aren't rotating out all of the known-in sign bits, return the
2302 // number that are left. This handles rotl(sext(x), 1) for example.
2303 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2304 if (Tmp > RotAmt+1) return Tmp-RotAmt;
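// Worked example: rotl(sext i8 X to i32, 1) has at least 25 sign bits
// coming in; since 25 > RotAmt+1 == 2, one copy is rotated out, leaving at
// least 24 identical top bits.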
2308 // Add can have at most one carry bit. Thus we know that the output
2309 // is, at worst, one more bit than the inputs.
2310 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2311 if (Tmp == 1) return 1; // Early out.
2313 // Special case decrementing a value (ADD X, -1):
2314 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2315 if (CRHS->isAllOnesValue()) {
2316 APInt KnownZero, KnownOne;
2317 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2319 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2321 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2324 // If we are subtracting one from a positive number, there is no carry
2325 // out of the result.
2326 if (KnownZero.isNegative())
2330 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2331 if (Tmp2 == 1) return 1;
2332 return std::min(Tmp, Tmp2)-1;
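// Worked example: adding two i32 values that each carry at least 8 sign
// bits can spill a carry into at most one of them, so the sum still carries
// at least 7.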
2335 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2336 if (Tmp2 == 1) return 1;
2339 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2340 if (CLHS->isNullValue()) {
2341 APInt KnownZero, KnownOne;
2342 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2343 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2345 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2348 // If the input is known to be positive (the sign bit is known clear),
2349 // the output of the NEG has the same number of sign bits as the input.
2350 if (KnownZero.isNegative())
2353 // Otherwise, we treat this like a SUB.
2356 // Sub can have at most one carry bit. Thus we know that the output
2357 // is, at worst, one more bit than the inputs.
2358 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2359 if (Tmp == 1) return 1; // Early out.
2360 return std::min(Tmp, Tmp2)-1;
2362 // FIXME: it's tricky to do anything useful for this, but it is an important
2363 // case for targets like X86.
2367 // If we are looking at the loaded value of the SDNode.
2368 if (Op.getResNo() == 0) {
2369 // Handle LOADX separately here. EXTLOAD case will fall through.
2370 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2371 unsigned ExtType = LD->getExtensionType();
2374 case ISD::SEXTLOAD: // '17' bits known
2375 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2376 return VTBits-Tmp+1;
2377 case ISD::ZEXTLOAD: // '16' bits known
2378 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2384 // Allow the target to implement this method for its nodes.
2385 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2386 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2387 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2388 Op.getOpcode() == ISD::INTRINSIC_VOID) {
2389 unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, Depth);
2390 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2393 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2394 // use this information.
2395 APInt KnownZero, KnownOne;
2396 ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
2399 if (KnownZero.isNegative()) { // sign bit is 0
2401 } else if (KnownOne.isNegative()) { // sign bit is 1
2408 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
2409 // the number of identical bits in the top of the input value.
2411 Mask <<= Mask.getBitWidth()-VTBits;
2412 // Return # leading zeros. We use 'min' here in case Val was zero before
2413 // shifting. We don't want to return '64' as for an i32 "0".
2414 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
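// A minimal usage sketch (hypothetical caller, not part of this file): a
// target can test whether an i32 value is sign-representable in 16 bits by
// asking for at least 32 - 16 + 1 == 17 replicated sign bits:
//
//   if (DAG.ComputeNumSignBits(Op) >= 17)
//     ; // Op behaves like a sign-extended i16 here.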
2417 /// isBaseWithConstantOffset - Return true if the specified operand is an
2418 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
2419 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
2420 /// semantics as an ADD. This handles the equivalence:
2421 /// X|Cst == X+Cst iff X&Cst = 0.
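/// For example (hypothetical values): if the low two bits of X are known
/// zero, then (X | 3) computes exactly X + 3, so the OR may be treated as a
/// base-plus-constant-offset address.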
2422 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
2423 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
2424 !isa<ConstantSDNode>(Op.getOperand(1)))
2427 if (Op.getOpcode() == ISD::OR &&
2428 !MaskedValueIsZero(Op.getOperand(0),
2429 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
2436 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
2437 // If we're told that NaNs won't happen, assume they won't.
2438 if (getTarget().Options.NoNaNsFPMath)
2441 // If the value is a constant, we can obviously see if it is a NaN or not.
2442 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2443 return !C->getValueAPF().isNaN();
2445 // TODO: Recognize more cases here.
2450 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
2451 // If the value is a constant, we can obviously see if it is a zero or not.
2452 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2453 return !C->isZero();
2455 // TODO: Recognize more cases here.
2456 switch (Op.getOpcode()) {
2459 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2460 return !C->isNullValue();
2467 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
2468 // Check the obvious case.
2469 if (A == B) return true;
2471 // Check for negative and positive zero.
2472 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
2473 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
2474 if (CA->isZero() && CB->isZero()) return true;
2476 // Otherwise they may not be equal.
2480 /// getNode - Gets or creates the specified node.
2482 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
2483 FoldingSetNodeID ID;
2484 AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
2486 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2487 return SDValue(E, 0);
2489 SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
2490 DL.getDebugLoc(), getVTList(VT));
2491 CSEMap.InsertNode(N, IP);
2493 AllNodes.push_back(N);
2497 return SDValue(N, 0);
2500 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
2501 EVT VT, SDValue Operand) {
2502 // Constant fold unary operations with an integer constant operand.
2503 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
2504 const APInt &Val = C->getAPIntValue();
2507 case ISD::SIGN_EXTEND:
2508 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT);
2509 case ISD::ANY_EXTEND:
2510 case ISD::ZERO_EXTEND:
2512 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
2513 case ISD::UINT_TO_FP:
2514 case ISD::SINT_TO_FP: {
2515 APFloat apf(EVTToAPFloatSemantics(VT),
2516 APInt::getNullValue(VT.getSizeInBits()));
2517 (void)apf.convertFromAPInt(Val,
2518 Opcode==ISD::SINT_TO_FP,
2519 APFloat::rmNearestTiesToEven);
2520 return getConstantFP(apf, VT);
2523 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2524 return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
2525 else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2526 return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
2529 return getConstant(Val.byteSwap(), VT);
2531 return getConstant(Val.countPopulation(), VT);
2533 case ISD::CTLZ_ZERO_UNDEF:
2534 return getConstant(Val.countLeadingZeros(), VT);
2536 case ISD::CTTZ_ZERO_UNDEF:
2537 return getConstant(Val.countTrailingZeros(), VT);
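// Worked examples of the integer folds above (i32): BSWAP 0x12345678 gives
// 0x78563412, CTPOP 0x000000F0 gives 4, CTLZ 0x00010000 gives 15, and
// CTTZ 0x00000008 gives 3.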
2541 // Constant fold unary operations with a floating point constant operand.
2542 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
2543 APFloat V = C->getValueAPF(); // make copy
2547 return getConstantFP(V, VT);
2550 return getConstantFP(V, VT);
2552 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
2553 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2554 return getConstantFP(V, VT);
2558 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
2559 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2560 return getConstantFP(V, VT);
2564 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
2565 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2566 return getConstantFP(V, VT);
2569 case ISD::FP_EXTEND: {
2571 // This can return overflow, underflow, or inexact; we don't care.
2572 // FIXME need to be more flexible about rounding mode.
2573 (void)V.convert(EVTToAPFloatSemantics(VT),
2574 APFloat::rmNearestTiesToEven, &ignored);
2575 return getConstantFP(V, VT);
2577 case ISD::FP_TO_SINT:
2578 case ISD::FP_TO_UINT: {
2581 assert(integerPartWidth >= 64);
2582 // FIXME need to be more flexible about rounding mode.
2583 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2584 Opcode==ISD::FP_TO_SINT,
2585 APFloat::rmTowardZero, &ignored);
2586 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
2588 APInt api(VT.getSizeInBits(), x);
2589 return getConstant(api, VT);
2592 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2593 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
2594 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2595 return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
2600 unsigned OpOpcode = Operand.getNode()->getOpcode();
2602 case ISD::TokenFactor:
2603 case ISD::MERGE_VALUES:
2604 case ISD::CONCAT_VECTORS:
2605 return Operand; // Factor, merge or concat of one node? No need.
2606 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
2607 case ISD::FP_EXTEND:
2608 assert(VT.isFloatingPoint() &&
2609 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
2610 if (Operand.getValueType() == VT) return Operand; // noop conversion.
2611 assert((!VT.isVector() ||
2612 VT.getVectorNumElements() ==
2613 Operand.getValueType().getVectorNumElements()) &&
2614 "Vector element count mismatch!");
2615 if (Operand.getOpcode() == ISD::UNDEF)
2616 return getUNDEF(VT);
2618 case ISD::SIGN_EXTEND:
2619 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2620 "Invalid SIGN_EXTEND!");
2621 if (Operand.getValueType() == VT) return Operand; // noop extension
2622 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2623 "Invalid sext node, dst < src!");
2624 assert((!VT.isVector() ||
2625 VT.getVectorNumElements() ==
2626 Operand.getValueType().getVectorNumElements()) &&
2627 "Vector element count mismatch!");
2628 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
2629 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2630 else if (OpOpcode == ISD::UNDEF)
2631 // sext(undef) = 0, because the top bits will all be the same.
2632 return getConstant(0, VT);
2634 case ISD::ZERO_EXTEND:
2635 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2636 "Invalid ZERO_EXTEND!");
2637 if (Operand.getValueType() == VT) return Operand; // noop extension
2638 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2639 "Invalid zext node, dst < src!");
2640 assert((!VT.isVector() ||
2641 VT.getVectorNumElements() ==
2642 Operand.getValueType().getVectorNumElements()) &&
2643 "Vector element count mismatch!");
2644 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
2645 return getNode(ISD::ZERO_EXTEND, DL, VT,
2646 Operand.getNode()->getOperand(0));
2647 else if (OpOpcode == ISD::UNDEF)
2648 // zext(undef) = 0, because the top bits will be zero.
2649 return getConstant(0, VT);
2651 case ISD::ANY_EXTEND:
2652 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2653 "Invalid ANY_EXTEND!");
2654 if (Operand.getValueType() == VT) return Operand; // noop extension
2655 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2656 "Invalid anyext node, dst < src!");
2657 assert((!VT.isVector() ||
2658 VT.getVectorNumElements() ==
2659 Operand.getValueType().getVectorNumElements()) &&
2660 "Vector element count mismatch!");
2662 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2663 OpOpcode == ISD::ANY_EXTEND)
2664 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
2665 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2666 else if (OpOpcode == ISD::UNDEF)
2667 return getUNDEF(VT);
2669 // (ext (trunc x)) -> x
2670 if (OpOpcode == ISD::TRUNCATE) {
2671 SDValue OpOp = Operand.getNode()->getOperand(0);
2672 if (OpOp.getValueType() == VT)
2677 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2678 "Invalid TRUNCATE!");
2679 if (Operand.getValueType() == VT) return Operand; // noop truncate
2680 assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
2681 "Invalid truncate node, src < dst!");
2682 assert((!VT.isVector() ||
2683 VT.getVectorNumElements() ==
2684 Operand.getValueType().getVectorNumElements()) &&
2685 "Vector element count mismatch!");
2686 if (OpOpcode == ISD::TRUNCATE)
2687 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2688 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2689 OpOpcode == ISD::ANY_EXTEND) {
2690 // If the source is smaller than the dest, we still need an extend.
2691 if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
2692 .bitsLT(VT.getScalarType()))
2693 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2694 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
2695 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2696 return Operand.getNode()->getOperand(0);
2698 if (OpOpcode == ISD::UNDEF)
2699 return getUNDEF(VT);
2702 // Basic sanity checking.
2703 assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
2704 && "Cannot BITCAST between types of different sizes!");
2705 if (VT == Operand.getValueType()) return Operand; // noop conversion.
2706 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
2707 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
2708 if (OpOpcode == ISD::UNDEF)
2709 return getUNDEF(VT);
2711 case ISD::SCALAR_TO_VECTOR:
2712 assert(VT.isVector() && !Operand.getValueType().isVector() &&
2713 (VT.getVectorElementType() == Operand.getValueType() ||
2714 (VT.getVectorElementType().isInteger() &&
2715 Operand.getValueType().isInteger() &&
2716 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
2717 "Illegal SCALAR_TO_VECTOR node!");
2718 if (OpOpcode == ISD::UNDEF)
2719 return getUNDEF(VT);
2720 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
2721 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
2722 isa<ConstantSDNode>(Operand.getOperand(1)) &&
2723 Operand.getConstantOperandVal(1) == 0 &&
2724 Operand.getOperand(0).getValueType() == VT)
2725 return Operand.getOperand(0);
2728 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
2729 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
2730 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
2731 Operand.getNode()->getOperand(0));
2732 if (OpOpcode == ISD::FNEG) // --X -> X
2733 return Operand.getNode()->getOperand(0);
2736 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
2737 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
2742 SDVTList VTs = getVTList(VT);
2743 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
2744 FoldingSetNodeID ID;
2745 SDValue Ops[1] = { Operand };
2746 AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
2748 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2749 return SDValue(E, 0);
2751 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
2752 DL.getDebugLoc(), VTs, Operand);
2753 CSEMap.InsertNode(N, IP);
2755 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
2756 DL.getDebugLoc(), VTs, Operand);
2759 AllNodes.push_back(N);
2763 return SDValue(N, 0);
2766 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
2767 SDNode *Cst1, SDNode *Cst2) {
2768 SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
2769 SmallVector<SDValue, 4> Outputs;
2770 EVT SVT = VT.getScalarType();
2772 ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
2773 ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
2774 if (Scalar1 && Scalar2) {
2775 // Scalar instruction.
2776 Inputs.push_back(std::make_pair(Scalar1, Scalar2));
2778 // For vectors, extract each constant element into Inputs so we can constant
2779 // fold them individually.
2780 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
2781 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
2785 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
2787 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
2788 ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
2789 ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
2790 if (!V1 || !V2) // Not a constant, bail.
2793 // Avoid BUILD_VECTOR nodes that perform implicit truncation.
2794 // FIXME: This is valid and could be handled by truncating the APInts.
2795 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
2798 Inputs.push_back(std::make_pair(V1, V2));
2802 // We have a number of constant values; constant fold them element by element.
2803 for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
2804 const APInt &C1 = Inputs[I].first->getAPIntValue();
2805 const APInt &C2 = Inputs[I].second->getAPIntValue();
2809 Outputs.push_back(getConstant(C1 + C2, SVT));
2812 Outputs.push_back(getConstant(C1 - C2, SVT));
2815 Outputs.push_back(getConstant(C1 * C2, SVT));
2818 if (!C2.getBoolValue())
2820 Outputs.push_back(getConstant(C1.udiv(C2), SVT));
2823 if (!C2.getBoolValue())
2825 Outputs.push_back(getConstant(C1.urem(C2), SVT));
2828 if (!C2.getBoolValue())
2830 Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
2833 if (!C2.getBoolValue())
2835 Outputs.push_back(getConstant(C1.srem(C2), SVT));
2838 Outputs.push_back(getConstant(C1 & C2, SVT));
2841 Outputs.push_back(getConstant(C1 | C2, SVT));
2844 Outputs.push_back(getConstant(C1 ^ C2, SVT));
2847 Outputs.push_back(getConstant(C1 << C2, SVT));
2850 Outputs.push_back(getConstant(C1.lshr(C2), SVT));
2853 Outputs.push_back(getConstant(C1.ashr(C2), SVT));
2856 Outputs.push_back(getConstant(C1.rotl(C2), SVT));
2859 Outputs.push_back(getConstant(C1.rotr(C2), SVT));
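// Worked example: folding (add <2 x i32> <1, 2>, <3, 4>) runs the loop
// above once per element pair, producing the constants 4 and 6, which are
// reassembled into a BUILD_VECTOR below.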
2866 // Handle the scalar case first.
2867 if (Scalar1 && Scalar2)
2868 return Outputs.back();
2870 // Otherwise build a big vector out of the scalar elements we generated.
2871 return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs.data(),
2875 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
2877 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
2878 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
2881 case ISD::TokenFactor:
2882 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
2883 N2.getValueType() == MVT::Other && "Invalid token factor!");
2884 // Fold trivial token factors.
2885 if (N1.getOpcode() == ISD::EntryToken) return N2;
2886 if (N2.getOpcode() == ISD::EntryToken) return N1;
2887 if (N1 == N2) return N1;
2889 case ISD::CONCAT_VECTORS:
2890 // Concat of UNDEFs is UNDEF.
2891 if (N1.getOpcode() == ISD::UNDEF &&
2892 N2.getOpcode() == ISD::UNDEF)
2893 return getUNDEF(VT);
2895 // A CONCAT_VECTORS with all operands BUILD_VECTOR can be simplified to
2896 // one big BUILD_VECTOR.
2897 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
2898 N2.getOpcode() == ISD::BUILD_VECTOR) {
2899 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
2900 N1.getNode()->op_end());
2901 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
2902 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
2906 assert(VT.isInteger() && "This operator does not apply to FP types!");
2907 assert(N1.getValueType() == N2.getValueType() &&
2908 N1.getValueType() == VT && "Binary operator types must match!");
2909 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
2910 // worth handling here.
2911 if (N2C && N2C->isNullValue())
2913 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
2920 assert(VT.isInteger() && "This operator does not apply to FP types!");
2921 assert(N1.getValueType() == N2.getValueType() &&
2922 N1.getValueType() == VT && "Binary operator types must match!");
2923 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
2924 // it's worth handling here.
2925 if (N2C && N2C->isNullValue())
2935 assert(VT.isInteger() && "This operator does not apply to FP types!");
2936 assert(N1.getValueType() == N2.getValueType() &&
2937 N1.getValueType() == VT && "Binary operator types must match!");
2944 if (getTarget().Options.UnsafeFPMath) {
2945 if (Opcode == ISD::FADD) {
2947 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
2948 if (CFP->getValueAPF().isZero())
2951 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2952 if (CFP->getValueAPF().isZero())
2954 } else if (Opcode == ISD::FSUB) {
2956 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2957 if (CFP->getValueAPF().isZero())
2959 } else if (Opcode == ISD::FMUL) {
2960 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
2963 // If the first operand isn't the constant, try the second
2965 CFP = dyn_cast<ConstantFPSDNode>(N2);
2972 return SDValue(CFP,0);
2974 if (CFP->isExactlyValue(1.0))
2979 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
2980 assert(N1.getValueType() == N2.getValueType() &&
2981 N1.getValueType() == VT && "Binary operator types must match!");
2983 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
2984 assert(N1.getValueType() == VT &&
2985 N1.getValueType().isFloatingPoint() &&
2986 N2.getValueType().isFloatingPoint() &&
2987 "Invalid FCOPYSIGN!");
2994 assert(VT == N1.getValueType() &&
2995 "Shift operators return type must be the same as their first arg");
2996 assert(VT.isInteger() && N2.getValueType().isInteger() &&
2997 "Shifts only work on integers");
2998 assert((!VT.isVector() || VT == N2.getValueType()) &&
2999 "Vector shift amounts must be in the same as their first arg");
3000 // Verify that the shift amount VT is big enough to hold valid shift
3001 // amounts. This catches things like trying to shift an i1024 value by an
3002 // i8, which is easy to fall into in generic code that uses
3003 // TLI.getShiftAmount().
3004 assert(N2.getValueType().getSizeInBits() >=
3005 Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
3006 "Invalid use of small shift amount with oversized value!");
3008 // Always fold shifts of i1 values so the code generator doesn't need to
3009 // handle them. Since we know the size of the shift has to be less than the
3010 // size of the value, the shift/rotate count is guaranteed to be zero.
3013 if (N2C && N2C->isNullValue())
3016 case ISD::FP_ROUND_INREG: {
3017 EVT EVT = cast<VTSDNode>(N2)->getVT();
3018 assert(VT == N1.getValueType() && "Not an inreg round!");
3019 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
3020 "Cannot FP_ROUND_INREG integer types");
3021 assert(EVT.isVector() == VT.isVector() &&
3022 "FP_ROUND_INREG type should be vector iff the operand "
3024 assert((!EVT.isVector() ||
3025 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3026 "Vector element counts must match in FP_ROUND_INREG");
3027 assert(EVT.bitsLE(VT) && "Not rounding down!");
3029 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
3033 assert(VT.isFloatingPoint() &&
3034 N1.getValueType().isFloatingPoint() &&
3035 VT.bitsLE(N1.getValueType()) &&
3036 isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
3037 if (N1.getValueType() == VT) return N1; // noop conversion.
3039 case ISD::AssertSext:
3040 case ISD::AssertZext: {
3041 EVT EVT = cast<VTSDNode>(N2)->getVT();
3042 assert(VT == N1.getValueType() && "Not an inreg extend!");
3043 assert(VT.isInteger() && EVT.isInteger() &&
3044 "Cannot *_EXTEND_INREG FP types");
3045 assert(!EVT.isVector() &&
3046 "AssertSExt/AssertZExt type should be the vector element type "
3047 "rather than the vector type!");
3048 assert(EVT.bitsLE(VT) && "Not extending!");
3049 if (VT == EVT) return N1; // noop assertion.
3052 case ISD::SIGN_EXTEND_INREG: {
3053 EVT EVT = cast<VTSDNode>(N2)->getVT();
3054 assert(VT == N1.getValueType() && "Not an inreg extend!");
3055 assert(VT.isInteger() && EVT.isInteger() &&
3056 "Cannot *_EXTEND_INREG FP types");
3057 assert(EVT.isVector() == VT.isVector() &&
3058 "SIGN_EXTEND_INREG type should be vector iff the operand "
3060 assert((!EVT.isVector() ||
3061 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3062 "Vector element counts must match in SIGN_EXTEND_INREG");
3063 assert(EVT.bitsLE(VT) && "Not extending!");
3064 if (EVT == VT) return N1; // Not actually extending
3067 APInt Val = N1C->getAPIntValue();
3068 unsigned FromBits = EVT.getScalarType().getSizeInBits();
3069 Val <<= Val.getBitWidth()-FromBits;
3070 Val = Val.ashr(Val.getBitWidth()-FromBits);
3071 return getConstant(Val, VT);
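// Worked example: for an i32 holding 0x000000B0 sign-extended in-reg from
// i8, the left shift by 24 yields 0xB0000000 and the arithmetic right shift
// by 24 yields 0xFFFFFFB0, replicating the byte's sign bit.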
3075 case ISD::EXTRACT_VECTOR_ELT:
3076 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
3077 if (N1.getOpcode() == ISD::UNDEF)
3078 return getUNDEF(VT);
3080 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
3081 // expanding copies of large vectors from registers.
3083 N1.getOpcode() == ISD::CONCAT_VECTORS &&
3084 N1.getNumOperands() > 0) {
3086 N1.getOperand(0).getValueType().getVectorNumElements();
3087 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3088 N1.getOperand(N2C->getZExtValue() / Factor),
3089 getConstant(N2C->getZExtValue() % Factor,
3090 N2.getValueType()));
3093 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3094 // expanding large vector constants.
3095 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3096 SDValue Elt = N1.getOperand(N2C->getZExtValue());
3098 if (VT != Elt.getValueType())
3099 // If the vector element type is not legal, the BUILD_VECTOR operands
3100 // are promoted and implicitly truncated, and the result implicitly
3101 // extended. Make that explicit here.
3102 Elt = getAnyExtOrTrunc(Elt, DL, VT);
3107 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3108 // operations are lowered to scalars.
3109 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3110 // If the indices are the same, return the inserted element else
3111 // if the indices are known different, extract the element from
3112 // the original vector.
3113 SDValue N1Op2 = N1.getOperand(2);
3114 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
3116 if (N1Op2C && N2C) {
3117 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3118 if (VT == N1.getOperand(1).getValueType())
3119 return N1.getOperand(1);
3121 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3124 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3128 case ISD::EXTRACT_ELEMENT:
3129 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3130 assert(!N1.getValueType().isVector() && !VT.isVector() &&
3131 (N1.getValueType().isInteger() == VT.isInteger()) &&
3132 N1.getValueType() != VT &&
3133 "Wrong types for EXTRACT_ELEMENT!");
3135 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3136 // 64-bit integers into 32-bit parts. Instead of building the extract of
3137 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3138 if (N1.getOpcode() == ISD::BUILD_PAIR)
3139 return N1.getOperand(N2C->getZExtValue());
3141 // EXTRACT_ELEMENT of a constant int is also very common.
3142 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
3143 unsigned ElementSize = VT.getSizeInBits();
3144 unsigned Shift = ElementSize * N2C->getZExtValue();
3145 APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
3146 return getConstant(ShiftedVal.trunc(ElementSize), VT);
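// Worked example: extracting element 1 of the i64 constant
// 0x1122334455667788 as an i32 shifts right by 32 and truncates, giving
// 0x11223344; element 0 gives the low half, 0x55667788.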
3149 case ISD::EXTRACT_SUBVECTOR: {
3151 if (VT.isSimple() && N1.getValueType().isSimple()) {
3152 assert(VT.isVector() && N1.getValueType().isVector() &&
3153 "Extract subvector VTs must be a vectors!");
3154 assert(VT.getVectorElementType() ==
3155 N1.getValueType().getVectorElementType() &&
3156 "Extract subvector VTs must have the same element type!");
3157 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
3158 "Extract subvector must be from larger vector to smaller vector!");
3160 if (isa<ConstantSDNode>(Index.getNode())) {
3161 assert((VT.getVectorNumElements() +
3162 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3163 <= N1.getValueType().getVectorNumElements())
3164 && "Extract subvector overflow!");
3167 // Trivial extraction.
3168 if (VT.getSimpleVT() == N1.getSimpleValueType())
3175 // Perform trivial constant folding.
3176 SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
3177 if (SV.getNode()) return SV;
3179 // Canonicalize constant to RHS if commutative.
3180 if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
3181 std::swap(N1C, N2C);
3185 // Constant fold FP operations.
3186 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
3187 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
3189 if (!N2CFP && isCommutativeBinOp(Opcode)) {
3190 // Canonicalize constant to RHS if commutative.
3191 std::swap(N1CFP, N2CFP);
3194 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
3195 APFloat::opStatus s;
3198 s = V1.add(V2, APFloat::rmNearestTiesToEven);
3199 if (s != APFloat::opInvalidOp)
3200 return getConstantFP(V1, VT);
3203 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
3204 if (s!=APFloat::opInvalidOp)
3205 return getConstantFP(V1, VT);
3208 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
3209 if (s!=APFloat::opInvalidOp)
3210 return getConstantFP(V1, VT);
3213 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
3214 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3215 return getConstantFP(V1, VT);
3218 s = V1.mod(V2, APFloat::rmNearestTiesToEven);
3219 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3220 return getConstantFP(V1, VT);
3222 case ISD::FCOPYSIGN:
3224 return getConstantFP(V1, VT);
3229 if (Opcode == ISD::FP_ROUND) {
3230 APFloat V = N1CFP->getValueAPF(); // make copy
3232 // This can return overflow, underflow, or inexact; we don't care.
3233 // FIXME need to be more flexible about rounding mode.
3234 (void)V.convert(EVTToAPFloatSemantics(VT),
3235 APFloat::rmNearestTiesToEven, &ignored);
3236 return getConstantFP(V, VT);
3240 // Canonicalize an UNDEF to the RHS, even over a constant.
3241 if (N1.getOpcode() == ISD::UNDEF) {
3242 if (isCommutativeBinOp(Opcode)) {
3246 case ISD::FP_ROUND_INREG:
3247 case ISD::SIGN_EXTEND_INREG:
3253 return N1; // fold op(undef, arg2) -> undef
3261 return getConstant(0, VT); // fold op(undef, arg2) -> 0
3262 // For vectors, we can't easily build an all zero vector, just return
3269 // Fold a bunch of operators when the RHS is undef.
3270 if (N2.getOpcode() == ISD::UNDEF) {
3273 if (N1.getOpcode() == ISD::UNDEF)
3274 // Handle undef ^ undef -> 0 special case. This is a common
3276 return getConstant(0, VT);
3286 return N2; // fold op(arg1, undef) -> undef
3292 if (getTarget().Options.UnsafeFPMath)
3300 return getConstant(0, VT); // fold op(arg1, undef) -> 0
3301 // For vectors, we can't easily build an all zero vector, just return
3306 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
3307 // For vectors, we can't easily build an all one vector, just return
3315 // Memoize this node if possible.
3317 SDVTList VTs = getVTList(VT);
3318 if (VT != MVT::Glue) {
3319 SDValue Ops[] = { N1, N2 };
3320 FoldingSetNodeID ID;
3321 AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
3323 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3324 return SDValue(E, 0);
3326 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
3327 DL.getDebugLoc(), VTs, N1, N2);
3328 CSEMap.InsertNode(N, IP);
3330 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
3331 DL.getDebugLoc(), VTs, N1, N2);
3334 AllNodes.push_back(N);
3338 return SDValue(N, 0);
3341 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3342 SDValue N1, SDValue N2, SDValue N3) {
3343 // Perform various simplifications.
3344 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3347 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3348 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3349 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
3350 if (N1CFP && N2CFP && N3CFP) {
3351 APFloat V1 = N1CFP->getValueAPF();
3352 const APFloat &V2 = N2CFP->getValueAPF();
3353 const APFloat &V3 = N3CFP->getValueAPF();
3354 APFloat::opStatus s =
3355 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
3356 if (s != APFloat::opInvalidOp)
3357 return getConstantFP(V1, VT);
3361 case ISD::CONCAT_VECTORS:
3362 // A CONCAT_VECTORS with all operands BUILD_VECTOR can be simplified to
3363 // one big BUILD_VECTOR.
3364 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3365 N2.getOpcode() == ISD::BUILD_VECTOR &&
3366 N3.getOpcode() == ISD::BUILD_VECTOR) {
3367 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3368 N1.getNode()->op_end());
3369 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3370 Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
3371 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
3375 // Use FoldSetCC to simplify SETCC's.
3376 SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
3377 if (Simp.getNode()) return Simp;
3382 if (N1C->getZExtValue())
3383 return N2; // select true, X, Y -> X
3384 return N3; // select false, X, Y -> Y
3387 if (N2 == N3) return N2; // select C, X, X -> X
3389 case ISD::VECTOR_SHUFFLE:
3390 llvm_unreachable("should use getVectorShuffle constructor!");
3391 case ISD::INSERT_SUBVECTOR: {
3393 if (VT.isSimple() && N1.getValueType().isSimple()
3394 && N2.getValueType().isSimple()) {
3395 assert(VT.isVector() && N1.getValueType().isVector() &&
3396 N2.getValueType().isVector() &&
3397 "Insert subvector VTs must be a vectors");
3398 assert(VT == N1.getValueType() &&
3399 "Dest and insert subvector source types must match!");
3400 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
3401 "Insert subvector must be from smaller vector to larger vector!");
3402 if (isa<ConstantSDNode>(Index.getNode())) {
3403 assert((N2.getValueType().getVectorNumElements() +
3404 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3405 <= VT.getVectorNumElements())
3406 && "Insert subvector overflow!");
3409 // Trivial insertion.
3410 if (VT.getSimpleVT() == N2.getSimpleValueType())
3416 // Fold bit_convert nodes from a type to themselves.
3417 if (N1.getValueType() == VT)
3422 // Memoize node if it doesn't produce a flag.
3424 SDVTList VTs = getVTList(VT);
3425 if (VT != MVT::Glue) {
3426 SDValue Ops[] = { N1, N2, N3 };
3427 FoldingSetNodeID ID;
3428 AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
3430 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3431 return SDValue(E, 0);
3433 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3434 DL.getDebugLoc(), VTs, N1, N2, N3);
3435 CSEMap.InsertNode(N, IP);
3437 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3438 DL.getDebugLoc(), VTs, N1, N2, N3);
3441 AllNodes.push_back(N);
3445 return SDValue(N, 0);
3448 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3449 SDValue N1, SDValue N2, SDValue N3,
3451 SDValue Ops[] = { N1, N2, N3, N4 };
3452 return getNode(Opcode, DL, VT, Ops, 4);
3455 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3456 SDValue N1, SDValue N2, SDValue N3,
3457 SDValue N4, SDValue N5) {
3458 SDValue Ops[] = { N1, N2, N3, N4, N5 };
3459 return getNode(Opcode, DL, VT, Ops, 5);
3462 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3463 /// the incoming stack arguments to be loaded from the stack.
3464 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3465 SmallVector<SDValue, 8> ArgChains;
3467 // Include the original chain at the beginning of the list. When this is
3468 // used by target LowerCall hooks, this helps legalize find the
3469 // CALLSEQ_BEGIN node.
3470 ArgChains.push_back(Chain);
3472 // Add a chain value for each stack argument.
3473 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3474 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3475 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3476 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3477 if (FI->getIndex() < 0)
3478 ArgChains.push_back(SDValue(L, 1));
3480 // Build a tokenfactor for all the chains.
3481 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
3482 &ArgChains[0], ArgChains.size());
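// Editorial usage sketch (illustrative; the surrounding calls are
// hypothetical): a target's LowerCall hook for tail calls might write
//   SDValue Chain = DAG.getStackArgumentTokenFactor(InChain);
// and thread Chain into the call sequence it emits next, guaranteeing the
// incoming stack arguments are re-loaded before the outgoing call sequence
// overwrites their slots.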
3485 /// getMemsetValue - Vectorized representation of the memset value operand.
3487 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3489 assert(Value.getOpcode() != ISD::UNDEF);
3491 unsigned NumBits = VT.getScalarType().getSizeInBits();
3492 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3493 assert(C->getAPIntValue().getBitWidth() == 8);
3494 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3495 if (VT.isInteger())
3496 return DAG.getConstant(Val, VT);
3497 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3500 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3502 // Use a multiplication with 0x010101... to extend the input to the required length.
3504 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3505 Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
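// Editorial worked example (illustrative): for a fill byte of 0xAB and a
// 32-bit VT, the constant path above computes APInt::getSplat(32, 0xAB) ==
// 0xABABABAB directly; the non-constant path zero-extends the i8 value to
// 0x000000AB and multiplies by the magic splat 0x01010101, which produces
// the same 0xABABABAB pattern.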
3511 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
3512 /// used when a memcpy is turned into a memset because the source is a constant string.
3514 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3515 const TargetLowering &TLI, StringRef Str) {
3516 // Handle vector with all elements zero.
3517 if (Str.empty()) {
3518 if (VT.isInteger())
3519 return DAG.getConstant(0, VT);
3520 else if (VT == MVT::f32 || VT == MVT::f64)
3521 return DAG.getConstantFP(0.0, VT);
3522 else if (VT.isVector()) {
3523 unsigned NumElts = VT.getVectorNumElements();
3524 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3525 return DAG.getNode(ISD::BITCAST, dl, VT,
3526 DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3527 EltVT, NumElts)));
3529 llvm_unreachable("Expected type!");
3532 assert(!VT.isVector() && "Can't handle vector type here!");
3533 unsigned NumVTBits = VT.getSizeInBits();
3534 unsigned NumVTBytes = NumVTBits / 8;
3535 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3537 APInt Val(NumVTBits, 0);
3538 if (TLI.isLittleEndian()) {
3539 for (unsigned i = 0; i != NumBytes; ++i)
3540 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3541 } else {
3542 for (unsigned i = 0; i != NumBytes; ++i)
3543 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
3546 // If the "cost" of materializing the integer immediate is free or cheap
3547 // (less than 2), it is cost-effective to turn the load into the immediate.
3548 const TargetTransformInfo *TTI = DAG.getTargetTransformInfo();
3549 if (TTI->getIntImmCost(Val, VT.getTypeForEVT(*DAG.getContext())) < 2)
3550 return DAG.getConstant(Val, VT);
3551 return SDValue(0, 0);
3554 /// getMemBasePlusOffset - Returns base and offset node for the given memory segment.
3556 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3557 SelectionDAG &DAG) {
3558 EVT VT = Base.getValueType();
3559 return DAG.getNode(ISD::ADD, dl,
3560 VT, Base, DAG.getConstant(Offset, VT));
3563 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
3565 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3566 unsigned SrcDelta = 0;
3567 GlobalAddressSDNode *G = NULL;
3568 if (Src.getOpcode() == ISD::GlobalAddress)
3569 G = cast<GlobalAddressSDNode>(Src);
3570 else if (Src.getOpcode() == ISD::ADD &&
3571 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3572 Src.getOperand(1).getOpcode() == ISD::Constant) {
3573 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3574 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3575 }
3576 if (!G)
3577 return false;
3579 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
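// Editorial example (illustrative): both of the shapes accepted above,
//   (GlobalAddress @str)                       and
//   (add (GlobalAddress @str), (Constant 3)),
// resolve to @str's initializer; the second sets SrcDelta = 3, so Str
// starts three bytes into the constant string.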
3582 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3583 /// to replace the memset / memcpy. Returns true if the number of memory ops
3584 /// is below the threshold, and returns the sequence of memory op types to
3585 /// perform the memset / memcpy in MemOps, by reference.
3586 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3587 unsigned Limit, uint64_t Size,
3588 unsigned DstAlign, unsigned SrcAlign,
3589 bool IsMemset,
3590 bool ZeroMemset,
3591 bool MemcpyStrSrc,
3592 bool AllowOverlap,
3593 SelectionDAG &DAG,
3594 const TargetLowering &TLI) {
3595 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3596 "Expecting memcpy / memset source to meet alignment requirement!");
3597 // If 'SrcAlign' is zero, that means the memory operation does not need to
3598 // load the value, i.e. memset or memcpy from constant string. Otherwise,
3599 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3600 // is the specified alignment of the memory operation. If it is zero, that
3601 // means it's possible to change the alignment of the destination.
3602 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3603 // not need to be loaded.
3604 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3605 IsMemset, ZeroMemset, MemcpyStrSrc,
3606 DAG.getMachineFunction());
3608 if (VT == MVT::Other) {
3609 if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
3610 TLI.allowsUnalignedMemoryAccesses(VT)) {
3611 VT = TLI.getPointerTy();
3612 } else {
3613 switch (DstAlign & 7) {
3614 case 0: VT = MVT::i64; break;
3615 case 4: VT = MVT::i32; break;
3616 case 2: VT = MVT::i16; break;
3617 default: VT = MVT::i8; break;
3621 MVT LVT = MVT::i64;
3622 while (!TLI.isTypeLegal(LVT))
3623 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3624 assert(LVT.isInteger());
3630 unsigned NumMemOps = 0;
3631 while (Size != 0) {
3632 unsigned VTSize = VT.getSizeInBits() / 8;
3633 while (VTSize > Size) {
3634 // For now, only use non-vector loads / stores for the left-over pieces.
3639 if (VT.isVector() || VT.isFloatingPoint()) {
3640 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3641 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3642 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3644 else if (NewVT == MVT::i64 &&
3645 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3646 TLI.isSafeMemOpType(MVT::f64)) {
3647 // i64 is usually not legal on 32-bit targets, but f64 may be.
3655 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3656 if (NewVT == MVT::i8)
3657 break;
3658 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3660 NewVTSize = NewVT.getSizeInBits() / 8;
3662 // If the new VT cannot cover all of the remaining bits, then consider
3663 // issuing one (or a pair of) unaligned and overlapping load / store ops.
3664 // FIXME: Only do this for 64 bits or more, since we don't have a proper
3665 // cost model for unaligned load / store.
3666 bool Fast;
3667 if (NumMemOps && AllowOverlap &&
3668 VTSize >= 8 && NewVTSize < Size &&
3669 TLI.allowsUnalignedMemoryAccesses(VT, &Fast) && Fast)
3670 VTSize = Size;
3671 else {
3672 VT = NewVT;
3673 VTSize = NewVTSize;
3674 }
3677 if (++NumMemOps > Limit)
3678 return false;
3680 MemOps.push_back(VT);
3681 Size -= VTSize;
3682 }
3684 return true;
3685 }
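// Editorial worked example (illustrative): for a 15-byte memcpy on a target
// whose widest legal memory type is i64, this loop first emits an i64 op
// (8 bytes done, 7 left). No legal type covers 7 bytes, so without overlap
// it would continue i32 + i16 + i8; with AllowOverlap and fast unaligned
// access it instead emits a second i64 op overlapping the first by one
// byte, giving MemOps = { i64, i64 }.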
3687 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3688 SDValue Chain, SDValue Dst,
3689 SDValue Src, uint64_t Size,
3690 unsigned Align, bool isVol,
3691 bool AlwaysInline,
3692 MachinePointerInfo DstPtrInfo,
3693 MachinePointerInfo SrcPtrInfo) {
3694 // Turn a memcpy of undef into a nop.
3695 if (Src.getOpcode() == ISD::UNDEF)
3696 return Chain;
3698 // Expand memcpy to a series of load and store ops if the size operand falls
3699 // below a certain threshold.
3700 // TODO: In the AlwaysInline case, if the size is big then generate a loop
3701 // rather than a potentially huge number of loads and stores.
3702 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3703 std::vector<EVT> MemOps;
3704 bool DstAlignCanChange = false;
3705 MachineFunction &MF = DAG.getMachineFunction();
3706 MachineFrameInfo *MFI = MF.getFrameInfo();
3707 bool OptSize =
3708 MF.getFunction()->getAttributes().
3709 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3710 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3711 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3712 DstAlignCanChange = true;
3713 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3714 if (Align > SrcAlign)
3715 SrcAlign = Align;
3716 StringRef Str;
3717 bool CopyFromStr = isMemSrcFromString(Src, Str);
3718 bool isZeroStr = CopyFromStr && Str.empty();
3719 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
3721 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3722 (DstAlignCanChange ? 0 : Align),
3723 (isZeroStr ? 0 : SrcAlign),
3724 false, false, CopyFromStr, true, DAG, TLI))
3725 return SDValue();
3727 if (DstAlignCanChange) {
3728 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3729 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3731 // Don't promote to an alignment that would require dynamic stack realignment.
3733 const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
3734 if (!TRI->needsStackRealignment(MF))
3735 while (NewAlign > Align &&
3736 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
3737 NewAlign /= 2;
3739 if (NewAlign > Align) {
3740 // Give the stack frame object a larger alignment if needed.
3741 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3742 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3747 SmallVector<SDValue, 8> OutChains;
3748 unsigned NumMemOps = MemOps.size();
3749 uint64_t SrcOff = 0, DstOff = 0;
3750 for (unsigned i = 0; i != NumMemOps; ++i) {
3751 EVT VT = MemOps[i];
3752 unsigned VTSize = VT.getSizeInBits() / 8;
3753 SDValue Value, Store;
3755 if (VTSize > Size) {
3756 // Issuing an unaligned load / store pair that overlaps with the previous
3757 // pair. Adjust the offset accordingly.
3758 assert(i == NumMemOps-1 && i != 0);
3759 SrcOff -= VTSize - Size;
3760 DstOff -= VTSize - Size;
3763 if (CopyFromStr &&
3764 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
3765 // It's unlikely a store of a vector immediate can be done in a single
3766 // instruction. It would require a load from a constantpool first.
3767 // We only handle zero vectors here.
3768 // FIXME: Handle other cases where store of vector immediate is done in
3769 // a single instruction.
3770 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
3771 if (Value.getNode())
3772 Store = DAG.getStore(Chain, dl, Value,
3773 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3774 DstPtrInfo.getWithOffset(DstOff), isVol,
3775 false, Align);
3778 if (!Store.getNode()) {
3779 // The type might not be legal for the target. This should only happen
3780 // if the type is smaller than a legal type, as on PPC, so the right
3781 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
3782 // to Load/Store if NVT==VT.
3783 // FIXME: Does the case above also need this?
3784 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3785 assert(NVT.bitsGE(VT));
3786 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3787 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3788 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
3789 MinAlign(SrcAlign, SrcOff));
3790 Store = DAG.getTruncStore(Chain, dl, Value,
3791 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3792 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
3793 false, MinAlign(Align, DstOff));
3795 OutChains.push_back(Store);
3796 SrcOff += VTSize;
3797 DstOff += VTSize;
3798 Size -= VTSize;
3801 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3802 &OutChains[0], OutChains.size());
3805 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3806 SDValue Chain, SDValue Dst,
3807 SDValue Src, uint64_t Size,
3808 unsigned Align, bool isVol,
3809 bool AlwaysInline,
3810 MachinePointerInfo DstPtrInfo,
3811 MachinePointerInfo SrcPtrInfo) {
3812 // Turn a memmove of undef into a nop.
3813 if (Src.getOpcode() == ISD::UNDEF)
3814 return Chain;
3816 // Expand memmove to a series of load and store ops if the size operand falls
3817 // below a certain threshold.
3818 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3819 std::vector<EVT> MemOps;
3820 bool DstAlignCanChange = false;
3821 MachineFunction &MF = DAG.getMachineFunction();
3822 MachineFrameInfo *MFI = MF.getFrameInfo();
3823 bool OptSize = MF.getFunction()->getAttributes().
3824 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3825 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3826 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3827 DstAlignCanChange = true;
3828 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3829 if (Align > SrcAlign)
3830 SrcAlign = Align;
3831 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
3833 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3834 (DstAlignCanChange ? 0 : Align), SrcAlign,
3835 false, false, false, false, DAG, TLI))
3836 return SDValue();
3838 if (DstAlignCanChange) {
3839 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3840 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3841 if (NewAlign > Align) {
3842 // Give the stack frame object a larger alignment if needed.
3843 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3844 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
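// Note (editorial): unlike memcpy, every load below is issued before any
// store, and all load chains are token-factored together first. Because a
// memmove's source and destination may overlap, an interleaved store could
// clobber bytes that a later load still has to read.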
3849 uint64_t SrcOff = 0, DstOff = 0;
3850 SmallVector<SDValue, 8> LoadValues;
3851 SmallVector<SDValue, 8> LoadChains;
3852 SmallVector<SDValue, 8> OutChains;
3853 unsigned NumMemOps = MemOps.size();
3854 for (unsigned i = 0; i < NumMemOps; i++) {
3855 EVT VT = MemOps[i];
3856 unsigned VTSize = VT.getSizeInBits() / 8;
3859 Value = DAG.getLoad(VT, dl, Chain,
3860 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3861 SrcPtrInfo.getWithOffset(SrcOff), isVol,
3862 false, false, SrcAlign);
3863 LoadValues.push_back(Value);
3864 LoadChains.push_back(Value.getValue(1));
3865 SrcOff += VTSize;
3867 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3868 &LoadChains[0], LoadChains.size());
3870 for (unsigned i = 0; i < NumMemOps; i++) {
3871 EVT VT = MemOps[i];
3872 unsigned VTSize = VT.getSizeInBits() / 8;
3875 Store = DAG.getStore(Chain, dl, LoadValues[i],
3876 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3877 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
3878 OutChains.push_back(Store);
3879 DstOff += VTSize;
3882 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3883 &OutChains[0], OutChains.size());
3886 /// \brief Lower the call to the 'memset' intrinsic function into a series of store operations.
3889 /// \param DAG Selection DAG where lowered code is placed.
3890 /// \param dl Link to corresponding IR location.
3891 /// \param Chain Control flow dependency.
3892 /// \param Dst Pointer to destination memory location.
3893 /// \param Src Value of byte to write into the memory.
3894 /// \param Size Number of bytes to write.
3895 /// \param Align Alignment of the destination in bytes.
3896 /// \param isVol True if destination is volatile.
3897 /// \param DstPtrInfo IR information on the memory pointer.
3898 /// \returns The new head of the control flow if lowering was successful, or
3899 /// an empty SDValue otherwise.
3901 /// The function tries to replace 'llvm.memset' intrinsic with several store
3902 /// operations and value calculation code. This is usually profitable for small memory sizes.
3904 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
3905 SDValue Chain, SDValue Dst,
3906 SDValue Src, uint64_t Size,
3907 unsigned Align, bool isVol,
3908 MachinePointerInfo DstPtrInfo) {
3909 // Turn a memset of undef into a nop.
3910 if (Src.getOpcode() == ISD::UNDEF)
3911 return Chain;
3913 // Expand memset to a series of load/store ops if the size operand
3914 // falls below a certain threshold.
3915 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3916 std::vector<EVT> MemOps;
3917 bool DstAlignCanChange = false;
3918 MachineFunction &MF = DAG.getMachineFunction();
3919 MachineFrameInfo *MFI = MF.getFrameInfo();
3920 bool OptSize = MF.getFunction()->getAttributes().
3921 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3922 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3923 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3924 DstAlignCanChange = true;
3925 bool IsZeroVal =
3926 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
3927 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
3928 Size, (DstAlignCanChange ? 0 : Align), 0,
3929 true, IsZeroVal, false, true, DAG, TLI))
3930 return SDValue();
3932 if (DstAlignCanChange) {
3933 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3934 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3935 if (NewAlign > Align) {
3936 // Give the stack frame object a larger alignment if needed.
3937 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3938 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3943 SmallVector<SDValue, 8> OutChains;
3944 uint64_t DstOff = 0;
3945 unsigned NumMemOps = MemOps.size();
3947 // Find the largest store and generate the bit pattern for it.
3948 EVT LargestVT = MemOps[0];
3949 for (unsigned i = 1; i < NumMemOps; i++)
3950 if (MemOps[i].bitsGT(LargestVT))
3951 LargestVT = MemOps[i];
3952 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
3954 for (unsigned i = 0; i < NumMemOps; i++) {
3955 EVT VT = MemOps[i];
3956 unsigned VTSize = VT.getSizeInBits() / 8;
3957 if (VTSize > Size) {
3958 // Issuing an unaligned load / store pair that overlaps with the previous
3959 // pair. Adjust the offset accordingly.
3960 assert(i == NumMemOps-1 && i != 0);
3961 DstOff -= VTSize - Size;
3964 // If this store is smaller than the largest store see whether we can get
3965 // the smaller value for free with a truncate.
3966 SDValue Value = MemSetValue;
3967 if (VT.bitsLT(LargestVT)) {
3968 if (!LargestVT.isVector() && !VT.isVector() &&
3969 TLI.isTruncateFree(LargestVT, VT))
3970 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
3971 else
3972 Value = getMemsetValue(Src, VT, DAG, dl);
3974 assert(Value.getValueType() == VT && "Value with wrong type.");
3975 SDValue Store = DAG.getStore(Chain, dl, Value,
3976 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3977 DstPtrInfo.getWithOffset(DstOff),
3978 isVol, false, Align);
3979 OutChains.push_back(Store);
3980 DstOff += VT.getSizeInBits() / 8;
3981 Size -= VT.getSizeInBits() / 8;
3984 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3985 &OutChains[0], OutChains.size());
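// Editorial worked example (illustrative): memset of 0xAB over 7 bytes with
// legal i32 might produce MemOps = { i32, i16, i8 }. The 0xABABABAB splat
// is materialized once for the largest type; the i16 store derives 0xABAB
// from it with a free TRUNCATE instead of recomputing the fill value.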
3988 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
3989 SDValue Src, SDValue Size,
3990 unsigned Align, bool isVol, bool AlwaysInline,
3991 MachinePointerInfo DstPtrInfo,
3992 MachinePointerInfo SrcPtrInfo) {
3993 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
3995 // Check to see if we should lower the memcpy to loads and stores first.
3996 // For cases within the target-specified limits, this is the best choice.
3997 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3999 // Memcpy with size zero? Just return the original chain.
4000 if (ConstantSize->isNullValue())
4001 return Chain;
4003 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4004 ConstantSize->getZExtValue(),Align,
4005 isVol, false, DstPtrInfo, SrcPtrInfo);
4006 if (Result.getNode())
4007 return Result;
4010 // Then check to see if we should lower the memcpy with target-specific
4011 // code. If the target chooses to do this, this is the next best.
4012 SDValue Result =
4013 TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
4014 isVol, AlwaysInline,
4015 DstPtrInfo, SrcPtrInfo);
4016 if (Result.getNode())
4017 return Result;
4019 // If we really need inline code and the target declined to provide it,
4020 // use a (potentially long) sequence of loads and stores.
4021 if (AlwaysInline) {
4022 assert(ConstantSize && "AlwaysInline requires a constant size!");
4023 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4024 ConstantSize->getZExtValue(), Align, isVol,
4025 true, DstPtrInfo, SrcPtrInfo);
4028 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
4029 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
4030 // respect volatile, so they may do things like read or write memory
4031 // beyond the given memory regions. But fixing this isn't easy, and most
4032 // people don't care.
4034 const TargetLowering *TLI = TM.getTargetLowering();
4036 // Emit a library call.
4037 TargetLowering::ArgListTy Args;
4038 TargetLowering::ArgListEntry Entry;
4039 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4040 Entry.Node = Dst; Args.push_back(Entry);
4041 Entry.Node = Src; Args.push_back(Entry);
4042 Entry.Node = Size; Args.push_back(Entry);
4043 // FIXME: pass in SDLoc
4045 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4046 false, false, false, false, 0,
4047 TLI->getLibcallCallingConv(RTLIB::MEMCPY),
4048 /*isTailCall=*/false,
4049 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4050 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
4051 TLI->getPointerTy()),
4053 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4055 return CallResult.second;
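// Editorial usage sketch (illustrative; names are hypothetical): a caller
// lowering an llvm.memcpy of N bytes would do, roughly,
//   SDValue NewChain =
//       DAG.getMemcpy(Chain, dl, DstPtr, SrcPtr, DAG.getConstant(N, PtrVT),
//                     /*Align=*/4, /*isVol=*/false, /*AlwaysInline=*/false,
//                     MachinePointerInfo(DstV), MachinePointerInfo(SrcV));
// and must use NewChain, not Chain, for all later memory operations.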
4058 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
4059 SDValue Src, SDValue Size,
4060 unsigned Align, bool isVol,
4061 MachinePointerInfo DstPtrInfo,
4062 MachinePointerInfo SrcPtrInfo) {
4063 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4065 // Check to see if we should lower the memmove to loads and stores first.
4066 // For cases within the target-specified limits, this is the best choice.
4067 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4069 // Memmove with size zero? Just return the original chain.
4070 if (ConstantSize->isNullValue())
4071 return Chain;
4073 SDValue Result =
4074 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
4075 ConstantSize->getZExtValue(), Align, isVol,
4076 false, DstPtrInfo, SrcPtrInfo);
4077 if (Result.getNode())
4078 return Result;
4081 // Then check to see if we should lower the memmove with target-specific
4082 // code. If the target chooses to do this, this is the next best.
4083 SDValue Result =
4084 TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4085 DstPtrInfo, SrcPtrInfo);
4086 if (Result.getNode())
4087 return Result;
4089 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
4090 // not be safe. See memcpy above for more details.
4092 const TargetLowering *TLI = TM.getTargetLowering();
4094 // Emit a library call.
4095 TargetLowering::ArgListTy Args;
4096 TargetLowering::ArgListEntry Entry;
4097 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4098 Entry.Node = Dst; Args.push_back(Entry);
4099 Entry.Node = Src; Args.push_back(Entry);
4100 Entry.Node = Size; Args.push_back(Entry);
4101 // FIXME: pass in SDLoc
4103 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4104 false, false, false, false, 0,
4105 TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
4106 /*isTailCall=*/false,
4107 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4108 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
4109 TLI->getPointerTy()),
4111 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4113 return CallResult.second;
4116 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
4117 SDValue Src, SDValue Size,
4118 unsigned Align, bool isVol,
4119 MachinePointerInfo DstPtrInfo) {
4120 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4122 // Check to see if we should lower the memset to stores first.
4123 // For cases within the target-specified limits, this is the best choice.
4124 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4126 // Memset with size zero? Just return the original chain.
4127 if (ConstantSize->isNullValue())
4128 return Chain;
4130 SDValue Result =
4131 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4132 Align, isVol, DstPtrInfo);
4134 if (Result.getNode())
4135 return Result;
4138 // Then check to see if we should lower the memset with target-specific
4139 // code. If the target chooses to do this, this is the next best.
4140 SDValue Result =
4141 TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4142 DstPtrInfo);
4143 if (Result.getNode())
4144 return Result;
4146 // Emit a library call.
4147 const TargetLowering *TLI = TM.getTargetLowering();
4148 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
4149 TargetLowering::ArgListTy Args;
4150 TargetLowering::ArgListEntry Entry;
4151 Entry.Node = Dst; Entry.Ty = IntPtrTy;
4152 Args.push_back(Entry);
4153 // Extend or truncate the argument to be an i32 value for the call.
4154 if (Src.getValueType().bitsGT(MVT::i32))
4155 Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4156 else
4157 Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4158 Entry.Node = Src;
4159 Entry.Ty = Type::getInt32Ty(*getContext());
4160 Entry.isSExt = true;
4161 Args.push_back(Entry);
4162 Entry.Node = Size;
4163 Entry.Ty = IntPtrTy;
4164 Entry.isSExt = false;
4165 Args.push_back(Entry);
4166 // FIXME: pass in SDLoc
4168 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4169 false, false, false, false, 0,
4170 TLI->getLibcallCallingConv(RTLIB::MEMSET),
4171 /*isTailCall=*/false,
4172 /*doesNotReturn*/false, /*isReturnValueUsed=*/false,
4173 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4174 TLI->getPointerTy()),
4176 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4178 return CallResult.second;
4181 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4182 SDVTList VTList, SDValue* Ops, unsigned NumOps,
4183 MachineMemOperand *MMO,
4184 AtomicOrdering Ordering,
4185 SynchronizationScope SynchScope) {
4186 FoldingSetNodeID ID;
4187 ID.AddInteger(MemVT.getRawBits());
4188 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4189 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4191 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4192 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4193 return SDValue(E, 0);
4196 // Allocate the operands array for the node out of the BumpPtrAllocator, since
4197 // SDNode doesn't have access to it. This memory will be "leaked" when
4198 // the node is deallocated, but recovered when the allocator is released.
4199 // If the number of operands is less than 5 we use AtomicSDNode's internal storage.
4201 SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps) : 0;
4203 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
4204 dl.getDebugLoc(), VTList, MemVT,
4205 Ops, DynOps, NumOps, MMO,
4206 Ordering, SynchScope);
4207 CSEMap.InsertNode(N, IP);
4208 AllNodes.push_back(N);
4209 return SDValue(N, 0);
4212 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4213 SDValue Chain, SDValue Ptr, SDValue Cmp,
4214 SDValue Swp, MachinePointerInfo PtrInfo,
4215 unsigned Alignment,
4216 AtomicOrdering Ordering,
4217 SynchronizationScope SynchScope) {
4218 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4219 Alignment = getEVTAlignment(MemVT);
4221 MachineFunction &MF = getMachineFunction();
4223 // All atomics are both a load and a store, except for ATOMIC_LOAD and ATOMIC_STORE.
4224 // For now, atomics are always considered volatile.
4225 // FIXME: Volatile isn't really correct; we should keep track of atomic
4226 // orderings in the memoperand.
4227 unsigned Flags = MachineMemOperand::MOVolatile;
4228 if (Opcode != ISD::ATOMIC_STORE)
4229 Flags |= MachineMemOperand::MOLoad;
4230 if (Opcode != ISD::ATOMIC_LOAD)
4231 Flags |= MachineMemOperand::MOStore;
4233 MachineMemOperand *MMO =
4234 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4236 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
4237 Ordering, SynchScope);
4240 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4241 SDValue Chain,
4242 SDValue Ptr, SDValue Cmp,
4243 SDValue Swp, MachineMemOperand *MMO,
4244 AtomicOrdering Ordering,
4245 SynchronizationScope SynchScope) {
4246 assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
4247 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4249 EVT VT = Cmp.getValueType();
4251 SDVTList VTs = getVTList(VT, MVT::Other);
4252 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4253 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 4, MMO, Ordering, SynchScope);
4256 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4257 SDValue Chain,
4258 SDValue Ptr, SDValue Val,
4259 const Value* PtrVal,
4260 unsigned Alignment,
4261 AtomicOrdering Ordering,
4262 SynchronizationScope SynchScope) {
4263 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4264 Alignment = getEVTAlignment(MemVT);
4266 MachineFunction &MF = getMachineFunction();
4267 // An atomic store does not load. An atomic load does not store.
4268 // (An atomicrmw obviously both loads and stores.)
4269 // For now, atomics are considered to be volatile always, and they are chained as such.
4271 // FIXME: Volatile isn't really correct; we should keep track of atomic
4272 // orderings in the memoperand.
4273 unsigned Flags = MachineMemOperand::MOVolatile;
4274 if (Opcode != ISD::ATOMIC_STORE)
4275 Flags |= MachineMemOperand::MOLoad;
4276 if (Opcode != ISD::ATOMIC_LOAD)
4277 Flags |= MachineMemOperand::MOStore;
4279 MachineMemOperand *MMO =
4280 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4281 MemVT.getStoreSize(), Alignment);
4283 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4284 Ordering, SynchScope);
4287 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4288 SDValue Chain,
4289 SDValue Ptr, SDValue Val,
4290 MachineMemOperand *MMO,
4291 AtomicOrdering Ordering,
4292 SynchronizationScope SynchScope) {
4293 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4294 Opcode == ISD::ATOMIC_LOAD_SUB ||
4295 Opcode == ISD::ATOMIC_LOAD_AND ||
4296 Opcode == ISD::ATOMIC_LOAD_OR ||
4297 Opcode == ISD::ATOMIC_LOAD_XOR ||
4298 Opcode == ISD::ATOMIC_LOAD_NAND ||
4299 Opcode == ISD::ATOMIC_LOAD_MIN ||
4300 Opcode == ISD::ATOMIC_LOAD_MAX ||
4301 Opcode == ISD::ATOMIC_LOAD_UMIN ||
4302 Opcode == ISD::ATOMIC_LOAD_UMAX ||
4303 Opcode == ISD::ATOMIC_SWAP ||
4304 Opcode == ISD::ATOMIC_STORE) &&
4305 "Invalid Atomic Op");
4307 EVT VT = Val.getValueType();
4309 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4310 getVTList(VT, MVT::Other);
4311 SDValue Ops[] = {Chain, Ptr, Val};
4312 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 3, MMO, Ordering, SynchScope);
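// Editorial example (illustrative; enum spellings assume this LLVM
// revision): an atomicrmw add on an i32 location could be built roughly as
//   SDValue Old = DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, MVT::i32, Chain,
//                               Ptr, Val, MMO, SequentiallyConsistent,
//                               CrossThread);
// where Old is the value previously in memory and Old.getValue(1) is the
// output chain (ATOMIC_STORE, by contrast, produces only a chain).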
4315 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4316 EVT VT, SDValue Chain,
4317 SDValue Ptr,
4318 const Value* PtrVal,
4319 unsigned Alignment,
4320 AtomicOrdering Ordering,
4321 SynchronizationScope SynchScope) {
4322 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4323 Alignment = getEVTAlignment(MemVT);
4325 MachineFunction &MF = getMachineFunction();
4326 // An atomic store does not load. An atomic load does not store.
4327 // (An atomicrmw obviously both loads and stores.)
4328 // For now, atomics are considered to be volatile always, and they are chained as such.
4330 // FIXME: Volatile isn't really correct; we should keep track of atomic
4331 // orderings in the memoperand.
4332 unsigned Flags = MachineMemOperand::MOVolatile;
4333 if (Opcode != ISD::ATOMIC_STORE)
4334 Flags |= MachineMemOperand::MOLoad;
4335 if (Opcode != ISD::ATOMIC_LOAD)
4336 Flags |= MachineMemOperand::MOStore;
4338 MachineMemOperand *MMO =
4339 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4340 MemVT.getStoreSize(), Alignment);
4342 return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO,
4343 Ordering, SynchScope);
4346 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4347 EVT VT, SDValue Chain,
4348 SDValue Ptr,
4349 MachineMemOperand *MMO,
4350 AtomicOrdering Ordering,
4351 SynchronizationScope SynchScope) {
4352 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4354 SDVTList VTs = getVTList(VT, MVT::Other);
4355 SDValue Ops[] = {Chain, Ptr};
4356 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 2, MMO, Ordering, SynchScope);
4359 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
4360 SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
4361 SDLoc dl) {
4362 if (NumOps == 1)
4363 return Ops[0];
4365 SmallVector<EVT, 4> VTs;
4366 VTs.reserve(NumOps);
4367 for (unsigned i = 0; i < NumOps; ++i)
4368 VTs.push_back(Ops[i].getValueType());
4369 return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps),
4370 Ops, NumOps);
4374 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl,
4375 const EVT *VTs, unsigned NumVTs,
4376 const SDValue *Ops, unsigned NumOps,
4377 EVT MemVT, MachinePointerInfo PtrInfo,
4378 unsigned Align, bool Vol,
4379 bool ReadMem, bool WriteMem) {
4380 return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
4381 MemVT, PtrInfo, Align, Vol,
4382 ReadMem, WriteMem);
4386 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4387 const SDValue *Ops, unsigned NumOps,
4388 EVT MemVT, MachinePointerInfo PtrInfo,
4389 unsigned Align, bool Vol,
4390 bool ReadMem, bool WriteMem) {
4391 if (Align == 0) // Ensure that codegen never sees alignment 0
4392 Align = getEVTAlignment(MemVT);
4394 MachineFunction &MF = getMachineFunction();
4395 unsigned Flags = 0;
4396 if (WriteMem)
4397 Flags |= MachineMemOperand::MOStore;
4398 if (ReadMem)
4399 Flags |= MachineMemOperand::MOLoad;
4400 if (Vol)
4401 Flags |= MachineMemOperand::MOVolatile;
4402 MachineMemOperand *MMO =
4403 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
4405 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
4409 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4410 const SDValue *Ops, unsigned NumOps,
4411 EVT MemVT, MachineMemOperand *MMO) {
4412 assert((Opcode == ISD::INTRINSIC_VOID ||
4413 Opcode == ISD::INTRINSIC_W_CHAIN ||
4414 Opcode == ISD::PREFETCH ||
4415 Opcode == ISD::LIFETIME_START ||
4416 Opcode == ISD::LIFETIME_END ||
4417 (Opcode <= INT_MAX &&
4418 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4419 "Opcode is not a memory-accessing opcode!");
4421 // Memoize the node unless it returns a flag.
4422 MemIntrinsicSDNode *N;
4423 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4424 FoldingSetNodeID ID;
4425 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4426 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4428 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4429 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4430 return SDValue(E, 0);
4433 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4434 dl.getDebugLoc(), VTList, Ops,
4435 NumOps, MemVT, MMO);
4436 CSEMap.InsertNode(N, IP);
4438 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4439 dl.getDebugLoc(), VTList, Ops,
4440 NumOps, MemVT, MMO);
4442 AllNodes.push_back(N);
4443 return SDValue(N, 0);
4446 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4447 /// MachinePointerInfo record from it. This is particularly useful because the
4448 /// code generator has many cases where it doesn't bother passing in a
4449 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4450 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4451 // If this is FI+Offset, we can model it.
4452 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4453 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4455 // If this is (FI+Offset1)+Offset2, we can model it.
4456 if (Ptr.getOpcode() != ISD::ADD ||
4457 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4458 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4459 return MachinePointerInfo();
4461 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4462 return MachinePointerInfo::getFixedStack(FI, Offset+
4463 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
4466 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4467 /// MachinePointerInfo record from it. This is particularly useful because the
4468 /// code generator has many cases where it doesn't bother passing in a
4469 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4470 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4471 // If the 'Offset' value isn't a constant, we can't handle this.
4472 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4473 return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4474 if (OffsetOp.getOpcode() == ISD::UNDEF)
4475 return InferPointerInfo(Ptr);
4476 return MachinePointerInfo();
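// Editorial example (illustrative): for Ptr = (add (FrameIndex 2),
// (Constant 16)) and OffsetOp = (Constant 8), the overload above extracts
// 8 and the first overload folds in the inner 16, producing the fixed-stack
// pointer info for frame index 2 at offset 24.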
4481 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4482 EVT VT, SDLoc dl, SDValue Chain,
4483 SDValue Ptr, SDValue Offset,
4484 MachinePointerInfo PtrInfo, EVT MemVT,
4485 bool isVolatile, bool isNonTemporal, bool isInvariant,
4486 unsigned Alignment, const MDNode *TBAAInfo,
4487 const MDNode *Ranges) {
4488 assert(Chain.getValueType() == MVT::Other &&
4489 "Invalid chain type");
4490 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4491 Alignment = getEVTAlignment(VT);
4493 unsigned Flags = MachineMemOperand::MOLoad;
4494 if (isVolatile)
4495 Flags |= MachineMemOperand::MOVolatile;
4496 if (isNonTemporal)
4497 Flags |= MachineMemOperand::MONonTemporal;
4498 if (isInvariant)
4499 Flags |= MachineMemOperand::MOInvariant;
4501 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
4502 // the code.
4503 if (PtrInfo.V == 0)
4504 PtrInfo = InferPointerInfo(Ptr, Offset);
4506 MachineFunction &MF = getMachineFunction();
4507 MachineMemOperand *MMO =
4508 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
4509 TBAAInfo, Ranges);
4510 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
4514 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4515 EVT VT, SDLoc dl, SDValue Chain,
4516 SDValue Ptr, SDValue Offset, EVT MemVT,
4517 MachineMemOperand *MMO) {
4518 if (VT == MemVT) {
4519 ExtType = ISD::NON_EXTLOAD;
4520 } else if (ExtType == ISD::NON_EXTLOAD) {
4521 assert(VT == MemVT && "Non-extending load from different memory type!");
4522 } else {
4523 // Extending load.
4524 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
4525 "Should only be an extending load, not truncating!");
4526 assert(VT.isInteger() == MemVT.isInteger() &&
4527 "Cannot convert from FP to Int or Int -> FP!");
4528 assert(VT.isVector() == MemVT.isVector() &&
4529 "Cannot use an extending load to convert to or from a vector!");
4530 assert((!VT.isVector() ||
4531 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
4532 "Cannot use an extending load to change the number of vector elements!");
4535 bool Indexed = AM != ISD::UNINDEXED;
4536 assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
4537 "Unindexed load with an offset!");
4539 SDVTList VTs = Indexed ?
4540 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4541 SDValue Ops[] = { Chain, Ptr, Offset };
4542 FoldingSetNodeID ID;
4543 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
4544 ID.AddInteger(MemVT.getRawBits());
4545 ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4546 MMO->isNonTemporal(),
4547 MMO->isInvariant()));
4548 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4550 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4551 cast<LoadSDNode>(E)->refineAlignment(MMO);
4552 return SDValue(E, 0);
4554 SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
4555 dl.getDebugLoc(), VTs, AM, ExtType,
4556 MemVT, MMO);
4557 CSEMap.InsertNode(N, IP);
4558 AllNodes.push_back(N);
4559 return SDValue(N, 0);
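// Editorial note on CSE (illustrative): building the same unindexed load
// twice with identical operands, VTs and memory flags returns the very same
// node; the second request is satisfied out of CSEMap by the
// FindNodeOrInsertPos lookup above instead of allocating a new LoadSDNode.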
4562 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4563 SDValue Chain, SDValue Ptr,
4564 MachinePointerInfo PtrInfo,
4565 bool isVolatile, bool isNonTemporal,
4566 bool isInvariant, unsigned Alignment,
4567 const MDNode *TBAAInfo,
4568 const MDNode *Ranges) {
4569 SDValue Undef = getUNDEF(Ptr.getValueType());
4570 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4571 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4572 TBAAInfo, Ranges);
4575 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4576 SDValue Chain, SDValue Ptr,
4577 MachineMemOperand *MMO) {
4578 SDValue Undef = getUNDEF(Ptr.getValueType());
4579 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4580 VT, MMO);
4583 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4584 SDValue Chain, SDValue Ptr,
4585 MachinePointerInfo PtrInfo, EVT MemVT,
4586 bool isVolatile, bool isNonTemporal,
4587 unsigned Alignment, const MDNode *TBAAInfo) {
4588 SDValue Undef = getUNDEF(Ptr.getValueType());
4589 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4590 PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
4591 TBAAInfo);
4595 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4596 SDValue Chain, SDValue Ptr, EVT MemVT,
4597 MachineMemOperand *MMO) {
4598 SDValue Undef = getUNDEF(Ptr.getValueType());
4599 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4600 MemVT, MMO);
4604 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
4605 SDValue Offset, ISD::MemIndexedMode AM) {
4606 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4607 assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4608 "Load is already an indexed load!");
4609 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4610 LD->getChain(), Base, Offset, LD->getPointerInfo(),
4611 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4612 false, LD->getAlignment());
4615 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4616 SDValue Ptr, MachinePointerInfo PtrInfo,
4617 bool isVolatile, bool isNonTemporal,
4618 unsigned Alignment, const MDNode *TBAAInfo) {
4619 assert(Chain.getValueType() == MVT::Other &&
4620 "Invalid chain type");
4621 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4622 Alignment = getEVTAlignment(Val.getValueType());
4624 unsigned Flags = MachineMemOperand::MOStore;
4625 if (isVolatile)
4626 Flags |= MachineMemOperand::MOVolatile;
4627 if (isNonTemporal)
4628 Flags |= MachineMemOperand::MONonTemporal;
4630 if (PtrInfo.V == 0)
4631 PtrInfo = InferPointerInfo(Ptr);
4633 MachineFunction &MF = getMachineFunction();
4634 MachineMemOperand *MMO =
4635 MF.getMachineMemOperand(PtrInfo, Flags,
4636 Val.getValueType().getStoreSize(), Alignment,
4637 TBAAInfo);
4639 return getStore(Chain, dl, Val, Ptr, MMO);
4642 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4643 SDValue Ptr, MachineMemOperand *MMO) {
4644 assert(Chain.getValueType() == MVT::Other &&
4645 "Invalid chain type");
4646 EVT VT = Val.getValueType();
4647 SDVTList VTs = getVTList(MVT::Other);
4648 SDValue Undef = getUNDEF(Ptr.getValueType());
4649 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4650 FoldingSetNodeID ID;
4651 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4652 ID.AddInteger(VT.getRawBits());
4653 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4654 MMO->isNonTemporal(), MMO->isInvariant()));
4655 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4657 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4658 cast<StoreSDNode>(E)->refineAlignment(MMO);
4659 return SDValue(E, 0);
4661 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4662 dl.getDebugLoc(), VTs,
4663 ISD::UNINDEXED, false, VT, MMO);
4664 CSEMap.InsertNode(N, IP);
4665 AllNodes.push_back(N);
4666 return SDValue(N, 0);
4669 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4670 SDValue Ptr, MachinePointerInfo PtrInfo,
4671 EVT SVT, bool isVolatile, bool isNonTemporal,
4673 const MDNode *TBAAInfo) {
4674 assert(Chain.getValueType() == MVT::Other &&
4675 "Invalid chain type");
4676 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4677 Alignment = getEVTAlignment(SVT);
4679 unsigned Flags = MachineMemOperand::MOStore;
4680 if (isVolatile)
4681 Flags |= MachineMemOperand::MOVolatile;
4682 if (isNonTemporal)
4683 Flags |= MachineMemOperand::MONonTemporal;
4685 if (PtrInfo.V == 0)
4686 PtrInfo = InferPointerInfo(Ptr);
4688 MachineFunction &MF = getMachineFunction();
4689 MachineMemOperand *MMO =
4690 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
4691 TBAAInfo);
4693 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
4696 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4697 SDValue Ptr, EVT SVT,
4698 MachineMemOperand *MMO) {
4699 EVT VT = Val.getValueType();
4701 assert(Chain.getValueType() == MVT::Other &&
4702 "Invalid chain type");
4703 if (VT == SVT)
4704 return getStore(Chain, dl, Val, Ptr, MMO);
4706 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
4707 "Should only be a truncating store, not extending!");
4708 assert(VT.isInteger() == SVT.isInteger() &&
4709 "Can't do FP-INT conversion!");
4710 assert(VT.isVector() == SVT.isVector() &&
4711 "Cannot use trunc store to convert to or from a vector!");
4712 assert((!VT.isVector() ||
4713 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
4714 "Cannot use trunc store to change the number of vector elements!");
4716 SDVTList VTs = getVTList(MVT::Other);
4717 SDValue Undef = getUNDEF(Ptr.getValueType());
4718 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4719 FoldingSetNodeID ID;
4720 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4721 ID.AddInteger(SVT.getRawBits());
4722 ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
4723 MMO->isNonTemporal(), MMO->isInvariant()));
4724 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4726 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4727 cast<StoreSDNode>(E)->refineAlignment(MMO);
4728 return SDValue(E, 0);
4730 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4731 dl.getDebugLoc(), VTs,
4732 ISD::UNINDEXED, true, SVT, MMO);
4733 CSEMap.InsertNode(N, IP);
4734 AllNodes.push_back(N);
4735 return SDValue(N, 0);
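// Editorial example (illustrative; names are hypothetical): to store only
// the low 16 bits of an i32 value V, a caller would write, roughly,
//   SDValue St = DAG.getTruncStore(Chain, dl, V, Ptr,
//                                  MachinePointerInfo(PtrV), MVT::i16,
//                                  /*isVolatile=*/false,
//                                  /*isNonTemporal=*/false, /*Alignment=*/2);
// If V were already of type MVT::i16, the call would degrade to a plain
// getStore, as the early-out above shows.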
4739 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
4740 SDValue Offset, ISD::MemIndexedMode AM) {
4741 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
4742 assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4743 "Store is already an indexed store!");
4744 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4745 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4746 FoldingSetNodeID ID;
4747 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4748 ID.AddInteger(ST->getMemoryVT().getRawBits());
4749 ID.AddInteger(ST->getRawSubclassData());
4750 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4752 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4753 return SDValue(E, 0);
4755 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4756 dl.getDebugLoc(), VTs, AM,
4757 ST->isTruncatingStore(),
4758 ST->getMemoryVT(),
4759 ST->getMemOperand());
4760 CSEMap.InsertNode(N, IP);
4761 AllNodes.push_back(N);
4762 return SDValue(N, 0);
4765 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
4766 SDValue Chain, SDValue Ptr,
4767 SDValue SV,
4768 unsigned Align) {
4769 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4770 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
4773 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4774 const SDUse *Ops, unsigned NumOps) {
4775 switch (NumOps) {
4776 case 0: return getNode(Opcode, DL, VT);
4777 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4778 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4779 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4783 // Copy from an SDUse array into an SDValue array for use with
4784 // the regular getNode logic.
4785 SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
4786 return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
4789 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4790 const SDValue *Ops, unsigned NumOps) {
4791 switch (NumOps) {
4792 case 0: return getNode(Opcode, DL, VT);
4793 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4794 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4795 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4799 switch (Opcode) {
4800 default: break;
4801 case ISD::SELECT_CC: {
4802 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4803 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4804 "LHS and RHS of condition must have same type!");
4805 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4806 "True and False arms of SelectCC must have same type!");
4807 assert(Ops[2].getValueType() == VT &&
4808 "select_cc node must be of same type as true and false value!");
4811 case ISD::BR_CC: {
4812 assert(NumOps == 5 && "BR_CC takes 5 operands!");
4813 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4814 "LHS/RHS of comparison should match types!");
4821 SDVTList VTs = getVTList(VT);
4823 if (VT != MVT::Glue) {
4824 FoldingSetNodeID ID;
4825 AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
4828 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4829 return SDValue(E, 0);
4831 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4833 CSEMap.InsertNode(N, IP);
4835 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4839 AllNodes.push_back(N);
4843 return SDValue(N, 0);
4846 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4847 ArrayRef<EVT> ResultTys,
4848 const SDValue *Ops, unsigned NumOps) {
4849 return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
4850 Ops, NumOps);
4853 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4854 const EVT *VTs, unsigned NumVTs,
4855 const SDValue *Ops, unsigned NumOps) {
4856 if (NumVTs == 1)
4857 return getNode(Opcode, DL, VTs[0], Ops, NumOps);
4858 return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
4861 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4862 const SDValue *Ops, unsigned NumOps) {
4863 if (VTList.NumVTs == 1)
4864 return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
4868 // FIXME: figure out how to safely handle things like
4869 // int foo(int x) { return 1 << (x & 255); }
4870 // int bar() { return foo(256); }
4871 case ISD::SRA_PARTS:
4872 case ISD::SRL_PARTS:
4873 case ISD::SHL_PARTS:
4874 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4875 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
4876 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4877 else if (N3.getOpcode() == ISD::AND)
4878 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
4879 // If the AND is only masking out bits that cannot affect the shift,
4880 // eliminate the AND.
4881 unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
4882 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
4883 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4889 // Memoize the node unless it returns a flag.
4891 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4892 FoldingSetNodeID ID;
4893 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4895 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4896 return SDValue(E, 0);
4898 if (NumOps == 1) {
4899 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
4900 DL.getDebugLoc(), VTList, Ops[0]);
4901 } else if (NumOps == 2) {
4902 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
4903 DL.getDebugLoc(), VTList, Ops[0],
4905 } else if (NumOps == 3) {
4906 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
4907 DL.getDebugLoc(), VTList, Ops[0],
4910 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4911 VTList, Ops, NumOps);
4913 CSEMap.InsertNode(N, IP);
4914 } else {
4915 if (NumOps == 1) {
4916 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
4917 DL.getDebugLoc(), VTList, Ops[0]);
4918 } else if (NumOps == 2) {
4919 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
4920 DL.getDebugLoc(), VTList, Ops[0],
4922 } else if (NumOps == 3) {
4923 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
4924 DL.getDebugLoc(), VTList, Ops[0],
4927 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4928 VTList, Ops, NumOps);
4931 AllNodes.push_back(N);
4935 return SDValue(N, 0);
4938 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
4939 return getNode(Opcode, DL, VTList, 0, 0);
4942 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4943 SDValue N1) {
4944 SDValue Ops[] = { N1 };
4945 return getNode(Opcode, DL, VTList, Ops, 1);
4948 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4949 SDValue N1, SDValue N2) {
4950 SDValue Ops[] = { N1, N2 };
4951 return getNode(Opcode, DL, VTList, Ops, 2);
4954 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4955 SDValue N1, SDValue N2, SDValue N3) {
4956 SDValue Ops[] = { N1, N2, N3 };
4957 return getNode(Opcode, DL, VTList, Ops, 3);
4960 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4961 SDValue N1, SDValue N2, SDValue N3,
4962 SDValue N4) {
4963 SDValue Ops[] = { N1, N2, N3, N4 };
4964 return getNode(Opcode, DL, VTList, Ops, 4);
4967 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4968 SDValue N1, SDValue N2, SDValue N3,
4969 SDValue N4, SDValue N5) {
4970 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4971 return getNode(Opcode, DL, VTList, Ops, 5);
4974 SDVTList SelectionDAG::getVTList(EVT VT) {
4975 return makeVTList(SDNode::getValueTypeList(VT), 1);
4978 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
4979 FoldingSetNodeID ID;
4981 ID.AddInteger(VT1.getRawBits());
4982 ID.AddInteger(VT2.getRawBits());
4985 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
4986 if (Result == NULL) {
4987 EVT *Array = Allocator.Allocate<EVT>(2);
4990 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
4991 VTListMap.InsertNode(Result, IP);
4993 return Result->getSDVTList();
4996 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
4997 FoldingSetNodeID ID;
4999 ID.AddInteger(VT1.getRawBits());
5000 ID.AddInteger(VT2.getRawBits());
5001 ID.AddInteger(VT3.getRawBits());
5004 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5005 if (Result == NULL) {
5006 EVT *Array = Allocator.Allocate<EVT>(3);
5010 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5011 VTListMap.InsertNode(Result, IP);
5013 return Result->getSDVTList();
5016 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
5017 FoldingSetNodeID ID;
5019 ID.AddInteger(VT1.getRawBits());
5020 ID.AddInteger(VT2.getRawBits());
5021 ID.AddInteger(VT3.getRawBits());
5022 ID.AddInteger(VT4.getRawBits());
5025 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5026 if (Result == NULL) {
5027 EVT *Array = Allocator.Allocate<EVT>(4);
5032 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5033 VTListMap.InsertNode(Result, IP);
5035 return Result->getSDVTList();
5038 SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
5039 FoldingSetNodeID ID;
5040 ID.AddInteger(NumVTs);
5041 for (unsigned index = 0; index < NumVTs; index++) {
5042 ID.AddInteger(VTs[index].getRawBits());
5046 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5047 if (Result == NULL) {
5048 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5049 std::copy(VTs, VTs + NumVTs, Array);
5050 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5051 VTListMap.InsertNode(Result, IP);
5053 return Result->getSDVTList();
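// Editorial note (illustrative): because all VT lists are uniqued through
// VTListMap, two calls to getVTList(MVT::i32, MVT::Other) hand back
// SDVTLists whose VTs pointers are identical, so VT-list equality can be
// tested by pointer rather than element-by-element.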
5057 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5058 /// specified operands. If the resultant node already exists in the DAG,
5059 /// this does not modify the specified node, instead it returns the node that
5060 /// already exists. If the resultant node does not exist in the DAG, the
5061 /// input node is returned. As a degenerate case, if you specify the same
5062 /// input operands as the node already has, the input node is returned.
5063 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
5064 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5066 // Check to see if there is no change.
5067 if (Op == N->getOperand(0)) return N;
5069 // See if the modified node already exists.
5070 void *InsertPos = 0;
5071 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
5072 return Existing;
5074 // Nope it doesn't. Remove the node from its current place in the maps.
5075 if (InsertPos)
5076 if (!RemoveNodeFromCSEMaps(N))
5077 InsertPos = 0;
5079 // Now we update the operands.
5080 N->OperandList[0].set(Op);
5082 // If this gets put into a CSE map, add it.
5083 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5084 return N;
5087 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
5088 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5090 // Check to see if there is no change.
5091 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5092 return N; // No operands changed, just return the input node.
5094 // See if the modified node already exists.
5095 void *InsertPos = 0;
5096 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
5097 return Existing;
5099 // Nope it doesn't. Remove the node from its current place in the maps.
5100 if (InsertPos)
5101 if (!RemoveNodeFromCSEMaps(N))
5102 InsertPos = 0;
5104 // Now we update the operands.
5105 if (N->OperandList[0] != Op1)
5106 N->OperandList[0].set(Op1);
5107 if (N->OperandList[1] != Op2)
5108 N->OperandList[1].set(Op2);
5110 // If this gets put into a CSE map, add it.
5111 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5112 return N;
5115 SDNode *SelectionDAG::
5116 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
5117 SDValue Ops[] = { Op1, Op2, Op3 };
5118 return UpdateNodeOperands(N, Ops, 3);
5121 SDNode *SelectionDAG::
5122 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5123 SDValue Op3, SDValue Op4) {
5124 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
5125 return UpdateNodeOperands(N, Ops, 4);
5128 SDNode *SelectionDAG::
5129 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5130 SDValue Op3, SDValue Op4, SDValue Op5) {
5131 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
5132 return UpdateNodeOperands(N, Ops, 5);
5135 SDNode *SelectionDAG::
5136 UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
5137 assert(N->getNumOperands() == NumOps &&
5138 "Update with wrong number of operands");
5140 // Check to see if there is no change.
5141 bool AnyChange = false;
5142 for (unsigned i = 0; i != NumOps; ++i) {
5143 if (Ops[i] != N->getOperand(i)) {
5144 AnyChange = true;
5145 break;
5146 }
5147 }
5149 // No operands changed, just return the input node.
5150 if (!AnyChange) return N;
5152 // See if the modified node already exists.
5153 void *InsertPos = 0;
5154 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
5157 // Nope it doesn't. Remove the node from its current place in the maps.
5159 if (!RemoveNodeFromCSEMaps(N))
5162 // Now we update the operands.
5163 for (unsigned i = 0; i != NumOps; ++i)
5164 if (N->OperandList[i] != Ops[i])
5165 N->OperandList[i].set(Ops[i]);
5167 // If this gets put into a CSE map, add it.
5168 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5172 /// DropOperands - Release the operands and set this node to have zero operands.
5174 void SDNode::DropOperands() {
5175 // Unlike the code in MorphNodeTo that does this, we don't need to
5176 // watch for dead nodes here.
5177 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5183 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a machine opcode.
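///
/// A sketch of the usual call from a target's instruction-selection hook
/// (the opcode, result types and operands below are placeholders):
/// \code
///   SDNode *New = CurDAG->SelectNodeTo(N, SomeTarget::ADDrr,
///                                      MVT::i32, MVT::i32, LHS, RHS);
/// \endcode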
5186 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5188 SDVTList VTs = getVTList(VT);
5189 return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
5192 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5193 EVT VT, SDValue Op1) {
5194 SDVTList VTs = getVTList(VT);
5195 SDValue Ops[] = { Op1 };
5196 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5199 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5200 EVT VT, SDValue Op1,
5202 SDVTList VTs = getVTList(VT);
5203 SDValue Ops[] = { Op1, Op2 };
5204 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5207 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5208 EVT VT, SDValue Op1,
5209 SDValue Op2, SDValue Op3) {
5210 SDVTList VTs = getVTList(VT);
5211 SDValue Ops[] = { Op1, Op2, Op3 };
5212 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5215 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5216 EVT VT, const SDValue *Ops,
5218 SDVTList VTs = getVTList(VT);
5219 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5222 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5223 EVT VT1, EVT VT2, const SDValue *Ops,
5225 SDVTList VTs = getVTList(VT1, VT2);
5226 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5229 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5231 SDVTList VTs = getVTList(VT1, VT2);
5232 return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
5235 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5236 EVT VT1, EVT VT2, EVT VT3,
5237 const SDValue *Ops, unsigned NumOps) {
5238 SDVTList VTs = getVTList(VT1, VT2, VT3);
5239 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5242 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5243 EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5244 const SDValue *Ops, unsigned NumOps) {
5245 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5246 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5249 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5252 SDVTList VTs = getVTList(VT1, VT2);
5253 SDValue Ops[] = { Op1 };
5254 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5257 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5259 SDValue Op1, SDValue Op2) {
5260 SDVTList VTs = getVTList(VT1, VT2);
5261 SDValue Ops[] = { Op1, Op2 };
5262 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5265 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5267 SDValue Op1, SDValue Op2,
5269 SDVTList VTs = getVTList(VT1, VT2);
5270 SDValue Ops[] = { Op1, Op2, Op3 };
5271 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5274 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5275 EVT VT1, EVT VT2, EVT VT3,
5276 SDValue Op1, SDValue Op2,
5278 SDVTList VTs = getVTList(VT1, VT2, VT3);
5279 SDValue Ops[] = { Op1, Op2, Op3 };
5280 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5283 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5284 SDVTList VTs, const SDValue *Ops,
5286 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
5287 // Reset the NodeID to -1.
5292 /// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
5293 /// the line number information on the merged node since it is not possible to
5294 /// preserve the information that the operation is associated with multiple lines.
5295 /// This will make the debugger work better at -O0, where there is a higher
5296 /// probability of having other instructions associated with that line.
5298 /// For IROrder, we keep the smaller of the two.
5299 SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
5300 DebugLoc NLoc = N->getDebugLoc();
5301 if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
5302 (OLoc.getDebugLoc() != NLoc)) {
5303 N->setDebugLoc(DebugLoc());
5305 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
5306 N->setIROrder(Order);
5310 /// MorphNodeTo - This *mutates* the specified node to have the specified
5311 /// return type, opcode, and operands.
5313 /// Note that MorphNodeTo returns the resultant node. If there is already a
5314 /// node of the specified opcode and operands, it returns that node instead of
5315 /// the current one. Note that the SDLoc need not be the same.
5317 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5318 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5319 /// node, and because it doesn't require CSE recalculation for any of
5320 /// the node's users.
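///
/// A sketch of the pattern (hypothetical Opc, VTs and operands; targets
/// normally reach this through SelectNodeTo, which passes ~MachineOpc):
/// \code
///   SDValue Ops[] = { Op0, Op1 };
///   SDNode *New = DAG.MorphNodeTo(N, Opc, VTs, Ops, 2);
///   if (New != N) {
///     // An equivalent node already existed; redirect N's users to it.
///     DAG.ReplaceAllUsesWith(N, New);
///   }
/// \endcode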
5322 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5323 SDVTList VTs, const SDValue *Ops,
5325 // If an identical node already exists, use it.
5327 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5328 FoldingSetNodeID ID;
5329 AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
5330 if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5331 return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
5334 if (!RemoveNodeFromCSEMaps(N))
5337 // Start the morphing.
5339 N->ValueList = VTs.VTs;
5340 N->NumValues = VTs.NumVTs;
5342 // Clear the operands list, updating used nodes to remove this from their
5343 // use list. Keep track of any operands that become dead as a result.
5344 SmallPtrSet<SDNode*, 16> DeadNodeSet;
5345 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5347 SDNode *Used = Use.getNode();
5349 if (Used->use_empty())
5350 DeadNodeSet.insert(Used);
5353 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5354 // Initialize the memory references information.
5355 MN->setMemRefs(0, 0);
5356 // If NumOps is larger than the # of operands we can have in a
5357 // MachineSDNode, reallocate the operand list.
5358 if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5359 if (MN->OperandsNeedDelete)
5360 delete[] MN->OperandList;
5361 if (NumOps > array_lengthof(MN->LocalOperands))
5362 // We're creating a final node that will live unmorphed for the
5363 // remainder of the current SelectionDAG iteration, so we can allocate
5364 // the operands directly out of a pool with no recycling metadata.
5365 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5368 MN->InitOperands(MN->LocalOperands, Ops, NumOps);
5369 MN->OperandsNeedDelete = false;
5371 MN->InitOperands(MN->OperandList, Ops, NumOps);
5373 // If NumOps is larger than the # of operands we currently have, reallocate
5374 // the operand list.
5375 if (NumOps > N->NumOperands) {
5376 if (N->OperandsNeedDelete)
5377 delete[] N->OperandList;
5378 N->InitOperands(new SDUse[NumOps], Ops, NumOps);
5379 N->OperandsNeedDelete = true;
5381 N->InitOperands(N->OperandList, Ops, NumOps);
5384 // Delete any nodes that are still dead after adding the uses for the new operands.
5386 if (!DeadNodeSet.empty()) {
5387 SmallVector<SDNode *, 16> DeadNodes;
5388 for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
5389 E = DeadNodeSet.end(); I != E; ++I)
5390 if ((*I)->use_empty())
5391 DeadNodes.push_back(*I);
5392 RemoveDeadNodes(DeadNodes);
5396 CSEMap.InsertNode(N, IP); // Memoize the new node.
5401 /// getMachineNode - These are used by target selectors to create a new node
5402 /// with specified return type(s), MachineInstr opcode, and operands.
5404 /// Note that getMachineNode returns the resultant node. If there is already a
5405 /// node of the specified opcode and operands, it returns that node instead of
5406 /// the current one.
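///
/// For example, a target might create a two-result machine node like this
/// (the opcode and operand names are placeholders):
/// \code
///   SDNode *Add = DAG.getMachineNode(SomeTarget::ADDrr, dl,
///                                    MVT::i32, MVT::Glue, LHS, RHS);
/// \endcode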
5408 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
5409 SDVTList VTs = getVTList(VT);
5410 return getMachineNode(Opcode, dl, VTs, None);
5414 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
5415 SDVTList VTs = getVTList(VT);
5416 SDValue Ops[] = { Op1 };
5417 return getMachineNode(Opcode, dl, VTs, Ops);
5421 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5422 SDValue Op1, SDValue Op2) {
5423 SDVTList VTs = getVTList(VT);
5424 SDValue Ops[] = { Op1, Op2 };
5425 return getMachineNode(Opcode, dl, VTs, Ops);
5429 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5430 SDValue Op1, SDValue Op2, SDValue Op3) {
5431 SDVTList VTs = getVTList(VT);
5432 SDValue Ops[] = { Op1, Op2, Op3 };
5433 return getMachineNode(Opcode, dl, VTs, Ops);
5437 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5438 ArrayRef<SDValue> Ops) {
5439 SDVTList VTs = getVTList(VT);
5440 return getMachineNode(Opcode, dl, VTs, Ops);
5444 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
5445 SDVTList VTs = getVTList(VT1, VT2);
5446 return getMachineNode(Opcode, dl, VTs, None);
5450 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5451 EVT VT1, EVT VT2, SDValue Op1) {
5452 SDVTList VTs = getVTList(VT1, VT2);
5453 SDValue Ops[] = { Op1 };
5454 return getMachineNode(Opcode, dl, VTs, Ops);
5458 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5459 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
5460 SDVTList VTs = getVTList(VT1, VT2);
5461 SDValue Ops[] = { Op1, Op2 };
5462 return getMachineNode(Opcode, dl, VTs, Ops);
5466 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5467 EVT VT1, EVT VT2, SDValue Op1,
5468 SDValue Op2, SDValue Op3) {
5469 SDVTList VTs = getVTList(VT1, VT2);
5470 SDValue Ops[] = { Op1, Op2, Op3 };
5471 return getMachineNode(Opcode, dl, VTs, Ops);
5475 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5477 ArrayRef<SDValue> Ops) {
5478 SDVTList VTs = getVTList(VT1, VT2);
5479 return getMachineNode(Opcode, dl, VTs, Ops);
5483 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5484 EVT VT1, EVT VT2, EVT VT3,
5485 SDValue Op1, SDValue Op2) {
5486 SDVTList VTs = getVTList(VT1, VT2, VT3);
5487 SDValue Ops[] = { Op1, Op2 };
5488 return getMachineNode(Opcode, dl, VTs, Ops);
5492 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5493 EVT VT1, EVT VT2, EVT VT3,
5494 SDValue Op1, SDValue Op2, SDValue Op3) {
5495 SDVTList VTs = getVTList(VT1, VT2, VT3);
5496 SDValue Ops[] = { Op1, Op2, Op3 };
5497 return getMachineNode(Opcode, dl, VTs, Ops);
5501 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5502 EVT VT1, EVT VT2, EVT VT3,
5503 ArrayRef<SDValue> Ops) {
5504 SDVTList VTs = getVTList(VT1, VT2, VT3);
5505 return getMachineNode(Opcode, dl, VTs, Ops);
5509 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
5510 EVT VT2, EVT VT3, EVT VT4,
5511 ArrayRef<SDValue> Ops) {
5512 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5513 return getMachineNode(Opcode, dl, VTs, Ops);
5517 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5518 ArrayRef<EVT> ResultTys,
5519 ArrayRef<SDValue> Ops) {
5520 SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
5521 return getMachineNode(Opcode, dl, VTs, Ops);
5525 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
5526 ArrayRef<SDValue> OpsArray) {
5527 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
5530 const SDValue *Ops = OpsArray.data();
5531 unsigned NumOps = OpsArray.size();
5534 FoldingSetNodeID ID;
5535 AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
5537 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5538 return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
5542 // Allocate a new MachineSDNode.
5543 N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
5544 DL.getDebugLoc(), VTs);
5546 // Initialize the operands list.
5547 if (NumOps > array_lengthof(N->LocalOperands))
5548 // We're creating a final node that will live unmorphed for the
5549 // remainder of the current SelectionDAG iteration, so we can allocate
5550 // the operands directly out of a pool with no recycling metadata.
5551 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5554 N->InitOperands(N->LocalOperands, Ops, NumOps);
5555 N->OperandsNeedDelete = false;
5558 CSEMap.InsertNode(N, IP);
5560 AllNodes.push_back(N);
5562 VerifyMachineNode(N);
5567 /// getTargetExtractSubreg - A convenience function for creating
5568 /// TargetOpcode::EXTRACT_SUBREG nodes.
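///
/// For example, extracting the low half of a 64-bit value, assuming the
/// target defines a suitable subregister index (names are placeholders):
/// \code
///   SDValue Lo = DAG.getTargetExtractSubreg(SomeTarget::sub_lo, DL,
///                                           MVT::i32, Val64);
/// \endcode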
5570 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
5572 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5573 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5574 VT, Operand, SRIdxVal);
5575 return SDValue(Subreg, 0);
5578 /// getTargetInsertSubreg - A convenience function for creating
5579 /// TargetOpcode::INSERT_SUBREG nodes.
5581 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
5582 SDValue Operand, SDValue Subreg) {
5583 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5584 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5585 VT, Operand, Subreg, SRIdxVal);
5586 return SDValue(Result, 0);
5589 /// getNodeIfExists - Get the specified node if it's already available, or
5590 /// else return NULL.
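///
/// A typical query during combining (sketch; A, B and VT are hypothetical):
/// \code
///   SDValue Ops[] = { A, B };
///   if (SDNode *E = DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(VT),
///                                       Ops, 2)) {
///     // Reuse E instead of building a new node.
///   }
/// \endcode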
5591 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5592 const SDValue *Ops, unsigned NumOps) {
5593 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5594 FoldingSetNodeID ID;
5595 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
5597 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5603 /// getDbgValue - Creates an SDDbgValue node.
5606 SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
5607 DebugLoc DL, unsigned O) {
5608 return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
5612 SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
5613 DebugLoc DL, unsigned O) {
5614 return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
5618 SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
5619 DebugLoc DL, unsigned O) {
5620 return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
5625 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5626 /// pointed to by a use iterator is deleted, increment the use iterator
5627 /// so that it doesn't dangle.
5629 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5630 SDNode::use_iterator &UI;
5631 SDNode::use_iterator &UE;
5633 virtual void NodeDeleted(SDNode *N, SDNode *E) {
5634 // Increment the iterator as needed.
5635 while (UI != UE && N == *UI)
5640 RAUWUpdateListener(SelectionDAG &d,
5641 SDNode::use_iterator &ui,
5642 SDNode::use_iterator &ue)
5643 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5648 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5649 /// This can cause recursive merging of nodes in the DAG.
5651 /// This version assumes From has a single result value.
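///
/// For example, folding a node's single result to a constant in a combine
/// (sketch; N and VT are hypothetical):
/// \code
///   SDValue Zero = DAG.getConstant(0, VT);
///   DAG.ReplaceAllUsesWith(SDValue(N, 0), Zero);
/// \endcode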
5653 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5654 SDNode *From = FromN.getNode();
5655 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5656 "Cannot replace with this method!");
5657 assert(From != To.getNode() && "Cannot replace uses of with self");
5659 // Iterate over all the existing uses of From. New uses will be added
5660 // to the beginning of the use list, which we avoid visiting.
5661 // This specifically avoids visiting uses of From that arise while the
5662 // replacement is happening, because any such uses would be the result
5663 // of CSE: If an existing node looks like From after one of its operands
5664 // is replaced by To, we don't want to replace all of its users with To
5665 // too. See PR3018 for more info.
5666 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5667 RAUWUpdateListener Listener(*this, UI, UE);
5671 // This node is about to morph, remove its old self from the CSE maps.
5672 RemoveNodeFromCSEMaps(User);
5674 // A user can appear in a use list multiple times, and when this
5675 // happens the uses are usually next to each other in the list.
5676 // To help reduce the number of CSE recomputations, process all
5677 // the uses of this user that we can find this way.
5679 SDUse &Use = UI.getUse();
5682 } while (UI != UE && *UI == User);
5684 // Now that we have modified User, add it back to the CSE maps. If it
5685 // already exists there, recursively merge the results together.
5686 AddModifiedNodeToCSEMaps(User);
5689 // If we just RAUW'd the root, take note.
5690 if (FromN == getRoot())
5694 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5695 /// This can cause recursive merging of nodes in the DAG.
5697 /// This version assumes that for each value of From, there is a
5698 /// corresponding value in To in the same position with the same type.
5700 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
5702 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
5703 assert((!From->hasAnyUseOfValue(i) ||
5704 From->getValueType(i) == To->getValueType(i)) &&
5705 "Cannot use this version of ReplaceAllUsesWith!");
5708 // Handle the trivial case.
5712 // Iterate over just the existing users of From. See the comments in
5713 // the ReplaceAllUsesWith above.
5714 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5715 RAUWUpdateListener Listener(*this, UI, UE);
5719 // This node is about to morph, remove its old self from the CSE maps.
5720 RemoveNodeFromCSEMaps(User);
5722 // A user can appear in a use list multiple times, and when this
5723 // happens the uses are usually next to each other in the list.
5724 // To help reduce the number of CSE recomputations, process all
5725 // the uses of this user that we can find this way.
5727 SDUse &Use = UI.getUse();
5730 } while (UI != UE && *UI == User);
5732 // Now that we have modified User, add it back to the CSE maps. If it
5733 // already exists there, recursively merge the results together.
5734 AddModifiedNodeToCSEMaps(User);
5737 // If we just RAUW'd the root, take note.
5738 if (From == getRoot().getNode())
5739 setRoot(SDValue(To, getRoot().getResNo()));
5742 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5743 /// This can cause recursive merging of nodes in the DAG.
5745 /// This version can replace From with any result values. To must match the
5746 /// number and types of values returned by From.
5747 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
5748 if (From->getNumValues() == 1) // Handle the simple case efficiently.
5749 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
5751 // Iterate over just the existing users of From. See the comments in
5752 // the ReplaceAllUsesWith above.
5753 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5754 RAUWUpdateListener Listener(*this, UI, UE);
5758 // This node is about to morph, remove its old self from the CSE maps.
5759 RemoveNodeFromCSEMaps(User);
5761 // A user can appear in a use list multiple times, and when this
5762 // happens the uses are usually next to each other in the list.
5763 // To help reduce the number of CSE recomputations, process all
5764 // the uses of this user that we can find this way.
5766 SDUse &Use = UI.getUse();
5767 const SDValue &ToOp = To[Use.getResNo()];
5770 } while (UI != UE && *UI == User);
5772 // Now that we have modified User, add it back to the CSE maps. If it
5773 // already exists there, recursively merge the results together.
5774 AddModifiedNodeToCSEMaps(User);
5777 // If we just RAUW'd the root, take note.
5778 if (From == getRoot().getNode())
5779 setRoot(SDValue(To[getRoot().getResNo()]));
5782 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
5783 /// uses of other values produced by From.getNode() alone.
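///
/// For example, replacing only the chain result of a load while leaving its
/// value result untouched (sketch; Load and NewChain are hypothetical):
/// \code
///   DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), NewChain);
/// \endcode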
5785 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
5786 // Handle the really simple, really trivial case efficiently.
5787 if (From == To) return;
5789 // Handle the simple, trivial case efficiently.
5790 if (From.getNode()->getNumValues() == 1) {
5791 ReplaceAllUsesWith(From, To);
5795 // Iterate over just the existing users of From. See the comments in
5796 // the ReplaceAllUsesWith above.
5797 SDNode::use_iterator UI = From.getNode()->use_begin(),
5798 UE = From.getNode()->use_end();
5799 RAUWUpdateListener Listener(*this, UI, UE);
5802 bool UserRemovedFromCSEMaps = false;
5804 // A user can appear in a use list multiple times, and when this
5805 // happens the uses are usually next to each other in the list.
5806 // To help reduce the number of CSE recomputations, process all
5807 // the uses of this user that we can find this way.
5809 SDUse &Use = UI.getUse();
5811 // Skip uses of different values from the same node.
5812 if (Use.getResNo() != From.getResNo()) {
5817 // If this node hasn't been modified yet, it's still in the CSE maps,
5818 // so remove its old self from the CSE maps.
5819 if (!UserRemovedFromCSEMaps) {
5820 RemoveNodeFromCSEMaps(User);
5821 UserRemovedFromCSEMaps = true;
5826 } while (UI != UE && *UI == User);
5828 // We are iterating over all uses of the From node, so if a use
5829 // doesn't use the specific value, no changes are made.
5830 if (!UserRemovedFromCSEMaps)
5833 // Now that we have modified User, add it back to the CSE maps. If it
5834 // already exists there, recursively merge the results together.
5835 AddModifiedNodeToCSEMaps(User);
5838 // If we just RAUW'd the root, take note.
5839 if (From == getRoot())
5844 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
5845 /// to record information about a use.
5852 /// operator< - Sort Memos by User.
5853 bool operator<(const UseMemo &L, const UseMemo &R) {
5854 return (intptr_t)L.User < (intptr_t)R.User;
5858 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
5859 /// uses of other values produced by From.getNode() alone. The same value
5860 /// may appear in both the From and To list.
5862 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
5865 // Handle the simple, trivial case efficiently.
5867 return ReplaceAllUsesOfValueWith(*From, *To);
5869 // Read up all the uses and make records of them. This helps when
5870 // processing new uses that are introduced during the
5871 // replacement process.
5872 SmallVector<UseMemo, 4> Uses;
5873 for (unsigned i = 0; i != Num; ++i) {
5874 unsigned FromResNo = From[i].getResNo();
5875 SDNode *FromNode = From[i].getNode();
5876 for (SDNode::use_iterator UI = FromNode->use_begin(),
5877 E = FromNode->use_end(); UI != E; ++UI) {
5878 SDUse &Use = UI.getUse();
5879 if (Use.getResNo() == FromResNo) {
5880 UseMemo Memo = { *UI, i, &Use };
5881 Uses.push_back(Memo);
5886 // Sort the uses, so that all the uses from a given User are together.
5887 std::sort(Uses.begin(), Uses.end());
5889 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
5890 UseIndex != UseIndexEnd; ) {
5891 // We know that this user uses some value of From. If it is the right
5892 // value, update it.
5893 SDNode *User = Uses[UseIndex].User;
5895 // This node is about to morph, remove its old self from the CSE maps.
5896 RemoveNodeFromCSEMaps(User);
5898 // The Uses array is sorted, so all the uses for a given User
5899 // are next to each other in the list.
5900 // To help reduce the number of CSE recomputations, process all
5901 // the uses of this user that we can find this way.
5903 unsigned i = Uses[UseIndex].Index;
5904 SDUse &Use = *Uses[UseIndex].Use;
5908 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
5910 // Now that we have modified User, add it back to the CSE maps. If it
5911 // already exists there, recursively merge the results together.
5912 AddModifiedNodeToCSEMaps(User);
5916 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
5917 /// based on their topological order. It returns the number of nodes assigned,
5918 /// which equals the maximum node id plus one.
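///
/// After this runs, clients can walk the node list in dependency order
/// (sketch):
/// \code
///   DAG.AssignTopologicalOrder();
///   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
///        E = DAG.allnodes_end(); I != E; ++I) {
///     // Every operand of *I appears in the list before *I itself.
///   }
/// \endcode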
5919 unsigned SelectionDAG::AssignTopologicalOrder() {
5921 unsigned DAGSize = 0;
5923 // SortedPos tracks the progress of the algorithm. Nodes before it are
5924 // sorted, nodes after it are unsorted. When the algorithm completes
5925 // it is at the end of the list.
5926 allnodes_iterator SortedPos = allnodes_begin();
5928 // Visit all the nodes. Move nodes with no operands to the front of
5929 // the list immediately. Annotate nodes that do have operands with their
5930 // operand count. Before we do this, the Node Id fields of the nodes
5931 // may contain arbitrary values. After, the Node Id fields for nodes
5932 // before SortedPos will contain the topological sort index, and the
5933 // Node Id fields for nodes at SortedPos and after will contain the
5934 // count of outstanding operands.
5935 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
5938 unsigned Degree = N->getNumOperands();
5940 // A node with no operands; add it to the result array immediately.
5941 N->setNodeId(DAGSize++);
5942 allnodes_iterator Q = N;
5944 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
5945 assert(SortedPos != AllNodes.end() && "Overran node list");
5948 // Temporarily use the Node Id as scratch space for the degree count.
5949 N->setNodeId(Degree);
5953 // Visit all the nodes. As we iterate, move nodes into sorted order,
5954 // such that by the time the end is reached all nodes will be sorted.
5955 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
5958 // N is in sorted position, so all its uses have one less operand
5959 // that needs to be sorted.
5960 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5963 unsigned Degree = P->getNodeId();
5964 assert(Degree != 0 && "Invalid node degree");
5967 // All of P's operands are sorted, so P may be sorted now.
5968 P->setNodeId(DAGSize++);
5970 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
5971 assert(SortedPos != AllNodes.end() && "Overran node list");
5974 // Update P's outstanding operand count.
5975 P->setNodeId(Degree);
5978 if (I == SortedPos) {
5981 dbgs() << "Overran sorted position:\n";
5984 llvm_unreachable(0);
5988 assert(SortedPos == AllNodes.end() &&
5989 "Topological sort incomplete!");
5990 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
5991 "First node in topological sort is not the entry token!");
5992 assert(AllNodes.front().getNodeId() == 0 &&
5993 "First node in topological sort has non-zero id!");
5994 assert(AllNodes.front().getNumOperands() == 0 &&
5995 "First node in topological sort has operands!");
5996 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
5997 "Last node in topological sort has unexpected id!");
5998 assert(AllNodes.back().use_empty() &&
5999 "Last node in topological sort has users!");
6000 assert(DAGSize == allnodes_size() && "Node count mismatch!");
6004 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
6005 /// value is produced by SD.
6006 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
6007 DbgInfo->add(DB, SD, isParameter);
6009 SD->setHasDebugValue(true);
6012 /// TransferDbgValues - Transfer SDDbgValues from From to To.
6013 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
6014 if (From == To || !From.getNode()->getHasDebugValue())
6016 SDNode *FromNode = From.getNode();
6017 SDNode *ToNode = To.getNode();
6018 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
6019 SmallVector<SDDbgValue *, 2> ClonedDVs;
6020 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
6022 SDDbgValue *Dbg = *I;
6023 if (Dbg->getKind() == SDDbgValue::SDNODE) {
6024 SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
6025 Dbg->getOffset(), Dbg->getDebugLoc(),
6027 ClonedDVs.push_back(Clone);
6030 for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
6031 E = ClonedDVs.end(); I != E; ++I)
6032 AddDbgValue(*I, ToNode, false);
6035 //===----------------------------------------------------------------------===//
6037 //===----------------------------------------------------------------------===//
6039 HandleSDNode::~HandleSDNode() {
6043 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
6044 DebugLoc DL, const GlobalValue *GA,
6045 EVT VT, int64_t o, unsigned char TF)
6046 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
6050 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
6051 SDValue X, unsigned SrcAS,
6053 : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
6054 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
6056 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6057 EVT memvt, MachineMemOperand *mmo)
6058 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
6059 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6060 MMO->isNonTemporal(), MMO->isInvariant());
6061 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6062 assert(isNonTemporal() == MMO->isNonTemporal() &&
6063 "Non-temporal encoding error!");
6064 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
6067 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6068 const SDValue *Ops, unsigned NumOps, EVT memvt,
6069 MachineMemOperand *mmo)
6070 : SDNode(Opc, Order, dl, VTs, Ops, NumOps),
6071 MemoryVT(memvt), MMO(mmo) {
6072 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6073 MMO->isNonTemporal(), MMO->isInvariant());
6074 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6075 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
6078 /// Profile - Gather unique data for the node.
6080 void SDNode::Profile(FoldingSetNodeID &ID) const {
6081 AddNodeIDNode(ID, this);
6086 std::vector<EVT> VTs;
6089 VTs.reserve(MVT::LAST_VALUETYPE);
6090 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
6091 VTs.push_back(MVT((MVT::SimpleValueType)i));
6096 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
6097 static ManagedStatic<EVTArray> SimpleVTArray;
6098 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
6100 /// getValueTypeList - Return a pointer to the specified value type.
6102 const EVT *SDNode::getValueTypeList(EVT VT) {
6103 if (VT.isExtended()) {
6104 sys::SmartScopedLock<true> Lock(*VTMutex);
6105 return &(*EVTs->insert(VT).first);
6107 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
6108 "Value type out of range!");
6109 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
6113 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
6114 /// indicated value. This method ignores uses of other values defined by this operation.
6116 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
6117 assert(Value < getNumValues() && "Bad value!");
6119 // TODO: Only iterate over uses of a given value of the node
6120 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
6121 if (UI.getUse().getResNo() == Value) {
6128 // Found exactly the right number of uses?
6133 /// hasAnyUseOfValue - Return true if there is any use of the indicated
6134 /// value. This method ignores uses of other values defined by this operation.
6135 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
6136 assert(Value < getNumValues() && "Bad value!");
6138 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
6139 if (UI.getUse().getResNo() == Value)
6146 /// isOnlyUserOf - Return true if this node is the only use of N.
6148 bool SDNode::isOnlyUserOf(SDNode *N) const {
6150 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
6161 /// isOperandOf - Return true if this node is an operand of N.
6163 bool SDValue::isOperandOf(SDNode *N) const {
6164 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6165 if (*this == N->getOperand(i))
6170 bool SDNode::isOperandOf(SDNode *N) const {
6171 for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6172 if (this == N->OperandList[i].getNode())
6177 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6178 /// be a chain) reaches the specified operand without crossing any
6179 /// side-effecting instructions on any chain path. In practice, this looks
6180 /// through token factors and non-volatile loads. In order to remain efficient,
6181 /// this only looks a couple of nodes in; it does not do an exhaustive search.
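///
/// A typical query when deciding whether a fold across chains is safe
/// (sketch; Chain and LD are hypothetical):
/// \code
///   if (Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
///     // No side-effecting operation separates the two chain points.
///   }
/// \endcode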
6182 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6183 unsigned Depth) const {
6184 if (*this == Dest) return true;
6186 // Don't search too deeply; we just want to be able to see through
6187 // TokenFactors, etc.
6188 if (Depth == 0) return false;
6190 // If this is a token factor, all inputs to the TF happen in parallel. If any
6191 // of the operands of the TF does not reach dest, then we cannot do the xform.
6192 if (getOpcode() == ISD::TokenFactor) {
6193 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6194 if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6199 // Loads don't have side effects; look through them.
6200 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6201 if (!Ld->isVolatile())
6202 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
6207 /// hasPredecessor - Return true if N is a predecessor of this node.
6208 /// N is either an operand of this node, or can be reached by recursively
6209 /// traversing up the operands.
6210 /// NOTE: This is an expensive method. Use it carefully.
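///
/// A sketch of a cycle check (Val and N are hypothetical): if Val does not
/// already depend on N, making Val an operand of N cannot create a cycle.
/// \code
///   if (!Val.getNode()->hasPredecessor(N)) {
///     // Safe: Val is not transitively computed from N.
///   }
/// \endcode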
6211 bool SDNode::hasPredecessor(const SDNode *N) const {
6212 SmallPtrSet<const SDNode *, 32> Visited;
6213 SmallVector<const SDNode *, 16> Worklist;
6214 return hasPredecessorHelper(N, Visited, Worklist);
6218 SDNode::hasPredecessorHelper(const SDNode *N,
6219 SmallPtrSet<const SDNode *, 32> &Visited,
6220 SmallVectorImpl<const SDNode *> &Worklist) const {
6221 if (Visited.empty()) {
6222 Worklist.push_back(this);
6224 // Take a look in the visited set. If we've already encountered this node
6225 // we needn't search further.
6226 if (Visited.count(N))
6230 // Haven't visited N yet. Continue the search.
6231 while (!Worklist.empty()) {
6232 const SDNode *M = Worklist.pop_back_val();
6233 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6234 SDNode *Op = M->getOperand(i).getNode();
6235 if (Visited.insert(Op))
6236 Worklist.push_back(Op);
6245 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6246 assert(Num < NumOperands && "Invalid child # of SDNode!");
6247 return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
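/// UnrollVectorOp - Scalarize the given vector operation: extract each
/// element of the operands, apply N's opcode elementwise, and reassemble the
/// results with a BUILD_VECTOR. If ResNE is 0, the full element count of the
/// result type is unrolled; if ResNE exceeds that count, the extra result
/// elements are filled with UNDEF.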
6250 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6251 assert(N->getNumValues() == 1 &&
6252 "Can't unroll a vector with multiple results!");
6254 EVT VT = N->getValueType(0);
6255 unsigned NE = VT.getVectorNumElements();
6256 EVT EltVT = VT.getVectorElementType();
6259 SmallVector<SDValue, 8> Scalars;
6260 SmallVector<SDValue, 4> Operands(N->getNumOperands());
6262 // If ResNE is 0, fully unroll the vector op.
6265 else if (NE > ResNE)
6269 for (i= 0; i != NE; ++i) {
6270 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6271 SDValue Operand = N->getOperand(j);
6272 EVT OperandVT = Operand.getValueType();
6273 if (OperandVT.isVector()) {
6274 // A vector operand; extract a single element.
6275 const TargetLowering *TLI = TM.getTargetLowering();
6276 EVT OperandEltVT = OperandVT.getVectorElementType();
6277 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6280 getConstant(i, TLI->getVectorIdxTy()));
6282 // A scalar operand; just use it as is.
6283 Operands[j] = Operand;
6287 switch (N->getOpcode()) {
6289 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6290 &Operands[0], Operands.size()));
6293 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT,
6294 &Operands[0], Operands.size()));
6301 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6302 getShiftAmountOperand(Operands[0].getValueType(),
6305 case ISD::SIGN_EXTEND_INREG:
6306 case ISD::FP_ROUND_INREG: {
6307 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6308 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6310 getValueType(ExtVT)));
6315 for (; i < ResNE; ++i)
6316 Scalars.push_back(getUNDEF(EltVT));
6318 return getNode(ISD::BUILD_VECTOR, dl,
6319 EVT::getVectorVT(*getContext(), EltVT, ResNE),
6320 &Scalars[0], Scalars.size());
6324 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
6325 /// location that is 'Dist' units away from the location that the 'Base' load
6326 /// is loading from.
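///
/// For example, checking whether LD2 loads the four bytes immediately after
/// LD1 (sketch; LD1 and LD2 are hypothetical loads on the same chain):
/// \code
///   if (DAG.isConsecutiveLoad(LD2, LD1, 4, 1)) {
///     // LD2's address equals LD1's address plus 4 bytes.
///   }
/// \endcode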
6327 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
6328 unsigned Bytes, int Dist) const {
6329 if (LD->getChain() != Base->getChain())
6331 EVT VT = LD->getValueType(0);
6332 if (VT.getSizeInBits() / 8 != Bytes)
6335 SDValue Loc = LD->getOperand(1);
6336 SDValue BaseLoc = Base->getOperand(1);
6337 if (Loc.getOpcode() == ISD::FrameIndex) {
6338 if (BaseLoc.getOpcode() != ISD::FrameIndex)
6340 const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
6341 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
6342 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6343 int FS = MFI->getObjectSize(FI);
6344 int BFS = MFI->getObjectSize(BFI);
6345 if (FS != BFS || FS != (int)Bytes) return false;
6346 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6350 if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
6351 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
6354 const GlobalValue *GV1 = NULL;
6355 const GlobalValue *GV2 = NULL;
6356 int64_t Offset1 = 0;
6357 int64_t Offset2 = 0;
6358 const TargetLowering *TLI = TM.getTargetLowering();
6359 bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6360 bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6361 if (isGA1 && isGA2 && GV1 == GV2)
6362 return Offset1 == (Offset2 + Dist*Bytes);
6367 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
6368 /// it cannot be inferred.
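///
/// A sketch of typical use when building a memory node (Ptr is hypothetical):
/// \code
///   unsigned Align = DAG.InferPtrAlignment(Ptr);
///   if (Align == 0) {
///     // Nothing could be inferred; fall back to a conservative value.
///   }
/// \endcode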
6369 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
6370 // If this is a GlobalAddress + cst, return the alignment.
6371 const GlobalValue *GV;
6372 int64_t GVOffset = 0;
6373 const TargetLowering *TLI = TM.getTargetLowering();
6374 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
6375 unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
6376 APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
6377 llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
6378 TLI->getDataLayout());
6379 unsigned AlignBits = KnownZero.countTrailingOnes();
6380 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
6382 return MinAlign(Align, GVOffset);
6385 // If this is a direct reference to a stack slot, use information about the
6386 // stack slot's alignment.
6387 int FrameIdx = 1 << 31;
6388 int64_t FrameOffset = 0;
6389 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6390 FrameIdx = FI->getIndex();
6391 } else if (isBaseWithConstantOffset(Ptr) &&
6392 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6394 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6395 FrameOffset = Ptr.getConstantOperandVal(1);
6398 if (FrameIdx != (1 << 31)) {
6399 const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6400 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6408 // getAddressSpace - Return the address space this GlobalAddress belongs to.
6409 unsigned GlobalAddressSDNode::getAddressSpace() const {
6410 return getGlobal()->getType()->getAddressSpace();
6414 Type *ConstantPoolSDNode::getType() const {
6415 if (isMachineConstantPoolEntry())
6416 return Val.MachineCPVal->getType();
6417 return Val.ConstVal->getType();
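/// A sketch of how callers typically use isConstantSplat (BV is a
/// hypothetical BuildVectorSDNode; 8 is an example minimum splat width):
/// \code
///   APInt SplatValue, SplatUndef;
///   unsigned SplatBitSize;
///   bool HasAnyUndefs;
///   if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
///                           HasAnyUndefs, 8)) {
///     // The vector splats one value expressible in SplatBitSize >= 8 bits.
///   }
/// \endcode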
6420 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6422 unsigned &SplatBitSize,
6424 unsigned MinSplatBits,
6426 EVT VT = getValueType(0);
6427 assert(VT.isVector() && "Expected a vector type");
6428 unsigned sz = VT.getSizeInBits();
6429 if (MinSplatBits > sz)
6432 SplatValue = APInt(sz, 0);
6433 SplatUndef = APInt(sz, 0);
6435 // Get the bits. Bits with undefined values (when the corresponding element
6436 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6437 // in SplatValue. If any of the values are not constant, give up and return false.
6439 unsigned int nOps = getNumOperands();
6440 assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6441 unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6443 for (unsigned j = 0; j < nOps; ++j) {
6444 unsigned i = isBigEndian ? nOps-1-j : j;
6445 SDValue OpVal = getOperand(i);
6446 unsigned BitPos = j * EltBitSize;
6448 if (OpVal.getOpcode() == ISD::UNDEF)
6449 SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6450 else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6451 SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6452 zextOrTrunc(sz) << BitPos;
6453 else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6454 SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
6459 // The build_vector is all constants or undefs. Find the smallest element
6460 // size that splats the vector.
6462 HasAnyUndefs = (SplatUndef != 0);
6465 unsigned HalfSize = sz / 2;
6466 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6467 APInt LowValue = SplatValue.trunc(HalfSize);
6468 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6469 APInt LowUndef = SplatUndef.trunc(HalfSize);
6471 // If the two halves do not match (ignoring undef bits), stop here.
6472 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6473 MinSplatBits > HalfSize)
6476 SplatValue = HighValue | LowValue;
6477 SplatUndef = HighUndef & LowUndef;
6486 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6487 // Find the first non-undef value in the shuffle mask.
6489 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6492 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6494 // Make sure all remaining elements are either undef or the same as the first non-undef value.
6496 for (int Idx = Mask[i]; i != e; ++i)
6497 if (Mask[i] >= 0 && Mask[i] != Idx)
6503 static void checkForCyclesHelper(const SDNode *N,
6504 SmallPtrSet<const SDNode*, 32> &Visited,
6505 SmallPtrSet<const SDNode*, 32> &Checked) {
6506 // If this node has already been checked, don't check it again.
6507 if (Checked.count(N))
6510 // If a node has already been visited on this depth-first walk, reject it as a cycle.
6512 if (!Visited.insert(N)) {
6513 dbgs() << "Offending node:\n";
6515 errs() << "Detected cycle in SelectionDAG\n";
6519 for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6520 checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
6527 void llvm::checkForCycles(const llvm::SDNode *N) {
6529 assert(N && "Checking nonexistent SDNode");
6530 SmallPtrSet<const SDNode*, 32> visited;
6531 SmallPtrSet<const SDNode*, 32> checked;
6532 checkForCyclesHelper(N, visited, checked);
6536 void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
6537 checkForCycles(DAG->getRoot().getNode());