//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "SDNodeOrdering.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // bits are all ones.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
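
// Illustrative example (an assumption for exposition, not from the original
// source): after type legalization promotes v4i8 elements to i32 on a target
// like ARM, a build_vector of i32 0xFF constants still counts as all-ones for
// v4i8, because only the low EltSize (8) bits of each constant are checked.
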
/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
    if (!CN->isNullValue())
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
    if (!CFPN->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
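
// Illustrative example: the cond code encoding packs E into bit 0, G into
// bit 1, L into bit 2 and U into bit 3, so swapping the operands of SETOLT
// (L set) flips the L and G bits and yields SETOGT (G set):
//   getSetCCSwappedOperands(ISD::SETOLT) == ISD::SETOGT
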
/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
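
// Illustrative example: for integers, SETEQ (0b10001) ^ 7 == 0b10110, which
// is SETNE.  For floating point the U bit is flipped too, so the inverse of
// SETOLT (0b0100) is 0b0100 ^ 15 == 0b1011, i.e. SETUGE: !(a < b) must also
// be true when the operands are unordered.
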
/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if the result is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
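
// Illustrative example: getSetCCOrOperation(ISD::SETGT, ISD::SETEQ, true)
// ORs the condition bits (G|E) and returns ISD::SETGE, while mixing a signed
// with an unsigned comparison (say SETGT with SETULT) makes the isSignedOp
// check fire and returns ISD::SETCC_INVALID.
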
/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns zero if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
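
// Illustrative example: getSetCCAndOperation(ISD::SETUGE, ISD::SETULE, true)
// ANDs the condition bits, giving SETUEQ, which the integer canonicalization
// above then turns into plain ISD::SETEQ.
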
//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant:
    ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
    break;
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leafs with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}
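
// The resulting layout is, from bit 0 upward: [1:0] extension/truncation
// type, [4:2] indexed addressing mode, [5] volatile, [6] non-temporal,
// [7] invariant.
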
//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, 0);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list.  This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used nodes' use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Remove the ordering of this node.
  Ordering->remove(N);

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it.  This is useful when we're about to delete or repurpose
/// the node.  We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops, unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

#ifndef NDEBUG
/// VerifyNodeCommon - Sanity check the given node.  Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}

/// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that are
  // not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}

/// VerifyMachineNode - Sanity check the given MachineNode.  Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}
#endif // NDEBUG

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TLI.getDataLayout()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TLI(*tm.getTargetLowering()), TSI(*tm.getSelectionDAGInfo()),
    TTI(0), OptLevel(OL), EntryNode(ISD::EntryToken, DebugLoc(),
                                    getVTList(MVT::Other)),
    Root(getEntryNode()), Ordering(0), UpdateListeners(0) {
  AllNodes.push_back(&EntryNode);
  Ordering = new SDNodeOrdering();
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf, const TargetTransformInfo *tti) {
  MF = &mf;
  TTI = tti;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete Ordering;
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(0));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(0));

  EntryNode.UseList = 0;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  Ordering->clear();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, DebugLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}
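
// Illustrative example: zero-extending the low 8 bits of an i32 value in
// place emits (and x, 0xFF), since APInt::getLowBitsSet(32, 8) is 0xFF.
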
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(DebugLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
         (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT) {
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI.getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI.getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(Elt);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, DebugLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}
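
// Note that for vector types the constant is splatted into a BUILD_VECTOR:
// e.g. getConstant(1, MVT::v4i32) yields (BUILD_VECTOR 1, 1, 1, 1) rather
// than a single constant node.
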
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TLI.getPointerTy(), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&V);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME DebugLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, DebugLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
           EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, DebugLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TLI.getPointerTy().getSizeInBits();
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL, GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(FI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}


SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), 0, 0);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MBB);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (CondCodeNodes[Cond] == 0) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}
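
// Illustrative example: with 4-element inputs, commuting mask <4,1,6,-1>
// produces <0,5,2,-1>; undef indices (-1) are left untouched.
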
SDValue SelectionDAG::getVectorShuffle(EVT VT, DebugLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(N1.getValueType() == N2.getValueType() && "Invalid VECTOR_SHUFFLE");
  assert(VT.isVector() && N1.getValueType().isVector() &&
         "Vector Shuffle VTs must be vectors");
  assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType()
         && "Vector Shuffle VTs must have same element type");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If the shuffle is an identity, or shuffles everything to undef, return
  // that node.
  bool AllUndef = true;
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
    if (MaskVec[i] >= 0) AllUndef = false;
  }
  if (Identity && NElts == N1.getValueType().getVectorNumElements())
    return N1;
  if (AllUndef)
    return getUNDEF(VT);

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it.  This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl, N1, N2, MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
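
// Illustrative example of the canonicalizations composing: a call like
// getVectorShuffle(v4f32, dl, undef, V, <4,5,6,7>) first commutes to
// shuffle(V, undef, <0,1,2,3>), which the identity check then folds away,
// returning V itself instead of building a new node.
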
SDValue SelectionDAG::getConvertRndSat(EVT VT, DebugLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl, Ops, 5,
                                                           Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
  ID.AddInteger(RegNo);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
  ID.AddPointer(RegMask);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl, Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(V);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MD);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  MVT ShTy = TLI.getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, Op.getDebugLoc(), ShTy, Op);
}

/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  unsigned StackAlign =
    std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI.getPointerTy());
}

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const DataLayout *TD = TLI.getDataLayout();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI.getPointerTy());
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, DebugLoc dl) {
  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return getConstant(1, VT);

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
      }
    }
  }
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      return getSetCC(dl, VT, N2, N1, ISD::getSetCCSwappedOperands(Cond));
    }
  }

  // Could not fold it.
  return SDValue();
}

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  // This predicate is not safe for vector operations.
  if (Op.getValueType().isVector())
    return false;

  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
/// this predicate to simplify operations downstream.  Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
                                     unsigned Depth) const {
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
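
// Illustrative example: MaskedValueIsZero(Ptr, APInt(BitWidth, 3)) asks
// whether the two low bits of Ptr are known zero, i.e. whether Ptr is
// provably 4-byte aligned.
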
1677 /// ComputeMaskedBits - Determine which of the bits specified in Mask are
1678 /// known to be either zero or one and return them in the KnownZero/KnownOne
1679 /// bitsets. This code only analyzes bits in Mask, in order to short-circuit
1681 void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
1682 APInt &KnownOne, unsigned Depth) const {
1683 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1685 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
1687 return; // Limit search depth.
1689 APInt KnownZero2, KnownOne2;
1691 switch (Op.getOpcode()) {
1693 // We know all of the bits for a constant!
1694 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
1695 KnownZero = ~KnownOne;
1698 // If either the LHS or the RHS are Zero, the result is zero.
1699 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1700 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1701 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1702 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1704 // Output known-1 bits are only known if set in both the LHS & RHS.
1705 KnownOne &= KnownOne2;
1706 // Output known-0 are known to be clear if zero in either the LHS | RHS.
1707 KnownZero |= KnownZero2;
1710 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1711 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1712 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1713 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1715 // Output known-0 bits are only known if clear in both the LHS & RHS.
1716 KnownZero &= KnownZero2;
1717 // Output known-1 are known to be set if set in either the LHS | RHS.
1718 KnownOne |= KnownOne2;
1721 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1722 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1723 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1724 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1726 // Output known-0 bits are known if the bit is known clear in both or known set in both the LHS & RHS.
1727 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
1728 // Output known-1 bits are those known set in one operand and known clear in the other.
1729 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
1730 KnownZero = KnownZeroOut;
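// Concretely, a bit known one in both operands XORs to a known zero, while
// a bit known zero in one operand and known one in the other yields a
// known one.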
1734 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1735 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1736 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1737 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1739 // If low bits are zero in either operand, output low known-0 bits.
1740 // Also compute a conservative estimate for high known-0 bits.
1741 // More trickiness is possible, but this is sufficient for the
1742 // interesting case of alignment computation.
1743 KnownOne.clearAllBits();
1744 unsigned TrailZ = KnownZero.countTrailingOnes() +
1745 KnownZero2.countTrailingOnes();
1746 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
1747 KnownZero2.countLeadingOnes(),
1748 BitWidth) - BitWidth;
1750 TrailZ = std::min(TrailZ, BitWidth);
1751 LeadZ = std::min(LeadZ, BitWidth);
1752 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
1753 APInt::getHighBitsSet(BitWidth, LeadZ);
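// For example, multiplying a value with 4 known-zero low bits by one with
// 2 known-zero low bits gives at least 6 trailing zeros, since
// (16*a) * (4*b) == 64*a*b.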
1757 // For the purposes of computing leading zeros we can conservatively
1758 // treat a udiv as a logical right shift by the power of 2 known to
1759 // be less than the denominator.
1760 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1761 unsigned LeadZ = KnownZero2.countLeadingOnes();
1763 KnownOne2.clearAllBits();
1764 KnownZero2.clearAllBits();
1765 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1766 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
1767 if (RHSUnknownLeadingOnes != BitWidth)
1768 LeadZ = std::min(BitWidth,
1769 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
1771 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
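// For example, on i32, if the numerator has 8 known leading zeros and bit 4
// of the denominator is known one (denominator >= 16), the quotient is
// less than 2^24 / 2^4, so it has at least 8 + 4 = 12 leading zeros.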
1775 ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
1776 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1777 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1778 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1780 // Only known if known in both the LHS and RHS.
1781 KnownOne &= KnownOne2;
1782 KnownZero &= KnownZero2;
1784 case ISD::SELECT_CC:
1785 ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
1786 ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
1787 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1788 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1790 // Only known if known in both the LHS and RHS.
1791 KnownOne &= KnownOne2;
1792 KnownZero &= KnownZero2;
1800 if (Op.getResNo() != 1)
1802 // The boolean result conforms to getBooleanContents. Fall through.
1804 // If we know the result of a setcc has the top bits zero, use this info.
1805 if (TLI.getBooleanContents(Op.getValueType().isVector()) ==
1806 TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
1807 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
1810 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
1811 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1812 unsigned ShAmt = SA->getZExtValue();
1814 // If the shift count is an invalid immediate, don't do anything.
1815 if (ShAmt >= BitWidth)
1818 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1819 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1820 KnownZero <<= ShAmt;
1822 // low bits known zero.
1823 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
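// For example, (shl X, 3) always has its low 3 bits known zero, and any
// bit of X known zero reappears 3 positions higher in the result.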
1827 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1828 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1829 unsigned ShAmt = SA->getZExtValue();
1831 // If the shift count is an invalid immediate, don't do anything.
1832 if (ShAmt >= BitWidth)
1835 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1836 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1837 KnownZero = KnownZero.lshr(ShAmt);
1838 KnownOne = KnownOne.lshr(ShAmt);
1840 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
1841 KnownZero |= HighBits; // High bits known zero.
1845 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1846 unsigned ShAmt = SA->getZExtValue();
1848 // If the shift count is an invalid immediate, don't do anything.
1849 if (ShAmt >= BitWidth)
1852 // If any of the demanded bits are produced by the sign extension, we also
1853 // demand the input sign bit.
1854 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
1856 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1857 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1858 KnownZero = KnownZero.lshr(ShAmt);
1859 KnownOne = KnownOne.lshr(ShAmt);
1861 // Handle the sign bits.
1862 APInt SignBit = APInt::getSignBit(BitWidth);
1863 SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask.
1865 if (KnownZero.intersects(SignBit)) {
1866 KnownZero |= HighBits; // New bits are known zero.
1867 } else if (KnownOne.intersects(SignBit)) {
1868 KnownOne |= HighBits; // New bits are known one.
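// For example, (sra X, 4) on an i32 X whose sign bit is known zero has its
// top 5 bits known zero: the 4 shifted-in copies plus the old sign bit.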
1872 case ISD::SIGN_EXTEND_INREG: {
1873 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1874 unsigned EBits = EVT.getScalarType().getSizeInBits();
1876 // Sign extension. Compute the demanded bits in the result that are not
1877 // present in the input.
1878 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
1880 APInt InSignBit = APInt::getSignBit(EBits);
1881 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
1883 // If the sign extended bits are demanded, we know that the sign
1884 // bit is demanded.
1885 InSignBit = InSignBit.zext(BitWidth);
1886 if (NewBits.getBoolValue())
1887 InputDemandedBits |= InSignBit;
1889 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1890 KnownOne &= InputDemandedBits;
1891 KnownZero &= InputDemandedBits;
1892 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1894 // If the sign bit of the input is known set or clear, then we know the
1895 // top bits of the result.
1896 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
1897 KnownZero |= NewBits;
1898 KnownOne &= ~NewBits;
1899 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
1900 KnownOne |= NewBits;
1901 KnownZero &= ~NewBits;
1902 } else { // Input sign bit unknown
1903 KnownZero &= ~NewBits;
1904 KnownOne &= ~NewBits;
1909 case ISD::CTTZ_ZERO_UNDEF:
1911 case ISD::CTLZ_ZERO_UNDEF:
1913 unsigned LowBits = Log2_32(BitWidth)+1;
1914 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1915 KnownOne.clearAllBits();
1919 LoadSDNode *LD = cast<LoadSDNode>(Op);
1920 if (ISD::isZEXTLoad(Op.getNode())) {
1921 EVT VT = LD->getMemoryVT();
1922 unsigned MemBits = VT.getScalarType().getSizeInBits();
1923 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
1924 } else if (const MDNode *Ranges = LD->getRanges()) {
1925 computeMaskedBitsLoad(*Ranges, KnownZero);
1929 case ISD::ZERO_EXTEND: {
1930 EVT InVT = Op.getOperand(0).getValueType();
1931 unsigned InBits = InVT.getScalarType().getSizeInBits();
1932 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
1933 KnownZero = KnownZero.trunc(InBits);
1934 KnownOne = KnownOne.trunc(InBits);
1935 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1936 KnownZero = KnownZero.zext(BitWidth);
1937 KnownOne = KnownOne.zext(BitWidth);
1938 KnownZero |= NewBits;
1941 case ISD::SIGN_EXTEND: {
1942 EVT InVT = Op.getOperand(0).getValueType();
1943 unsigned InBits = InVT.getScalarType().getSizeInBits();
1944 APInt InSignBit = APInt::getSignBit(InBits);
1945 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
1947 KnownZero = KnownZero.trunc(InBits);
1948 KnownOne = KnownOne.trunc(InBits);
1949 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1951 // Note if the sign bit is known to be zero or one.
1952 bool SignBitKnownZero = KnownZero.isNegative();
1953 bool SignBitKnownOne = KnownOne.isNegative();
1954 assert(!(SignBitKnownZero && SignBitKnownOne) &&
1955 "Sign bit can't be known to be both zero and one!");
1957 KnownZero = KnownZero.zext(BitWidth);
1958 KnownOne = KnownOne.zext(BitWidth);
1960 // If the sign bit is known zero or one, the top bits match.
1961 if (SignBitKnownZero)
1962 KnownZero |= NewBits;
1963 else if (SignBitKnownOne)
1964 KnownOne |= NewBits;
1967 case ISD::ANY_EXTEND: {
1968 EVT InVT = Op.getOperand(0).getValueType();
1969 unsigned InBits = InVT.getScalarType().getSizeInBits();
1970 KnownZero = KnownZero.trunc(InBits);
1971 KnownOne = KnownOne.trunc(InBits);
1972 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1973 KnownZero = KnownZero.zext(BitWidth);
1974 KnownOne = KnownOne.zext(BitWidth);
1977 case ISD::TRUNCATE: {
1978 EVT InVT = Op.getOperand(0).getValueType();
1979 unsigned InBits = InVT.getScalarType().getSizeInBits();
1980 KnownZero = KnownZero.zext(InBits);
1981 KnownOne = KnownOne.zext(InBits);
1982 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1983 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1984 KnownZero = KnownZero.trunc(BitWidth);
1985 KnownOne = KnownOne.trunc(BitWidth);
1988 case ISD::AssertZext: {
1989 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1990 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
1991 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1992 KnownZero |= (~InMask);
1993 KnownOne &= (~KnownZero);
1997 // All bits are zero except the low bit.
1998 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2002 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
2003 // We know that the top bits of C-X are clear if X contains fewer bits
2004 // than C (i.e. no wrap-around can happen). For example, 20-X is
2005 // positive if we can prove that X is >= 0 and < 16.
2006 if (CLHS->getAPIntValue().isNonNegative()) {
2007 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2008 // NLZ can't be BitWidth: C is non-negative, so C+1 is non-zero.
2009 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2010 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2012 // If all of the MaskV bits are known to be zero, then we know the
2013 // output top bits are zero, because we now know that the output is
2015 if ((KnownZero2 & MaskV) == MaskV) {
2016 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2017 // Top bits known zero.
2018 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
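// For example, for 20-X with bits 4..31 of X known zero (X < 16), the
// result lies in [5, 20], so its top 27 bits are known zero.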
2026 // Low bits that are known clear in both operands remain clear in the
2027 // sum, since no carry can be generated below them. For example,
2028 // 8+(X<<3) is known to have the low 3 bits clear.
2029 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2030 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
2031 unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
2033 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2034 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
2035 KnownZeroOut = std::min(KnownZeroOut,
2036 KnownZero2.countTrailingOnes());
2038 if (Op.getOpcode() == ISD::ADD) {
2039 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
2043 // With ADDE, a carry bit may be added in, so we can only use this
2044 // information if we know (at least) that the low two bits are clear.
2045 // We then tell the caller that the low bit is unknown but that the
2046 // remaining bits of the common clear run are known zero.
2047 if (KnownZeroOut >= 2) // ADDE
2048 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
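// For example, an adde of two operands that each have the low 4 bits clear
// can only carry into bit 0, so bits 1..3 of the sum stay known zero.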
2052 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2053 const APInt &RA = Rem->getAPIntValue().abs();
2054 if (RA.isPowerOf2()) {
2055 APInt LowBits = RA - 1;
2056 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
2057 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2059 // The low bits of the first operand are unchanged by the srem.
2060 KnownZero = KnownZero2 & LowBits;
2061 KnownOne = KnownOne2 & LowBits;
2063 // If the first operand is non-negative or has all low bits zero, then
2064 // the upper bits are all zero.
2065 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2066 KnownZero |= ~LowBits;
2068 // If the first operand is negative and not all low bits are zero, then
2069 // the upper bits are all one.
2070 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2071 KnownOne |= ~LowBits;
2072 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
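// For example, X srem 8 keeps the low 3 bits of X unchanged, and when X is
// known non-negative the remainder's upper bits are all known zero.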
2077 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2078 const APInt &RA = Rem->getAPIntValue();
2079 if (RA.isPowerOf2()) {
2080 APInt LowBits = (RA - 1);
2081 KnownZero |= ~LowBits;
2082 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2083 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2088 // Since the result is less than or equal to either operand, any leading
2089 // zero bits in either operand must also exist in the result.
2090 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2091 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2093 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2094 KnownZero2.countLeadingOnes());
2095 KnownOne.clearAllBits();
2096 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2099 case ISD::FrameIndex:
2100 case ISD::TargetFrameIndex:
2101 if (unsigned Align = InferPtrAlignment(Op)) {
2102 // The low bits are known zero if the pointer is aligned.
2103 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
2109 if (Op.getOpcode() < ISD::BUILTIN_OP_END)
2112 case ISD::INTRINSIC_WO_CHAIN:
2113 case ISD::INTRINSIC_W_CHAIN:
2114 case ISD::INTRINSIC_VOID:
2115 // Allow the target to implement this method for its nodes.
2116 TLI.computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2121 /// ComputeNumSignBits - Return the number of times the sign bit of the
2122 /// register is replicated into the other bits. We know that at least 1 bit
2123 /// is always equal to the sign bit (itself), but other cases can give us
2124 /// information. For example, immediately after an "SRA X, 2", we know that
2125 /// the top 3 bits are all equal to each other, so we return 3.
2126 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
2127 EVT VT = Op.getValueType();
2128 assert(VT.isInteger() && "Invalid VT!");
2129 unsigned VTBits = VT.getScalarType().getSizeInBits();
2131 unsigned FirstAnswer = 1;
2134 return 1; // Limit search depth.
2136 switch (Op.getOpcode()) {
2138 case ISD::AssertSext:
2139 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2140 return VTBits-Tmp+1;
2141 case ISD::AssertZext:
2142 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2145 case ISD::Constant: {
2146 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2147 return Val.getNumSignBits();
2150 case ISD::SIGN_EXTEND:
2151 Tmp = VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
2152 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2154 case ISD::SIGN_EXTEND_INREG:
2155 // Max of the input and what this extends.
2157 cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
2160 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2161 return std::max(Tmp, Tmp2);
2164 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2165 // SRA X, C -> adds C sign bits.
2166 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2167 Tmp += C->getZExtValue();
2168 if (Tmp > VTBits) Tmp = VTBits;
2172 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2173 // shl destroys sign bits.
2174 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2175 if (C->getZExtValue() >= VTBits || // Bad shift.
2176 C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
2177 return Tmp - C->getZExtValue();
2182 case ISD::XOR: // NOT is handled here.
2183 // Logical binary ops preserve the number of sign bits at the worst.
2184 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2186 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2187 FirstAnswer = std::min(Tmp, Tmp2);
2188 // We computed what we know about the sign bits as our first
2189 // answer. Now proceed to the generic code that uses
2190 // ComputeMaskedBits, and pick whichever answer is better.
2195 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2196 if (Tmp == 1) return 1; // Early out.
2197 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2198 return std::min(Tmp, Tmp2);
2206 if (Op.getResNo() != 1)
2208 // The boolean result conforms to getBooleanContents. Fall through.
2210 // If setcc returns 0/-1, all bits are sign bits.
2211 if (TLI.getBooleanContents(Op.getValueType().isVector()) ==
2212 TargetLowering::ZeroOrNegativeOneBooleanContent)
2217 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2218 unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2220 // Handle rotate right by N like a rotate left by 32-N.
2221 if (Op.getOpcode() == ISD::ROTR)
2222 RotAmt = (VTBits-RotAmt) & (VTBits-1);
2224 // If we aren't rotating out all of the known-in sign bits, return the
2225 // number that are left. This handles rotl(sext(x), 1) for example.
2226 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2227 if (Tmp > RotAmt+1) return Tmp-RotAmt;
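// For example, rotating a 32-bit value with 20 known sign bits left by 4
// leaves the top 20 - 4 = 16 bits still equal to each other.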
2231 // Add can have at most one carry bit. Thus we know that the output
2232 // is, at worst, one more bit than the inputs.
2233 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2234 if (Tmp == 1) return 1; // Early out.
2236 // Special case decrementing a value (ADD X, -1):
2237 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2238 if (CRHS->isAllOnesValue()) {
2239 APInt KnownZero, KnownOne;
2240 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2242 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits.
2244 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2247 // If we are subtracting one from a positive number, there is no carry
2248 // out of the result.
2249 if (KnownZero.isNegative())
2253 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2254 if (Tmp2 == 1) return 1;
2255 return std::min(Tmp, Tmp2)-1;
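// For example, adding two i32 operands with 10 sign bits each (values that
// fit in 23 bits) gives a sum that fits in 24 bits, i.e. 9 sign bits.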
2258 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2259 if (Tmp2 == 1) return 1;
2262 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2263 if (CLHS->isNullValue()) {
2264 APInt KnownZero, KnownOne;
2265 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2266 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits.
2268 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2271 // If the input is known to be positive (the sign bit is known clear),
2272 // the output of the NEG has the same number of sign bits as the input.
2273 if (KnownZero.isNegative())
2276 // Otherwise, we treat this like a SUB.
2279 // Sub can have at most one carry bit. Thus we know that the output
2280 // is, at worst, one more bit than the inputs.
2281 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2282 if (Tmp == 1) return 1; // Early out.
2283 return std::min(Tmp, Tmp2)-1;
2285 // FIXME: it's tricky to do anything useful for this, but it is an important
2286 // case for targets like X86.
2290 // Handle LOADX separately here. The EXTLOAD case will fall through.
2291 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2292 unsigned ExtType = LD->getExtensionType();
2295 case ISD::SEXTLOAD: // e.g. i16 -> '17' sign bits known
2296 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2297 return VTBits-Tmp+1;
2298 case ISD::ZEXTLOAD: // e.g. i16 -> '16' high bits known zero
2299 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2304 // Allow the target to implement this method for its nodes.
2305 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2306 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2307 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2308 Op.getOpcode() == ISD::INTRINSIC_VOID) {
2309 unsigned NumBits = TLI.ComputeNumSignBitsForTargetNode(Op, Depth);
2310 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2313 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2314 // use this information.
2315 APInt KnownZero, KnownOne;
2316 ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
2319 if (KnownZero.isNegative()) { // sign bit is 0
2321 } else if (KnownOne.isNegative()) { // sign bit is 1
2328 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
2329 // the number of identical bits in the top of the input value.
2331 Mask <<= Mask.getBitWidth()-VTBits;
2332 // Return # leading zeros. We use 'min' here in case Mask was zero before
2333 // shifting. We don't want to return '64' for an i32 "0".
2334 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
2337 /// isBaseWithConstantOffset - Return true if the specified operand is an
2338 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
2339 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
2340 /// semantics as an ADD. This handles the equivalence:
2341 /// X|Cst == X+Cst iff X&Cst == 0.
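/// For example, (or (shl X, 4), 7) qualifies: the low 4 bits of the base
/// are known zero, so the OR behaves exactly like an add of 7.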
2342 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
2343 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
2344 !isa<ConstantSDNode>(Op.getOperand(1)))
2347 if (Op.getOpcode() == ISD::OR &&
2348 !MaskedValueIsZero(Op.getOperand(0),
2349 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
2356 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
2357 // If we're told that NaNs won't happen, assume they won't.
2358 if (getTarget().Options.NoNaNsFPMath)
2361 // If the value is a constant, we can obviously see if it is a NaN or not.
2362 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2363 return !C->getValueAPF().isNaN();
2365 // TODO: Recognize more cases here.
2370 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
2371 // If the value is a constant, we can obviously see if it is a zero or not.
2372 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2373 return !C->isZero();
2375 // TODO: Recognize more cases here.
2376 switch (Op.getOpcode()) {
2379 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2380 return !C->isNullValue();
2387 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
2388 // Check the obvious case.
2389 if (A == B) return true;
2391 // Account for negative and positive zero comparing equal.
2392 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
2393 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
2394 if (CA->isZero() && CB->isZero()) return true;
2396 // Otherwise they may not be equal.
2400 /// getNode - Gets or creates the specified node.
2402 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT) {
2403 FoldingSetNodeID ID;
2404 AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
2406 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2407 return SDValue(E, 0);
2409 SDNode *N = new (NodeAllocator) SDNode(Opcode, DL, getVTList(VT));
2410 CSEMap.InsertNode(N, IP);
2412 AllNodes.push_back(N);
2416 return SDValue(N, 0);
2419 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
2420 EVT VT, SDValue Operand) {
2421 // Constant fold unary operations with an integer constant operand.
2422 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
2423 const APInt &Val = C->getAPIntValue();
2426 case ISD::SIGN_EXTEND:
2427 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT);
2428 case ISD::ANY_EXTEND:
2429 case ISD::ZERO_EXTEND:
2431 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
2432 case ISD::UINT_TO_FP:
2433 case ISD::SINT_TO_FP: {
2434 APFloat apf(EVTToAPFloatSemantics(VT),
2435 APInt::getNullValue(VT.getSizeInBits()));
2436 (void)apf.convertFromAPInt(Val,
2437 Opcode==ISD::SINT_TO_FP,
2438 APFloat::rmNearestTiesToEven);
2439 return getConstantFP(apf, VT);
2442 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2443 return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
2444 else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2445 return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
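// For example, a BITCAST of the i32 constant 0x3F800000 to f32 folds
// directly to the constant 1.0f.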
2448 return getConstant(Val.byteSwap(), VT);
2450 return getConstant(Val.countPopulation(), VT);
2452 case ISD::CTLZ_ZERO_UNDEF:
2453 return getConstant(Val.countLeadingZeros(), VT);
2455 case ISD::CTTZ_ZERO_UNDEF:
2456 return getConstant(Val.countTrailingZeros(), VT);
2460 // Constant fold unary operations with a floating point constant operand.
2461 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
2462 APFloat V = C->getValueAPF(); // make copy
2466 return getConstantFP(V, VT);
2469 return getConstantFP(V, VT);
2471 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
2472 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2473 return getConstantFP(V, VT);
2477 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
2478 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2479 return getConstantFP(V, VT);
2483 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
2484 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2485 return getConstantFP(V, VT);
2488 case ISD::FP_EXTEND: {
2490 // This can return overflow, underflow, or inexact; we don't care.
2491 // FIXME need to be more flexible about rounding mode.
2492 (void)V.convert(EVTToAPFloatSemantics(VT),
2493 APFloat::rmNearestTiesToEven, &ignored);
2494 return getConstantFP(V, VT);
2496 case ISD::FP_TO_SINT:
2497 case ISD::FP_TO_UINT: {
2500 assert(integerPartWidth >= 64);
2501 // FIXME need to be more flexible about rounding mode.
2502 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2503 Opcode==ISD::FP_TO_SINT,
2504 APFloat::rmTowardZero, &ignored);
2505 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
2507 APInt api(VT.getSizeInBits(), x);
2508 return getConstant(api, VT);
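// For example, FP_TO_SINT of the f32 constant 3.75 folds to the i32
// constant 3, since the conversion rounds toward zero.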
2511 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2512 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
2513 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2514 return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
2519 unsigned OpOpcode = Operand.getNode()->getOpcode();
2521 case ISD::TokenFactor:
2522 case ISD::MERGE_VALUES:
2523 case ISD::CONCAT_VECTORS:
2524 return Operand; // Factor, merge or concat of one node? No need.
2525 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
2526 case ISD::FP_EXTEND:
2527 assert(VT.isFloatingPoint() &&
2528 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
2529 if (Operand.getValueType() == VT) return Operand; // noop conversion.
2530 assert((!VT.isVector() ||
2531 VT.getVectorNumElements() ==
2532 Operand.getValueType().getVectorNumElements()) &&
2533 "Vector element count mismatch!");
2534 if (Operand.getOpcode() == ISD::UNDEF)
2535 return getUNDEF(VT);
2537 case ISD::SIGN_EXTEND:
2538 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2539 "Invalid SIGN_EXTEND!");
2540 if (Operand.getValueType() == VT) return Operand; // noop extension
2541 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2542 "Invalid sext node, dst < src!");
2543 assert((!VT.isVector() ||
2544 VT.getVectorNumElements() ==
2545 Operand.getValueType().getVectorNumElements()) &&
2546 "Vector element count mismatch!");
2547 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
2548 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2549 else if (OpOpcode == ISD::UNDEF)
2550 // sext(undef) = 0, because the top bits will all be the same.
2551 return getConstant(0, VT);
2553 case ISD::ZERO_EXTEND:
2554 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2555 "Invalid ZERO_EXTEND!");
2556 if (Operand.getValueType() == VT) return Operand; // noop extension
2557 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2558 "Invalid zext node, dst < src!");
2559 assert((!VT.isVector() ||
2560 VT.getVectorNumElements() ==
2561 Operand.getValueType().getVectorNumElements()) &&
2562 "Vector element count mismatch!");
2563 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
2564 return getNode(ISD::ZERO_EXTEND, DL, VT,
2565 Operand.getNode()->getOperand(0));
2566 else if (OpOpcode == ISD::UNDEF)
2567 // zext(undef) = 0, because the top bits will be zero.
2568 return getConstant(0, VT);
2570 case ISD::ANY_EXTEND:
2571 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2572 "Invalid ANY_EXTEND!");
2573 if (Operand.getValueType() == VT) return Operand; // noop extension
2574 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2575 "Invalid anyext node, dst < src!");
2576 assert((!VT.isVector() ||
2577 VT.getVectorNumElements() ==
2578 Operand.getValueType().getVectorNumElements()) &&
2579 "Vector element count mismatch!");
2581 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2582 OpOpcode == ISD::ANY_EXTEND)
2583 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
2584 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2585 else if (OpOpcode == ISD::UNDEF)
2586 return getUNDEF(VT);
2588 // (ext (trunc x)) -> x
2589 if (OpOpcode == ISD::TRUNCATE) {
2590 SDValue OpOp = Operand.getNode()->getOperand(0);
2591 if (OpOp.getValueType() == VT)
2596 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2597 "Invalid TRUNCATE!");
2598 if (Operand.getValueType() == VT) return Operand; // noop truncate
2599 assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
2600 "Invalid truncate node, src < dst!");
2601 assert((!VT.isVector() ||
2602 VT.getVectorNumElements() ==
2603 Operand.getValueType().getVectorNumElements()) &&
2604 "Vector element count mismatch!");
2605 if (OpOpcode == ISD::TRUNCATE)
2606 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2607 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2608 OpOpcode == ISD::ANY_EXTEND) {
2609 // If the source is smaller than the dest, we still need an extend.
2610 if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
2611 .bitsLT(VT.getScalarType()))
2612 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2613 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
2614 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2615 return Operand.getNode()->getOperand(0);
2617 if (OpOpcode == ISD::UNDEF)
2618 return getUNDEF(VT);
2621 // Basic sanity checking.
2622 assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
2623 && "Cannot BITCAST between types of different sizes!");
2624 if (VT == Operand.getValueType()) return Operand; // noop conversion.
2625 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
2626 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
2627 if (OpOpcode == ISD::UNDEF)
2628 return getUNDEF(VT);
2630 case ISD::SCALAR_TO_VECTOR:
2631 assert(VT.isVector() && !Operand.getValueType().isVector() &&
2632 (VT.getVectorElementType() == Operand.getValueType() ||
2633 (VT.getVectorElementType().isInteger() &&
2634 Operand.getValueType().isInteger() &&
2635 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
2636 "Illegal SCALAR_TO_VECTOR node!");
2637 if (OpOpcode == ISD::UNDEF)
2638 return getUNDEF(VT);
2639 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
2640 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
2641 isa<ConstantSDNode>(Operand.getOperand(1)) &&
2642 Operand.getConstantOperandVal(1) == 0 &&
2643 Operand.getOperand(0).getValueType() == VT)
2644 return Operand.getOperand(0);
2647 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
2648 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
2649 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
2650 Operand.getNode()->getOperand(0));
2651 if (OpOpcode == ISD::FNEG) // --X -> X
2652 return Operand.getNode()->getOperand(0);
2655 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
2656 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
2661 SDVTList VTs = getVTList(VT);
2662 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
2663 FoldingSetNodeID ID;
2664 SDValue Ops[1] = { Operand };
2665 AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
2667 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2668 return SDValue(E, 0);
2670 N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTs, Operand);
2671 CSEMap.InsertNode(N, IP);
2673 N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTs, Operand);
2676 AllNodes.push_back(N);
2680 return SDValue(N, 0);
2683 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
2684 SDNode *Cst1, SDNode *Cst2) {
2685 SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
2686 SmallVector<SDValue, 4> Outputs;
2687 EVT SVT = VT.getScalarType();
2689 ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
2690 ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
2691 if (Scalar1 && Scalar2) {
2692 // Scalar instruction.
2693 Inputs.push_back(std::make_pair(Scalar1, Scalar2));
2695 // For vectors extract each constant element into Inputs so we can constant
2696 // fold them individually.
2697 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
2698 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
2702 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
2704 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
2705 ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
2706 ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
2707 if (!V1 || !V2) // Not a constant, bail.
2710 // Avoid BUILD_VECTOR nodes that perform implicit truncation.
2711 // FIXME: This is valid and could be handled by truncating the APInts.
2712 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
2715 Inputs.push_back(std::make_pair(V1, V2));
2719 // We have a number of constant values; constant fold them element by element.
2720 for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
2721 const APInt &C1 = Inputs[I].first->getAPIntValue();
2722 const APInt &C2 = Inputs[I].second->getAPIntValue();
2726 Outputs.push_back(getConstant(C1 + C2, SVT));
2729 Outputs.push_back(getConstant(C1 - C2, SVT));
2732 Outputs.push_back(getConstant(C1 * C2, SVT));
2735 if (!C2.getBoolValue())
2737 Outputs.push_back(getConstant(C1.udiv(C2), SVT));
2740 if (!C2.getBoolValue())
2742 Outputs.push_back(getConstant(C1.urem(C2), SVT));
2745 if (!C2.getBoolValue())
2747 Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
2750 if (!C2.getBoolValue())
2752 Outputs.push_back(getConstant(C1.srem(C2), SVT));
2755 Outputs.push_back(getConstant(C1 & C2, SVT));
2758 Outputs.push_back(getConstant(C1 | C2, SVT));
2761 Outputs.push_back(getConstant(C1 ^ C2, SVT));
2764 Outputs.push_back(getConstant(C1 << C2, SVT));
2767 Outputs.push_back(getConstant(C1.lshr(C2), SVT));
2770 Outputs.push_back(getConstant(C1.ashr(C2), SVT));
2773 Outputs.push_back(getConstant(C1.rotl(C2), SVT));
2776 Outputs.push_back(getConstant(C1.rotr(C2), SVT));
2783 // Handle the scalar case first.
2784 if (Outputs.size() == 1)
2785 return Outputs.back();
2787 // Otherwise build a big vector out of the scalar elements we generated.
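// For example, folding ISD::ADD over two v4i32 BUILD_VECTOR constants
// yields a BUILD_VECTOR whose elements are the lane-wise sums.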
2788 return getNode(ISD::BUILD_VECTOR, DebugLoc(), VT, Outputs.data(),
2792 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, SDValue N1,
2794 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
2795 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
2798 case ISD::TokenFactor:
2799 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
2800 N2.getValueType() == MVT::Other && "Invalid token factor!");
2801 // Fold trivial token factors.
2802 if (N1.getOpcode() == ISD::EntryToken) return N2;
2803 if (N2.getOpcode() == ISD::EntryToken) return N1;
2804 if (N1 == N2) return N1;
2806 case ISD::CONCAT_VECTORS:
2807 // Concat of UNDEFs is UNDEF.
2808 if (N1.getOpcode() == ISD::UNDEF &&
2809 N2.getOpcode() == ISD::UNDEF)
2810 return getUNDEF(VT);
2812 // A CONCAT_VECTORS with all operands BUILD_VECTOR can be simplified to
2813 // one big BUILD_VECTOR.
2814 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
2815 N2.getOpcode() == ISD::BUILD_VECTOR) {
2816 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
2817 N1.getNode()->op_end());
2818 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
2819 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
2823 assert(VT.isInteger() && "This operator does not apply to FP types!");
2824 assert(N1.getValueType() == N2.getValueType() &&
2825 N1.getValueType() == VT && "Binary operator types must match!");
2826 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
2827 // worth handling here.
2828 if (N2C && N2C->isNullValue())
2830 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
2837 assert(VT.isInteger() && "This operator does not apply to FP types!");
2838 assert(N1.getValueType() == N2.getValueType() &&
2839 N1.getValueType() == VT && "Binary operator types must match!");
2840 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
2841 // it's worth handling here.
2842 if (N2C && N2C->isNullValue())
2852 assert(VT.isInteger() && "This operator does not apply to FP types!");
2853 assert(N1.getValueType() == N2.getValueType() &&
2854 N1.getValueType() == VT && "Binary operator types must match!");
2861 if (getTarget().Options.UnsafeFPMath) {
2862 if (Opcode == ISD::FADD) {
2864 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
2865 if (CFP->getValueAPF().isZero())
2868 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2869 if (CFP->getValueAPF().isZero())
2871 } else if (Opcode == ISD::FSUB) {
2873 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2874 if (CFP->getValueAPF().isZero())
2876 } else if (Opcode == ISD::FMUL) {
2877 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
2880 // If the first operand isn't the constant, try the second
2882 CFP = dyn_cast<ConstantFPSDNode>(N2);
2889 return SDValue(CFP,0);
2891 if (CFP->isExactlyValue(1.0))
2896 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
2897 assert(N1.getValueType() == N2.getValueType() &&
2898 N1.getValueType() == VT && "Binary operator types must match!");
2900 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
2901 assert(N1.getValueType() == VT &&
2902 N1.getValueType().isFloatingPoint() &&
2903 N2.getValueType().isFloatingPoint() &&
2904 "Invalid FCOPYSIGN!");
2911 assert(VT == N1.getValueType() &&
2912 "Shift operators return type must be the same as their first arg");
2913 assert(VT.isInteger() && N2.getValueType().isInteger() &&
2914 "Shifts only work on integers");
2915 // Verify that the shift amount VT is big enough to hold valid shift
2916 // amounts. This catches things like trying to shift an i1024 value by an
2917 // i8, which is easy to fall into in generic code that uses
2918 // TLI.getShiftAmount().
2919 assert(N2.getValueType().getSizeInBits() >=
2920 Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
2921 "Invalid use of small shift amount with oversized value!");
2923 // Always fold shifts of i1 values so the code generator doesn't need to
2924 // handle them. Since we know the size of the shift has to be less than the
2925 // size of the value, the shift/rotate count is guaranteed to be zero.
2928 if (N2C && N2C->isNullValue())
2931 case ISD::FP_ROUND_INREG: {
2932 EVT EVT = cast<VTSDNode>(N2)->getVT();
2933 assert(VT == N1.getValueType() && "Not an inreg round!");
2934 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
2935 "Cannot FP_ROUND_INREG integer types");
2936 assert(EVT.isVector() == VT.isVector() &&
2937 "FP_ROUND_INREG type should be vector iff the operand "
2939 assert((!EVT.isVector() ||
2940 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
2941 "Vector element counts must match in FP_ROUND_INREG");
2942 assert(EVT.bitsLE(VT) && "Not rounding down!");
2944 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
2948 assert(VT.isFloatingPoint() &&
2949 N1.getValueType().isFloatingPoint() &&
2950 VT.bitsLE(N1.getValueType()) &&
2951 isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
2952 if (N1.getValueType() == VT) return N1; // noop conversion.
2954 case ISD::AssertSext:
2955 case ISD::AssertZext: {
2956 EVT EVT = cast<VTSDNode>(N2)->getVT();
2957 assert(VT == N1.getValueType() && "Not an inreg extend!");
2958 assert(VT.isInteger() && EVT.isInteger() &&
2959 "Cannot *_EXTEND_INREG FP types");
2960 assert(!EVT.isVector() &&
2961 "AssertSExt/AssertZExt type should be the vector element type "
2962 "rather than the vector type!");
2963 assert(EVT.bitsLE(VT) && "Not extending!");
2964 if (VT == EVT) return N1; // noop assertion.
2967 case ISD::SIGN_EXTEND_INREG: {
2968 EVT EVT = cast<VTSDNode>(N2)->getVT();
2969 assert(VT == N1.getValueType() && "Not an inreg extend!");
2970 assert(VT.isInteger() && EVT.isInteger() &&
2971 "Cannot *_EXTEND_INREG FP types");
2972 assert(EVT.isVector() == VT.isVector() &&
2973 "SIGN_EXTEND_INREG type should be vector iff the operand "
2975 assert((!EVT.isVector() ||
2976 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
2977 "Vector element counts must match in SIGN_EXTEND_INREG");
2978 assert(EVT.bitsLE(VT) && "Not extending!");
2979 if (EVT == VT) return N1; // Not actually extending
2982 APInt Val = N1C->getAPIntValue();
2983 unsigned FromBits = EVT.getScalarType().getSizeInBits();
2984 Val <<= Val.getBitWidth()-FromBits;
2985 Val = Val.ashr(Val.getBitWidth()-FromBits);
2986 return getConstant(Val, VT);
2990 case ISD::EXTRACT_VECTOR_ELT:
2991 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
2992 if (N1.getOpcode() == ISD::UNDEF)
2993 return getUNDEF(VT);
2995 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
2996 // expanding copies of large vectors from registers.
2998 N1.getOpcode() == ISD::CONCAT_VECTORS &&
2999 N1.getNumOperands() > 0) {
3001 N1.getOperand(0).getValueType().getVectorNumElements();
3002 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3003 N1.getOperand(N2C->getZExtValue() / Factor),
3004 getConstant(N2C->getZExtValue() % Factor,
3005 N2.getValueType()));
3008 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3009 // expanding large vector constants.
3010 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3011 SDValue Elt = N1.getOperand(N2C->getZExtValue());
3013 if (VT != Elt.getValueType())
3014 // If the vector element type is not legal, the BUILD_VECTOR operands
3015 // are promoted and implicitly truncated, and the result implicitly
3016 // extended. Make that explicit here.
3017 Elt = getAnyExtOrTrunc(Elt, DL, VT);
3022 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3023 // operations are lowered to scalars.
3024 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3025 // If the indices are the same, return the inserted element; if the
3026 // indices are known to differ, extract the element from the
3027 // original vector.
3028 SDValue N1Op2 = N1.getOperand(2);
3029 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
3031 if (N1Op2C && N2C) {
3032 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3033 if (VT == N1.getOperand(1).getValueType())
3034 return N1.getOperand(1);
3036 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3039 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3043 case ISD::EXTRACT_ELEMENT:
3044 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3045 assert(!N1.getValueType().isVector() && !VT.isVector() &&
3046 (N1.getValueType().isInteger() == VT.isInteger()) &&
3047 N1.getValueType() != VT &&
3048 "Wrong types for EXTRACT_ELEMENT!");
3050 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3051 // 64-bit integers into 32-bit parts. Instead of building the extract of
3052 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3053 if (N1.getOpcode() == ISD::BUILD_PAIR)
3054 return N1.getOperand(N2C->getZExtValue());
3056 // EXTRACT_ELEMENT of a constant int is also very common.
3057 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
3058 unsigned ElementSize = VT.getSizeInBits();
3059 unsigned Shift = ElementSize * N2C->getZExtValue();
3060 APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
3061 return getConstant(ShiftedVal.trunc(ElementSize), VT);
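// For example, extracting element 1 of the i64 constant
// 0x1122334455667788 as an i32 shifts right by 32 and truncates, yielding
// 0x11223344.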
3064 case ISD::EXTRACT_SUBVECTOR: {
3066 if (VT.isSimple() && N1.getValueType().isSimple()) {
3067 assert(VT.isVector() && N1.getValueType().isVector() &&
3068 "Extract subvector VTs must be a vectors!");
3069 assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType() &&
3070 "Extract subvector VTs must have the same element type!");
3071 assert(VT.getSimpleVT() <= N1.getValueType().getSimpleVT() &&
3072 "Extract subvector must be from larger vector to smaller vector!");
3074 if (isa<ConstantSDNode>(Index.getNode())) {
3075 assert((VT.getVectorNumElements() +
3076 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3077 <= N1.getValueType().getVectorNumElements())
3078 && "Extract subvector overflow!");
3081 // Trivial extraction.
3082 if (VT.getSimpleVT() == N1.getValueType().getSimpleVT())
3089 // Perform trivial constant folding.
3090 SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
3091 if (SV.getNode()) return SV;
3093 // Canonicalize constant to RHS if commutative.
3094 if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
3095 std::swap(N1C, N2C);
3099 // Constant fold FP operations.
3100 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
3101 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
3103 if (!N2CFP && isCommutativeBinOp(Opcode)) {
3104 // Canonicalize constant to RHS if commutative.
3105 std::swap(N1CFP, N2CFP);
3108 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
3109 APFloat::opStatus s;
3112 s = V1.add(V2, APFloat::rmNearestTiesToEven);
3113 if (s != APFloat::opInvalidOp)
3114 return getConstantFP(V1, VT);
3117 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
3118 if (s!=APFloat::opInvalidOp)
3119 return getConstantFP(V1, VT);
3122 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
3123 if (s!=APFloat::opInvalidOp)
3124 return getConstantFP(V1, VT);
3127 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
3128 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3129 return getConstantFP(V1, VT);
3132 s = V1.mod(V2, APFloat::rmNearestTiesToEven);
3133 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3134 return getConstantFP(V1, VT);
3136 case ISD::FCOPYSIGN:
3138 return getConstantFP(V1, VT);
3143 if (Opcode == ISD::FP_ROUND) {
3144 APFloat V = N1CFP->getValueAPF(); // make copy
3146 // This can return overflow, underflow, or inexact; we don't care.
3147 // FIXME need to be more flexible about rounding mode.
3148 (void)V.convert(EVTToAPFloatSemantics(VT),
3149 APFloat::rmNearestTiesToEven, &ignored);
3150 return getConstantFP(V, VT);
3154 // Canonicalize an UNDEF to the RHS, even over a constant.
3155 if (N1.getOpcode() == ISD::UNDEF) {
3156 if (isCommutativeBinOp(Opcode)) {
3160 case ISD::FP_ROUND_INREG:
3161 case ISD::SIGN_EXTEND_INREG:
3167 return N1; // fold op(undef, arg2) -> undef
3175 return getConstant(0, VT); // fold op(undef, arg2) -> 0
3176 // For vectors, we can't easily build an all zero vector, just return
3183 // Fold a bunch of operators when the RHS is undef.
3184 if (N2.getOpcode() == ISD::UNDEF) {
3187 if (N1.getOpcode() == ISD::UNDEF)
3188 // Handle undef ^ undef -> 0 special case. This is a common
3190 return getConstant(0, VT);
3200 return N2; // fold op(arg1, undef) -> undef
3206 if (getTarget().Options.UnsafeFPMath)
3214 return getConstant(0, VT); // fold op(arg1, undef) -> 0
3215 // For vectors, we can't easily build an all zero vector, just return
3220 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
3221 // For vectors, we can't easily build an all one vector, just return
3229 // Memoize this node if possible.
3231 SDVTList VTs = getVTList(VT);
3232 if (VT != MVT::Glue) {
3233 SDValue Ops[] = { N1, N2 };
3234 FoldingSetNodeID ID;
3235 AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
3237 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3238 return SDValue(E, 0);
3240 N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTs, N1, N2);
3241 CSEMap.InsertNode(N, IP);
3243 N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTs, N1, N2);
3246 AllNodes.push_back(N);
3250 return SDValue(N, 0);
3253 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
3254 SDValue N1, SDValue N2, SDValue N3) {
3255 // Perform various simplifications.
3256 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3258 case ISD::CONCAT_VECTORS:
3259 // A CONCAT_VECTORS with all operands BUILD_VECTOR can be simplified to
3260 // one big BUILD_VECTOR.
3261 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3262 N2.getOpcode() == ISD::BUILD_VECTOR &&
3263 N3.getOpcode() == ISD::BUILD_VECTOR) {
3264 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3265 N1.getNode()->op_end());
3266 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3267 Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
3268 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
3272 // Use FoldSetCC to simplify SETCC's.
3273 SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
3274 if (Simp.getNode()) return Simp;
3279 if (N1C->getZExtValue())
3280 return N2; // select true, X, Y -> X
3281 return N3; // select false, X, Y -> Y
3284 if (N2 == N3) return N2; // select C, X, X -> X
3286 case ISD::VECTOR_SHUFFLE:
3287 llvm_unreachable("should use getVectorShuffle constructor!");
3288 case ISD::INSERT_SUBVECTOR: {
3290 if (VT.isSimple() && N1.getValueType().isSimple()
3291 && N2.getValueType().isSimple()) {
3292 assert(VT.isVector() && N1.getValueType().isVector() &&
3293 N2.getValueType().isVector() &&
3294 "Insert subvector VTs must be a vectors");
3295 assert(VT == N1.getValueType() &&
3296 "Dest and insert subvector source types must match!");
3297 assert(N2.getValueType().getSimpleVT() <= N1.getValueType().getSimpleVT() &&
3298 "Insert subvector must be from smaller vector to larger vector!");
3299 if (isa<ConstantSDNode>(Index.getNode())) {
3300 assert((N2.getValueType().getVectorNumElements() +
3301 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3302 <= VT.getVectorNumElements())
3303 && "Insert subvector overflow!");
3306 // Trivial insertion.
3307 if (VT.getSimpleVT() == N2.getValueType().getSimpleVT())
3313 // Fold bit_convert nodes from a type to themselves.
3314 if (N1.getValueType() == VT)
3319 // Memoize node if it doesn't produce a flag.
3321 SDVTList VTs = getVTList(VT);
3322 if (VT != MVT::Glue) {
3323 SDValue Ops[] = { N1, N2, N3 };
3324 FoldingSetNodeID ID;
3325 AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
3327 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3328 return SDValue(E, 0);
3330 N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
3331 CSEMap.InsertNode(N, IP);
3333 N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
3336 AllNodes.push_back(N);
3340 return SDValue(N, 0);
3343 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
3344 SDValue N1, SDValue N2, SDValue N3,
3346 SDValue Ops[] = { N1, N2, N3, N4 };
3347 return getNode(Opcode, DL, VT, Ops, 4);
3350 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
3351 SDValue N1, SDValue N2, SDValue N3,
3352 SDValue N4, SDValue N5) {
3353 SDValue Ops[] = { N1, N2, N3, N4, N5 };
3354 return getNode(Opcode, DL, VT, Ops, 5);
3357 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3358 /// the incoming stack arguments to be loaded from the stack.
3359 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3360 SmallVector<SDValue, 8> ArgChains;
3362 // Include the original chain at the beginning of the list. When this is
3363 // used by target LowerCall hooks, this helps legalize find the
3364 // CALLSEQ_BEGIN node.
3365 ArgChains.push_back(Chain);
3367 // Add a chain value for each stack argument.
3368 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3369 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3370 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3371 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3372 if (FI->getIndex() < 0)
3373 ArgChains.push_back(SDValue(L, 1));
3375 // Build a tokenfactor for all the chains.
3376 return getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
3377 &ArgChains[0], ArgChains.size());
3380 /// getMemsetValue - Vectorized representation of the memset value
3381 /// operand.
3382 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3384 assert(Value.getOpcode() != ISD::UNDEF);
3386 unsigned NumBits = VT.getScalarType().getSizeInBits();
3387 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3388 assert(C->getAPIntValue().getBitWidth() == 8);
3389 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3391 return DAG.getConstant(Val, VT);
3392 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3395 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3397 // Use a multiplication with 0x010101... to extend the input to the
3399 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3400 Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
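// For example, an i32 memset with byte value 0xAB zero-extends the byte
// and multiplies by 0x01010101, producing the splat constant 0xABABABAB.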
3406 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
3407 /// used when a memcpy is turned into a memset because the source is a
3408 /// constant string pointer.
3409 static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
3410 const TargetLowering &TLI, StringRef Str) {
3411 // Handle vector with all elements zero.
3414 return DAG.getConstant(0, VT);
3415 else if (VT == MVT::f32 || VT == MVT::f64)
3416 return DAG.getConstantFP(0.0, VT);
3417 else if (VT.isVector()) {
3418 unsigned NumElts = VT.getVectorNumElements();
3419 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3420 return DAG.getNode(ISD::BITCAST, dl, VT,
3421 DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3424 llvm_unreachable("Expected type!");
3427 assert(!VT.isVector() && "Can't handle vector type here!");
3428 unsigned NumVTBits = VT.getSizeInBits();
3429 unsigned NumVTBytes = NumVTBits / 8;
3430 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3432 APInt Val(NumVTBits, 0);
3433 if (TLI.isLittleEndian()) {
3434 for (unsigned i = 0; i != NumBytes; ++i)
3435 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3437 for (unsigned i = 0; i != NumBytes; ++i)
3438 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
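// For example, packing "abcd" into an i32 gives 0x64636261 on a
// little-endian target and 0x61626364 on a big-endian one.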
3441 // If the "cost" of materializing the integer immediate is 1 or free, then
3442 // it is cost effective to turn the load into the immediate.
3443 const TargetTransformInfo *TTI = DAG.getTargetTransformInfo();
3444 if (TTI->getIntImmCost(Val, VT.getTypeForEVT(*DAG.getContext())) < 2)
3445 return DAG.getConstant(Val, VT);
3446 return SDValue(0, 0);
3449 /// getMemBasePlusOffset - Returns a node that computes Base plus the
3450 /// constant Offset.
3451 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset,
3452 SelectionDAG &DAG) {
3453 EVT VT = Base.getValueType();
3454 return DAG.getNode(ISD::ADD, Base.getDebugLoc(),
3455 VT, Base, DAG.getConstant(Offset, VT));
3458 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
3460 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3461 unsigned SrcDelta = 0;
3462 GlobalAddressSDNode *G = NULL;
3463 if (Src.getOpcode() == ISD::GlobalAddress)
3464 G = cast<GlobalAddressSDNode>(Src);
3465 else if (Src.getOpcode() == ISD::ADD &&
3466 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3467 Src.getOperand(1).getOpcode() == ISD::Constant) {
3468 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3469 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3474 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
3477 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3478 /// to replace the memset / memcpy. Return true if the number of memory ops
3479 /// is below the threshold. It returns, by reference, the types of the
3480 /// sequence of memory ops used to perform the memset / memcpy.
3481 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3482 unsigned Limit, uint64_t Size,
3483 unsigned DstAlign, unsigned SrcAlign,
3484 bool IsMemset,
3485 bool ZeroMemset,
3486 bool MemcpyStrSrc,
3487 bool AllowOverlap,
3488 SelectionDAG &DAG,
3489 const TargetLowering &TLI) {
3490 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3491 "Expecting memcpy / memset source to meet alignment requirement!");
3492 // If 'SrcAlign' is zero, that means the memory operation does not need to
3493 // load the value, i.e. memset or memcpy from constant string. Otherwise,
3494 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3495 // is the specified alignment of the memory operation. If it is zero, that
3496 // means it's possible to change the alignment of the destination.
3497 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3498 // not need to be loaded.
3499 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3500 IsMemset, ZeroMemset, MemcpyStrSrc,
3501 DAG.getMachineFunction());
3503 if (VT == MVT::Other) {
3504 if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
3505 TLI.allowsUnalignedMemoryAccesses(VT)) {
3506 VT = TLI.getPointerTy();
3507 } else {
3508 switch (DstAlign & 7) {
3509 case 0: VT = MVT::i64; break;
3510 case 4: VT = MVT::i32; break;
3511 case 2: VT = MVT::i16; break;
3512 default: VT = MVT::i8; break;
3513 }
3514 }
3516 MVT LVT = MVT::i64;
3517 while (!TLI.isTypeLegal(LVT))
3518 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3519 assert(LVT.isInteger());
3521 if (VT.bitsGT(LVT))
3522 VT = LVT;
3523 }
3525 unsigned NumMemOps = 0;
3526 while (Size != 0) {
3527 unsigned VTSize = VT.getSizeInBits() / 8;
3528 while (VTSize > Size) {
3529 // For now, only use non-vector loads / stores for the left-over pieces.
3530 EVT NewVT = VT;
3531 unsigned NewVTSize;
3533 bool Found = false;
3534 if (VT.isVector() || VT.isFloatingPoint()) {
3535 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3536 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3537 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3538 Found = true;
3539 else if (NewVT == MVT::i64 &&
3540 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3541 TLI.isSafeMemOpType(MVT::f64)) {
3542 // i64 is usually not legal on 32-bit targets, but f64 may be.
3543 NewVT = MVT::f64;
3544 Found = true;
3545 }
3546 }
3548 if (!Found) {
3549 do {
3550 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3551 if (NewVT == MVT::i8)
3552 break;
3553 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3554 }
3555 NewVTSize = NewVT.getSizeInBits() / 8;
3557 // If the new VT cannot cover all of the remaining bits, then consider
3558 // issuing a (or a pair of) unaligned and overlapping load / store.
3559 // FIXME: Only does this for 64-bit or more since we don't have proper
3560 // cost model for unaligned load / store.
3561 bool Fast;
3562 if (NumMemOps && AllowOverlap &&
3563 VTSize >= 8 && NewVTSize < Size &&
3564 TLI.allowsUnalignedMemoryAccesses(VT, &Fast) && Fast)
3565 VTSize = Size;
3566 else {
3567 VT = NewVT;
3568 VTSize = NewVTSize;
3569 }
3570 }
3572 if (++NumMemOps > Limit)
3573 return false;
3575 MemOps.push_back(VT);
3576 Size -= VTSize;
3577 }
3579 return true;
3580 }
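// Illustrative walk-through (not in the original source): lowering a
// 15-byte memcpy on a target whose optimal type is i64 first records an
// i64 (Size 15 -> 7); i64 no longer fits, so VT shrinks to i32 (7 -> 3),
// then i16 (3 -> 1), then i8, giving MemOps = {i64, i32, i16, i8}. With
// AllowOverlap and fast unaligned access the shrink is skipped and a
// second, overlapping i64 covering bytes 7..14 is recorded instead,
// giving just two memory ops.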
3582 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
3583 SDValue Chain, SDValue Dst,
3584 SDValue Src, uint64_t Size,
3585 unsigned Align, bool isVol,
3586 bool AlwaysInline,
3587 MachinePointerInfo DstPtrInfo,
3588 MachinePointerInfo SrcPtrInfo) {
3589 // Turn a memcpy of undef to nop.
3590 if (Src.getOpcode() == ISD::UNDEF)
3591 return Chain;
3593 // Expand memcpy to a series of load and store ops if the size operand falls
3594 // below a certain threshold.
3595 // TODO: In the AlwaysInline case, if the size is big then generate a loop
3596 // rather than maybe a humongous number of loads and stores.
3597 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3598 std::vector<EVT> MemOps;
3599 bool DstAlignCanChange = false;
3600 MachineFunction &MF = DAG.getMachineFunction();
3601 MachineFrameInfo *MFI = MF.getFrameInfo();
3602 bool OptSize =
3603 MF.getFunction()->getAttributes().
3604 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3605 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3606 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3607 DstAlignCanChange = true;
3608 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3609 if (Align > SrcAlign)
3610 SrcAlign = Align;
3611 StringRef Str;
3612 bool CopyFromStr = isMemSrcFromString(Src, Str);
3613 bool isZeroStr = CopyFromStr && Str.empty();
3614 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
3616 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3617 (DstAlignCanChange ? 0 : Align),
3618 (isZeroStr ? 0 : SrcAlign),
3619 false, false, CopyFromStr, true, DAG, TLI))
3620 return SDValue();
3622 if (DstAlignCanChange) {
3623 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3624 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3626 // Don't promote to an alignment that would require dynamic stack
3628 const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
3629 if (!TRI->needsStackRealignment(MF))
3630 while (NewAlign > Align &&
3631 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
3632 NewAlign /= 2;
3634 if (NewAlign > Align) {
3635 // Give the stack frame object a larger alignment if needed.
3636 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3637 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3638 Align = NewAlign;
3639 }
3640 }
3642 SmallVector<SDValue, 8> OutChains;
3643 unsigned NumMemOps = MemOps.size();
3644 uint64_t SrcOff = 0, DstOff = 0;
3645 for (unsigned i = 0; i != NumMemOps; ++i) {
3646 EVT VT = MemOps[i];
3647 unsigned VTSize = VT.getSizeInBits() / 8;
3648 SDValue Value, Store;
3650 if (VTSize > Size) {
3651 // Issuing an unaligned load / store pair that overlaps with the previous
3652 // pair. Adjust the offset accordingly.
3653 assert(i == NumMemOps-1 && i != 0);
3654 SrcOff -= VTSize - Size;
3655 DstOff -= VTSize - Size;
3656 Size = VTSize;
3657 }
3658 if (CopyFromStr &&
3659 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
3660 // It's unlikely a store of a vector immediate can be done in a single
3661 // instruction. It would require a load from a constant pool first.
3662 // We only handle zero vectors here.
3663 // FIXME: Handle other cases where store of vector immediate is done in
3664 // a single instruction.
3665 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
3666 if (Value.getNode())
3667 Store = DAG.getStore(Chain, dl, Value,
3668 getMemBasePlusOffset(Dst, DstOff, DAG),
3669 DstPtrInfo.getWithOffset(DstOff), isVol,
3670 false, Align);
3671 }
3673 if (!Store.getNode()) {
3674 // The type might not be legal for the target. This should only happen
3675 // if the type is smaller than a legal type, as on PPC, so the right
3676 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
3677 // to Load/Store if NVT==VT.
3678 // FIXME does the case above also need this?
3679 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3680 assert(NVT.bitsGE(VT));
3681 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3682 getMemBasePlusOffset(Src, SrcOff, DAG),
3683 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
3684 MinAlign(SrcAlign, SrcOff));
3685 Store = DAG.getTruncStore(Chain, dl, Value,
3686 getMemBasePlusOffset(Dst, DstOff, DAG),
3687 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
3688 false, Align);
3689 }
3690 OutChains.push_back(Store);
3691 SrcOff += VTSize;
3692 DstOff += VTSize;
3693 Size -= VTSize;
3694 }
3696 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3697 &OutChains[0], OutChains.size());
3698 }
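// Illustrative sketch (not in the original source): with Size = 5 and
// MemOps = {i32, i8}, the loop above emits
//   store i32 (load i32 src+0), dst+0   ; or a constant if CopyFromStr
//   store i8  (load i8  src+4), dst+4
// and the trailing TokenFactor ties the store chains back together.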
3700 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
3701 SDValue Chain, SDValue Dst,
3702 SDValue Src, uint64_t Size,
3703 unsigned Align, bool isVol,
3704 bool AlwaysInline,
3705 MachinePointerInfo DstPtrInfo,
3706 MachinePointerInfo SrcPtrInfo) {
3707 // Turn a memmove of undef to nop.
3708 if (Src.getOpcode() == ISD::UNDEF)
3709 return Chain;
3711 // Expand memmove to a series of load and store ops if the size operand falls
3712 // below a certain threshold.
3713 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3714 std::vector<EVT> MemOps;
3715 bool DstAlignCanChange = false;
3716 MachineFunction &MF = DAG.getMachineFunction();
3717 MachineFrameInfo *MFI = MF.getFrameInfo();
3718 bool OptSize = MF.getFunction()->getAttributes().
3719 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3720 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3721 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3722 DstAlignCanChange = true;
3723 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3724 if (Align > SrcAlign)
3725 SrcAlign = Align;
3726 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
3728 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3729 (DstAlignCanChange ? 0 : Align), SrcAlign,
3730 false, false, false, false, DAG, TLI))
3731 return SDValue();
3733 if (DstAlignCanChange) {
3734 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3735 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3736 if (NewAlign > Align) {
3737 // Give the stack frame object a larger alignment if needed.
3738 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3739 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3740 Align = NewAlign;
3741 }
3742 }
3744 uint64_t SrcOff = 0, DstOff = 0;
3745 SmallVector<SDValue, 8> LoadValues;
3746 SmallVector<SDValue, 8> LoadChains;
3747 SmallVector<SDValue, 8> OutChains;
3748 unsigned NumMemOps = MemOps.size();
3749 for (unsigned i = 0; i < NumMemOps; i++) {
3750 EVT VT = MemOps[i];
3751 unsigned VTSize = VT.getSizeInBits() / 8;
3752 SDValue Value, Store;
3754 Value = DAG.getLoad(VT, dl, Chain,
3755 getMemBasePlusOffset(Src, SrcOff, DAG),
3756 SrcPtrInfo.getWithOffset(SrcOff), isVol,
3757 false, false, SrcAlign);
3758 LoadValues.push_back(Value);
3759 LoadChains.push_back(Value.getValue(1));
3760 SrcOff += VTSize;
3761 }
3762 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3763 &LoadChains[0], LoadChains.size());
3765 for (unsigned i = 0; i < NumMemOps; i++) {
3766 EVT VT = MemOps[i];
3767 unsigned VTSize = VT.getSizeInBits() / 8;
3768 SDValue Value, Store;
3770 Store = DAG.getStore(Chain, dl, LoadValues[i],
3771 getMemBasePlusOffset(Dst, DstOff, DAG),
3772 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
3773 OutChains.push_back(Store);
3774 DstOff += VTSize;
3775 }
3777 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3778 &OutChains[0], OutChains.size());
3779 }
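// Illustrative note (not in the original source): unlike the memcpy
// expansion, all loads are issued before any store. For overlapping
// regions, e.g. memmove(p+1, p, 8), interleaving a store between loads
// could clobber bytes that have not been read yet; materializing every
// loaded value first makes the expansion safe for any overlap.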
3781 static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
3782 SDValue Chain, SDValue Dst,
3783 SDValue Src, uint64_t Size,
3784 unsigned Align, bool isVol,
3785 MachinePointerInfo DstPtrInfo) {
3786 // Turn a memset of undef to nop.
3787 if (Src.getOpcode() == ISD::UNDEF)
3788 return Chain;
3790 // Expand memset to a series of load/store ops if the size operand
3791 // falls below a certain threshold.
3792 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3793 std::vector<EVT> MemOps;
3794 bool DstAlignCanChange = false;
3795 MachineFunction &MF = DAG.getMachineFunction();
3796 MachineFrameInfo *MFI = MF.getFrameInfo();
3797 bool OptSize = MF.getFunction()->getAttributes().
3798 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3799 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3800 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3801 DstAlignCanChange = true;
3802 bool IsZeroVal =
3803 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
3804 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
3805 Size, (DstAlignCanChange ? 0 : Align), 0,
3806 true, IsZeroVal, false, true, DAG, TLI))
3807 return SDValue();
3809 if (DstAlignCanChange) {
3810 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3811 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3812 if (NewAlign > Align) {
3813 // Give the stack frame object a larger alignment if needed.
3814 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3815 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3816 Align = NewAlign;
3817 }
3818 }
3820 SmallVector<SDValue, 8> OutChains;
3821 uint64_t DstOff = 0;
3822 unsigned NumMemOps = MemOps.size();
3824 // Find the largest store and generate the bit pattern for it.
3825 EVT LargestVT = MemOps[0];
3826 for (unsigned i = 1; i < NumMemOps; i++)
3827 if (MemOps[i].bitsGT(LargestVT))
3828 LargestVT = MemOps[i];
3829 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
3831 for (unsigned i = 0; i < NumMemOps; i++) {
3832 EVT VT = MemOps[i];
3833 unsigned VTSize = VT.getSizeInBits() / 8;
3834 if (VTSize > Size) {
3835 // Issuing an unaligned load / store pair that overlaps with the previous
3836 // pair. Adjust the offset accordingly.
3837 assert(i == NumMemOps-1 && i != 0);
3838 DstOff -= VTSize - Size;
3839 Size = VTSize;
3840 }
3841 // If this store is smaller than the largest store see whether we can get
3842 // the smaller value for free with a truncate.
3843 SDValue Value = MemSetValue;
3844 if (VT.bitsLT(LargestVT)) {
3845 if (!LargestVT.isVector() && !VT.isVector() &&
3846 TLI.isTruncateFree(LargestVT, VT))
3847 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
3849 Value = getMemsetValue(Src, VT, DAG, dl);
3851 assert(Value.getValueType() == VT && "Value with wrong type.");
3852 SDValue Store = DAG.getStore(Chain, dl, Value,
3853 getMemBasePlusOffset(Dst, DstOff, DAG),
3854 DstPtrInfo.getWithOffset(DstOff),
3855 isVol, false, Align);
3856 OutChains.push_back(Store);
3857 DstOff += VT.getSizeInBits() / 8;
3858 Size -= VTSize;
3859 }
3861 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3862 &OutChains[0], OutChains.size());
3863 }
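// Illustrative sketch (not in the original source): memset(p, 0xAB, 7)
// with MemOps = {i32, i16, i8} computes the splat once for the largest
// type (0xABABABAB : i32) and, when isTruncateFree says so, derives the
// smaller values with TRUNCATE instead of recomputing the splat:
//   store i32 0xABABABAB, p+0
//   store i16 trunc(0xABABABAB), p+4
//   store i8  trunc(0xABABABAB), p+6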
3865 SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
3866 SDValue Src, SDValue Size,
3867 unsigned Align, bool isVol, bool AlwaysInline,
3868 MachinePointerInfo DstPtrInfo,
3869 MachinePointerInfo SrcPtrInfo) {
3871 // Check to see if we should lower the memcpy to loads and stores first.
3872 // For cases within the target-specified limits, this is the best choice.
3873 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3874 if (ConstantSize) {
3875 // Memcpy with size zero? Just return the original chain.
3876 if (ConstantSize->isNullValue())
3877 return Chain;
3879 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3880 ConstantSize->getZExtValue(), Align,
3881 isVol, false, DstPtrInfo, SrcPtrInfo);
3882 if (Result.getNode())
3883 return Result;
3884 }
3886 // Then check to see if we should lower the memcpy with target-specific
3887 // code. If the target chooses to do this, this is the next best.
3888 SDValue Result =
3889 TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
3890 isVol, AlwaysInline,
3891 DstPtrInfo, SrcPtrInfo);
3892 if (Result.getNode())
3893 return Result;
3895 // If we really need inline code and the target declined to provide it,
3896 // use a (potentially long) sequence of loads and stores.
3897 if (AlwaysInline) {
3898 assert(ConstantSize && "AlwaysInline requires a constant size!");
3899 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3900 ConstantSize->getZExtValue(), Align, isVol,
3901 true, DstPtrInfo, SrcPtrInfo);
3902 }
3904 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
3905 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
3906 // respect volatile, so they may do things like read or write memory
3907 // beyond the given memory regions. But fixing this isn't easy, and most
3908 // people don't care.
3910 // Emit a library call.
3911 TargetLowering::ArgListTy Args;
3912 TargetLowering::ArgListEntry Entry;
3913 Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
3914 Entry.Node = Dst; Args.push_back(Entry);
3915 Entry.Node = Src; Args.push_back(Entry);
3916 Entry.Node = Size; Args.push_back(Entry);
3917 // FIXME: pass in DebugLoc
3918 TargetLowering::
3919 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
3920 false, false, false, false, 0,
3921 TLI.getLibcallCallingConv(RTLIB::MEMCPY),
3922 /*isTailCall=*/false,
3923 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
3924 getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY),
3925 TLI.getPointerTy()),
3926 Args, *this, dl);
3927 std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
3929 return CallResult.second;
3932 SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
3933 SDValue Src, SDValue Size,
3934 unsigned Align, bool isVol,
3935 MachinePointerInfo DstPtrInfo,
3936 MachinePointerInfo SrcPtrInfo) {
3938 // Check to see if we should lower the memmove to loads and stores first.
3939 // For cases within the target-specified limits, this is the best choice.
3940 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3941 if (ConstantSize) {
3942 // Memmove with size zero? Just return the original chain.
3943 if (ConstantSize->isNullValue())
3944 return Chain;
3946 SDValue Result =
3947 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
3948 ConstantSize->getZExtValue(), Align, isVol,
3949 false, DstPtrInfo, SrcPtrInfo);
3950 if (Result.getNode())
3951 return Result;
3952 }
3954 // Then check to see if we should lower the memmove with target-specific
3955 // code. If the target chooses to do this, this is the next best.
3956 SDValue Result =
3957 TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
3958 DstPtrInfo, SrcPtrInfo);
3959 if (Result.getNode())
3960 return Result;
3962 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
3963 // not be safe. See memcpy above for more details.
3965 // Emit a library call.
3966 TargetLowering::ArgListTy Args;
3967 TargetLowering::ArgListEntry Entry;
3968 Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
3969 Entry.Node = Dst; Args.push_back(Entry);
3970 Entry.Node = Src; Args.push_back(Entry);
3971 Entry.Node = Size; Args.push_back(Entry);
3972 // FIXME: pass in DebugLoc
3973 TargetLowering::
3974 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
3975 false, false, false, false, 0,
3976 TLI.getLibcallCallingConv(RTLIB::MEMMOVE),
3977 /*isTailCall=*/false,
3978 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
3979 getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE),
3980 TLI.getPointerTy()),
3981 Args, *this, dl);
3982 std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
3984 return CallResult.second;
3987 SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
3988 SDValue Src, SDValue Size,
3989 unsigned Align, bool isVol,
3990 MachinePointerInfo DstPtrInfo) {
3992 // Check to see if we should lower the memset to stores first.
3993 // For cases within the target-specified limits, this is the best choice.
3994 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3995 if (ConstantSize) {
3996 // Memset with size zero? Just return the original chain.
3997 if (ConstantSize->isNullValue())
3998 return Chain;
4000 SDValue Result =
4001 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4002 Align, isVol, DstPtrInfo);
4004 if (Result.getNode())
4005 return Result;
4006 }
4008 // Then check to see if we should lower the memset with target-specific
4009 // code. If the target chooses to do this, this is the next best.
4010 SDValue Result =
4011 TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4012 DstPtrInfo);
4013 if (Result.getNode())
4014 return Result;
4016 // Emit a library call.
4017 Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext());
4018 TargetLowering::ArgListTy Args;
4019 TargetLowering::ArgListEntry Entry;
4020 Entry.Node = Dst; Entry.Ty = IntPtrTy;
4021 Args.push_back(Entry);
4022 // Extend or truncate the argument to be an i32 value for the call.
4023 if (Src.getValueType().bitsGT(MVT::i32))
4024 Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4025 else
4026 Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4027 Entry.Node = Src;
4028 Entry.Ty = Type::getInt32Ty(*getContext());
4029 Entry.isSExt = true;
4030 Args.push_back(Entry);
4031 Entry.Node = Size;
4032 Entry.Ty = IntPtrTy;
4033 Entry.isSExt = false;
4034 Args.push_back(Entry);
4035 // FIXME: pass in DebugLoc
4037 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4038 false, false, false, false, 0,
4039 TLI.getLibcallCallingConv(RTLIB::MEMSET),
4040 /*isTailCall=*/false,
4041 /*doesNotReturn*/false, /*isReturnValueUsed=*/false,
4042 getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
4043 TLI.getPointerTy()),
4044 Args, *this, dl);
4045 std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
4047 return CallResult.second;
4050 SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4051 SDValue Chain, SDValue Ptr, SDValue Cmp,
4052 SDValue Swp, MachinePointerInfo PtrInfo,
4053 unsigned Alignment,
4054 AtomicOrdering Ordering,
4055 SynchronizationScope SynchScope) {
4056 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4057 Alignment = getEVTAlignment(MemVT);
4059 MachineFunction &MF = getMachineFunction();
4061 // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
4062 // For now, atomics are considered to be volatile always.
4063 // FIXME: Volatile isn't really correct; we should keep track of atomic
4064 // orderings in the memoperand.
4065 unsigned Flags = MachineMemOperand::MOVolatile;
4066 if (Opcode != ISD::ATOMIC_STORE)
4067 Flags |= MachineMemOperand::MOLoad;
4068 if (Opcode != ISD::ATOMIC_LOAD)
4069 Flags |= MachineMemOperand::MOStore;
4071 MachineMemOperand *MMO =
4072 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4074 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
4075 Ordering, SynchScope);
4078 SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4079 SDValue Chain,
4080 SDValue Ptr, SDValue Cmp,
4081 SDValue Swp, MachineMemOperand *MMO,
4082 AtomicOrdering Ordering,
4083 SynchronizationScope SynchScope) {
4084 assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
4085 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4087 EVT VT = Cmp.getValueType();
4089 SDVTList VTs = getVTList(VT, MVT::Other);
4090 FoldingSetNodeID ID;
4091 ID.AddInteger(MemVT.getRawBits());
4092 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4093 AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
4094 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4095 void* IP = 0;
4096 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4097 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4098 return SDValue(E, 0);
4099 }
4100 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
4101 Ptr, Cmp, Swp, MMO, Ordering,
4102 SynchScope);
4103 CSEMap.InsertNode(N, IP);
4104 AllNodes.push_back(N);
4105 return SDValue(N, 0);
4106 }
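// Illustrative sketch (not in the original source): an IR instruction
//   %old = cmpxchg i32* %p, i32 %cmp, i32 %new seq_cst
// reaches this builder roughly as
//   getAtomic(ISD::ATOMIC_CMP_SWAP, dl, MVT::i32, Chain, Ptr, Cmp, New,
//             MMO, /*Ordering=*/SequentiallyConsistent, CrossThread);
// producing a node with results { i32 old value, Other chain }.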
4108 SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4109 SDValue Chain,
4110 SDValue Ptr, SDValue Val,
4111 const Value* PtrVal,
4112 unsigned Alignment,
4113 AtomicOrdering Ordering,
4114 SynchronizationScope SynchScope) {
4115 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4116 Alignment = getEVTAlignment(MemVT);
4118 MachineFunction &MF = getMachineFunction();
4119 // An atomic store does not load. An atomic load does not store.
4120 // (An atomicrmw obviously both loads and stores.)
4121 // For now, atomics are considered to be volatile always, and they are
4122 // chained as such.
4123 // FIXME: Volatile isn't really correct; we should keep track of atomic
4124 // orderings in the memoperand.
4125 unsigned Flags = MachineMemOperand::MOVolatile;
4126 if (Opcode != ISD::ATOMIC_STORE)
4127 Flags |= MachineMemOperand::MOLoad;
4128 if (Opcode != ISD::ATOMIC_LOAD)
4129 Flags |= MachineMemOperand::MOStore;
4131 MachineMemOperand *MMO =
4132 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4133 MemVT.getStoreSize(), Alignment);
4135 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4136 Ordering, SynchScope);
4139 SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4140 SDValue Chain,
4141 SDValue Ptr, SDValue Val,
4142 MachineMemOperand *MMO,
4143 AtomicOrdering Ordering,
4144 SynchronizationScope SynchScope) {
4145 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4146 Opcode == ISD::ATOMIC_LOAD_SUB ||
4147 Opcode == ISD::ATOMIC_LOAD_AND ||
4148 Opcode == ISD::ATOMIC_LOAD_OR ||
4149 Opcode == ISD::ATOMIC_LOAD_XOR ||
4150 Opcode == ISD::ATOMIC_LOAD_NAND ||
4151 Opcode == ISD::ATOMIC_LOAD_MIN ||
4152 Opcode == ISD::ATOMIC_LOAD_MAX ||
4153 Opcode == ISD::ATOMIC_LOAD_UMIN ||
4154 Opcode == ISD::ATOMIC_LOAD_UMAX ||
4155 Opcode == ISD::ATOMIC_SWAP ||
4156 Opcode == ISD::ATOMIC_STORE) &&
4157 "Invalid Atomic Op");
4159 EVT VT = Val.getValueType();
4161 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4162 getVTList(VT, MVT::Other);
4163 FoldingSetNodeID ID;
4164 ID.AddInteger(MemVT.getRawBits());
4165 SDValue Ops[] = {Chain, Ptr, Val};
4166 AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
4167 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4168 void* IP = 0;
4169 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4170 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4171 return SDValue(E, 0);
4172 }
4173 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
4174 Ptr, Val, MMO,
4175 Ordering, SynchScope);
4176 CSEMap.InsertNode(N, IP);
4177 AllNodes.push_back(N);
4178 return SDValue(N, 0);
4179 }
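// Illustrative sketch (not in the original source): an IR instruction
//   %old = atomicrmw add i32* %p, i32 1 seq_cst
// maps to getAtomic(ISD::ATOMIC_LOAD_ADD, dl, MVT::i32, Chain, Ptr,
// /*Val=*/1, ...), while "store atomic" uses ISD::ATOMIC_STORE, whose
// only result is the chain (hence the VTList special case above).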
4181 SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4182 EVT VT, SDValue Chain,
4183 SDValue Ptr,
4184 const Value* PtrVal,
4185 unsigned Alignment,
4186 AtomicOrdering Ordering,
4187 SynchronizationScope SynchScope) {
4188 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4189 Alignment = getEVTAlignment(MemVT);
4191 MachineFunction &MF = getMachineFunction();
4192 // An atomic store does not load. An atomic load does not store.
4193 // (An atomicrmw obviously both loads and stores.)
4194 // For now, atomics are considered to be volatile always, and they are
4195 // chained as such.
4196 // FIXME: Volatile isn't really correct; we should keep track of atomic
4197 // orderings in the memoperand.
4198 unsigned Flags = MachineMemOperand::MOVolatile;
4199 if (Opcode != ISD::ATOMIC_STORE)
4200 Flags |= MachineMemOperand::MOLoad;
4201 if (Opcode != ISD::ATOMIC_LOAD)
4202 Flags |= MachineMemOperand::MOStore;
4204 MachineMemOperand *MMO =
4205 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4206 MemVT.getStoreSize(), Alignment);
4208 return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO,
4209 Ordering, SynchScope);
4212 SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4213 EVT VT, SDValue Chain,
4214 SDValue Ptr,
4215 MachineMemOperand *MMO,
4216 AtomicOrdering Ordering,
4217 SynchronizationScope SynchScope) {
4218 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4220 SDVTList VTs = getVTList(VT, MVT::Other);
4221 FoldingSetNodeID ID;
4222 ID.AddInteger(MemVT.getRawBits());
4223 SDValue Ops[] = {Chain, Ptr};
4224 AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
4225 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4226 void* IP = 0;
4227 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4228 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4229 return SDValue(E, 0);
4230 }
4231 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
4232 Ptr, MMO, Ordering, SynchScope);
4233 CSEMap.InsertNode(N, IP);
4234 AllNodes.push_back(N);
4235 return SDValue(N, 0);
4238 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
4239 SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
4240 DebugLoc dl) {
4241 if (NumOps == 1)
4242 return Ops[0];
4244 SmallVector<EVT, 4> VTs;
4245 VTs.reserve(NumOps);
4246 for (unsigned i = 0; i < NumOps; ++i)
4247 VTs.push_back(Ops[i].getValueType());
4248 return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps),
4249 Ops, NumOps);
4250 }
4252 SDValue
4253 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
4254 const EVT *VTs, unsigned NumVTs,
4255 const SDValue *Ops, unsigned NumOps,
4256 EVT MemVT, MachinePointerInfo PtrInfo,
4257 unsigned Align, bool Vol,
4258 bool ReadMem, bool WriteMem) {
4259 return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
4260 MemVT, PtrInfo, Align, Vol,
4261 ReadMem, WriteMem);
4262 }
4264 SDValue
4265 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
4266 const SDValue *Ops, unsigned NumOps,
4267 EVT MemVT, MachinePointerInfo PtrInfo,
4268 unsigned Align, bool Vol,
4269 bool ReadMem, bool WriteMem) {
4270 if (Align == 0) // Ensure that codegen never sees alignment 0
4271 Align = getEVTAlignment(MemVT);
4273 MachineFunction &MF = getMachineFunction();
4274 unsigned Flags = 0;
4275 if (WriteMem)
4276 Flags |= MachineMemOperand::MOStore;
4277 if (ReadMem)
4278 Flags |= MachineMemOperand::MOLoad;
4279 if (Vol)
4280 Flags |= MachineMemOperand::MOVolatile;
4281 MachineMemOperand *MMO =
4282 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
4284 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
4287 SDValue
4288 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
4289 const SDValue *Ops, unsigned NumOps,
4290 EVT MemVT, MachineMemOperand *MMO) {
4291 assert((Opcode == ISD::INTRINSIC_VOID ||
4292 Opcode == ISD::INTRINSIC_W_CHAIN ||
4293 Opcode == ISD::PREFETCH ||
4294 Opcode == ISD::LIFETIME_START ||
4295 Opcode == ISD::LIFETIME_END ||
4296 (Opcode <= INT_MAX &&
4297 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4298 "Opcode is not a memory-accessing opcode!");
4300 // Memoize the node unless it returns a flag.
4301 MemIntrinsicSDNode *N;
4302 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4303 FoldingSetNodeID ID;
4304 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4305 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4306 void *IP = 0;
4307 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4308 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4309 return SDValue(E, 0);
4310 }
4312 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps,
4313 MemVT, MMO);
4314 CSEMap.InsertNode(N, IP);
4315 } else {
4316 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps,
4317 MemVT, MMO);
4318 }
4319 AllNodes.push_back(N);
4320 return SDValue(N, 0);
4323 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4324 /// MachinePointerInfo record from it. This is particularly useful because the
4325 /// code generator has many cases where it doesn't bother passing in a
4326 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4327 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4328 // If this is FI+Offset, we can model it.
4329 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4330 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4332 // If this is (FI+Offset1)+Offset2, we can model it.
4333 if (Ptr.getOpcode() != ISD::ADD ||
4334 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4335 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4336 return MachinePointerInfo();
4338 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4339 return MachinePointerInfo::getFixedStack(FI, Offset+
4340 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
4341 }
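// Illustrative sketch (not in the original source): for a stack slot
// access like (add FrameIndex:<2>, Constant:8), the overloads above
// recover MachinePointerInfo::getFixedStack(2, 8), letting alias
// analysis see through loads/stores created without explicit PtrInfo.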
4343 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4344 /// MachinePointerInfo record from it. This is particularly useful because the
4345 /// code generator has many cases where it doesn't bother passing in a
4346 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4347 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4348 // If the 'Offset' value isn't a constant, we can't handle this.
4349 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4350 return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4351 if (OffsetOp.getOpcode() == ISD::UNDEF)
4352 return InferPointerInfo(Ptr);
4353 return MachinePointerInfo();
4357 SDValue
4358 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4359 EVT VT, DebugLoc dl, SDValue Chain,
4360 SDValue Ptr, SDValue Offset,
4361 MachinePointerInfo PtrInfo, EVT MemVT,
4362 bool isVolatile, bool isNonTemporal, bool isInvariant,
4363 unsigned Alignment, const MDNode *TBAAInfo,
4364 const MDNode *Ranges) {
4365 assert(Chain.getValueType() == MVT::Other &&
4366 "Invalid chain type");
4367 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4368 Alignment = getEVTAlignment(VT);
4370 unsigned Flags = MachineMemOperand::MOLoad;
4371 if (isVolatile)
4372 Flags |= MachineMemOperand::MOVolatile;
4373 if (isNonTemporal)
4374 Flags |= MachineMemOperand::MONonTemporal;
4375 if (isInvariant)
4376 Flags |= MachineMemOperand::MOInvariant;
4378 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
4379 // the code below.
4380 if (PtrInfo.V == 0)
4381 PtrInfo = InferPointerInfo(Ptr, Offset);
4383 MachineFunction &MF = getMachineFunction();
4384 MachineMemOperand *MMO =
4385 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
4386 TBAAInfo, Ranges);
4387 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
4390 SDValue
4391 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4392 EVT VT, DebugLoc dl, SDValue Chain,
4393 SDValue Ptr, SDValue Offset, EVT MemVT,
4394 MachineMemOperand *MMO) {
4395 if (VT == MemVT) {
4396 ExtType = ISD::NON_EXTLOAD;
4397 } else if (ExtType == ISD::NON_EXTLOAD) {
4398 assert(VT == MemVT && "Non-extending load from different memory type!");
4399 } else {
4400 // Extending load.
4401 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
4402 "Should only be an extending load, not truncating!");
4403 assert(VT.isInteger() == MemVT.isInteger() &&
4404 "Cannot convert from FP to Int or Int -> FP!");
4405 assert(VT.isVector() == MemVT.isVector() &&
4406 "Cannot use trunc store to convert to or from a vector!");
4407 assert((!VT.isVector() ||
4408 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
4409 "Cannot use trunc store to change the number of vector elements!");
4412 bool Indexed = AM != ISD::UNINDEXED;
4413 assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
4414 "Unindexed load with an offset!");
4416 SDVTList VTs = Indexed ?
4417 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4418 SDValue Ops[] = { Chain, Ptr, Offset };
4419 FoldingSetNodeID ID;
4420 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
4421 ID.AddInteger(MemVT.getRawBits());
4422 ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4423 MMO->isNonTemporal(),
4424 MMO->isInvariant()));
4425 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4426 void *IP = 0;
4427 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4428 cast<LoadSDNode>(E)->refineAlignment(MMO);
4429 return SDValue(E, 0);
4430 }
4431 SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl, VTs, AM, ExtType,
4432 MemVT, MMO);
4433 CSEMap.InsertNode(N, IP);
4434 AllNodes.push_back(N);
4435 return SDValue(N, 0);
4436 }
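// Illustrative sketch (not in the original source): widening an i8 load
// to i32 can be expressed as
//   SDValue V = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain, Ptr,
//                              PtrInfo, MVT::i8, false, false, 1);
// where MVT::i8 is the memory type and MVT::i32 the result type checked
// by the asserts above.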
4438 SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
4439 SDValue Chain, SDValue Ptr,
4440 MachinePointerInfo PtrInfo,
4441 bool isVolatile, bool isNonTemporal,
4442 bool isInvariant, unsigned Alignment,
4443 const MDNode *TBAAInfo,
4444 const MDNode *Ranges) {
4445 SDValue Undef = getUNDEF(Ptr.getValueType());
4446 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4447 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4448 TBAAInfo, Ranges);
4449 }
4451 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
4452 SDValue Chain, SDValue Ptr,
4453 MachinePointerInfo PtrInfo, EVT MemVT,
4454 bool isVolatile, bool isNonTemporal,
4455 unsigned Alignment, const MDNode *TBAAInfo) {
4456 SDValue Undef = getUNDEF(Ptr.getValueType());
4457 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4458 PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
4459 TBAAInfo);
4460 }
4463 SDValue
4464 SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
4465 SDValue Offset, ISD::MemIndexedMode AM) {
4466 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4467 assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4468 "Load is already a indexed load!");
4469 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4470 LD->getChain(), Base, Offset, LD->getPointerInfo(),
4471 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4472 false, LD->getAlignment());
4475 SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
4476 SDValue Ptr, MachinePointerInfo PtrInfo,
4477 bool isVolatile, bool isNonTemporal,
4478 unsigned Alignment, const MDNode *TBAAInfo) {
4479 assert(Chain.getValueType() == MVT::Other &&
4480 "Invalid chain type");
4481 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4482 Alignment = getEVTAlignment(Val.getValueType());
4484 unsigned Flags = MachineMemOperand::MOStore;
4485 if (isVolatile)
4486 Flags |= MachineMemOperand::MOVolatile;
4487 if (isNonTemporal)
4488 Flags |= MachineMemOperand::MONonTemporal;
4490 if (PtrInfo.V == 0)
4491 PtrInfo = InferPointerInfo(Ptr);
4493 MachineFunction &MF = getMachineFunction();
4494 MachineMemOperand *MMO =
4495 MF.getMachineMemOperand(PtrInfo, Flags,
4496 Val.getValueType().getStoreSize(), Alignment,
4497 TBAAInfo);
4499 return getStore(Chain, dl, Val, Ptr, MMO);
4502 SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
4503 SDValue Ptr, MachineMemOperand *MMO) {
4504 assert(Chain.getValueType() == MVT::Other &&
4505 "Invalid chain type");
4506 EVT VT = Val.getValueType();
4507 SDVTList VTs = getVTList(MVT::Other);
4508 SDValue Undef = getUNDEF(Ptr.getValueType());
4509 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4510 FoldingSetNodeID ID;
4511 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4512 ID.AddInteger(VT.getRawBits());
4513 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4514 MMO->isNonTemporal(), MMO->isInvariant()));
4515 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4516 void *IP = 0;
4517 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4518 cast<StoreSDNode>(E)->refineAlignment(MMO);
4519 return SDValue(E, 0);
4520 }
4521 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED,
4522 false, VT, MMO);
4523 CSEMap.InsertNode(N, IP);
4524 AllNodes.push_back(N);
4525 return SDValue(N, 0);
4528 SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
4529 SDValue Ptr, MachinePointerInfo PtrInfo,
4530 EVT SVT, bool isVolatile, bool isNonTemporal,
4531 unsigned Alignment,
4532 const MDNode *TBAAInfo) {
4533 assert(Chain.getValueType() == MVT::Other &&
4534 "Invalid chain type");
4535 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4536 Alignment = getEVTAlignment(SVT);
4538 unsigned Flags = MachineMemOperand::MOStore;
4539 if (isVolatile)
4540 Flags |= MachineMemOperand::MOVolatile;
4541 if (isNonTemporal)
4542 Flags |= MachineMemOperand::MONonTemporal;
4544 if (PtrInfo.V == 0)
4545 PtrInfo = InferPointerInfo(Ptr);
4547 MachineFunction &MF = getMachineFunction();
4548 MachineMemOperand *MMO =
4549 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
4550 TBAAInfo);
4552 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
4555 SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
4556 SDValue Ptr, EVT SVT,
4557 MachineMemOperand *MMO) {
4558 EVT VT = Val.getValueType();
4560 assert(Chain.getValueType() == MVT::Other &&
4561 "Invalid chain type");
4562 if (VT == SVT)
4563 return getStore(Chain, dl, Val, Ptr, MMO);
4565 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
4566 "Should only be a truncating store, not extending!");
4567 assert(VT.isInteger() == SVT.isInteger() &&
4568 "Can't do FP-INT conversion!");
4569 assert(VT.isVector() == SVT.isVector() &&
4570 "Cannot use trunc store to convert to or from a vector!");
4571 assert((!VT.isVector() ||
4572 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
4573 "Cannot use trunc store to change the number of vector elements!");
4575 SDVTList VTs = getVTList(MVT::Other);
4576 SDValue Undef = getUNDEF(Ptr.getValueType());
4577 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4578 FoldingSetNodeID ID;
4579 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4580 ID.AddInteger(SVT.getRawBits());
4581 ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
4582 MMO->isNonTemporal(), MMO->isInvariant()));
4583 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4584 void *IP = 0;
4585 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4586 cast<StoreSDNode>(E)->refineAlignment(MMO);
4587 return SDValue(E, 0);
4588 }
4589 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED,
4590 true, SVT, MMO);
4591 CSEMap.InsertNode(N, IP);
4592 AllNodes.push_back(N);
4593 return SDValue(N, 0);
4594 }
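// Illustrative sketch (not in the original source): storing the low 16
// bits of an i32 value is
//   SDValue St = DAG.getTruncStore(Chain, dl, Val32, Ptr, MVT::i16, MMO);
// which degenerates to a plain getStore when the value type already
// equals SVT, per the early-out above.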
4596 SDValue
4597 SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
4598 SDValue Offset, ISD::MemIndexedMode AM) {
4599 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
4600 assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4601 "Store is already a indexed store!");
4602 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4603 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4604 FoldingSetNodeID ID;
4605 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4606 ID.AddInteger(ST->getMemoryVT().getRawBits());
4607 ID.AddInteger(ST->getRawSubclassData());
4608 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4609 void *IP = 0;
4610 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4611 return SDValue(E, 0);
4613 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, AM,
4614 ST->isTruncatingStore(),
4615 ST->getMemoryVT(),
4616 ST->getMemOperand());
4617 CSEMap.InsertNode(N, IP);
4618 AllNodes.push_back(N);
4619 return SDValue(N, 0);
4622 SDValue SelectionDAG::getVAArg(EVT VT, DebugLoc dl,
4623 SDValue Chain, SDValue Ptr,
4624 SDValue SV,
4625 unsigned Align) {
4626 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4627 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
4630 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
4631 const SDUse *Ops, unsigned NumOps) {
4632 switch (NumOps) {
4633 case 0: return getNode(Opcode, DL, VT);
4634 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4635 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4636 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4637 default: break;
4638 }
4640 // Copy from an SDUse array into an SDValue array for use with
4641 // the regular getNode logic.
4642 SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
4643 return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
4646 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
4647 const SDValue *Ops, unsigned NumOps) {
4648 switch (NumOps) {
4649 case 0: return getNode(Opcode, DL, VT);
4650 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4651 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4652 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4653 default: break;
4654 }
4656 switch (Opcode) {
4657 default: break;
4658 case ISD::SELECT_CC: {
4659 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4660 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4661 "LHS and RHS of condition must have same type!");
4662 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4663 "True and False arms of SelectCC must have same type!");
4664 assert(Ops[2].getValueType() == VT &&
4665 "select_cc node must be of same type as true and false value!");
4669 assert(NumOps == 5 && "BR_CC takes 5 operands!");
4670 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4671 "LHS/RHS of comparison should match types!");
4678 SDVTList VTs = getVTList(VT);
4680 if (VT != MVT::Glue) {
4681 FoldingSetNodeID ID;
4682 AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
4683 void *IP = 0;
4685 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4686 return SDValue(E, 0);
4688 N = new (NodeAllocator) SDNode(Opcode, DL, VTs, Ops, NumOps);
4689 CSEMap.InsertNode(N, IP);
4690 } else {
4691 N = new (NodeAllocator) SDNode(Opcode, DL, VTs, Ops, NumOps);
4692 }
4694 AllNodes.push_back(N);
4695 #ifndef NDEBUG
4696 VerifySDNode(N);
4697 #endif
4698 return SDValue(N, 0);
4701 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
4702 const std::vector<EVT> &ResultTys,
4703 const SDValue *Ops, unsigned NumOps) {
4704 return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
4708 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
4709 const EVT *VTs, unsigned NumVTs,
4710 const SDValue *Ops, unsigned NumOps) {
4711 if (NumVTs == 1)
4712 return getNode(Opcode, DL, VTs[0], Ops, NumOps);
4713 return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
4716 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4717 const SDValue *Ops, unsigned NumOps) {
4718 if (VTList.NumVTs == 1)
4719 return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
4721 #if 0
4722 switch (Opcode) {
4723 // FIXME: figure out how to safely handle things like
4724 // int foo(int x) { return 1 << (x & 255); }
4725 // int bar() { return foo(256); }
4726 case ISD::SRA_PARTS:
4727 case ISD::SRL_PARTS:
4728 case ISD::SHL_PARTS:
4729 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4730 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
4731 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4732 else if (N3.getOpcode() == ISD::AND)
4733 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
4734 // If the and is only masking out bits that cannot affect the shift,
4735 // eliminate the and.
4736 unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
4737 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
4738 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4739 }
4740 break;
4741 }
4742 #endif
4744 // Memoize the node unless it returns a flag.
4745 SDNode *N;
4746 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4747 FoldingSetNodeID ID;
4748 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4749 void *IP = 0;
4750 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4751 return SDValue(E, 0);
4753 if (NumOps == 1) {
4754 N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTList, Ops[0]);
4755 } else if (NumOps == 2) {
4756 N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
4757 } else if (NumOps == 3) {
4758 N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1],
4759 Ops[2]);
4760 } else {
4761 N = new (NodeAllocator) SDNode(Opcode, DL, VTList, Ops, NumOps);
4762 }
4763 CSEMap.InsertNode(N, IP);
4764 } else {
4765 if (NumOps == 1) {
4766 N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTList, Ops[0]);
4767 } else if (NumOps == 2) {
4768 N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
4769 } else if (NumOps == 3) {
4770 N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1],
4771 Ops[2]);
4772 } else {
4773 N = new (NodeAllocator) SDNode(Opcode, DL, VTList, Ops, NumOps);
4774 }
4775 }
4776 AllNodes.push_back(N);
4777 #ifndef NDEBUG
4778 VerifySDNode(N);
4779 #endif
4780 return SDValue(N, 0);
4783 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList) {
4784 return getNode(Opcode, DL, VTList, 0, 0);
4787 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4789 SDValue Ops[] = { N1 };
4790 return getNode(Opcode, DL, VTList, Ops, 1);
4793 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4794 SDValue N1, SDValue N2) {
4795 SDValue Ops[] = { N1, N2 };
4796 return getNode(Opcode, DL, VTList, Ops, 2);
4799 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4800 SDValue N1, SDValue N2, SDValue N3) {
4801 SDValue Ops[] = { N1, N2, N3 };
4802 return getNode(Opcode, DL, VTList, Ops, 3);
4805 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4806 SDValue N1, SDValue N2, SDValue N3,
4808 SDValue Ops[] = { N1, N2, N3, N4 };
4809 return getNode(Opcode, DL, VTList, Ops, 4);
4812 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4813 SDValue N1, SDValue N2, SDValue N3,
4814 SDValue N4, SDValue N5) {
4815 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4816 return getNode(Opcode, DL, VTList, Ops, 5);
4819 SDVTList SelectionDAG::getVTList(EVT VT) {
4820 return makeVTList(SDNode::getValueTypeList(VT), 1);
4823 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
4824 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4825 E = VTList.rend(); I != E; ++I)
4826 if (I->NumVTs == 2 && I->VTs[0] == VT1 && I->VTs[1] == VT2)
4827 return *I;
4829 EVT *Array = Allocator.Allocate<EVT>(2);
4830 Array[0] = VT1;
4831 Array[1] = VT2;
4832 SDVTList Result = makeVTList(Array, 2);
4833 VTList.push_back(Result);
4834 return Result;
4835 }
4837 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
4838 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4839 E = VTList.rend(); I != E; ++I)
4840 if (I->NumVTs == 3 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4841 I->VTs[2] == VT3)
4842 return *I;
4844 EVT *Array = Allocator.Allocate<EVT>(3);
4845 Array[0] = VT1;
4846 Array[1] = VT2;
4847 Array[2] = VT3;
4848 SDVTList Result = makeVTList(Array, 3);
4849 VTList.push_back(Result);
4850 return Result;
4851 }
4853 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
4854 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4855 E = VTList.rend(); I != E; ++I)
4856 if (I->NumVTs == 4 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4857 I->VTs[2] == VT3 && I->VTs[3] == VT4)
4858 return *I;
4860 EVT *Array = Allocator.Allocate<EVT>(4);
4861 Array[0] = VT1;
4862 Array[1] = VT2;
4863 Array[2] = VT3;
4864 Array[3] = VT4;
4865 SDVTList Result = makeVTList(Array, 4);
4866 VTList.push_back(Result);
4867 return Result;
4868 }
4870 SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
4871 switch (NumVTs) {
4872 case 0: llvm_unreachable("Cannot have nodes without results!");
4873 case 1: return getVTList(VTs[0]);
4874 case 2: return getVTList(VTs[0], VTs[1]);
4875 case 3: return getVTList(VTs[0], VTs[1], VTs[2]);
4876 case 4: return getVTList(VTs[0], VTs[1], VTs[2], VTs[3]);
4877 default: break;
4878 }
4880 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4881 E = VTList.rend(); I != E; ++I) {
4882 if (I->NumVTs != NumVTs || VTs[0] != I->VTs[0] || VTs[1] != I->VTs[1])
4883 continue;
4885 if (std::equal(&VTs[2], &VTs[NumVTs], &I->VTs[2]))
4886 return *I;
4887 }
4889 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
4890 std::copy(VTs, VTs+NumVTs, Array);
4891 SDVTList Result = makeVTList(Array, NumVTs);
4892 VTList.push_back(Result);
4893 return Result;
4894 }
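// Illustrative note (not in the original source): VT lists are interned,
// so repeated queries return the same backing array, e.g.
//   SDVTList A = DAG.getVTList(MVT::i32, MVT::Other);
//   SDVTList B = DAG.getVTList(MVT::i32, MVT::Other);
//   assert(A.VTs == B.VTs && "same pooled EVT array");
// which is what lets nodes compare value-type lists by pointer.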
4897 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
4898 /// specified operands. If the resultant node already exists in the DAG,
4899 /// this does not modify the specified node, instead it returns the node that
4900 /// already exists. If the resultant node does not exist in the DAG, the
4901 /// input node is returned. As a degenerate case, if you specify the same
4902 /// input operands as the node already has, the input node is returned.
4903 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
4904 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
4906 // Check to see if there is no change.
4907 if (Op == N->getOperand(0)) return N;
4909 // See if the modified node already exists.
4910 void *InsertPos = 0;
4911 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
4912 return Existing;
4914 // Nope it doesn't. Remove the node from its current place in the maps.
4915 if (InsertPos)
4916 if (!RemoveNodeFromCSEMaps(N))
4917 InsertPos = 0;
4919 // Now we update the operands.
4920 N->OperandList[0].set(Op);
4922 // If this gets put into a CSE map, add it.
4923 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4925 return N;
4926 }
4927 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
4928 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
4930 // Check to see if there is no change.
4931 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
4932 return N; // No operands changed, just return the input node.
4934 // See if the modified node already exists.
4935 void *InsertPos = 0;
4936 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
4937 return Existing;
4939 // Nope it doesn't. Remove the node from its current place in the maps.
4940 if (InsertPos)
4941 if (!RemoveNodeFromCSEMaps(N))
4942 InsertPos = 0;
4944 // Now we update the operands.
4945 if (N->OperandList[0] != Op1)
4946 N->OperandList[0].set(Op1);
4947 if (N->OperandList[1] != Op2)
4948 N->OperandList[1].set(Op2);
4950 // If this gets put into a CSE map, add it.
4951 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4953 return N;
4954 }
4955 SDNode *SelectionDAG::
4956 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
4957 SDValue Ops[] = { Op1, Op2, Op3 };
4958 return UpdateNodeOperands(N, Ops, 3);
4961 SDNode *SelectionDAG::
4962 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
4963 SDValue Op3, SDValue Op4) {
4964 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
4965 return UpdateNodeOperands(N, Ops, 4);
4968 SDNode *SelectionDAG::
4969 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
4970 SDValue Op3, SDValue Op4, SDValue Op5) {
4971 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
4972 return UpdateNodeOperands(N, Ops, 5);
4975 SDNode *SelectionDAG::
4976 UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
4977 assert(N->getNumOperands() == NumOps &&
4978 "Update with wrong number of operands");
4980 // Check to see if there is no change.
4981 bool AnyChange = false;
4982 for (unsigned i = 0; i != NumOps; ++i) {
4983 if (Ops[i] != N->getOperand(i)) {
4984 AnyChange = true;
4985 break;
4986 }
4987 }
4989 // No operands changed, just return the input node.
4990 if (!AnyChange) return N;
4992 // See if the modified node already exists.
4993 void *InsertPos = 0;
4994 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
4995 return Existing;
4997 // Nope it doesn't. Remove the node from its current place in the maps.
4998 if (InsertPos)
4999 if (!RemoveNodeFromCSEMaps(N))
5000 InsertPos = 0;
5002 // Now we update the operands.
5003 for (unsigned i = 0; i != NumOps; ++i)
5004 if (N->OperandList[i] != Ops[i])
5005 N->OperandList[i].set(Ops[i]);
5007 // If this gets put into a CSE map, add it.
5008 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5010 return N;
5011 }
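// Illustrative note (not in the original source): because of CSE, the
// returned node may differ from N. Callers therefore write
//   N = DAG.UpdateNodeOperands(N, NewOp0, NewOp1);
// rather than assuming the mutation happened in place; if an identical
// node already existed, that pre-existing node is returned instead.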
5012 /// DropOperands - Release the operands and set this node to have
5013 /// zero operands.
5014 void SDNode::DropOperands() {
5015 // Unlike the code in MorphNodeTo that does this, we don't need to
5016 // watch for dead nodes here.
5017 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5018 SDUse &Use = *I++;
5019 Use.set(SDValue());
5020 }
5021 }
5023 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
5024 /// machine opcode, a value-type list, and a list of operands.
5025 ///
5026 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5027 EVT VT) {
5028 SDVTList VTs = getVTList(VT);
5029 return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
5032 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5033 EVT VT, SDValue Op1) {
5034 SDVTList VTs = getVTList(VT);
5035 SDValue Ops[] = { Op1 };
5036 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5039 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5040 EVT VT, SDValue Op1,
5041 SDValue Op2) {
5042 SDVTList VTs = getVTList(VT);
5043 SDValue Ops[] = { Op1, Op2 };
5044 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5047 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5048 EVT VT, SDValue Op1,
5049 SDValue Op2, SDValue Op3) {
5050 SDVTList VTs = getVTList(VT);
5051 SDValue Ops[] = { Op1, Op2, Op3 };
5052 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5055 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5056 EVT VT, const SDValue *Ops,
5057 unsigned NumOps) {
5058 SDVTList VTs = getVTList(VT);
5059 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5062 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5063 EVT VT1, EVT VT2, const SDValue *Ops,
5064 unsigned NumOps) {
5065 SDVTList VTs = getVTList(VT1, VT2);
5066 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5069 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5070 EVT VT1, EVT VT2) {
5071 SDVTList VTs = getVTList(VT1, VT2);
5072 return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
5075 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5076 EVT VT1, EVT VT2, EVT VT3,
5077 const SDValue *Ops, unsigned NumOps) {
5078 SDVTList VTs = getVTList(VT1, VT2, VT3);
5079 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5082 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5083 EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5084 const SDValue *Ops, unsigned NumOps) {
5085 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5086 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5089 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5090 EVT VT1, EVT VT2,
5091 SDValue Op1) {
5092 SDVTList VTs = getVTList(VT1, VT2);
5093 SDValue Ops[] = { Op1 };
5094 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5097 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5098 EVT VT1, EVT VT2,
5099 SDValue Op1, SDValue Op2) {
5100 SDVTList VTs = getVTList(VT1, VT2);
5101 SDValue Ops[] = { Op1, Op2 };
5102 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5105 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5106 EVT VT1, EVT VT2,
5107 SDValue Op1, SDValue Op2,
5108 SDValue Op3) {
5109 SDVTList VTs = getVTList(VT1, VT2);
5110 SDValue Ops[] = { Op1, Op2, Op3 };
5111 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5114 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5115 EVT VT1, EVT VT2, EVT VT3,
5116 SDValue Op1, SDValue Op2,
5117 SDValue Op3) {
5118 SDVTList VTs = getVTList(VT1, VT2, VT3);
5119 SDValue Ops[] = { Op1, Op2, Op3 };
5120 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5123 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5124 SDVTList VTs, const SDValue *Ops,
5125 unsigned NumOps) {
5126 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
5127 // Reset the NodeID to -1.
5128 N->setNodeId(-1);
5129 return N;
5130 }
5132 /// UpdadeDebugLocOnMergedSDNode - If the opt level is -O0 then it throws away
5133 /// the line number information on the merged node since it is not possible to
5134 /// preserve the information that the operation is associated with multiple lines.
5135 /// This will make the debugger work better at -O0, where there is a higher
5136 /// probability of having other instructions associated with that line.
5138 SDNode *SelectionDAG::UpdadeDebugLocOnMergedSDNode(SDNode *N, DebugLoc OLoc) {
5139 DebugLoc NLoc = N->getDebugLoc();
5140 if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) && (OLoc != NLoc)) {
5141 N->setDebugLoc(DebugLoc());
5142 }
5143 return N;
5144 }
5146 /// MorphNodeTo - This *mutates* the specified node to have the specified
5147 /// return type, opcode, and operands.
5149 /// Note that MorphNodeTo returns the resultant node. If there is already a
5150 /// node of the specified opcode and operands, it returns that node instead of
5151 /// the current one. Note that the DebugLoc need not be the same.
5153 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5154 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5155 /// node, and because it doesn't require CSE recalculation for any of
5156 /// the node's users.
5158 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5159 SDVTList VTs, const SDValue *Ops,
5161 // If an identical node already exists, use it.
5162 void *IP = 0;
5163 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5164 FoldingSetNodeID ID;
5165 AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
5166 if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5167 return UpdadeDebugLocOnMergedSDNode(ON, N->getDebugLoc());
5168 }
5170 if (!RemoveNodeFromCSEMaps(N))
5171 IP = 0;
5173 // Start the morphing.
5174 N->NodeType = Opc;
5175 N->ValueList = VTs.VTs;
5176 N->NumValues = VTs.NumVTs;
5178 // Clear the operands list, updating used nodes to remove this from their
5179 // use list. Keep track of any operands that become dead as a result.
5180 SmallPtrSet<SDNode*, 16> DeadNodeSet;
5181 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5182 SDUse &Use = *I++;
5183 SDNode *Used = Use.getNode();
5184 Use.set(SDValue());
5185 if (Used->use_empty())
5186 DeadNodeSet.insert(Used);
5187 }
5189 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5190 // Initialize the memory references information.
5191 MN->setMemRefs(0, 0);
5192 // If NumOps is larger than the # of operands we can have in a
5193 // MachineSDNode, reallocate the operand list.
5194 if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5195 if (MN->OperandsNeedDelete)
5196 delete[] MN->OperandList;
5197 if (NumOps > array_lengthof(MN->LocalOperands))
5198 // We're creating a final node that will live unmorphed for the
5199 // remainder of the current SelectionDAG iteration, so we can allocate
5200 // the operands directly out of a pool with no recycling metadata.
5201 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5204 MN->InitOperands(MN->LocalOperands, Ops, NumOps);
5205 MN->OperandsNeedDelete = false;
5207 MN->InitOperands(MN->OperandList, Ops, NumOps);
5209 // If NumOps is larger than the # of operands we currently have, reallocate
5210 // the operand list.
5211 if (NumOps > N->NumOperands) {
5212 if (N->OperandsNeedDelete)
5213 delete[] N->OperandList;
5214 N->InitOperands(new SDUse[NumOps], Ops, NumOps);
5215 N->OperandsNeedDelete = true;
5217 N->InitOperands(N->OperandList, Ops, NumOps);
5220 // Delete any nodes that are still dead after adding the uses for the
5221 // new operands.
5222 if (!DeadNodeSet.empty()) {
5223 SmallVector<SDNode *, 16> DeadNodes;
5224 for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
5225 E = DeadNodeSet.end(); I != E; ++I)
5226 if ((*I)->use_empty())
5227 DeadNodes.push_back(*I);
5228 RemoveDeadNodes(DeadNodes);
5232 CSEMap.InsertNode(N, IP); // Memoize the new node.
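// Illustrative sketch (editorial addition, not original code from this file):
// how a target's instruction selector might use SelectNodeTo to mutate a
// target-independent add in place. The machine opcode is a hypothetical
// stand-in supplied by the caller.
static SDNode *exampleSelectAdd(SelectionDAG &DAG, SDNode *N,
                                unsigned HypotheticalAddOpc) {
  assert(N->getOpcode() == ISD::ADD && "Expected an ISD::ADD!");
  // SelectNodeTo morphs N (or returns an existing identical node), so no
  // ReplaceAllUsesWith call is needed afterwards.
  return DAG.SelectNodeTo(N, HypotheticalAddOpc, N->getValueType(0),
                          N->getOperand(0), N->getOperand(1));
}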
5237 /// getMachineNode - These are used by target selectors to create a new node
5238 /// with specified return type(s), MachineInstr opcode, and operands.
5240 /// Note that getMachineNode returns the resultant node. If there is already a
5241 /// node of the specified opcode and operands, it returns that node instead of
5242 /// creating a new one.
5244 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT) {
5245 SDVTList VTs = getVTList(VT);
5246 return getMachineNode(Opcode, dl, VTs, 0, 0);
5250 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT, SDValue Op1) {
5251 SDVTList VTs = getVTList(VT);
5252 SDValue Ops[] = { Op1 };
5253 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5257 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
5258 SDValue Op1, SDValue Op2) {
5259 SDVTList VTs = getVTList(VT);
5260 SDValue Ops[] = { Op1, Op2 };
5261 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5265 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
5266 SDValue Op1, SDValue Op2, SDValue Op3) {
5267 SDVTList VTs = getVTList(VT);
5268 SDValue Ops[] = { Op1, Op2, Op3 };
5269 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5273 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
5274 const SDValue *Ops, unsigned NumOps) {
5275 SDVTList VTs = getVTList(VT);
5276 return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
5280 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2) {
5281 SDVTList VTs = getVTList(VT1, VT2);
5282 return getMachineNode(Opcode, dl, VTs, 0, 0);
5286 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5287 EVT VT1, EVT VT2, SDValue Op1) {
5288 SDVTList VTs = getVTList(VT1, VT2);
5289 SDValue Ops[] = { Op1 };
5290 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5294 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5295 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
5296 SDVTList VTs = getVTList(VT1, VT2);
5297 SDValue Ops[] = { Op1, Op2 };
5298 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5302 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5303 EVT VT1, EVT VT2, SDValue Op1,
5304 SDValue Op2, SDValue Op3) {
5305 SDVTList VTs = getVTList(VT1, VT2);
5306 SDValue Ops[] = { Op1, Op2, Op3 };
5307 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5311 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5313 const SDValue *Ops, unsigned NumOps) {
5314 SDVTList VTs = getVTList(VT1, VT2);
5315 return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
5319 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5320 EVT VT1, EVT VT2, EVT VT3,
5321 SDValue Op1, SDValue Op2) {
5322 SDVTList VTs = getVTList(VT1, VT2, VT3);
5323 SDValue Ops[] = { Op1, Op2 };
5324 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5328 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5329 EVT VT1, EVT VT2, EVT VT3,
5330 SDValue Op1, SDValue Op2, SDValue Op3) {
5331 SDVTList VTs = getVTList(VT1, VT2, VT3);
5332 SDValue Ops[] = { Op1, Op2, Op3 };
5333 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5337 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5338 EVT VT1, EVT VT2, EVT VT3,
5339 const SDValue *Ops, unsigned NumOps) {
5340 SDVTList VTs = getVTList(VT1, VT2, VT3);
5341 return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
5345 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
5346 EVT VT2, EVT VT3, EVT VT4,
5347 const SDValue *Ops, unsigned NumOps) {
5348 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5349 return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
5353 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5354 const std::vector<EVT> &ResultTys,
5355 const SDValue *Ops, unsigned NumOps) {
5356 SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
5357 return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
5361 SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
5362 const SDValue *Ops, unsigned NumOps) {
5363 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
5368 FoldingSetNodeID ID;
5369 AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
5371 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5372 return cast<MachineSDNode>(UpdateDebugLocOnMergedSDNode(E, DL));
5376 // Allocate a new MachineSDNode.
5377 N = new (NodeAllocator) MachineSDNode(~Opcode, DL, VTs);
5379 // Initialize the operands list.
5380 if (NumOps > array_lengthof(N->LocalOperands))
5381 // We're creating a final node that will live unmorphed for the
5382 // remainder of the current SelectionDAG iteration, so we can allocate
5383 // the operands directly out of a pool with no recycling metadata.
5384 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5387 N->InitOperands(N->LocalOperands, Ops, NumOps);
5388 N->OperandsNeedDelete = false;
5391 CSEMap.InsertNode(N, IP);
5393 AllNodes.push_back(N);
5395 VerifyMachineNode(N);
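// Illustrative sketch (editorial addition): creating a MachineSDNode directly.
// TargetOpcode::IMPLICIT_DEF is a real target-independent pseudo, which makes
// it a safe minimal example; the i32 result type is arbitrary.
static SDValue exampleCreateImplicitDef(SelectionDAG &DAG, DebugLoc dl) {
  MachineSDNode *N = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, dl,
                                        MVT::i32);
  return SDValue(N, 0);
}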
5400 /// getTargetExtractSubreg - A convenience function for creating
5401 /// TargetOpcode::EXTRACT_SUBREG nodes.
5403 SelectionDAG::getTargetExtractSubreg(int SRIdx, DebugLoc DL, EVT VT,
5405 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5406 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5407 VT, Operand, SRIdxVal);
5408 return SDValue(Subreg, 0);
5411 /// getTargetInsertSubreg - A convenience function for creating
5412 /// TargetOpcode::INSERT_SUBREG nodes.
5414 SelectionDAG::getTargetInsertSubreg(int SRIdx, DebugLoc DL, EVT VT,
5415 SDValue Operand, SDValue Subreg) {
5416 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5417 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5418 VT, Operand, Subreg, SRIdxVal);
5419 return SDValue(Result, 0);
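// Illustrative sketch (editorial addition): the two helpers above pair
// naturally, extracting a sub-register value and then inserting a replacement
// into the same position. SubRegIdx stands in for a target-specific
// sub-register index.
static SDValue exampleReplaceSubreg(SelectionDAG &DAG, DebugLoc DL,
                                    int SubRegIdx, EVT SubVT,
                                    SDValue Whole, SDValue NewPart) {
  // The extracted value could feed further computation; here it is unused.
  SDValue OldPart = DAG.getTargetExtractSubreg(SubRegIdx, DL, SubVT, Whole);
  (void)OldPart;
  return DAG.getTargetInsertSubreg(SubRegIdx, DL, Whole.getValueType(),
                                   Whole, NewPart);
}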
5422 /// getNodeIfExists - Get the specified node if it's already available, or
5423 /// else return NULL.
5424 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5425 const SDValue *Ops, unsigned NumOps) {
5426 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5427 FoldingSetNodeID ID;
5428 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
5430 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5436 /// getDbgValue - Creates an SDDbgValue node.
5439 SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
5440 DebugLoc DL, unsigned O) {
5441 return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
5445 SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
5446 DebugLoc DL, unsigned O) {
5447 return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
5451 SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
5452 DebugLoc DL, unsigned O) {
5453 return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
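// Illustrative sketch (editorial addition): the intended pairing of
// getDbgValue with AddDbgValue, recording that result ResNo of node N carries
// the variable described by Var (a variable metadata node in real callers).
static void exampleAttachDbgValue(SelectionDAG &DAG, MDNode *Var, SDNode *N,
                                  unsigned ResNo, DebugLoc DL) {
  SDDbgValue *DV =
    DAG.getDbgValue(Var, N, ResNo, 0 /*Offset*/, DL, 0 /*Order*/);
  DAG.AddDbgValue(DV, N, false /*isParameter*/);
}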
5458 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5459 /// pointed to by a use iterator is deleted, increment the use iterator
5460 /// so that it doesn't dangle.
5462 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5463 SDNode::use_iterator &UI;
5464 SDNode::use_iterator &UE;
5466 virtual void NodeDeleted(SDNode *N, SDNode *E) {
5467 // Increment the iterator as needed.
5468 while (UI != UE && N == *UI)
5473 RAUWUpdateListener(SelectionDAG &d,
5474 SDNode::use_iterator &ui,
5475 SDNode::use_iterator &ue)
5476 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5481 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5482 /// This can cause recursive merging of nodes in the DAG.
5484 /// This version assumes From has a single result value.
5486 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5487 SDNode *From = FromN.getNode();
5488 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5489 "Cannot replace with this method!");
5490 assert(From != To.getNode() && "Cannot replace uses of a node with itself!");
5492 // Iterate over all the existing uses of From. New uses will be added
5493 // to the beginning of the use list, which we avoid visiting.
5494 // This specifically avoids visiting uses of From that arise while the
5495 // replacement is happening, because any such uses would be the result
5496 // of CSE: If an existing node looks like From after one of its operands
5497 // is replaced by To, we don't want to replace all of its users with To
5498 // as well. See PR3018 for more info.
5499 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5500 RAUWUpdateListener Listener(*this, UI, UE);
5504 // This node is about to morph, remove its old self from the CSE maps.
5505 RemoveNodeFromCSEMaps(User);
5507 // A user can appear in a use list multiple times, and when this
5508 // happens the uses are usually next to each other in the list.
5509 // To help reduce the number of CSE recomputations, process all
5510 // the uses of this user that we can find this way.
5512 SDUse &Use = UI.getUse();
5515 } while (UI != UE && *UI == User);
5517 // Now that we have modified User, add it back to the CSE maps. If it
5518 // already exists there, recursively merge the results together.
5519 AddModifiedNodeToCSEMaps(User);
5522 // If we just RAUW'd the root, take note.
5523 if (FromN == getRoot())
5524 setRoot(To);
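// Illustrative sketch (editorial addition): the classic use of the
// single-result form. Once Add is known to compute (add x, 0), forward all of
// its uses to x and let CSE merge any nodes that become identical.
static void exampleFoldAddOfZero(SelectionDAG &DAG, SDNode *Add) {
  DAG.ReplaceAllUsesWith(SDValue(Add, 0), Add->getOperand(0));
}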
5527 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5528 /// This can cause recursive merging of nodes in the DAG.
5530 /// This version assumes that for each value of From, there is a
5531 /// corresponding value in To in the same position with the same type.
5533 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
5535 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
5536 assert((!From->hasAnyUseOfValue(i) ||
5537 From->getValueType(i) == To->getValueType(i)) &&
5538 "Cannot use this version of ReplaceAllUsesWith!");
5541 // Handle the trivial case.
5545 // Iterate over just the existing users of From. See the comments in
5546 // the ReplaceAllUsesWith above.
5547 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5548 RAUWUpdateListener Listener(*this, UI, UE);
5552 // This node is about to morph, remove its old self from the CSE maps.
5553 RemoveNodeFromCSEMaps(User);
5555 // A user can appear in a use list multiple times, and when this
5556 // happens the uses are usually next to each other in the list.
5557 // To help reduce the number of CSE recomputations, process all
5558 // the uses of this user that we can find this way.
5560 SDUse &Use = UI.getUse();
5563 } while (UI != UE && *UI == User);
5565 // Now that we have modified User, add it back to the CSE maps. If it
5566 // already exists there, recursively merge the results together.
5567 AddModifiedNodeToCSEMaps(User);
5570 // If we just RAUW'd the root, take note.
5571 if (From == getRoot().getNode())
5572 setRoot(SDValue(To, getRoot().getResNo()));
5575 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5576 /// This can cause recursive merging of nodes in the DAG.
5578 /// This version can replace From with any result values. To must match the
5579 /// number and types of values returned by From.
5580 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
5581 if (From->getNumValues() == 1) // Handle the simple case efficiently.
5582 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
5584 // Iterate over just the existing users of From. See the comments in
5585 // the ReplaceAllUsesWith above.
5586 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5587 RAUWUpdateListener Listener(*this, UI, UE);
5591 // This node is about to morph, remove its old self from the CSE maps.
5592 RemoveNodeFromCSEMaps(User);
5594 // A user can appear in a use list multiple times, and when this
5595 // happens the uses are usually next to each other in the list.
5596 // To help reduce the number of CSE recomputations, process all
5597 // the uses of this user that we can find this way.
5599 SDUse &Use = UI.getUse();
5600 const SDValue &ToOp = To[Use.getResNo()];
5603 } while (UI != UE && *UI == User);
5605 // Now that we have modified User, add it back to the CSE maps. If it
5606 // already exists there, recursively merge the results together.
5607 AddModifiedNodeToCSEMaps(User);
5610 // If we just RAUW'd the root, take note.
5611 if (From == getRoot().getNode())
5612 setRoot(SDValue(To[getRoot().getResNo()]));
5615 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
5616 /// uses of other values produced by From.getNode() alone.
5618 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
5619 // Replacing a value with itself is a no-op.
5620 if (From == To) return;
5622 // If From has only one result, this reduces to plain ReplaceAllUsesWith.
5623 if (From.getNode()->getNumValues() == 1) {
5624 ReplaceAllUsesWith(From, To);
5628 // Iterate over just the existing users of From. See the comments in
5629 // the ReplaceAllUsesWith above.
5630 SDNode::use_iterator UI = From.getNode()->use_begin(),
5631 UE = From.getNode()->use_end();
5632 RAUWUpdateListener Listener(*this, UI, UE);
5635 bool UserRemovedFromCSEMaps = false;
5637 // A user can appear in a use list multiple times, and when this
5638 // happens the uses are usually next to each other in the list.
5639 // To help reduce the number of CSE recomputations, process all
5640 // the uses of this user that we can find this way.
5642 SDUse &Use = UI.getUse();
5644 // Skip uses of different values from the same node.
5645 if (Use.getResNo() != From.getResNo()) {
5650 // If this node hasn't been modified yet, it's still in the CSE maps,
5651 // so remove its old self from the CSE maps.
5652 if (!UserRemovedFromCSEMaps) {
5653 RemoveNodeFromCSEMaps(User);
5654 UserRemovedFromCSEMaps = true;
5659 } while (UI != UE && *UI == User);
5661 // We are iterating over all uses of the From node, so if a use
5662 // doesn't use the specific value, no changes are made.
5663 if (!UserRemovedFromCSEMaps)
5666 // Now that we have modified User, add it back to the CSE maps. If it
5667 // already exists there, recursively merge the results together.
5668 AddModifiedNodeToCSEMaps(User);
5671 // If we just RAUW'd the root, take note.
5672 if (From == getRoot())
5673 setRoot(To);
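// Illustrative sketch (editorial addition): the per-value form is the right
// tool for multi-result nodes, e.g. rerouting only the chain result of a load
// while leaving its loaded value untouched.
static void exampleRerouteLoadChain(SelectionDAG &DAG, LoadSDNode *Ld,
                                    SDValue NewChain) {
  // For a LoadSDNode, result 0 is the loaded value and result 1 is the chain.
  DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
}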
5677 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
5678 /// to record information about a use.
5685 /// operator< - Sort Memos by User.
5686 bool operator<(const UseMemo &L, const UseMemo &R) {
5687 return (intptr_t)L.User < (intptr_t)R.User;
5691 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
5692 /// uses of other values produced by From.getNode() alone. The same value
5693 /// may appear in both the From and To list.
5695 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
5698 // Handle the simple, trivial case efficiently.
5700 return ReplaceAllUsesOfValueWith(*From, *To);
5702 // Record all the existing uses up front. This avoids visiting new uses
5703 // that are introduced during the replacement process.
5705 SmallVector<UseMemo, 4> Uses;
5706 for (unsigned i = 0; i != Num; ++i) {
5707 unsigned FromResNo = From[i].getResNo();
5708 SDNode *FromNode = From[i].getNode();
5709 for (SDNode::use_iterator UI = FromNode->use_begin(),
5710 E = FromNode->use_end(); UI != E; ++UI) {
5711 SDUse &Use = UI.getUse();
5712 if (Use.getResNo() == FromResNo) {
5713 UseMemo Memo = { *UI, i, &Use };
5714 Uses.push_back(Memo);
5719 // Sort the uses, so that all the uses from a given User are together.
5720 std::sort(Uses.begin(), Uses.end());
5722 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
5723 UseIndex != UseIndexEnd; ) {
5724 // We know that this user uses some value of From. If it is the right
5725 // value, update it.
5726 SDNode *User = Uses[UseIndex].User;
5728 // This node is about to morph, remove its old self from the CSE maps.
5729 RemoveNodeFromCSEMaps(User);
5731 // The Uses array is sorted, so all the uses for a given User
5732 // are next to each other in the list.
5733 // To help reduce the number of CSE recomputations, process all
5734 // the uses of this user that we can find this way.
5736 unsigned i = Uses[UseIndex].Index;
5737 SDUse &Use = *Uses[UseIndex].Use;
5741 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
5743 // Now that we have modified User, add it back to the CSE maps. If it
5744 // already exists there, recursively merge the results together.
5745 AddModifiedNodeToCSEMaps(User);
5749 /// AssignTopologicalOrder - Assign a unique node id to each node in the DAG
5750 /// based on its topological order. Returns the number of nodes, which is one
5751 /// more than the largest id assigned.
5752 unsigned SelectionDAG::AssignTopologicalOrder() {
5754 unsigned DAGSize = 0;
5756 // SortedPos tracks the progress of the algorithm. Nodes before it are
5757 // sorted, nodes after it are unsorted. When the algorithm completes
5758 // it is at the end of the list.
5759 allnodes_iterator SortedPos = allnodes_begin();
5761 // Visit all the nodes. Move nodes with no operands to the front of
5762 // the list immediately. Annotate nodes that do have operands with their
5763 // operand count. Before we do this, the Node Id fields of the nodes
5764 // may contain arbitrary values. After, the Node Id fields for nodes
5765 // before SortedPos will contain the topological sort index, and the
5766 // Node Id fields for nodes at SortedPos and after will contain the
5767 // count of outstanding operands.
5768 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
5771 unsigned Degree = N->getNumOperands();
5773 // A node with no operands is already sorted; move it to the front immediately.
5774 N->setNodeId(DAGSize++);
5775 allnodes_iterator Q = N;
5777 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
5778 assert(SortedPos != AllNodes.end() && "Overran node list");
5781 // Temporarily use the Node Id as scratch space for the degree count.
5782 N->setNodeId(Degree);
5786 // Visit all the nodes. As we iterate, move nodes into sorted order,
5787 // such that by the time the end is reached all nodes will be sorted.
5788 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
5791 // N is in sorted position, so each of its users now has one fewer
5792 // operand that still needs to be sorted.
5793 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5796 unsigned Degree = P->getNodeId();
5797 assert(Degree != 0 && "Invalid node degree");
5800 // All of P's operands are sorted, so P may be sorted now.
5801 P->setNodeId(DAGSize++);
5803 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
5804 assert(SortedPos != AllNodes.end() && "Overran node list");
5807 // Update P's outstanding operand count.
5808 P->setNodeId(Degree);
5811 if (I == SortedPos) {
5814 dbgs() << "Overran sorted position:\n";
5817 llvm_unreachable(0);
5821 assert(SortedPos == AllNodes.end() &&
5822 "Topological sort incomplete!");
5823 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
5824 "First node in topological sort is not the entry token!");
5825 assert(AllNodes.front().getNodeId() == 0 &&
5826 "First node in topological sort has non-zero id!");
5827 assert(AllNodes.front().getNumOperands() == 0 &&
5828 "First node in topological sort has operands!");
5829 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
5830 "Last node in topological sort has unexpected id!");
5831 assert(AllNodes.back().use_empty() &&
5832 "Last node in topological sort has users!");
5833 assert(DAGSize == allnodes_size() && "Node count mismatch!");
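// Illustrative sketch (editorial addition): after AssignTopologicalOrder the
// node list itself is topologically sorted, so a forward walk is guaranteed
// to visit every operand before any of its users.
static void exampleTopologicalWalk(SelectionDAG &DAG) {
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = DAG.allnodes_end(); I != E; ++I) {
    SDNode *N = I;
    (void)N; // all operands of N have already been visited at this point
  }
}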
5837 /// AssignOrdering - Assign an order to the SDNode.
5838 void SelectionDAG::AssignOrdering(const SDNode *SD, unsigned Order) {
5839 assert(SD && "Trying to assign an order to a null node!");
5840 Ordering->add(SD, Order);
5843 /// GetOrdering - Get the order for the SDNode.
5844 unsigned SelectionDAG::GetOrdering(const SDNode *SD) const {
5845 assert(SD && "Trying to get the order of a null node!");
5846 return Ordering->getOrder(SD);
5849 /// AddDbgValue - Add a dbg_value record. If SD is non-null, that means the
5850 /// value is produced by SD.
5851 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
5852 DbgInfo->add(DB, SD, isParameter);
5854 SD->setHasDebugValue(true);
5857 /// TransferDbgValues - Transfer SDDbgValues from one SDValue to another.
5858 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
5859 if (From == To || !From.getNode()->getHasDebugValue())
5861 SDNode *FromNode = From.getNode();
5862 SDNode *ToNode = To.getNode();
5863 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
5864 SmallVector<SDDbgValue *, 2> ClonedDVs;
5865 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
5867 SDDbgValue *Dbg = *I;
5868 if (Dbg->getKind() == SDDbgValue::SDNODE) {
5869 SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
5870 Dbg->getOffset(), Dbg->getDebugLoc(),
5872 ClonedDVs.push_back(Clone);
5875 for (SmallVector<SDDbgValue *, 2>::iterator I = ClonedDVs.begin(),
5876 E = ClonedDVs.end(); I != E; ++I)
5877 AddDbgValue(*I, ToNode, false);
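// Illustrative sketch (editorial addition), assuming From produces a single
// result: transferring debug values before the replacement keeps variable
// locations alive across the rewrite.
static void exampleReplacePreservingDebug(SelectionDAG &DAG, SDValue From,
                                          SDValue To) {
  DAG.TransferDbgValues(From, To);
  DAG.ReplaceAllUsesWith(From, To);
}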
5880 //===----------------------------------------------------------------------===//
5881 //                              SDNode Class
5882 //===----------------------------------------------------------------------===//
5884 HandleSDNode::~HandleSDNode() {
5888 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, DebugLoc DL,
5889 const GlobalValue *GA,
5890 EVT VT, int64_t o, unsigned char TF)
5891 : SDNode(Opc, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
5895 MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
5896 MachineMemOperand *mmo)
5897 : SDNode(Opc, dl, VTs), MemoryVT(memvt), MMO(mmo) {
5898 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
5899 MMO->isNonTemporal(), MMO->isInvariant());
5900 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
5901 assert(isNonTemporal() == MMO->isNonTemporal() &&
5902 "Non-temporal encoding error!");
5903 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
5906 MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
5907 const SDValue *Ops, unsigned NumOps, EVT memvt,
5908 MachineMemOperand *mmo)
5909 : SDNode(Opc, dl, VTs, Ops, NumOps),
5910 MemoryVT(memvt), MMO(mmo) {
5911 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
5912 MMO->isNonTemporal(), MMO->isInvariant());
5913 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
5914 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
5917 /// Profile - Gather unique data for the node.
5919 void SDNode::Profile(FoldingSetNodeID &ID) const {
5920 AddNodeIDNode(ID, this);
5925 std::vector<EVT> VTs;
5928 VTs.reserve(MVT::LAST_VALUETYPE);
5929 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
5930 VTs.push_back(MVT((MVT::SimpleValueType)i));
5935 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
5936 static ManagedStatic<EVTArray> SimpleVTArray;
5937 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
5939 /// getValueTypeList - Return a pointer to the specified value type.
5941 const EVT *SDNode::getValueTypeList(EVT VT) {
5942 if (VT.isExtended()) {
5943 sys::SmartScopedLock<true> Lock(*VTMutex);
5944 return &(*EVTs->insert(VT).first);
5946 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
5947 "Value type out of range!");
5948 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
5952 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
5953 /// indicated value. This method ignores uses of other values defined by this
5954 /// operation.
5955 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
5956 assert(Value < getNumValues() && "Bad value!");
5958 // TODO: Only iterate over uses of a given value of the node
5959 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
5960 if (UI.getUse().getResNo() == Value) {
5967 // Found exactly the right number of uses?
5972 /// hasAnyUseOfValue - Return true if there are any use of the indicated
5973 /// value. This method ignores uses of other values defined by this operation.
5974 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
5975 assert(Value < getNumValues() && "Bad value!");
5977 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
5978 if (UI.getUse().getResNo() == Value)
5985 /// isOnlyUserOf - Return true if this node is the only user of N.
5987 bool SDNode::isOnlyUserOf(SDNode *N) const {
5989 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
6000 /// isOperandOf - Return true if this value is an operand of N.
6002 bool SDValue::isOperandOf(SDNode *N) const {
6003 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6004 if (*this == N->getOperand(i))
6009 bool SDNode::isOperandOf(SDNode *N) const {
6010 for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6011 if (this == N->OperandList[i].getNode())
6016 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6017 /// be a chain) reaches the specified operand without crossing any
6018 /// side-effecting instructions on any chain path. In practice, this looks
6019 /// through token factors and non-volatile loads. In order to remain efficient,
6020 /// this only looks a couple of nodes in; it does not do an exhaustive search.
6021 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6022 unsigned Depth) const {
6023 if (*this == Dest) return true;
6025 // Don't search too deeply; we just want to be able to see through
6026 // TokenFactors etc.
6027 if (Depth == 0) return false;
6029 // If this is a token factor, all inputs to the TF happen in parallel. If any
6030 // operand of the TF does not reach Dest, then we cannot do the transformation.
6031 if (getOpcode() == ISD::TokenFactor) {
6032 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6033 if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6038 // Non-volatile loads don't have side effects; look through them.
6039 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6040 if (!Ld->isVolatile())
6041 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
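// Illustrative sketch (editorial addition): a typical query when combining
// chained memory operations; the small fixed depth keeps the check cheap.
static bool exampleChainIsTransparent(SDValue Chain, SDValue Dest) {
  // True only if Chain reaches Dest crossing nothing but TokenFactors and
  // non-volatile loads within a couple of nodes.
  return Chain.reachesChainWithoutSideEffects(Dest, 2);
}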
6046 /// hasPredecessor - Return true if N is a predecessor of this node.
6047 /// N is either an operand of this node, or can be reached by recursively
6048 /// traversing up the operands.
6049 /// NOTE: This is an expensive method. Use it carefully.
6050 bool SDNode::hasPredecessor(const SDNode *N) const {
6051 SmallPtrSet<const SDNode *, 32> Visited;
6052 SmallVector<const SDNode *, 16> Worklist;
6053 return hasPredecessorHelper(N, Visited, Worklist);
6056 bool SDNode::hasPredecessorHelper(const SDNode *N,
6057 SmallPtrSet<const SDNode *, 32> &Visited,
6058 SmallVector<const SDNode *, 16> &Worklist) const {
6059 if (Visited.empty()) {
6060 Worklist.push_back(this);
6062 // Take a look in the visited set. If we've already encountered this node
6063 // we needn't search further.
6064 if (Visited.count(N))
6068 // Haven't visited N yet. Continue the search.
6069 while (!Worklist.empty()) {
6070 const SDNode *M = Worklist.pop_back_val();
6071 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6072 SDNode *Op = M->getOperand(i).getNode();
6073 if (Visited.insert(Op))
6074 Worklist.push_back(Op);
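// Illustrative sketch (editorial addition): the helper form exists so a
// caller can amortize one visited set and worklist across several predecessor
// queries rooted at the same node.
static bool exampleHasEitherPredecessor(const SDNode *Root, const SDNode *A,
                                        const SDNode *B) {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  return Root->hasPredecessorHelper(A, Visited, Worklist) ||
         Root->hasPredecessorHelper(B, Visited, Worklist);
}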
6083 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6084 assert(Num < NumOperands && "Invalid child # of SDNode!");
6085 return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
6088 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6089 assert(N->getNumValues() == 1 &&
6090 "Can't unroll a vector with multiple results!");
6092 EVT VT = N->getValueType(0);
6093 unsigned NE = VT.getVectorNumElements();
6094 EVT EltVT = VT.getVectorElementType();
6095 DebugLoc dl = N->getDebugLoc();
6097 SmallVector<SDValue, 8> Scalars;
6098 SmallVector<SDValue, 4> Operands(N->getNumOperands());
6100 // If ResNE is 0, fully unroll the vector op.
6103 else if (NE > ResNE)
6107 for (i = 0; i != NE; ++i) {
6108 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6109 SDValue Operand = N->getOperand(j);
6110 EVT OperandVT = Operand.getValueType();
6111 if (OperandVT.isVector()) {
6112 // A vector operand; extract a single element.
6113 EVT OperandEltVT = OperandVT.getVectorElementType();
6114 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6117 getConstant(i, TLI.getPointerTy()));
6119 // A scalar operand; just use it as is.
6120 Operands[j] = Operand;
6124 switch (N->getOpcode()) {
6126 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6127 &Operands[0], Operands.size()));
6130 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT,
6131 &Operands[0], Operands.size()));
6138 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6139 getShiftAmountOperand(Operands[0].getValueType(),
6142 case ISD::SIGN_EXTEND_INREG:
6143 case ISD::FP_ROUND_INREG: {
6144 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6145 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6147 getValueType(ExtVT)));
6152 for (; i < ResNE; ++i)
6153 Scalars.push_back(getUNDEF(EltVT));
6155 return getNode(ISD::BUILD_VECTOR, dl,
6156 EVT::getVectorVT(*getContext(), EltVT, ResNE),
6157 &Scalars[0], Scalars.size());
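// Illustrative sketch (editorial addition): passing ResNE == 0 unrolls the
// operation into one scalar op per element of the result vector type.
static SDValue exampleUnrollVectorAdd(SelectionDAG &DAG, SDNode *VecAdd) {
  assert(VecAdd->getOpcode() == ISD::ADD &&
         VecAdd->getValueType(0).isVector() && "Expected a vector add!");
  return DAG.UnrollVectorOp(VecAdd, 0);
}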
6161 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
6162 /// location that is 'Dist' units away from the location that the 'Base' load
6163 /// is loading from.
6164 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
6165 unsigned Bytes, int Dist) const {
6166 if (LD->getChain() != Base->getChain())
6168 EVT VT = LD->getValueType(0);
6169 if (VT.getSizeInBits() / 8 != Bytes)
6172 SDValue Loc = LD->getOperand(1);
6173 SDValue BaseLoc = Base->getOperand(1);
6174 if (Loc.getOpcode() == ISD::FrameIndex) {
6175 if (BaseLoc.getOpcode() != ISD::FrameIndex)
6177 const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
6178 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
6179 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6180 int FS = MFI->getObjectSize(FI);
6181 int BFS = MFI->getObjectSize(BFI);
6182 if (FS != BFS || FS != (int)Bytes) return false;
6183 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6187 if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
6188 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
6191 const GlobalValue *GV1 = NULL;
6192 const GlobalValue *GV2 = NULL;
6193 int64_t Offset1 = 0;
6194 int64_t Offset2 = 0;
6195 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6196 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6197 if (isGA1 && isGA2 && GV1 == GV2)
6198 return Offset1 == (Offset2 + Dist*Bytes);
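// Illustrative sketch (editorial addition): checks whether Ld loads the four
// bytes immediately following what Base loads, the typical query made when
// merging adjacent loads into a wider one.
static bool exampleLoadsAreAdjacent(const SelectionDAG &DAG, LoadSDNode *Ld,
                                    LoadSDNode *Base) {
  return DAG.isConsecutiveLoad(Ld, Base, 4 /*Bytes*/, 1 /*Dist*/);
}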
6203 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
6204 /// it cannot be inferred.
6205 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
6206 // If this is a GlobalAddress plus a constant offset, return the alignment.
6207 const GlobalValue *GV;
6208 int64_t GVOffset = 0;
6209 if (TLI.isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
6210 unsigned PtrWidth = TLI.getPointerTy().getSizeInBits();
6211 APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
6212 llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
6213 TLI.getDataLayout());
6214 unsigned AlignBits = KnownZero.countTrailingOnes();
6215 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
6217 return MinAlign(Align, GVOffset);
6220 // If this is a direct reference to a stack slot, use information about the
6221 // stack slot's alignment.
6222 int FrameIdx = 1 << 31;
6223 int64_t FrameOffset = 0;
6224 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6225 FrameIdx = FI->getIndex();
6226 } else if (isBaseWithConstantOffset(Ptr) &&
6227 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6229 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6230 FrameOffset = Ptr.getConstantOperandVal(1);
6233 if (FrameIdx != (1 << 31)) {
6234 const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6235 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6236 FrameOffset);
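// Illustrative sketch (editorial addition): callers usually compare the
// inferred alignment against the requirement of a wider memory operation;
// a return value of 0 means nothing could be proven.
static bool exampleIsKnownAligned(const SelectionDAG &DAG, SDValue Ptr,
                                  unsigned RequiredAlign) {
  return DAG.InferPtrAlignment(Ptr) >= RequiredAlign;
}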
6243 // getAddressSpace - Return the address space this GlobalAddress belongs to.
6244 unsigned GlobalAddressSDNode::getAddressSpace() const {
6245 return getGlobal()->getType()->getAddressSpace();
6249 Type *ConstantPoolSDNode::getType() const {
6250 if (isMachineConstantPoolEntry())
6251 return Val.MachineCPVal->getType();
6252 return Val.ConstVal->getType();
6255 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6257 unsigned &SplatBitSize,
6259 unsigned MinSplatBits,
6261 EVT VT = getValueType(0);
6262 assert(VT.isVector() && "Expected a vector type");
6263 unsigned sz = VT.getSizeInBits();
6264 if (MinSplatBits > sz)
6267 SplatValue = APInt(sz, 0);
6268 SplatUndef = APInt(sz, 0);
6270 // Get the bits. Bits with undefined values (when the corresponding element
6271 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6272 // in SplatValue. If any of the values are not constant, give up and return
6273 // false.
6274 unsigned int nOps = getNumOperands();
6275 assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6276 unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6278 for (unsigned j = 0; j < nOps; ++j) {
6279 unsigned i = isBigEndian ? nOps-1-j : j;
6280 SDValue OpVal = getOperand(i);
6281 unsigned BitPos = j * EltBitSize;
6283 if (OpVal.getOpcode() == ISD::UNDEF)
6284 SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6285 else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6286 SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6287 zextOrTrunc(sz) << BitPos;
6288 else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6289 SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
6294 // The build_vector is all constants or undefs. Find the smallest element
6295 // size that splats the vector.
6297 HasAnyUndefs = (SplatUndef != 0);
6300 unsigned HalfSize = sz / 2;
6301 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6302 APInt LowValue = SplatValue.trunc(HalfSize);
6303 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6304 APInt LowUndef = SplatUndef.trunc(HalfSize);
6306 // If the two halves do not match (ignoring undef bits), stop here.
6307 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6308 MinSplatBits > HalfSize)
6311 SplatValue = HighValue | LowValue;
6312 SplatUndef = HighUndef & LowUndef;
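// Illustrative sketch (editorial addition): the usual lowering-time query,
// asking whether this BUILD_VECTOR splats an all-ones constant of at least
// 8 bits.
static bool exampleIsSplatOfAllOnes(BuildVectorSDNode *BV, bool isBigEndian) {
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  return BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                             HasAnyUndefs, 8 /*MinSplatBits*/, isBigEndian) &&
         SplatValue.isAllOnesValue();
}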
6321 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6322 // Find the first non-undef value in the shuffle mask.
6324 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6327 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6329 // Make sure all remaining elements are either undef or the same as the first
6330 // non-undef value.
6331 for (int Idx = Mask[i]; i != e; ++i)
6332 if (Mask[i] >= 0 && Mask[i] != Idx)
6338 static void checkForCyclesHelper(const SDNode *N,
6339 SmallPtrSet<const SDNode*, 32> &Visited,
6340 SmallPtrSet<const SDNode*, 32> &Checked) {
6341 // If this node has already been checked, don't check it again.
6342 if (Checked.count(N))
6345 // If a node has already been visited on this depth-first walk, reject it as
6346 // a cycle.
6347 if (!Visited.insert(N)) {
6348 dbgs() << "Offending node:\n";
6350 errs() << "Detected cycle in SelectionDAG\n";
6354 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6355 checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
6362 void llvm::checkForCycles(const llvm::SDNode *N) {
6364 assert(N && "Checking nonexistent SDNode");
6365 SmallPtrSet<const SDNode*, 32> visited;
6366 SmallPtrSet<const SDNode*, 32> checked;
6367 checkForCyclesHelper(N, visited, checked);
6371 void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
6372 checkForCycles(DAG->getRoot().getNode());
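// Illustrative sketch (editorial addition): checkForCycles is useful as a
// debug-time sanity check after hand-rolled DAG surgery.
static void exampleVerifyAfterSurgery(const SelectionDAG &DAG) {
  llvm::checkForCycles(&DAG);
}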