1 //===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements the SelectionDAG class.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/CodeGen/SelectionDAG.h"
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/CodeGen/MachineBasicBlock.h"
23 #include "llvm/CodeGen/MachineConstantPool.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineModuleInfo.h"
26 #include "llvm/IR/CallingConv.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/DebugInfo.h"
30 #include "llvm/IR/DerivedTypes.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GlobalAlias.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/ManagedStatic.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/Mutex.h"
41 #include "llvm/Support/raw_ostream.h"
42 #include "llvm/Target/TargetInstrInfo.h"
43 #include "llvm/Target/TargetIntrinsicInfo.h"
44 #include "llvm/Target/TargetLowering.h"
45 #include "llvm/Target/TargetMachine.h"
46 #include "llvm/Target/TargetOptions.h"
47 #include "llvm/Target/TargetRegisterInfo.h"
48 #include "llvm/Target/TargetSelectionDAGInfo.h"
49 #include "llvm/Target/TargetSubtargetInfo.h"
55 /// makeVTList - Return an instance of the SDVTList struct initialized with the
56 /// specified members.
// Only the pointer is stored — no copy of the EVT array is made, so the
// caller must keep the pointed-to storage alive as long as the list is used.
// NOTE(review): the `return Res;` and closing brace are elided in this excerpt.
57 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
58 SDVTList Res = {VTs, NumVTs};
62 // Default null implementations of the callbacks.
// Subclasses of DAGUpdateListener override these hooks to observe node
// deletion/update events; the base implementations deliberately do nothing.
63 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
64 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
66 //===----------------------------------------------------------------------===//
67 // ConstantFPSDNode Class
68 //===----------------------------------------------------------------------===//
70 /// isExactlyValue - We don't rely on operator== working on double values, as
71 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
72 /// As such, this method can be used to do an exact bit-for-bit comparison of
73 /// two floating point values.
74 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
// bitwiseIsEqual compares the underlying bit patterns, so +0.0 and -0.0 are
// distinguished (unlike IEEE operator==).
75 return getValueAPF().bitwiseIsEqual(V);
// Returns whether the given constant can be represented in the FP type VT.
// NOTE(review): the second parameter line, the losesInfo check, and the final
// return are elided in this excerpt — presumably validity is "the conversion
// to VT's semantics loses no information"; confirm against the full source.
78 bool ConstantFPSDNode::isValueValidForType(EVT VT,
80 assert(VT.isFloatingPoint() && "Can only convert between FP types");
82 // convert modifies in place, so make a copy.
83 APFloat Val2 = APFloat(Val);
// The converted value itself is discarded; only the (elided) loses-info
// out-parameter is of interest.
85 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
86 APFloat::rmNearestTiesToEven,
91 //===----------------------------------------------------------------------===//
//                              ISD Namespace
93 //===----------------------------------------------------------------------===//
95 /// isBuildVectorAllOnes - Return true if the specified node is a
96 /// BUILD_VECTOR where all of the elements are ~0 or undef.
// NOTE(review): several `return false;` / increment / `return true;` lines are
// elided in this excerpt. Visible structure: look through bitcasts, skip
// leading undef elements, reject an all-undef vector, require the first
// defined element to be all-ones in its low EltSize bits, then require every
// remaining element to be identical to it or undef.
97 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
98 // Look through a bit convert.
99 while (N->getOpcode() == ISD::BITCAST)
100 N = N->getOperand(0).getNode();
102 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
104 unsigned i = 0, e = N->getNumOperands();
106 // Skip over all of the undef values.
107 while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
110 // Do not accept an all-undef vector.
111 if (i == e) return false;
113 // Do not accept build_vectors that aren't all constants or which have non-~0
114 // elements. We have to be a bit careful here, as the type of the constant
115 // may not be the same as the type of the vector elements due to type
116 // legalization (the elements are promoted to a legal type for the target and
117 // a vector of a type may be legal when the base element type is not).
118 // We only want to check enough bits to cover the vector elements, because
119 // we care if the resultant vector is all ones, not whether the individual
121 SDValue NotZero = N->getOperand(i);
122 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
// countTrailingOnes() >= EltSize means the low EltSize bits — the only bits
// that survive truncation back to the element type — are all ones.
123 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
124 if (CN->getAPIntValue().countTrailingOnes() < EltSize)
126 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
127 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
132 // Okay, we have at least one ~0 value, check to see if the rest match or are
133 // undefs. Even with the above element type twiddling, this should be OK, as
134 // the same type legalization should have applied to all the elements.
135 for (++i; i != e; ++i)
136 if (N->getOperand(i) != NotZero &&
137 N->getOperand(i).getOpcode() != ISD::UNDEF)
143 /// isBuildVectorAllZeros - Return true if the specified node is a
144 /// BUILD_VECTOR where all of the elements are 0 or undef.
// NOTE(review): `continue;` / `return false;` lines and the IsAllUndef final
// check are elided in this excerpt. Unlike isBuildVectorAllOnes, this variant
// checks every element inside a single loop, tracking whether any defined
// element was seen at all.
145 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
146 // Look through a bit convert.
147 while (N->getOpcode() == ISD::BITCAST)
148 N = N->getOperand(0).getNode();
150 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
152 bool IsAllUndef = true;
153 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) {
154 if (N->getOperand(i).getOpcode() == ISD::UNDEF)
157 // Do not accept build_vectors that aren't all constants or which have non-0
158 // elements. We have to be a bit careful here, as the type of the constant
159 // may not be the same as the type of the vector elements due to type
160 // legalization (the elements are promoted to a legal type for the target
161 // and a vector of a type may be legal when the base element type is not).
162 // We only want to check enough bits to cover the vector elements, because
163 // we care if the resultant vector is all zeros, not whether the individual
165 SDValue Zero = N->getOperand(i);
166 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
// countTrailingZeros() >= EltSize means the low EltSize bits are all zero.
167 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
168 if (CN->getAPIntValue().countTrailingZeros() < EltSize)
170 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
171 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
177 // Do not accept an all-undef vector.
183 /// \brief Return true if the specified node is a BUILD_VECTOR node of
184 /// all ConstantSDNode or undef.
// NOTE(review): the early `return false;`, the `continue;` for undefs, and
// the final `return true;` are elided in this excerpt.
185 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
186 if (N->getOpcode() != ISD::BUILD_VECTOR)
189 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
190 SDValue Op = N->getOperand(i);
// Undef elements are tolerated; anything else must be a ConstantSDNode.
191 if (Op.getOpcode() == ISD::UNDEF)
193 if (!isa<ConstantSDNode>(Op))
199 /// isScalarToVector - Return true if the specified node is a
200 /// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
201 /// element is not an undef.
// NOTE(review): the `return true;` / `return false;` lines are elided in this
// excerpt. Visible structure: accept SCALAR_TO_VECTOR directly; otherwise
// require a BUILD_VECTOR whose element 0 is defined and whose remaining
// elements are all undef.
202 bool ISD::isScalarToVector(const SDNode *N) {
203 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
206 if (N->getOpcode() != ISD::BUILD_VECTOR)
208 if (N->getOperand(0).getOpcode() == ISD::UNDEF)
210 unsigned NumElems = N->getNumOperands();
213 for (unsigned i = 1; i < NumElems; ++i) {
214 SDValue V = N->getOperand(i);
215 if (V.getOpcode() != ISD::UNDEF)
221 /// allOperandsUndef - Return true if the node has at least one operand
222 /// and all operands of the specified node are ISD::UNDEF.
// NOTE(review): the `return false;` / `return true;` statements are elided in
// this excerpt.
223 bool ISD::allOperandsUndef(const SDNode *N) {
224 // Return false if the node has no operands.
225 // This is "logically inconsistent" with the definition of "all" but
226 // is probably the desired behavior.
227 if (N->getNumOperands() == 0)
230 for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
231 if (N->getOperand(i).getOpcode() != ISD::UNDEF)
// Map a LoadExtType onto the corresponding extension opcode.
// NOTE(review): the switch statement and its case labels are elided in this
// excerpt; the ordering suggests EXTLOAD -> ANY_EXTEND, SEXTLOAD ->
// SIGN_EXTEND, ZEXTLOAD -> ZERO_EXTEND — confirm against the full source.
237 ISD::NodeType ISD::getExtForLoadExtType(ISD::LoadExtType ExtType) {
240 return ISD::ANY_EXTEND;
242 return ISD::SIGN_EXTEND;
244 return ISD::ZERO_EXTEND;
249 llvm_unreachable("Invalid LoadExtType");
252 /// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
253 /// when given the operation for (X op Y).
254 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
255 // To perform this operation, we just need to swap the L and G bits of the
// CondCode bit layout used here: bit 2 = L(ess), bit 1 = G(reater),
// bit 0 = E(qual); swapping operands exchanges L and G, leaving E alone.
257 unsigned OldL = (Operation >> 2) & 1;
258 unsigned OldG = (Operation >> 1) & 1;
259 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
260 (OldL << 1) | // New G bit
261 (OldG << 2)); // New L bit.
264 /// getSetCCInverse - Return the operation corresponding to !(X op Y), where
265 /// 'op' is a valid SetCC operation.
// NOTE(review): the if/else guarding the two XOR lines is elided in this
// excerpt — presumably `^= 7` is the integer path (L/G/E only) and `^= 15`
// the FP path (also flips the Unordered bit); confirm against the full file.
266 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
267 unsigned Operation = Op;
269 Operation ^= 7; // Flip L, G, E bits, but not U.
271 Operation ^= 15; // Flip all of the condition bits.
// SETTRUE/SETTRUE2 and beyond would otherwise pick up bogus N/U bits.
273 if (Operation > ISD::SETTRUE2)
274 Operation &= ~8; // Don't let N and U bits get set.
276 return ISD::CondCode(Operation);
280 /// isSignedOp - For an integer comparison, return 1 if the comparison is a
281 /// signed operation and 2 if the result is an unsigned comparison. Return zero
282 /// if the operation does not depend on the sign of the input (setne and seteq).
// NOTE(review): the switch keyword and most case labels are elided in this
// excerpt; only one representative label per return value is visible.
283 static int isSignedOp(ISD::CondCode Opcode) {
285 default: llvm_unreachable("Illegal integer setcc operation!");
287 case ISD::SETNE: return 0;
291 case ISD::SETGE: return 1;
295 case ISD::SETUGE: return 2;
299 /// getSetCCOrOperation - Return the result of a logical OR between different
300 /// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
301 /// returns SETCC_INVALID if it is not possible to represent the resultant
// Returns the combined condition code, or SETCC_INVALID when the two
// comparisons disagree on signedness and cannot be merged.
303 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
// isSignedOp yields 1 for signed, 2 for unsigned; OR-ing to 3 means one of
// each, which is not foldable.
305 if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
306 // Cannot fold a signed integer setcc with an unsigned integer setcc.
307 return ISD::SETCC_INVALID;
309 unsigned Op = Op1 | Op2; // Combine all of the condition bits.
311 // If the N and U bits get set then the resultant comparison DOES suddenly
312 // care about orderedness, and is true when ordered.
313 if (Op > ISD::SETTRUE2)
314 Op &= ~16; // Clear the U bit if the N bit is set.
316 // Canonicalize illegal integer setcc's.
// NOTE(review): the replacement assignment for the SETUNE case is elided in
// this excerpt (presumably Op = ISD::SETNE).
317 if (isInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
320 return ISD::CondCode(Op);
323 /// getSetCCAndOperation - Return the result of a logical AND between different
324 /// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
325 /// function returns zero if it is not possible to represent the resultant
// Returns SETCC_INVALID (zero) when a signed and an unsigned comparison are
// mixed; otherwise ANDs the condition bits and canonicalizes the result.
327 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
329 if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
330 // Cannot fold a signed setcc with an unsigned setcc.
331 return ISD::SETCC_INVALID;
333 // Combine all of the condition bits.
334 ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
336 // Canonicalize illegal integer setcc's.
// NOTE(review): the `if (isInteger)` guard, switch header, and final return
// are elided in this excerpt; only the canonicalization cases are visible.
340 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
341 case ISD::SETOEQ: // SETEQ & SETU[LG]E
342 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE
343 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE
344 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE
351 //===----------------------------------------------------------------------===//
352 // SDNode Profile Support
353 //===----------------------------------------------------------------------===//
355 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
// NOTE(review): the body is elided in this excerpt — presumably a single
// ID.AddInteger(OpC).
357 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
361 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
362 /// solely with their pointer.
363 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
// Pointer identity suffices precisely because VT lists are uniqued.
364 ID.AddPointer(VTList.VTs);
367 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
// Each operand is identified by its producing node plus the result number
// being consumed — both are needed for CSE to distinguish multi-result nodes.
369 static void AddNodeIDOperands(FoldingSetNodeID &ID,
370 ArrayRef<SDValue> Ops) {
371 for (auto& Op : Ops) {
372 ID.AddPointer(Op.getNode());
373 ID.AddInteger(Op.getResNo());
377 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
// SDUse overload: hashes exactly the same (node pointer, result number) pair
// as the SDValue overload, so the two produce identical profiles.
379 static void AddNodeIDOperands(FoldingSetNodeID &ID,
380 ArrayRef<SDUse> Ops) {
381 for (auto& Op : Ops) {
382 ID.AddPointer(Op.getNode());
383 ID.AddInteger(Op.getResNo());
// Hash the nuw/nsw/exact flags of a binary node into the ID.
// NOTE(review): the parameter carrying `exact` and the AddBoolean calls for
// nuw/nsw are elided in this excerpt.
387 static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, bool nuw, bool nsw,
391 ID.AddBoolean(exact);
394 /// AddBinaryNodeIDCustom - Add BinarySDNodes special infos
// Dispatching overload: only opcodes that carry wrap/exact flags contribute
// the extra booleans to the profile.
395 static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, unsigned Opcode,
396 bool nuw, bool nsw, bool exact) {
397 if (isBinOpWithFlags(Opcode))
398 AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
// Build the generic CSE profile for a node from its constituent parts:
// opcode, interned VT list, then (node, result-number) pairs per operand.
401 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
402 SDVTList VTList, ArrayRef<SDValue> OpList) {
403 AddNodeIDOpcode(ID, OpC);
404 AddNodeIDValueTypes(ID, VTList);
405 AddNodeIDOperands(ID, OpList);
408 /// AddNodeIDCustom - If this is an SDNode with special info, add this info to
// the NodeID data, so that two structurally identical nodes with the same
// auxiliary payload fold together in the CSE map.
// NOTE(review): many `break;` statements, closing braces, and a few case
// labels (Register, SrcValue, FrameIndex's JumpTable sibling, LOAD, STORE,
// and the flagged-binop labels) are elided in this sampled excerpt.
410 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
411 switch (N->getOpcode()) {
412 case ISD::TargetExternalSymbol:
413 case ISD::ExternalSymbol:
// Symbols are uniqued by side tables, never by the CSE map.
414 llvm_unreachable("Should only be used on nodes with operands");
415 default: break; // Normal nodes don't need extra info.
416 case ISD::TargetConstant:
417 case ISD::Constant: {
418 const ConstantSDNode *C = cast<ConstantSDNode>(N);
419 ID.AddPointer(C->getConstantIntValue());
420 ID.AddBoolean(C->isOpaque());
423 case ISD::TargetConstantFP:
424 case ISD::ConstantFP: {
425 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
428 case ISD::TargetGlobalAddress:
429 case ISD::GlobalAddress:
430 case ISD::TargetGlobalTLSAddress:
431 case ISD::GlobalTLSAddress: {
432 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
433 ID.AddPointer(GA->getGlobal());
434 ID.AddInteger(GA->getOffset());
435 ID.AddInteger(GA->getTargetFlags());
436 ID.AddInteger(GA->getAddressSpace());
439 case ISD::BasicBlock:
440 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
443 ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
445 case ISD::RegisterMask:
446 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
449 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
451 case ISD::FrameIndex:
452 case ISD::TargetFrameIndex:
453 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
456 case ISD::TargetJumpTable:
457 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
458 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
460 case ISD::ConstantPool:
461 case ISD::TargetConstantPool: {
462 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
463 ID.AddInteger(CP->getAlignment());
464 ID.AddInteger(CP->getOffset());
// Machine CP entries hash themselves; plain entries hash the Constant*.
465 if (CP->isMachineConstantPoolEntry())
466 CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
468 ID.AddPointer(CP->getConstVal());
469 ID.AddInteger(CP->getTargetFlags());
472 case ISD::TargetIndex: {
473 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
474 ID.AddInteger(TI->getIndex());
475 ID.AddInteger(TI->getOffset());
476 ID.AddInteger(TI->getTargetFlags());
// ISD::LOAD case (label elided): memory VT, packed subclass bits, and the
// address space all distinguish otherwise-identical loads.
480 const LoadSDNode *LD = cast<LoadSDNode>(N);
481 ID.AddInteger(LD->getMemoryVT().getRawBits());
482 ID.AddInteger(LD->getRawSubclassData());
483 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
// ISD::STORE case (label elided): mirrors the load case.
487 const StoreSDNode *ST = cast<StoreSDNode>(N);
488 ID.AddInteger(ST->getMemoryVT().getRawBits());
489 ID.AddInteger(ST->getRawSubclassData());
490 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
// Flag-carrying binary ops (labels elided): fold nuw/nsw/exact into the ID.
501 const BinaryWithFlagsSDNode *BinNode = cast<BinaryWithFlagsSDNode>(N);
502 AddBinaryNodeIDCustom(ID, N->getOpcode(), BinNode->hasNoUnsignedWrap(),
503 BinNode->hasNoSignedWrap(), BinNode->isExact());
506 case ISD::ATOMIC_CMP_SWAP:
507 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
508 case ISD::ATOMIC_SWAP:
509 case ISD::ATOMIC_LOAD_ADD:
510 case ISD::ATOMIC_LOAD_SUB:
511 case ISD::ATOMIC_LOAD_AND:
512 case ISD::ATOMIC_LOAD_OR:
513 case ISD::ATOMIC_LOAD_XOR:
514 case ISD::ATOMIC_LOAD_NAND:
515 case ISD::ATOMIC_LOAD_MIN:
516 case ISD::ATOMIC_LOAD_MAX:
517 case ISD::ATOMIC_LOAD_UMIN:
518 case ISD::ATOMIC_LOAD_UMAX:
519 case ISD::ATOMIC_LOAD:
520 case ISD::ATOMIC_STORE: {
521 const AtomicSDNode *AT = cast<AtomicSDNode>(N);
522 ID.AddInteger(AT->getMemoryVT().getRawBits());
523 ID.AddInteger(AT->getRawSubclassData());
524 ID.AddInteger(AT->getPointerInfo().getAddrSpace());
527 case ISD::PREFETCH: {
528 const MemSDNode *PF = cast<MemSDNode>(N);
529 ID.AddInteger(PF->getPointerInfo().getAddrSpace());
532 case ISD::VECTOR_SHUFFLE: {
533 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
534 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
536 ID.AddInteger(SVN->getMaskElt(i));
539 case ISD::TargetBlockAddress:
540 case ISD::BlockAddress: {
541 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
542 ID.AddPointer(BA->getBlockAddress());
543 ID.AddInteger(BA->getOffset());
544 ID.AddInteger(BA->getTargetFlags());
547 } // end switch (N->getOpcode())
549 // Target specific memory nodes could also have address spaces to check.
550 if (N->isTargetMemoryOpcode())
551 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
554 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
// data. Combines the generic profile (opcode, VTs, operands) with any
// node-specific payload via AddNodeIDCustom.
556 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
557 AddNodeIDOpcode(ID, N->getOpcode());
558 // Add the return value info.
559 AddNodeIDValueTypes(ID, N->getVTList());
560 // Add the operand info.
561 AddNodeIDOperands(ID, N->ops());
563 // Handle SDNode leafs with special info.
564 AddNodeIDCustom(ID, N);
567 /// encodeMemSDNodeFlags - Generic routine for computing a value for use in
568 /// the CSE map that carries volatility, temporalness, indexing mode, and
569 /// extension/truncation information.
// NOTE(review): the return expression is only partially visible in this
// excerpt; the asserts pin the bit budget — ConvType occupies 2 bits and AM
// 3 bits, with the boolean flags packed above them (isNonTemporal at bit 6).
571 static inline unsigned
572 encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
573 bool isNonTemporal, bool isInvariant) {
574 assert((ConvType & 3) == ConvType &&
575 "ConvType may not require more than 2 bits!");
576 assert((AM & 7) == AM &&
577 "AM may not require more than 3 bits!");
581 (isNonTemporal << 6) |
585 //===----------------------------------------------------------------------===//
586 // SelectionDAG Class
587 //===----------------------------------------------------------------------===//
589 /// doNotCSE - Return true if CSE should not be performed for this node.
// NOTE(review): additional opcode case labels (around HANDLENODE) and the
// final `return false;` are elided in this excerpt.
590 static bool doNotCSE(SDNode *N) {
591 if (N->getValueType(0) == MVT::Glue)
592 return true; // Never CSE anything that produces a flag.
594 switch (N->getOpcode()) {
596 case ISD::HANDLENODE:
598 return true; // Never CSE these nodes.
601 // Check that remaining values produced are not flags.
// Result 0 was checked above; scan the rest of the value list for glue.
602 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
603 if (N->getValueType(i) == MVT::Glue)
604 return true; // Never CSE anything that produces a flag.
609 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
// SelectionDAG, then restores the (possibly updated) root.
// NOTE(review): the use_empty() guard inside the loop is elided in this
// excerpt — only nodes with no remaining uses are seeded as dead.
611 void SelectionDAG::RemoveDeadNodes() {
612 // Create a dummy node (which is not added to allnodes), that adds a reference
613 // to the root node, preventing it from being deleted.
614 HandleSDNode Dummy(getRoot());
616 SmallVector<SDNode*, 128> DeadNodes;
618 // Add all obviously-dead nodes to the DeadNodes worklist.
619 for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
621 DeadNodes.push_back(I);
623 RemoveDeadNodes(DeadNodes);
625 // If the root changed (e.g. it was a dead load, update the root).
626 setRoot(Dummy.getValue());
629 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
630 /// given list, and any nodes that become unreachable as a result.
// NOTE(review): the operand-dropping statements inside the inner loop and the
// final node deallocation are elided in this excerpt.
631 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
633 // Process the worklist, deleting the nodes and adding their uses to the
635 while (!DeadNodes.empty()) {
636 SDNode *N = DeadNodes.pop_back_val();
// Notify listeners before the node is torn down so they can drop references.
638 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
639 DUL->NodeDeleted(N, nullptr);
641 // Take the node out of the appropriate CSE map.
642 RemoveNodeFromCSEMaps(N);
644 // Next, brutally remove the operand list. This is safe to do, as there are
645 // no cycles in the graph.
646 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
648 SDNode *Operand = Use.getNode();
651 // Now that we removed this operand, see if there are no uses of it left.
652 if (Operand->use_empty())
653 DeadNodes.push_back(Operand);
// Convenience wrapper: delete a single dead node (and anything that becomes
// dead as a consequence) via the worklist-based RemoveDeadNodes.
660 void SelectionDAG::RemoveDeadNode(SDNode *N){
661 SmallVector<SDNode*, 16> DeadNodes(1, N);
663 // Create a dummy node that adds a reference to the root node, preventing
664 // it from being deleted. (This matters if the root is an operand of the
666 HandleSDNode Dummy(getRoot());
668 RemoveDeadNodes(DeadNodes);
// Delete a node known to be dead: un-register it from CSE first, then tear
// down its operands and storage.
671 void SelectionDAG::DeleteNode(SDNode *N) {
672 // First take this out of the appropriate CSE map.
673 RemoveNodeFromCSEMaps(N);
675 // Finally, remove uses due to operands of this node, remove from the
676 // AllNodes list, and delete the node.
677 DeleteNodeNotInCSEMaps(N);
// Tear down a dead node that has already been removed from the CSE maps.
// NOTE(review): the DropOperands()/DeallocateNode() calls are elided in this
// excerpt; only the preconditions are visible.
680 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
681 assert(N != AllNodes.begin() && "Cannot delete the entry node!");
682 assert(N->use_empty() && "Cannot delete a node that is not dead!");
684 // Drop all of the operands and decrement used node's use counts.
// Invalidate all debug values attached to Node and forget the mapping.
// NOTE(review): the early `return;` after a failed find and the map-entry
// erase are elided in this excerpt.
690 void SDDbgInfo::erase(const SDNode *Node) {
691 DbgValMapType::iterator I = DbgValMap.find(Node);
692 if (I == DbgValMap.end())
// Mark rather than delete: other tables may still reference the SDDbgValues.
694 for (auto &Val: I->second)
695 Val->setIsInvalidated();
// Return a node's storage to the allocator and scrub associated metadata.
// NOTE(review): the trailing DbgInfo->erase(N) call is elided in this excerpt.
699 void SelectionDAG::DeallocateNode(SDNode *N) {
// Operand arrays are only heap-allocated past a small inline threshold.
700 if (N->OperandsNeedDelete)
701 delete[] N->OperandList;
703 // Set the opcode to DELETED_NODE to help catch bugs when node
704 // memory is reallocated.
705 N->NodeType = ISD::DELETED_NODE;
707 NodeAllocator.Deallocate(AllNodes.remove(N));
709 // If any of the SDDbgValue nodes refer to this SDNode, invalidate
710 // them and forget about that node.
715 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
// Debug-only structural invariants for the few node kinds with non-trivial
// operand/result constraints. NOTE(review): the switch's default/break lines
// and closing braces are elided in this sampled excerpt.
716 static void VerifySDNode(SDNode *N) {
717 switch (N->getOpcode()) {
720 case ISD::BUILD_PAIR: {
721 EVT VT = N->getValueType(0);
722 assert(N->getNumValues() == 1 && "Too many results!");
723 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
724 "Wrong return type!");
725 assert(N->getNumOperands() == 2 && "Wrong number of operands!");
726 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
727 "Mismatched operand types!");
728 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
729 "Wrong operand type!");
// A pair's result is exactly the concatenation of its two halves.
730 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
731 "Wrong return type size");
734 case ISD::BUILD_VECTOR: {
735 assert(N->getNumValues() == 1 && "Too many results!");
736 assert(N->getValueType(0).isVector() && "Wrong return type!");
737 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
738 "Wrong number of operands!");
739 EVT EltVT = N->getValueType(0).getVectorElementType();
740 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
// Operands may be wider than the element type (promoted by legalization),
// but never narrower, and all must agree with one another.
741 assert((I->getValueType() == EltVT ||
742 (EltVT.isInteger() && I->getValueType().isInteger() &&
743 EltVT.bitsLE(I->getValueType()))) &&
744 "Wrong operand type!");
745 assert(I->getValueType() == N->getOperand(0).getValueType() &&
746 "Operands must all have the same type");
754 /// \brief Insert a newly allocated node into the DAG.
756 /// Handles insertion into the all nodes list and CSE map, as well as
757 /// verification and other common operations when a new node is allocated.
// NOTE(review): the debug-build VerifySDNode call is elided in this excerpt;
// only the AllNodes insertion is visible.
758 void SelectionDAG::InsertNode(SDNode *N) {
759 AllNodes.push_back(N);
765 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
766 /// correspond to it. This is useful when we're about to delete or repurpose
767 /// the node. We don't want future request for structurally identical nodes
768 /// to return N anymore.
// Returns whether the node was actually found in a map. NOTE(review): the
// `bool Erased` declaration, several `break;` lines, the CondCode case label,
// and the default label ahead of the CSEMap removal are elided in this
// sampled excerpt.
769 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
771 switch (N->getOpcode()) {
772 case ISD::HANDLENODE: return false; // noop.
// CondCode nodes live in a side table indexed by the condition code itself.
774 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
775 "Cond code doesn't exist!");
776 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
777 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
779 case ISD::ExternalSymbol:
780 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
782 case ISD::TargetExternalSymbol: {
783 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
// Target symbols are keyed on (name, target flags), not name alone.
784 Erased = TargetExternalSymbols.erase(
785 std::pair<std::string,unsigned char>(ESN->getSymbol(),
786 ESN->getTargetFlags()));
789 case ISD::VALUETYPE: {
790 EVT VT = cast<VTSDNode>(N)->getVT();
791 if (VT.isExtended()) {
792 Erased = ExtendedValueTypeNodes.erase(VT);
794 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
795 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
800 // Remove it from the CSE Map.
801 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
802 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
803 Erased = CSEMap.RemoveNode(N);
807 // Verify that the node was actually in one of the CSE maps, unless it has a
808 // flag result (which cannot be CSE'd) or is one of the special cases that are
809 // not subject to CSE.
810 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
811 !N->isMachineOpcode() && !doNotCSE(N)) {
814 llvm_unreachable("Node is not in map!");
820 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
821 /// maps and modified in place. Add it back to the CSE maps, unless an identical
822 /// node already exists, in which case transfer all its users to the existing
823 /// node. This transfer can potentially trigger recursive merging.
// NOTE(review): the return type / doNotCSE early-out, the `if (Existing != N)`
// test, the early return after deletion, and the NodeUpdated(N) call in the
// final loop are elided in this sampled excerpt.
826 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
827 // For node types that aren't CSE'd, just act as if no identical node
830 SDNode *Existing = CSEMap.GetOrInsertNode(N);
832 // If there was already an existing matching node, use ReplaceAllUsesWith
833 // to replace the dead one with the existing one. This can cause
834 // recursive merging of other unrelated nodes down the line.
835 ReplaceAllUsesWith(N, Existing);
837 // N is now dead. Inform the listeners and delete it.
838 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
839 DUL->NodeDeleted(N, Existing);
840 DeleteNodeNotInCSEMaps(N);
845 // If the node doesn't already exist, we updated it. Inform listeners.
846 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
850 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
851 /// were replaced with those specified. If this node is never memoized,
852 /// return null, otherwise return a pointer to the slot it would take. If a
853 /// node already exists with these operands, the slot will be non-null.
// One-operand overload. NOTE(review): in all three overloads the doNotCSE
// early-out, the FoldingSetNodeID declaration, the InsertPos parameter line,
// and the final `return Node;` are elided in this sampled excerpt; the
// visible body builds the would-be profile and probes the CSE map.
854 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
859 SDValue Ops[] = { Op };
861 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
862 AddNodeIDCustom(ID, N);
863 SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
867 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
868 /// were replaced with those specified. If this node is never memoized,
869 /// return null, otherwise return a pointer to the slot it would take. If a
870 /// node already exists with these operands, the slot will be non-null.
// Two-operand overload.
871 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
872 SDValue Op1, SDValue Op2,
877 SDValue Ops[] = { Op1, Op2 };
879 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
880 AddNodeIDCustom(ID, N);
881 SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
886 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
887 /// were replaced with those specified. If this node is never memoized,
888 /// return null, otherwise return a pointer to the slot it would take. If a
889 /// node already exists with these operands, the slot will be non-null.
// General ArrayRef overload.
890 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
896 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
897 AddNodeIDCustom(ID, N);
898 SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
902 /// getEVTAlignment - Compute the default alignment value for the
// given value type, per the target's data layout.
905 unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
// iPTR has no IR type of its own; stand in an i8* for alignment purposes.
906 Type *Ty = VT == MVT::iPTR ?
907 PointerType::get(Type::getInt8Ty(*getContext()), 0) :
908 VT.getTypeForEVT(*getContext());
910 return TLI->getDataLayout()->getABITypeAlignment(Ty);
913 // EntryNode could meaningfully have debug info if we can find it...
// Constructor leaves the target hooks (TLI/TSI) null; they are wired up
// later in init() once the MachineFunction is known.
914 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
915 : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
916 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
917 Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
918 UpdateListeners(nullptr) {
// EntryNode is a member, not allocator-managed; it is always first in the list.
919 AllNodes.push_back(&EntryNode);
920 DbgInfo = new SDDbgInfo();
// Late initialization: bind the DAG to a MachineFunction and pull the
// lowering/DAG-info hooks from its subtarget.
// NOTE(review): the assignment of the MachineFunction member itself is elided
// in this excerpt.
923 void SelectionDAG::init(MachineFunction &mf) {
925 TLI = getSubtarget().getTargetLowering();
926 TSI = getSubtarget().getSelectionDAGInfo();
927 Context = &mf.getFunction()->getContext();
// Destructor. NOTE(review): the allnodes_clear() and `delete DbgInfo` calls
// are elided in this excerpt; only the dangling-listener check is visible.
930 SelectionDAG::~SelectionDAG() {
931 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
// Deallocate every node in the DAG. EntryNode (always the first entry) is a
// member object, so it is unlinked with remove() but never deallocated.
936 void SelectionDAG::allnodes_clear() {
937 assert(&*AllNodes.begin() == &EntryNode);
938 AllNodes.remove(AllNodes.begin());
939 while (!AllNodes.empty())
940 DeallocateNode(AllNodes.begin());
// Allocate a binary node, using the flag-carrying subclass for opcodes that
// support nuw/nsw/exact. NOTE(review): the `return FN;`, closing brace, and
// final `return N;` are elided in this excerpt.
943 BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
944 SDVTList VTs, SDValue N1,
945 SDValue N2, bool nuw, bool nsw,
947 if (isBinOpWithFlags(Opcode)) {
948 BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
949 Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
950 FN->setHasNoUnsignedWrap(nuw);
951 FN->setHasNoSignedWrap(nsw);
952 FN->setIsExact(exact);
// Plain path: flags are dropped for opcodes that cannot carry them.
957 BinarySDNode *N = new (NodeAllocator)
958 BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
// Reset the DAG to its freshly-constructed state: drop all nodes and side
// tables, then re-seed AllNodes with the EntryNode and reset the root.
// NOTE(review): the allnodes_clear()/CSEMap.clear() calls and the trailing
// DbgInfo->clear() are elided in this sampled excerpt.
962 void SelectionDAG::clear() {
964 OperandAllocator.Reset();
967 ExtendedValueTypeNodes.clear();
968 ExternalSymbols.clear();
969 TargetExternalSymbols.clear();
970 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
971 static_cast<CondCodeSDNode*>(nullptr));
972 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
973 static_cast<SDNode*>(nullptr));
// EntryNode survives clear(); just detach its use list and re-register it.
975 EntryNode.UseList = nullptr;
976 AllNodes.push_back(&EntryNode);
977 Root = getEntryNode();
// Extend-or-truncate helpers: widen Op to VT with the named extension when VT
// is wider, otherwise truncate. Equal widths take the TRUNCATE path, which
// callers rely on to be a no-op fold.
981 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
982 return VT.bitsGT(Op.getValueType()) ?
983 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
984 getNode(ISD::TRUNCATE, DL, VT, Op);
// Signed variant: uses SIGN_EXTEND when widening.
987 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
988 return VT.bitsGT(Op.getValueType()) ?
989 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
990 getNode(ISD::TRUNCATE, DL, VT, Op);
// Unsigned variant: uses ZERO_EXTEND when widening.
993 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
994 return VT.bitsGT(Op.getValueType()) ?
995 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
996 getNode(ISD::TRUNCATE, DL, VT, Op);
// Like the ext-or-trunc helpers, but the extension kind follows the target's
// boolean-contents convention for OpVT. NOTE(review): the OpVT parameter line
// is elided in this excerpt.
999 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT,
1001 if (VT.bitsLE(Op.getValueType()))
1002 return getNode(ISD::TRUNCATE, SL, VT, Op);
1004 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1005 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
// Zero-extend-in-register: keep Op's type but clear every bit above VT's
// width, implemented as an AND with the low-bits mask.
1008 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
1009 assert(!VT.isVector() &&
1010 "getZeroExtendInReg should use the vector element type instead of "
1011 "the vector type!");
1012 if (Op.getValueType() == VT) return Op;
1013 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1014 APInt Imm = APInt::getLowBitsSet(BitWidth,
1015 VT.getSizeInBits());
1016 return getNode(ISD::AND, DL, Op.getValueType(), Op,
1017 getConstant(Imm, Op.getValueType()));
1020 SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
1021 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1022 assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
1023 "The sizes of the input and result must match in order to perform the "
1024 "extend in-register.");
1025 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1026 "The destination vector type must have fewer lanes than the input.");
1027 return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
1030 SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
1031 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1032 assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
1033 "The sizes of the input and result must match in order to perform the "
1034 "extend in-register.");
1035 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1036 "The destination vector type must have fewer lanes than the input.");
1037 return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
1040 SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
1041 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1042 assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
1043 "The sizes of the input and result must match in order to perform the "
1044 "extend in-register.");
1045 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1046 "The destination vector type must have fewer lanes than the input.");
1047 return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  // XOR with an all-ones constant flips every bit of Val.
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
/// getLogicalNOT - Invert the boolean Val by XOR'ing with the target's
/// canonical "true" value for VT.
SDValue SelectionDAG::getLogicalNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  // Pick the "true" constant matching the target's boolean representation.
  switch (TLI->getBooleanContents(VT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    // Booleans are 0/1: "true" is the constant 1.
    TrueValue = getConstant(1, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    // Booleans are 0/-1: "true" is all-ones.
    TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()),
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1075 SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT, bool isO) {
1076 EVT EltVT = VT.getScalarType();
1077 assert((EltVT.getSizeInBits() >= 64 ||
1078 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1079 "getConstant with a uint64_t value that doesn't fit in the type!");
1080 return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT, isO);
1083 SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT, bool isO)
1085 return getConstant(*ConstantInt::get(*Context, Val), VT, isT, isO);
/// getConstant - Return an integer constant of type VT holding Val. For
/// vector types the scalar node is built once and splatted via BUILD_VECTOR;
/// illegal element types are promoted or expanded first.
SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT,
  assert(VT.isInteger() && "Cannot create FP integer constant!");
  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;
  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
           TargetLowering::TypeExpandInteger) {
    APInt NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
    // Slice the wide value into legal-width little-endian pieces.
    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                     .trunc(ViaEltSizeInBits),
                                     ViaEltVT, isT, isO));
    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (TLI->isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());
    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // This situation occurs in MIPS MSA.
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
    SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
                             getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  // CSE-lookup the scalar constant before allocating a new node.
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  SDNode *N = nullptr;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    return SDValue(N, 0);
  N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, EltVT);
  CSEMap.InsertNode(N, IP);
  // Splat the scalar into a BUILD_VECTOR when a vector type was requested.
  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
1182 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
1183 return getConstant(Val, TLI->getPointerTy(), isTarget);
1187 SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
1188 return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
/// getConstantFP - Return an FP constant of type VT holding V, splatted via
/// BUILD_VECTOR for vector types. Nodes are CSE'd through the FoldingSet.
SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
  EVT EltVT = VT.getScalarType();
  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  SDNode *N = nullptr;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    return SDValue(N, 0);
  N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
  CSEMap.InsertNode(N, IP);
  // Splat the scalar into a BUILD_VECTOR when a vector type was requested.
  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME SDLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
/// getConstantFP - Build an FP constant for the double Val, converting it to
/// the element type's float semantics when VT is not f32/f64.
SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
    // Convert the IEEE-double value into the target type's semantics.
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
    return getConstantFP(apf, VT, isTarget);
  llvm_unreachable("Unsupported type in getConstantFP");
/// getGlobalAddress - Return a (target-specific when requested) global
/// address node for GV plus Offset, CSE'd on opcode, offset, flags and
/// address space.
SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");
  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
  Offset = SignExtend64(Offset, BitWidth);
  // Thread-local globals use the dedicated TLS opcodes.
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getFrameIndex - Return a FrameIndex node for slot FI (target-specific when
/// isTarget), CSE'd through the FoldingSet.
SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getJumpTable - Return a JumpTable node for table JTI (target-specific when
/// isTarget), CSE'd on index and flags.
SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(TargetFlags);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getConstantPool - Return a ConstantPool node for C. When no alignment is
/// supplied, the preferred alignment of C's type is used.
SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
    // Default to the data layout's preferred alignment for the constant.
    Alignment = TLI->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getConstantPool - Return a ConstantPool node for a target-specific
/// MachineConstantPoolValue, which contributes its own CSE id.
SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
    // Default to the data layout's preferred alignment for the constant.
    Alignment = TLI->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  // The MachineConstantPoolValue adds its own distinguishing bits.
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getTargetIndex - Return a TargetIndex node identified by (Index, Offset,
/// TargetFlags), CSE'd like the other leaf nodes.
SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getBasicBlock - Return a BasicBlock node wrapping MBB, CSE'd through the
/// FoldingSet.
SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getValueType - Return a VTSDNode wrapping VT; cached so there is exactly
/// one node per type (simple VTs in a vector, extended VTs in a map).
SDValue SelectionDAG::getValueType(EVT VT) {
  // Grow the simple-VT cache on demand.
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  return SDValue(N, 0);
/// getExternalSymbol - Return the cached ExternalSymbol node for Sym,
/// creating it on first use.
SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  return SDValue(N, 0);
/// getTargetExternalSymbol - Target-specific variant of getExternalSymbol,
/// cached per (symbol, target-flags) pair.
SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  return SDValue(N, 0);
/// getCondCode - Return the singleton CondCodeSDNode for Cond, growing the
/// cache vector on demand.
SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);
  // Create and cache the node the first time this condition is requested.
  if (!CondCodeNodes[Cond]) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
  return SDValue(CondCodeNodes[Cond], 0);
// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  int NElts = M.size();
  // Remap each mask entry across the NElts boundary between the two inputs.
  for (int i = 0; i != NElts; ++i) {
/// getVectorShuffle - Return a VECTOR_SHUFFLE of N1/N2 with the given mask,
/// after canonicalizing the operands and mask and folding trivial cases
/// (undef inputs, identity masks, splats).
SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");
  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);
  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  // Canonicalize shuffle v, v -> v, undef
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);
  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
    } else if (MaskVec[i] >= 0) {
  // Every lane is undef when no mask entry selects either side.
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    commuteShuffle(N1, N2, MaskVec);
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.getOpcode() == ISD::UNDEF;
  // Re-check whether both sides ended up undef.
  if (N1.getOpcode() == ISD::UNDEF && N2Undef)
    return getUNDEF(VT);
  // If Identity shuffle return that node.
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
  if (Identity && NElts)
  // Shuffling a constant splat doesn't change the result.
    // Look through any bitcasts. We check that these don't change the number
    // (and size) of elements and just changes their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);
    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.getOpcode() == ISD::UNDEF)
        return getUNDEF(VT);
      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements match or the value splatted is a zero constant.
        if (V.getValueType().getVectorNumElements() ==
            VT.getVectorNumElements())
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
  // CSE-lookup the shuffle on its operands and full mask contents.
  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));
  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
                                            dl.getDebugLoc(), N1, N2,
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getCommutedVectorShuffle - Return a shuffle equivalent to SV with its two
/// operands swapped and every mask index remapped to the other input.
SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
  MVT VT = SV.getSimpleValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i) {
    int Idx = SV.getMaskElt(i);
    // Shift indices across the NumElems boundary so each entry keeps
    // selecting the same lane after the operands swap.
      if (Idx < (int)NumElems)
    MaskVec.push_back(Idx);
  SDValue Op0 = SV.getOperand(0);
  SDValue Op1 = SV.getOperand(1);
  return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, &MaskVec[0]);
/// getConvertRndSat - Build a CONVERT_RNDSAT node, folding away trivial
/// same-type conversions.
SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), Ops);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getRegister - Return a Register node for RegNo, CSE'd on (RegNo, VT).
SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
  ID.AddInteger(RegNo);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getRegisterMask - Return a RegisterMask node wrapping RegMask, CSE'd on
/// the mask pointer.
SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
  ID.AddPointer(RegMask);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getEHLabel - Return an EH_LABEL node carrying Label, chained after Root.
SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops);
  ID.AddPointer(Label);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
                                                dl.getDebugLoc(), Root, Label);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getBlockAddress - Return a BlockAddress node for BA (target-specific when
/// requested), CSE'd on offset and flags.
SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getSrcValue - Return a SrcValue node wrapping the IR value V; V may be
/// null, otherwise it must have pointer type.
SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
/// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
  // The source/destination address spaces distinguish otherwise-equal casts.
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
                                                      VT, Ptr, SrcAS, DestAS);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
1748 /// getShiftAmountOperand - Return the specified value casted to
1749 /// the target's desired shift amount type.
1750 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1751 EVT OpTy = Op.getValueType();
1752 EVT ShTy = TLI->getShiftAmountTy(LHSTy);
1753 if (OpTy == ShTy || OpTy.isVector()) return Op;
1755 ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
1756 return getNode(Opcode, SDLoc(Op), ShTy, Op);
1759 /// CreateStackTemporary - Create a stack temporary, suitable for holding the
1760 /// specified value type.
1761 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1762 MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1763 unsigned ByteSize = VT.getStoreSize();
1764 Type *Ty = VT.getTypeForEVT(*getContext());
1765 unsigned StackAlign =
1766 std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
1768 int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
1769 return getFrameIndex(FrameIdx, TLI->getPointerTy());
1772 /// CreateStackTemporary - Create a stack temporary suitable for holding
1773 /// either of the specified value types.
1774 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1775 unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
1776 VT2.getStoreSizeInBits())/8;
1777 Type *Ty1 = VT1.getTypeForEVT(*getContext());
1778 Type *Ty2 = VT2.getTypeForEVT(*getContext());
1779 const DataLayout *TD = TLI->getDataLayout();
1780 unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
1781 TD->getPrefTypeAlignment(Ty2));
1783 MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1784 int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
1785 return getFrameIndex(FrameIdx, TLI->getPointerTy());
/// FoldSetCC - Try to constant-fold the comparison (N1 Cond N2) to a constant
/// of type VT; returns a null SDValue when no fold applies.
SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, SDLoc dl) {
  // These setcc operations always fold.
  case ISD::SETFALSE2: return getConstant(0, VT);
  case ISD::SETTRUE2: {
    TargetLowering::BooleanContent Cnt =
        TLI->getBooleanContents(N1->getValueType(0));
    // "True" is all-ones or 1 depending on the target's boolean content.
        Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
  // Fold comparisons of two integer constants exactly via APInt.
  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
      const APInt &C1 = N1C->getAPIntValue();
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
  // Fold comparisons of two FP constants via APFloat::compare. An unordered
  // result makes the plain (ordered-or-unordered) forms undefined.
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
    // Ensure that the constant occurs on the RHS.
    ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
    MVT CompVT = N1.getValueType().getSimpleVT();
    if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
    return getSetCC(dl, VT, N2, N1, SwappedCond);
  // Could not fold it.
/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  // This predicate is not safe for vector operations.
  if (Op.getValueType().isVector())
  // The sign bit is zero iff masking with only the sign bit yields zero.
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
1907 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
1908 /// this predicate to simplify operations downstream. Mask is known to be zero
1909 /// for bits that V cannot have.
1910 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
1911 unsigned Depth) const {
1912 APInt KnownZero, KnownOne;
1913 computeKnownBits(Op, KnownZero, KnownOne, Depth);
1914 return (KnownZero & Mask) == Mask;
1917 /// Determine which bits of Op are known to be either zero or one and return
1918 /// them in the KnownZero/KnownOne bitsets.
1919 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
1920 APInt &KnownOne, unsigned Depth) const {
1921 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1923 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
1925 return; // Limit search depth.
1927 APInt KnownZero2, KnownOne2;
1929 switch (Op.getOpcode()) {
1931 // We know all of the bits for a constant!
1932 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
1933 KnownZero = ~KnownOne;
1936 // If either the LHS or the RHS are Zero, the result is zero.
1937 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1938 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1940 // Output known-1 bits are only known if set in both the LHS & RHS.
1941 KnownOne &= KnownOne2;
1942 // Output known-0 are known to be clear if zero in either the LHS | RHS.
1943 KnownZero |= KnownZero2;
1946 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1947 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1949 // Output known-0 bits are only known if clear in both the LHS & RHS.
1950 KnownZero &= KnownZero2;
1951 // Output known-1 are known to be set if set in either the LHS | RHS.
1952 KnownOne |= KnownOne2;
1955 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1956 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1958 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1959 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
1960 // Output known-1 are known to be set if set in only one of the LHS, RHS.
1961 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
1962 KnownZero = KnownZeroOut;
1966 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1967 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1969 // If low bits are zero in either operand, output low known-0 bits.
1970 // Also compute a conserative estimate for high known-0 bits.
1971 // More trickiness is possible, but this is sufficient for the
1972 // interesting case of alignment computation.
1973 KnownOne.clearAllBits();
1974 unsigned TrailZ = KnownZero.countTrailingOnes() +
1975 KnownZero2.countTrailingOnes();
1976 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
1977 KnownZero2.countLeadingOnes(),
1978 BitWidth) - BitWidth;
1980 TrailZ = std::min(TrailZ, BitWidth);
1981 LeadZ = std::min(LeadZ, BitWidth);
1982 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
1983 APInt::getHighBitsSet(BitWidth, LeadZ);
1987 // For the purposes of computing leading zeros we can conservatively
1988 // treat a udiv as a logical right shift by the power of 2 known to
1989 // be less than the denominator.
1990 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1991 unsigned LeadZ = KnownZero2.countLeadingOnes();
1993 KnownOne2.clearAllBits();
1994 KnownZero2.clearAllBits();
1995 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1996 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
1997 if (RHSUnknownLeadingOnes != BitWidth)
1998 LeadZ = std::min(BitWidth,
1999 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
2001 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
2005 computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
2006 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2008 // Only known if known in both the LHS and RHS.
2009 KnownOne &= KnownOne2;
2010 KnownZero &= KnownZero2;
2012 case ISD::SELECT_CC:
2013 computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
2014 computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
2016 // Only known if known in both the LHS and RHS.
2017 KnownOne &= KnownOne2;
2018 KnownZero &= KnownZero2;
2026 if (Op.getResNo() != 1)
2028 // The boolean result conforms to getBooleanContents.
2029 // If we know the result of a setcc has the top bits zero, use this info.
2030 // We know that we have an integer-based boolean since these operations
2031 // are only available for integer.
2032 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2033 TargetLowering::ZeroOrOneBooleanContent &&
2035 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2038 // If we know the result of a setcc has the top bits zero, use this info.
2039 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2040 TargetLowering::ZeroOrOneBooleanContent &&
2042 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2045 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
2046 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2047 unsigned ShAmt = SA->getZExtValue();
2049 // If the shift count is an invalid immediate, don't do anything.
2050 if (ShAmt >= BitWidth)
2053 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2054 KnownZero <<= ShAmt;
2056 // low bits known zero.
2057 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
2061 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
2062 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2063 unsigned ShAmt = SA->getZExtValue();
2065 // If the shift count is an invalid immediate, don't do anything.
2066 if (ShAmt >= BitWidth)
2069 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2070 KnownZero = KnownZero.lshr(ShAmt);
2071 KnownOne = KnownOne.lshr(ShAmt);
2073 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
2074 KnownZero |= HighBits; // High bits known zero.
2078 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2079 unsigned ShAmt = SA->getZExtValue();
2081 // If the shift count is an invalid immediate, don't do anything.
2082 if (ShAmt >= BitWidth)
2085 // If any of the demanded bits are produced by the sign extension, we also
2086 // demand the input sign bit.
2087 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
2089 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2090 KnownZero = KnownZero.lshr(ShAmt);
2091 KnownOne = KnownOne.lshr(ShAmt);
2093 // Handle the sign bits.
2094 APInt SignBit = APInt::getSignBit(BitWidth);
2095 SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask.
2097 if (KnownZero.intersects(SignBit)) {
2098 KnownZero |= HighBits; // New bits are known zero.
2099 } else if (KnownOne.intersects(SignBit)) {
2100 KnownOne |= HighBits; // New bits are known one.
2104 case ISD::SIGN_EXTEND_INREG: {
2105 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2106 unsigned EBits = EVT.getScalarType().getSizeInBits();
2108 // Sign extension. Compute the demanded bits in the result that are not
2109 // present in the input.
2110 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2112 APInt InSignBit = APInt::getSignBit(EBits);
2113 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2115 // If the sign extended bits are demanded, we know that the sign
2117 InSignBit = InSignBit.zext(BitWidth);
2118 if (NewBits.getBoolValue())
2119 InputDemandedBits |= InSignBit;
2121 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2122 KnownOne &= InputDemandedBits;
2123 KnownZero &= InputDemandedBits;
2125 // If the sign bit of the input is known set or clear, then we know the
2126 // top bits of the result.
2127 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
2128 KnownZero |= NewBits;
2129 KnownOne &= ~NewBits;
2130 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
2131 KnownOne |= NewBits;
2132 KnownZero &= ~NewBits;
2133 } else { // Input sign bit unknown
2134 KnownZero &= ~NewBits;
2135 KnownOne &= ~NewBits;
2140 case ISD::CTTZ_ZERO_UNDEF:
2142 case ISD::CTLZ_ZERO_UNDEF:
2144 unsigned LowBits = Log2_32(BitWidth)+1;
2145 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
2146 KnownOne.clearAllBits();
2150 LoadSDNode *LD = cast<LoadSDNode>(Op);
2151 // If this is a ZEXTLoad and we are looking at the loaded value.
2152 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2153 EVT VT = LD->getMemoryVT();
2154 unsigned MemBits = VT.getScalarType().getSizeInBits();
2155 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
2156 } else if (const MDNode *Ranges = LD->getRanges()) {
2157 computeKnownBitsFromRangeMetadata(*Ranges, KnownZero);
2161 case ISD::ZERO_EXTEND: {
2162 EVT InVT = Op.getOperand(0).getValueType();
2163 unsigned InBits = InVT.getScalarType().getSizeInBits();
2164 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2165 KnownZero = KnownZero.trunc(InBits);
2166 KnownOne = KnownOne.trunc(InBits);
2167 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2168 KnownZero = KnownZero.zext(BitWidth);
2169 KnownOne = KnownOne.zext(BitWidth);
2170 KnownZero |= NewBits;
2173 case ISD::SIGN_EXTEND: {
2174 EVT InVT = Op.getOperand(0).getValueType();
2175 unsigned InBits = InVT.getScalarType().getSizeInBits();
2176 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2178 KnownZero = KnownZero.trunc(InBits);
2179 KnownOne = KnownOne.trunc(InBits);
2180 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2182 // Note if the sign bit is known to be zero or one.
2183 bool SignBitKnownZero = KnownZero.isNegative();
2184 bool SignBitKnownOne = KnownOne.isNegative();
2186 KnownZero = KnownZero.zext(BitWidth);
2187 KnownOne = KnownOne.zext(BitWidth);
2189 // If the sign bit is known zero or one, the top bits match.
2190 if (SignBitKnownZero)
2191 KnownZero |= NewBits;
2192 else if (SignBitKnownOne)
2193 KnownOne |= NewBits;
2196 case ISD::ANY_EXTEND: {
2197 EVT InVT = Op.getOperand(0).getValueType();
2198 unsigned InBits = InVT.getScalarType().getSizeInBits();
2199 KnownZero = KnownZero.trunc(InBits);
2200 KnownOne = KnownOne.trunc(InBits);
2201 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2202 KnownZero = KnownZero.zext(BitWidth);
2203 KnownOne = KnownOne.zext(BitWidth);
2206 case ISD::TRUNCATE: {
2207 EVT InVT = Op.getOperand(0).getValueType();
2208 unsigned InBits = InVT.getScalarType().getSizeInBits();
2209 KnownZero = KnownZero.zext(InBits);
2210 KnownOne = KnownOne.zext(InBits);
2211 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2212 KnownZero = KnownZero.trunc(BitWidth);
2213 KnownOne = KnownOne.trunc(BitWidth);
2216 case ISD::AssertZext: {
2217 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2218 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2219 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2220 KnownZero |= (~InMask);
2221 KnownOne &= (~KnownZero);
2225 // All bits are zero except the low bit.
2226 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2230 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
2231 // We know that the top bits of C-X are clear if X contains less bits
2232 // than C (i.e. no wrap-around can happen). For example, 20-X is
2233 // positive if we can prove that X is >= 0 and < 16.
2234 if (CLHS->getAPIntValue().isNonNegative()) {
2235 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2236 // NLZ can't be BitWidth with no sign bit
2237 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2238 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2240 // If all of the MaskV bits are known to be zero, then we know the
2241 // output top bits are zero, because we now know that the output is
2243 if ((KnownZero2 & MaskV) == MaskV) {
2244 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2245 // Top bits known zero.
2246 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
2254 // Output known-0 bits are known if clear or set in both the low clear bits
2255 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
2256 // low 3 bits clear.
2257 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2258 unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
2260 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2261 KnownZeroOut = std::min(KnownZeroOut,
2262 KnownZero2.countTrailingOnes());
2264 if (Op.getOpcode() == ISD::ADD) {
2265 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
2269 // With ADDE, a carry bit may be added in, so we can only use this
2270 // information if we know (at least) that the low two bits are clear. We
2271 // then return to the caller that the low bit is unknown but that other bits
2273 if (KnownZeroOut >= 2) // ADDE
2274 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
2278 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2279 const APInt &RA = Rem->getAPIntValue().abs();
2280 if (RA.isPowerOf2()) {
2281 APInt LowBits = RA - 1;
2282 computeKnownBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);
2284 // The low bits of the first operand are unchanged by the srem.
2285 KnownZero = KnownZero2 & LowBits;
2286 KnownOne = KnownOne2 & LowBits;
2288 // If the first operand is non-negative or has all low bits zero, then
2289 // the upper bits are all zero.
2290 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2291 KnownZero |= ~LowBits;
2293 // If the first operand is negative and not all low bits are zero, then
2294 // the upper bits are all one.
2295 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2296 KnownOne |= ~LowBits;
2297 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
2302 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2303 const APInt &RA = Rem->getAPIntValue();
2304 if (RA.isPowerOf2()) {
2305 APInt LowBits = (RA - 1);
2306 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth + 1);
2308 // The upper bits are all zero, the lower ones are unchanged.
2309 KnownZero = KnownZero2 | ~LowBits;
2310 KnownOne = KnownOne2 & LowBits;
2315 // Since the result is less than or equal to either operand, any leading
2316 // zero bits in either operand must also exist in the result.
2317 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2318 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2320 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2321 KnownZero2.countLeadingOnes());
2322 KnownOne.clearAllBits();
2323 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2326 case ISD::FrameIndex:
2327 case ISD::TargetFrameIndex:
2328 if (unsigned Align = InferPtrAlignment(Op)) {
2329 // The low bits are known zero if the pointer is aligned.
2330 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
2336 if (Op.getOpcode() < ISD::BUILTIN_OP_END)
2339 case ISD::INTRINSIC_WO_CHAIN:
2340 case ISD::INTRINSIC_W_CHAIN:
2341 case ISD::INTRINSIC_VOID:
2342 // Allow the target to implement this method for its nodes.
2343 TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2347 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2350 /// ComputeNumSignBits - Return the number of times the sign bit of the
2351 /// register is replicated into the other bits. We know that at least 1 bit
2352 /// is always equal to the sign bit (itself), but other cases can give us
2353 /// information. For example, immediately after an "SRA X, 2", we know that
2354 /// the top 3 bits are all equal to each other, so we return 3.
2355 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
// NOTE(review): this listing is elided — the embedded original line numbers
// are non-contiguous, so several case labels, braces and 'break's are not
// visible here. Comments below hedge wherever the elided lines hide context.
2356 EVT VT = Op.getValueType();
2357 assert(VT.isInteger() && "Invalid VT!");
2358 unsigned VTBits = VT.getScalarType().getSizeInBits();
2360 unsigned FirstAnswer = 1;
// Recursion depth cap (the guarding condition on line 2362 is elided);
// beyond the limit we conservatively claim only the sign bit itself.
2363 return 1; // Limit search depth.
2365 switch (Op.getOpcode()) {
// AssertSext/AssertZext carry the "real" width of the value in operand 1.
2367 case ISD::AssertSext:
2368 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2369 return VTBits-Tmp+1;
2370 case ISD::AssertZext:
2371 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2374 case ISD::Constant: {
// A constant's sign-bit replication count is exact.
2375 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2376 return Val.getNumSignBits();
// sext adds (dst width - src width) copies of the sign bit on top of
// whatever the source already had.
2379 case ISD::SIGN_EXTEND:
2381 VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
2382 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2384 case ISD::SIGN_EXTEND_INREG:
2385 // Max of the input and what this extends.
2387 cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
2390 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2391 return std::max(Tmp, Tmp2);
// Presumably the SRA case (label elided): a constant arithmetic shift
// right duplicates the sign bit C more times, capped at the value width.
2394 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2395 // SRA X, C -> adds C sign bits.
2396 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2397 Tmp += C->getZExtValue();
2398 if (Tmp > VTBits) Tmp = VTBits;
// Presumably the SHL case (label elided): shifting left discards sign
// copies from the top.
2402 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2403 // shl destroys sign bits.
2404 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2405 if (C->getZExtValue() >= VTBits || // Bad shift.
2406 C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
2407 return Tmp - C->getZExtValue();
2412 case ISD::XOR: // NOT is handled here.
2413 // Logical binary ops preserve the number of sign bits at the worst.
2414 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2416 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2417 FirstAnswer = std::min(Tmp, Tmp2);
2418 // We computed what we know about the sign bits as our first
2419 // answer. Now proceed to the generic code that uses
2420 // computeKnownBits, and pick whichever answer is better.
// Presumably the SELECT case (label elided): the result is one of the two
// value operands, so it has at least the smaller of their sign-bit counts.
2425 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2426 if (Tmp == 1) return 1; // Early out.
2427 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2428 return std::min(Tmp, Tmp2);
// Arithmetic-with-overflow style nodes (labels elided): only result #1,
// the boolean flag, is analyzed here.
2436 if (Op.getResNo() != 1)
2438 // The boolean result conforms to getBooleanContents. Fall through.
2439 // If setcc returns 0/-1, all bits are sign bits.
2440 // We know that we have an integer-based boolean since these operations
2441 // are only available for integer.
2442 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2443 TargetLowering::ZeroOrNegativeOneBooleanContent)
2447 // If setcc returns 0/-1, all bits are sign bits.
2448 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2449 TargetLowering::ZeroOrNegativeOneBooleanContent)
// Rotate by a constant amount (ROTL/ROTR labels elided above).
2454 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2455 unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2457 // Handle rotate right by N like a rotate left by 32-N.
2458 if (Op.getOpcode() == ISD::ROTR)
2459 RotAmt = (VTBits-RotAmt) & (VTBits-1);
2461 // If we aren't rotating out all of the known-in sign bits, return the
2462 // number that are left. This handles rotl(sext(x), 1) for example.
2463 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2464 if (Tmp > RotAmt+1) return Tmp-RotAmt;
2468 // Add can have at most one carry bit. Thus we know that the output
2469 // is, at worst, one more bit than the inputs.
2470 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2471 if (Tmp == 1) return 1; // Early out.
2473 // Special case decrementing a value (ADD X, -1):
2474 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2475 if (CRHS->isAllOnesValue()) {
2476 APInt KnownZero, KnownOne;
2477 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2479 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2481 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2484 // If we are subtracting one from a positive number, there is no carry
2485 // out of the result.
2486 if (KnownZero.isNegative())
2490 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2491 if (Tmp2 == 1) return 1;
2492 return std::min(Tmp, Tmp2)-1;
// Presumably the SUB case (label elided): special-case negation (SUB 0, X).
2495 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2496 if (Tmp2 == 1) return 1;
2499 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2500 if (CLHS->isNullValue()) {
2501 APInt KnownZero, KnownOne;
2502 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2503 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2505 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2508 // If the input is known to be positive (the sign bit is known clear),
2509 // the output of the NEG has the same number of sign bits as the input.
2510 if (KnownZero.isNegative())
2513 // Otherwise, we treat this like a SUB.
2516 // Sub can have at most one carry bit. Thus we know that the output
2517 // is, at worst, one more bit than the inputs.
2518 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2519 if (Tmp == 1) return 1; // Early out.
2520 return std::min(Tmp, Tmp2)-1;
2522 // FIXME: it's tricky to do anything useful for this, but it is an important
2523 // case for targets like X86.
// Extending loads (case label elided): SEXTLOAD replicates the memory
// type's sign bit; ZEXTLOAD guarantees the top bits are zero.
2527 // If we are looking at the loaded value of the SDNode.
2528 if (Op.getResNo() == 0) {
2529 // Handle LOADX separately here. EXTLOAD case will fallthrough.
2530 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2531 unsigned ExtType = LD->getExtensionType();
2534 case ISD::SEXTLOAD: // '17' bits known
2535 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2536 return VTBits-Tmp+1;
2537 case ISD::ZEXTLOAD: // '16' bits known
2538 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2544 // Allow the target to implement this method for its nodes.
2545 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2546 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2547 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2548 Op.getOpcode() == ISD::INTRINSIC_VOID) {
2549 unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
2550 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2553 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2554 // use this information.
2555 APInt KnownZero, KnownOne;
2556 computeKnownBits(Op, KnownZero, KnownOne, Depth);
2559 if (KnownZero.isNegative()) { // sign bit is 0
2561 } else if (KnownOne.isNegative()) { // sign bit is 1;
// 'Mask' is assigned from KnownZero/KnownOne in elided lines above
// (2560/2562); here its run of identical top bits is counted via CLZ.
2568 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
2569 // the number of identical bits in the top of the input value.
2571 Mask <<= Mask.getBitWidth()-VTBits;
2572 // Return # leading zeros. We use 'min' here in case Val was zero before
2573 // shifting. We don't want to return '64' as for an i32 "0".
2574 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
2577 /// isBaseWithConstantOffset - Return true if the specified operand is an
2578 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
2579 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
2580 /// semantics as an ADD. This handles the equivalence:
2581 /// X|Cst == X+Cst iff X&Cst = 0.
2582 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
// Must be an ADD or OR whose right operand is a constant; anything else
// cannot be viewed as base+offset. (The early 'return false' on line 2585
// is elided in this listing.)
2583 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
2584 !isa<ConstantSDNode>(Op.getOperand(1)))
// For OR, the ADD equivalence only holds when no set bit of the constant
// can overlap a possibly-set bit of the base (X|Cst == X+Cst iff
// X&Cst == 0); MaskedValueIsZero proves that. (The trailing returns on
// lines 2590+ are elided in this listing.)
2587 if (Op.getOpcode() == ISD::OR &&
2588 !MaskedValueIsZero(Op.getOperand(0),
2589 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
/// isKnownNeverNaN - Return true when Op is provably never a NaN.
/// Conservative: a 'false' answer only means "could not prove it".
2596 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
2597 // If we're told that NaNs won't happen, assume they won't.
2598 if (getTarget().Options.NoNaNsFPMath)
2601 // If the value is a constant, we can obviously see if it is a NaN or not.
2602 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2603 return !C->getValueAPF().isNaN();
2605 // TODO: Recognize more cases here.
/// isKnownNeverZero - Return true when Op is provably never zero.
/// Conservative: a 'false' answer only means "could not prove it".
2610 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
2611 // If the value is a constant, we can obviously see if it is a zero or not.
2612 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2613 return !C->isZero();
2615 // TODO: Recognize more cases here.
2616 switch (Op.getOpcode()) {
// Case label elided in this listing (lines 2617-2618); the body below
// checks an OR-with-nonzero-constant style pattern: if operand 1 is a
// nonzero constant, the result cannot be zero.
2619 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2620 return !C->isNullValue();
/// isEqualTo - Return true when A and B are provably the same value,
/// treating -0.0 and +0.0 FP constants as equal.
2627 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
2628 // Check the obvious case.
2629 if (A == B) return true;
2631 // Account for negative and positive zero: two FP-constant zeros compare
2631 // equal even though their node identities (and sign bits) may differ.
2632 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
2633 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
2634 if (CA->isZero() && CB->isZero()) return true;
2636 // Otherwise they may not be equal.
2640 /// getNode - Gets or creates the specified node.
/// Nullary form: a node identified solely by (Opcode, VT). Looks the node
/// up in the CSE map first so structurally identical nodes are shared;
/// allocates and inserts a fresh SDNode only on a miss.
2642 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
2643 FoldingSetNodeID ID;
2644 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
// CSE hit: reuse the existing node. (The 'void *IP' declaration on line
// 2645 is elided in this listing.)
2646 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2647 return SDValue(E, 0);
// Miss: allocate from the DAG's node allocator and register for future CSE.
2649 SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
2650 DL.getDebugLoc(), getVTList(VT));
2651 CSEMap.InsertNode(N, IP);
2654 return SDValue(N, 0);
/// getNode - Unary form: gets or creates the node (Opcode, VT, Operand).
/// Tries, in order: constant folding of integer / FP / constant-vector
/// operands, generic algebraic simplifications per opcode, and finally a
/// CSE-map lookup before allocating a new UnarySDNode.
/// NOTE(review): this listing is elided — embedded original line numbers
/// are non-contiguous, so several case labels, 'break's and closing braces
/// are not visible here.
2657 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
2658 EVT VT, SDValue Operand) {
2659 // Constant fold unary operations with an integer constant operand. Even
2660 // opaque constant will be folded, because the folding of unary operations
2661 // doesn't create new constants with different values. Nevertheless, the
2662 // opaque flag is preserved during folding to prevent future folding with
2664 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
2665 const APInt &Val = C->getAPIntValue();
// (The 'switch (Opcode)' on line 2666-2667 is elided in this listing.)
2668 case ISD::SIGN_EXTEND:
2669 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT,
2670 C->isTargetOpcode(), C->isOpaque());
2671 case ISD::ANY_EXTEND:
2672 case ISD::ZERO_EXTEND:
// anyext of a constant is materialized as zext (elided TRUNCATE label at
// line 2673 presumably shares this body — confirm against full source).
2674 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT,
2675 C->isTargetOpcode(), C->isOpaque());
2676 case ISD::UINT_TO_FP:
2677 case ISD::SINT_TO_FP: {
// Convert the integer constant through APFloat with round-to-nearest-even.
2678 APFloat apf(EVTToAPFloatSemantics(VT),
2679 APInt::getNullValue(VT.getSizeInBits()));
2680 (void)apf.convertFromAPInt(Val,
2681 Opcode==ISD::SINT_TO_FP,
2682 APFloat::rmNearestTiesToEven);
2683 return getConstantFP(apf, VT);
// BITCAST of an integer constant to an FP type of the same width (case
// label elided): reinterpret the bits via the matching IEEE semantics.
2686 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
2687 return getConstantFP(APFloat(APFloat::IEEEhalf, Val), VT);
2688 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2689 return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
2690 else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2691 return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
// Bit-counting folds (BSWAP/CTPOP/CTLZ/CTTZ labels partially elided); the
// opaque flag continues onto the elided second line of each call.
2694 return getConstant(Val.byteSwap(), VT, C->isTargetOpcode(),
2697 return getConstant(Val.countPopulation(), VT, C->isTargetOpcode(),
2700 case ISD::CTLZ_ZERO_UNDEF:
2701 return getConstant(Val.countLeadingZeros(), VT, C->isTargetOpcode(),
2704 case ISD::CTTZ_ZERO_UNDEF:
2705 return getConstant(Val.countTrailingZeros(), VT, C->isTargetOpcode(),
2710 // Constant fold unary operations with a floating point constant operand.
2711 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
2712 APFloat V = C->getValueAPF(); // make copy
// FNEG/FABS bodies (labels elided) mutate the copy then re-materialize it.
2716 return getConstantFP(V, VT);
2719 return getConstantFP(V, VT);
// FCEIL (label elided): round toward +inf; only exact/inexact results fold.
2721 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
2722 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2723 return getConstantFP(V, VT);
// FTRUNC (label elided): round toward zero.
2727 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
2728 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2729 return getConstantFP(V, VT);
// FFLOOR (label elided): round toward -inf.
2733 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
2734 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2735 return getConstantFP(V, VT);
2738 case ISD::FP_EXTEND: {
2740 // This can return overflow, underflow, or inexact; we don't care.
2741 // FIXME need to be more flexible about rounding mode.
2742 (void)V.convert(EVTToAPFloatSemantics(VT),
2743 APFloat::rmNearestTiesToEven, &ignored);
2744 return getConstantFP(V, VT);
2746 case ISD::FP_TO_SINT:
2747 case ISD::FP_TO_UINT: {
// 'x' is an integerPart buffer declared on an elided line (2748-2749);
// the static_assert guarantees one part is wide enough for 64-bit results.
2750 static_assert(integerPartWidth >= 64, "APFloat parts too small!");
2751 // FIXME need to be more flexible about rounding mode.
2752 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2753 Opcode==ISD::FP_TO_SINT,
2754 APFloat::rmTowardZero, &ignored);
2755 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
2757 APInt api(VT.getSizeInBits(), x);
2758 return getConstant(api, VT);
// BITCAST of an FP constant to a same-width integer type (label elided).
2761 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
2762 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), VT);
2763 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2764 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
2765 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2766 return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
2771 // Constant fold unary operations with a vector integer operand.
2772 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand.getNode())) {
2773 if (BV->isConstant()) {
2776 // FIXME: Entirely reasonable to perform folding of other unary
2777 // operations here as the need arises.
2779 case ISD::UINT_TO_FP:
2780 case ISD::SINT_TO_FP: {
// Fold elementwise: convert each BUILD_VECTOR element through the scalar
// path above, then rebuild the vector.
2781 SmallVector<SDValue, 8> Ops;
2782 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
2783 SDValue OpN = BV->getOperand(i);
2784 // Let the above scalar folding handle the conversion of each
2786 OpN = getNode(ISD::SINT_TO_FP, DL, VT.getVectorElementType(),
2790 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
// No constant folding applied; fall through to generic per-opcode
// simplifications on the (possibly non-constant) operand.
2796 unsigned OpOpcode = Operand.getNode()->getOpcode();
2798 case ISD::TokenFactor:
2799 case ISD::MERGE_VALUES:
2800 case ISD::CONCAT_VECTORS:
2801 return Operand; // Factor, merge or concat of one node? No need.
2802 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
2803 case ISD::FP_EXTEND:
2804 assert(VT.isFloatingPoint() &&
2805 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
2806 if (Operand.getValueType() == VT) return Operand; // noop conversion.
2807 assert((!VT.isVector() ||
2808 VT.getVectorNumElements() ==
2809 Operand.getValueType().getVectorNumElements()) &&
2810 "Vector element count mismatch!");
2811 if (Operand.getOpcode() == ISD::UNDEF)
2812 return getUNDEF(VT);
2814 case ISD::SIGN_EXTEND:
2815 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2816 "Invalid SIGN_EXTEND!");
2817 if (Operand.getValueType() == VT) return Operand; // noop extension
2818 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2819 "Invalid sext node, dst < src!");
2820 assert((!VT.isVector() ||
2821 VT.getVectorNumElements() ==
2822 Operand.getValueType().getVectorNumElements()) &&
2823 "Vector element count mismatch!");
// sext(sext x) -> sext x; sext(zext x) -> zext x (zext already fixes the
// top bits, so the outer extension kind collapses to the inner one).
2824 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
2825 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2826 else if (OpOpcode == ISD::UNDEF)
2827 // sext(undef) = 0, because the top bits will all be the same.
2828 return getConstant(0, VT);
2830 case ISD::ZERO_EXTEND:
2831 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2832 "Invalid ZERO_EXTEND!");
2833 if (Operand.getValueType() == VT) return Operand; // noop extension
2834 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2835 "Invalid zext node, dst < src!");
2836 assert((!VT.isVector() ||
2837 VT.getVectorNumElements() ==
2838 Operand.getValueType().getVectorNumElements()) &&
2839 "Vector element count mismatch!");
2840 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
2841 return getNode(ISD::ZERO_EXTEND, DL, VT,
2842 Operand.getNode()->getOperand(0));
2843 else if (OpOpcode == ISD::UNDEF)
2844 // zext(undef) = 0, because the top bits will be zero.
2845 return getConstant(0, VT);
2847 case ISD::ANY_EXTEND:
2848 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2849 "Invalid ANY_EXTEND!");
2850 if (Operand.getValueType() == VT) return Operand; // noop extension
2851 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2852 "Invalid anyext node, dst < src!");
2853 assert((!VT.isVector() ||
2854 VT.getVectorNumElements() ==
2855 Operand.getValueType().getVectorNumElements()) &&
2856 "Vector element count mismatch!");
2858 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2859 OpOpcode == ISD::ANY_EXTEND)
2860 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
2861 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2862 else if (OpOpcode == ISD::UNDEF)
2863 return getUNDEF(VT);
2865 // (ext (trunx x)) -> x
2866 if (OpOpcode == ISD::TRUNCATE) {
2867 SDValue OpOp = Operand.getNode()->getOperand(0);
2868 if (OpOp.getValueType() == VT)
// TRUNCATE case (label on line 2872 elided in this listing):
2873 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2874 "Invalid TRUNCATE!");
2875 if (Operand.getValueType() == VT) return Operand; // noop truncate
2876 assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
2877 "Invalid truncate node, src < dst!");
2878 assert((!VT.isVector() ||
2879 VT.getVectorNumElements() ==
2880 Operand.getValueType().getVectorNumElements()) &&
2881 "Vector element count mismatch!");
2882 if (OpOpcode == ISD::TRUNCATE)
2883 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2884 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2885 OpOpcode == ISD::ANY_EXTEND) {
2886 // If the source is smaller than the dest, we still need an extend.
2887 if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
2888 .bitsLT(VT.getScalarType()))
2889 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2890 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
2891 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
// trunc(ext x) where x already has type VT -> x.
2892 return Operand.getNode()->getOperand(0);
2894 if (OpOpcode == ISD::UNDEF)
2895 return getUNDEF(VT);
// BITCAST case (label elided in this listing):
2898 // Basic sanity checking.
2899 assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
2900 && "Cannot BITCAST between types of different sizes!");
2901 if (VT == Operand.getValueType()) return Operand; // noop conversion.
2902 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
2903 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
2904 if (OpOpcode == ISD::UNDEF)
2905 return getUNDEF(VT);
2907 case ISD::SCALAR_TO_VECTOR:
2908 assert(VT.isVector() && !Operand.getValueType().isVector() &&
2909 (VT.getVectorElementType() == Operand.getValueType() ||
2910 (VT.getVectorElementType().isInteger() &&
2911 Operand.getValueType().isInteger() &&
2912 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
2913 "Illegal SCALAR_TO_VECTOR node!");
2914 if (OpOpcode == ISD::UNDEF)
2915 return getUNDEF(VT);
2916 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
2917 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
2918 isa<ConstantSDNode>(Operand.getOperand(1)) &&
2919 Operand.getConstantOperandVal(1) == 0 &&
2920 Operand.getOperand(0).getValueType() == VT)
2921 return Operand.getOperand(0);
// FNEG case (label elided in this listing):
2924 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
2925 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
2926 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
2927 Operand.getNode()->getOperand(0));
2928 if (OpOpcode == ISD::FNEG) // --X -> X
2929 return Operand.getNode()->getOperand(0);
// FABS case (label elided in this listing):
2932 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
2933 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
// No simplification matched: look the node up in the CSE map (except for
// glue-producing nodes, which must stay distinct) and allocate on a miss.
2938 SDVTList VTs = getVTList(VT);
2939 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
2940 FoldingSetNodeID ID;
2941 SDValue Ops[1] = { Operand };
2942 AddNodeIDNode(ID, Opcode, VTs, Ops);
2944 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2945 return SDValue(E, 0);
2947 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
2948 DL.getDebugLoc(), VTs, Operand);
2949 CSEMap.InsertNode(N, IP);
// Glue nodes bypass the CSE map entirely.
2951 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
2952 DL.getDebugLoc(), VTs, Operand);
2956 return SDValue(N, 0);
// FoldConstantArithmetic - Constant fold the integer binary operation
// 'Opcode' over two constant operands, which may be scalar ConstantSDNodes
// or BUILD_VECTORs of constants; returns the folded constant (or splat /
// BUILD_VECTOR) of type VT.
// NOTE(review): this chunk is mangled -- each code line carries a stray
// leading file line number and several interior lines (early returns, the
// opcode switch header, case labels, closing braces) are elided. Code text
// is kept byte-identical; only comments are added.
2959 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
2960 SDNode *Cst1, SDNode *Cst2) {
2961 // If the opcode is a target-specific ISD node, there's nothing we can
2962 // do here and the operand rules may not line up with the below, so
// (elided) bail out for target opcodes.
2964 if (Opcode >= ISD::BUILTIN_OP_END)
// Collect the constant operand pairs to fold (element-wise for vectors).
2967 SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
2968 SmallVector<SDValue, 4> Outputs;
2969 EVT SVT = VT.getScalarType();
// Opaque constants must not be folded away (elided bail-out follows).
2971 ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
2972 ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
2973 if (Scalar1 && Scalar2 && (Scalar1->isOpaque() || Scalar2->isOpaque()))
2976 if (Scalar1 && Scalar2)
2977 // Scalar instruction.
2978 Inputs.push_back(std::make_pair(Scalar1, Scalar2));
2980 // For vectors extract each constant element into Inputs so we can constant
2981 // fold them individually.
2982 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
2983 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
2987 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
2989 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
2990 ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
2991 ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
2992 if (!V1 || !V2) // Not a constant, bail.
2995 if (V1->isOpaque() || V2->isOpaque())
2998 // Avoid BUILD_VECTOR nodes that perform implicit truncation.
2999 // FIXME: This is valid and could be handled by truncating the APInts.
3000 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
3003 Inputs.push_back(std::make_pair(V1, V2));
3007 // We have a number of constant values, constant fold them element by element.
// Per-opcode APInt folds; the switch header, case labels and the
// division-by-zero early returns after each getBoolValue() check are elided.
3008 for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
3009 const APInt &C1 = Inputs[I].first->getAPIntValue();
3010 const APInt &C2 = Inputs[I].second->getAPIntValue();
3014 Outputs.push_back(getConstant(C1 + C2, SVT));
3017 Outputs.push_back(getConstant(C1 - C2, SVT));
3020 Outputs.push_back(getConstant(C1 * C2, SVT));
3023 if (!C2.getBoolValue())
3025 Outputs.push_back(getConstant(C1.udiv(C2), SVT));
3028 if (!C2.getBoolValue())
3030 Outputs.push_back(getConstant(C1.urem(C2), SVT));
3033 if (!C2.getBoolValue())
3035 Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
3038 if (!C2.getBoolValue())
3040 Outputs.push_back(getConstant(C1.srem(C2), SVT));
3043 Outputs.push_back(getConstant(C1 & C2, SVT));
3046 Outputs.push_back(getConstant(C1 | C2, SVT));
3049 Outputs.push_back(getConstant(C1 ^ C2, SVT));
3052 Outputs.push_back(getConstant(C1 << C2, SVT));
3055 Outputs.push_back(getConstant(C1.lshr(C2), SVT));
3058 Outputs.push_back(getConstant(C1.ashr(C2), SVT));
3061 Outputs.push_back(getConstant(C1.rotl(C2), SVT));
3064 Outputs.push_back(getConstant(C1.rotr(C2), SVT));
// NOTE(review): the message string is &&'d into the second disjunct only;
// behaviorally harmless (the string is truthy) but worth confirming against
// upstream before restyling.
3071 assert((Scalar1 && Scalar2) || (VT.getVectorNumElements() == Outputs.size() &&
3072 "Expected a scalar or vector!"));
3074 // Handle the scalar case first.
3076 return Outputs.back();
3078 // We may have a vector type but a scalar result. Create a splat.
3079 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
3081 // Build a big vector out of the scalar elements we generated.
3082 return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
// getNode (binary form) - Build or CSE a two-operand node, performing a
// large set of per-opcode simplifications and constant folds first.
// nuw/nsw/exact carry wrap/exactness flags for the arithmetic opcodes.
// NOTE(review): chunk is mangled -- stray leading line numbers on every
// code line and many elided lines (the opcode switch header, several case
// labels, early returns, closing braces). Code text is byte-identical;
// comments only are added.
3085 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
3086 SDValue N2, bool nuw, bool nsw, bool exact) {
3087 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3088 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
// (elided) switch (Opcode) { ... begins here.
3091 case ISD::TokenFactor:
3092 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
3093 N2.getValueType() == MVT::Other && "Invalid token factor!");
3094 // Fold trivial token factors.
3095 if (N1.getOpcode() == ISD::EntryToken) return N2;
3096 if (N2.getOpcode() == ISD::EntryToken) return N1;
3097 if (N1 == N2) return N1;
3099 case ISD::CONCAT_VECTORS:
3100 // Concat of UNDEFs is UNDEF.
3101 if (N1.getOpcode() == ISD::UNDEF &&
3102 N2.getOpcode() == ISD::UNDEF)
3103 return getUNDEF(VT);
3105 // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
3106 // one big BUILD_VECTOR.
3107 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3108 N2.getOpcode() == ISD::BUILD_VECTOR) {
3109 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3110 N1.getNode()->op_end());
3111 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3112 return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
// (elided case label, presumably ISD::AND -- TODO confirm against upstream)
3116 assert(VT.isInteger() && "This operator does not apply to FP types!");
3117 assert(N1.getValueType() == N2.getValueType() &&
3118 N1.getValueType() == VT && "Binary operator types must match!");
3119 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
3120 // worth handling here.
3121 if (N2C && N2C->isNullValue())
3123 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
// (elided case labels for the OR/XOR/ADD/SUB-style opcodes)
3130 assert(VT.isInteger() && "This operator does not apply to FP types!");
3131 assert(N1.getValueType() == N2.getValueType() &&
3132 N1.getValueType() == VT && "Binary operator types must match!");
3133 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
3134 // it's worth handling here.
3135 if (N2C && N2C->isNullValue())
3145 assert(VT.isInteger() && "This operator does not apply to FP types!");
3146 assert(N1.getValueType() == N2.getValueType() &&
3147 N1.getValueType() == VT && "Binary operator types must match!");
// FP arithmetic identity folds that are only legal under unsafe-FP-math
// (x+0.0, x-0.0, x*1.0 etc. are not exact in general due to signed zeros).
3154 if (getTarget().Options.UnsafeFPMath) {
3155 if (Opcode == ISD::FADD) {
3157 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
3158 if (CFP->getValueAPF().isZero())
3161 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
3162 if (CFP->getValueAPF().isZero())
3164 } else if (Opcode == ISD::FSUB) {
3166 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
3167 if (CFP->getValueAPF().isZero())
3169 } else if (Opcode == ISD::FMUL) {
3170 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
3173 // If the first operand isn't the constant, try the second
3175 CFP = dyn_cast<ConstantFPSDNode>(N2);
3182 return SDValue(CFP,0);
3184 if (CFP->isExactlyValue(1.0))
3189 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
3190 assert(N1.getValueType() == N2.getValueType() &&
3191 N1.getValueType() == VT && "Binary operator types must match!");
3193 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
3194 assert(N1.getValueType() == VT &&
3195 N1.getValueType().isFloatingPoint() &&
3196 N2.getValueType().isFloatingPoint() &&
3197 "Invalid FCOPYSIGN!");
// (elided shift/rotate case labels)
3204 assert(VT == N1.getValueType() &&
3205 "Shift operators return type must be the same as their first arg");
3206 assert(VT.isInteger() && N2.getValueType().isInteger() &&
3207 "Shifts only work on integers");
3208 assert((!VT.isVector() || VT == N2.getValueType()) &&
3209 "Vector shift amounts must be in the same as their first arg");
3210 // Verify that the shift amount VT is big enough to hold valid shift
3211 // amounts. This catches things like trying to shift an i1024 value by an
3212 // i8, which is easy to fall into in generic code that uses
3213 // TLI.getShiftAmount().
3214 assert(N2.getValueType().getSizeInBits() >=
3215 Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
3216 "Invalid use of small shift amount with oversized value!");
3218 // Always fold shifts of i1 values so the code generator doesn't need to
3219 // handle them. Since we know the size of the shift has to be less than the
3220 // size of the value, the shift/rotate count is guaranteed to be zero.
3223 if (N2C && N2C->isNullValue())
3226 case ISD::FP_ROUND_INREG: {
3227 EVT EVT = cast<VTSDNode>(N2)->getVT();
3228 assert(VT == N1.getValueType() && "Not an inreg round!");
3229 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
3230 "Cannot FP_ROUND_INREG integer types");
3231 assert(EVT.isVector() == VT.isVector() &&
3232 "FP_ROUND_INREG type should be vector iff the operand "
3234 assert((!EVT.isVector() ||
3235 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3236 "Vector element counts must match in FP_ROUND_INREG");
3237 assert(EVT.bitsLE(VT) && "Not rounding down!");
3239 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
// (elided case label, presumably ISD::FP_ROUND -- TODO confirm)
3243 assert(VT.isFloatingPoint() &&
3244 N1.getValueType().isFloatingPoint() &&
3245 VT.bitsLE(N1.getValueType()) &&
3246 isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
3247 if (N1.getValueType() == VT) return N1; // noop conversion.
3249 case ISD::AssertSext:
3250 case ISD::AssertZext: {
3251 EVT EVT = cast<VTSDNode>(N2)->getVT();
3252 assert(VT == N1.getValueType() && "Not an inreg extend!");
3253 assert(VT.isInteger() && EVT.isInteger() &&
3254 "Cannot *_EXTEND_INREG FP types");
3255 assert(!EVT.isVector() &&
3256 "AssertSExt/AssertZExt type should be the vector element type "
3257 "rather than the vector type!");
3258 assert(EVT.bitsLE(VT) && "Not extending!");
3259 if (VT == EVT) return N1; // noop assertion.
3262 case ISD::SIGN_EXTEND_INREG: {
3263 EVT EVT = cast<VTSDNode>(N2)->getVT();
3264 assert(VT == N1.getValueType() && "Not an inreg extend!");
3265 assert(VT.isInteger() && EVT.isInteger() &&
3266 "Cannot *_EXTEND_INREG FP types");
3267 assert(EVT.isVector() == VT.isVector() &&
3268 "SIGN_EXTEND_INREG type should be vector iff the operand "
3270 assert((!EVT.isVector() ||
3271 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3272 "Vector element counts must match in SIGN_EXTEND_INREG");
3273 assert(EVT.bitsLE(VT) && "Not extending!");
3274 if (EVT == VT) return N1; // Not actually extending
// Constant-fold SIGN_EXTEND_INREG: shift left then arithmetic-shift right
// to replicate the sign bit of the inner (FromBits-wide) value.
3277 APInt Val = N1C->getAPIntValue();
3278 unsigned FromBits = EVT.getScalarType().getSizeInBits();
3279 Val <<= Val.getBitWidth()-FromBits;
3280 Val = Val.ashr(Val.getBitWidth()-FromBits);
3281 return getConstant(Val, VT);
3285 case ISD::EXTRACT_VECTOR_ELT:
3286 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
3287 if (N1.getOpcode() == ISD::UNDEF)
3288 return getUNDEF(VT);
3290 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
3291 // expanding copies of large vectors from registers.
3293 N1.getOpcode() == ISD::CONCAT_VECTORS &&
3294 N1.getNumOperands() > 0) {
3296 N1.getOperand(0).getValueType().getVectorNumElements();
3297 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3298 N1.getOperand(N2C->getZExtValue() / Factor),
3299 getConstant(N2C->getZExtValue() % Factor,
3300 N2.getValueType()));
3303 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3304 // expanding large vector constants.
3305 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3306 SDValue Elt = N1.getOperand(N2C->getZExtValue());
3308 if (VT != Elt.getValueType())
3309 // If the vector element type is not legal, the BUILD_VECTOR operands
3310 // are promoted and implicitly truncated, and the result implicitly
3311 // extended. Make that explicit here.
3312 Elt = getAnyExtOrTrunc(Elt, DL, VT);
3317 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3318 // operations are lowered to scalars.
3319 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3320 // If the indices are the same, return the inserted element else
3321 // if the indices are known different, extract the element from
3322 // the original vector.
3323 SDValue N1Op2 = N1.getOperand(2);
3324 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
3326 if (N1Op2C && N2C) {
3327 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3328 if (VT == N1.getOperand(1).getValueType())
3329 return N1.getOperand(1);
3331 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3334 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3338 case ISD::EXTRACT_ELEMENT:
3339 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3340 assert(!N1.getValueType().isVector() && !VT.isVector() &&
3341 (N1.getValueType().isInteger() == VT.isInteger()) &&
3342 N1.getValueType() != VT &&
3343 "Wrong types for EXTRACT_ELEMENT!");
3345 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3346 // 64-bit integers into 32-bit parts. Instead of building the extract of
3347 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3348 if (N1.getOpcode() == ISD::BUILD_PAIR)
3349 return N1.getOperand(N2C->getZExtValue());
3351 // EXTRACT_ELEMENT of a constant int is also very common.
3352 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
3353 unsigned ElementSize = VT.getSizeInBits();
3354 unsigned Shift = ElementSize * N2C->getZExtValue();
3355 APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
3356 return getConstant(ShiftedVal.trunc(ElementSize), VT);
3359 case ISD::EXTRACT_SUBVECTOR: {
3361 if (VT.isSimple() && N1.getValueType().isSimple()) {
3362 assert(VT.isVector() && N1.getValueType().isVector() &&
3363 "Extract subvector VTs must be a vectors!");
3364 assert(VT.getVectorElementType() ==
3365 N1.getValueType().getVectorElementType() &&
3366 "Extract subvector VTs must have the same element type!");
3367 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
3368 "Extract subvector must be from larger vector to smaller vector!");
// 'Index' is declared on an elided line (presumably SDValue Index = N2).
3370 if (isa<ConstantSDNode>(Index.getNode())) {
3371 assert((VT.getVectorNumElements() +
3372 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3373 <= N1.getValueType().getVectorNumElements())
3374 && "Extract subvector overflow!");
3377 // Trivial extraction.
3378 if (VT.getSimpleVT() == N1.getSimpleValueType())
// (elided end of switch)
3385 // Perform trivial constant folding.
3386 SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
3387 if (SV.getNode()) return SV;
3389 // Canonicalize constant to RHS if commutative.
3390 if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
3391 std::swap(N1C, N2C);
3395 // Constant fold FP operations.
3396 bool HasFPExceptions = TLI->hasFloatingPointExceptions();
3397 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
3398 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
3400 if (!N2CFP && isCommutativeBinOp(Opcode)) {
3401 // Canonicalize constant to RHS if commutative.
3402 std::swap(N1CFP, N2CFP);
// APFloat folding per FP opcode; results are used only when FP exceptions
// are disabled or the operation did not raise invalid-op / div-by-zero.
3405 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
3406 APFloat::opStatus s;
3409 s = V1.add(V2, APFloat::rmNearestTiesToEven);
3410 if (!HasFPExceptions || s != APFloat::opInvalidOp)
3411 return getConstantFP(V1, VT);
3414 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
3415 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
3416 return getConstantFP(V1, VT);
3419 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
3420 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
3421 return getConstantFP(V1, VT);
3424 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
3425 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
3426 s!=APFloat::opDivByZero)) {
3427 return getConstantFP(V1, VT);
3431 s = V1.mod(V2, APFloat::rmNearestTiesToEven);
3432 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
3433 s!=APFloat::opDivByZero)) {
3434 return getConstantFP(V1, VT);
3437 case ISD::FCOPYSIGN:
3439 return getConstantFP(V1, VT);
3444 if (Opcode == ISD::FP_ROUND) {
3445 APFloat V = N1CFP->getValueAPF(); // make copy
3447 // This can return overflow, underflow, or inexact; we don't care.
3448 // FIXME need to be more flexible about rounding mode.
3449 (void)V.convert(EVTToAPFloatSemantics(VT),
3450 APFloat::rmNearestTiesToEven, &ignored);
3451 return getConstantFP(V, VT);
3455 // Canonicalize an UNDEF to the RHS, even over a constant.
3456 if (N1.getOpcode() == ISD::UNDEF) {
3457 if (isCommutativeBinOp(Opcode)) {
3461 case ISD::FP_ROUND_INREG:
3462 case ISD::SIGN_EXTEND_INREG:
3468 return N1; // fold op(undef, arg2) -> undef
3476 return getConstant(0, VT); // fold op(undef, arg2) -> 0
3477 // For vectors, we can't easily build an all zero vector, just return
3484 // Fold a bunch of operators when the RHS is undef.
3485 if (N2.getOpcode() == ISD::UNDEF) {
3488 if (N1.getOpcode() == ISD::UNDEF)
3489 // Handle undef ^ undef -> 0 special case. This is a common
3491 return getConstant(0, VT);
3501 return N2; // fold op(arg1, undef) -> undef
3507 if (getTarget().Options.UnsafeFPMath)
3515 return getConstant(0, VT); // fold op(arg1, undef) -> 0
3516 // For vectors, we can't easily build an all zero vector, just return
3521 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
3522 // For vectors, we can't easily build an all one vector, just return
3530 // Memoize this node if possible.
// CSE: look the node up in CSEMap first; only Glue-producing nodes are
// never shared. The InsertPos ('IP') declaration is on an elided line.
3532 SDVTList VTs = getVTList(VT);
3533 const bool BinOpHasFlags = isBinOpWithFlags(Opcode);
3534 if (VT != MVT::Glue) {
3535 SDValue Ops[] = {N1, N2};
3536 FoldingSetNodeID ID;
3537 AddNodeIDNode(ID, Opcode, VTs, Ops);
3539 AddBinaryNodeIDCustom(ID, Opcode, nuw, nsw, exact);
3541 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3542 return SDValue(E, 0);
3544 N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
3546 CSEMap.InsertNode(N, IP);
3549 N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
3553 return SDValue(N, 0);
// getNode (ternary form) - Build or CSE a three-operand node, applying
// per-opcode simplifications (FMA constant folding, CONCAT_VECTORS of
// BUILD_VECTORs, SETCC folding, SELECT folds, INSERT_SUBVECTOR checks).
// NOTE(review): chunk is mangled -- stray leading line numbers and elided
// lines (switch header, some case labels, braces). Code kept byte-identical.
3556 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3557 SDValue N1, SDValue N2, SDValue N3) {
3558 // Perform various simplifications.
3559 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
// (elided switch header; this arm constant-folds an FMA-style opcode)
3562 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3563 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3564 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
3565 if (N1CFP && N2CFP && N3CFP) {
3566 APFloat V1 = N1CFP->getValueAPF();
3567 const APFloat &V2 = N2CFP->getValueAPF();
3568 const APFloat &V3 = N3CFP->getValueAPF();
3569 APFloat::opStatus s =
3570 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
3571 if (s != APFloat::opInvalidOp)
3572 return getConstantFP(V1, VT);
3576 case ISD::CONCAT_VECTORS:
3577 // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
3578 // one big BUILD_VECTOR.
3579 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3580 N2.getOpcode() == ISD::BUILD_VECTOR &&
3581 N3.getOpcode() == ISD::BUILD_VECTOR) {
3582 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3583 N1.getNode()->op_end());
3584 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3585 Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
3586 return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
// (elided case label, presumably ISD::SETCC -- TODO confirm)
3590 // Use FoldSetCC to simplify SETCC's.
3591 SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
3592 if (Simp.getNode()) return Simp;
// (elided SELECT case label; N1C guards the constant-condition fold)
3597 if (N1C->getZExtValue())
3598 return N2; // select true, X, Y -> X
3599 return N3; // select false, X, Y -> Y
3602 if (N2 == N3) return N2; // select C, X, X -> X
3604 case ISD::VECTOR_SHUFFLE:
3605 llvm_unreachable("should use getVectorShuffle constructor!");
3606 case ISD::INSERT_SUBVECTOR: {
// 'Index' is declared on an elided line (presumably SDValue Index = N3).
3608 if (VT.isSimple() && N1.getValueType().isSimple()
3609 && N2.getValueType().isSimple()) {
3610 assert(VT.isVector() && N1.getValueType().isVector() &&
3611 N2.getValueType().isVector() &&
3612 "Insert subvector VTs must be a vectors");
3613 assert(VT == N1.getValueType() &&
3614 "Dest and insert subvector source types must match!");
3615 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
3616 "Insert subvector must be from smaller vector to larger vector!");
3617 if (isa<ConstantSDNode>(Index.getNode())) {
3618 assert((N2.getValueType().getVectorNumElements() +
3619 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3620 <= VT.getVectorNumElements())
3621 && "Insert subvector overflow!");
3624 // Trivial insertion.
3625 if (VT.getSimpleVT() == N2.getSimpleValueType())
// (elided case label, presumably ISD::BITCAST)
3631 // Fold bit_convert nodes from a type to themselves.
3632 if (N1.getValueType() == VT)
3637 // Memoize node if it doesn't produce a flag.
// CSE lookup mirrors the binary getNode; 'N' and 'IP' declared on elided
// lines.
3639 SDVTList VTs = getVTList(VT);
3640 if (VT != MVT::Glue) {
3641 SDValue Ops[] = { N1, N2, N3 };
3642 FoldingSetNodeID ID;
3643 AddNodeIDNode(ID, Opcode, VTs, Ops);
3645 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3646 return SDValue(E, 0);
3648 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3649 DL.getDebugLoc(), VTs, N1, N2, N3);
3650 CSEMap.InsertNode(N, IP);
3652 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3653 DL.getDebugLoc(), VTs, N1, N2, N3);
3657 return SDValue(N, 0);
3660 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3661 SDValue N1, SDValue N2, SDValue N3,
3663 SDValue Ops[] = { N1, N2, N3, N4 };
3664 return getNode(Opcode, DL, VT, Ops);
3667 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3668 SDValue N1, SDValue N2, SDValue N3,
3669 SDValue N4, SDValue N5) {
3670 SDValue Ops[] = { N1, N2, N3, N4, N5 };
3671 return getNode(Opcode, DL, VT, Ops);
3674 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3675 /// the incoming stack arguments to be loaded from the stack.
// Walks every user of the entry node looking for loads from frame indices
// and chains them all (plus the given Chain) into one TokenFactor.
// NOTE(review): stray leading line numbers from chunk mangling are kept
// byte-identical; comments only are added.
3676 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3677 SmallVector<SDValue, 8> ArgChains;
3679 // Include the original chain at the beginning of the list. When this is
3680 // used by target LowerCall hooks, this helps legalize find the
3681 // CALLSEQ_BEGIN node.
3682 ArgChains.push_back(Chain);
3684 // Add a chain value for each stack argument.
// Negative frame indices appear to denote incoming (fixed) argument slots
// -- TODO confirm against MachineFrameInfo docs.
3685 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3686 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3687 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3688 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3689 if (FI->getIndex() < 0)
3690 ArgChains.push_back(SDValue(L, 1));
3692 // Build a tokenfactor for all the chains.
3693 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
3696 /// getMemsetValue - Vectorized representation of the memset value
// Builds a VT-wide value by splatting the i8 memset byte 'Value': for a
// constant byte, APInt::getSplat produces the pattern directly; otherwise
// the byte is zero-extended and multiplied by 0x0101... at runtime.
// NOTE(review): stray leading line numbers kept byte-identical; the final
// return of the non-constant path is elided from this chunk.
3698 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3700 assert(Value.getOpcode() != ISD::UNDEF);
3702 unsigned NumBits = VT.getScalarType().getSizeInBits();
3703 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3704 assert(C->getAPIntValue().getBitWidth() == 8);
3705 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
// Integer result for integer VTs, otherwise bit-pattern as an FP constant.
3707 return DAG.getConstant(Val, VT);
3708 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3711 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3713 // Use a multiplication with 0x010101... to extend the input to the
// (elided) ...required width.
3715 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3716 Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
3722 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
3723 /// used when a memcpy is turned into a memset when the source is a constant
// (elided) ...string ("").
// Builds a constant of type VT from up to sizeof(VT) bytes of Str,
// honoring target endianness; returns a null SDValue when materializing
// the immediate would cost more than the load it replaces.
// NOTE(review): stray leading line numbers kept byte-identical.
3725 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3726 const TargetLowering &TLI, StringRef Str) {
3727 // Handle vector with all elements zero.
// (elided) guard: if (Str.empty()) { ... zero-fill paths below.
3730 return DAG.getConstant(0, VT);
3731 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
3732 return DAG.getConstantFP(0.0, VT);
3733 else if (VT.isVector()) {
// Zero vectors are built as an integer zero of matching width and bitcast.
3734 unsigned NumElts = VT.getVectorNumElements();
3735 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3736 return DAG.getNode(ISD::BITCAST, dl, VT,
3737 DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3740 llvm_unreachable("Expected type!");
3743 assert(!VT.isVector() && "Can't handle vector type here!");
3744 unsigned NumVTBits = VT.getSizeInBits();
3745 unsigned NumVTBytes = NumVTBits / 8;
3746 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
// Pack the string bytes into an APInt in target byte order.
3748 APInt Val(NumVTBits, 0);
3749 if (TLI.isLittleEndian()) {
3750 for (unsigned i = 0; i != NumBytes; ++i)
3751 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3753 for (unsigned i = 0; i != NumBytes; ++i)
3754 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
3757 // If the "cost" of materializing the integer immediate is less than the cost
3758 // of a load, then it is cost effective to turn the load into the immediate.
3759 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
3760 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
3761 return DAG.getConstant(Val, VT);
3762 return SDValue(nullptr, 0);
3765 /// getMemBasePlusOffset - Returns base and offset node for the
3767 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3768 SelectionDAG &DAG) {
3769 EVT VT = Base.getValueType();
3770 return DAG.getNode(ISD::ADD, dl,
3771 VT, Base, DAG.getConstant(Offset, VT));
3774 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
// Recognizes either a direct GlobalAddress or GlobalAddress+Constant (with
// the constant becoming the byte offset into the string). On success, Str
// is set via getConstantStringInfo.
// NOTE(review): stray leading line numbers kept byte-identical; the
// null-check / early-return between 3785 and 3790 is elided.
3776 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3777 unsigned SrcDelta = 0;
3778 GlobalAddressSDNode *G = nullptr;
3779 if (Src.getOpcode() == ISD::GlobalAddress)
3780 G = cast<GlobalAddressSDNode>(Src);
3781 else if (Src.getOpcode() == ISD::ADD &&
3782 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3783 Src.getOperand(1).getOpcode() == ISD::Constant) {
3784 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3785 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3790 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
3793 /// FindOptimalMemOpLowering - Determines the optimal series memory ops
3794 /// to replace the memset / memcpy. Return true if the number of memory ops
3795 /// is below the threshold. It returns the types of the sequence of
3796 /// memory ops to perform memset / memcpy by reference.
// NOTE(review): stray leading line numbers from chunk mangling are kept
// byte-identical; several parameters (IsMemset, ZeroMemset, MemcpyStrSrc,
// AllowOverlap, DAG, ...) and various interior lines are elided.
3797 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3798 unsigned Limit, uint64_t Size,
3799 unsigned DstAlign, unsigned SrcAlign,
3805 const TargetLowering &TLI) {
3806 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3807 "Expecting memcpy / memset source to meet alignment requirement!");
3808 // If 'SrcAlign' is zero, that means the memory operation does not need to
3809 // load the value, i.e. memset or memcpy from constant string. Otherwise,
3810 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3811 // is the specified alignment of the memory operation. If it is zero, that
3812 // means it's possible to change the alignment of the destination.
3813 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3814 // not need to be loaded.
3815 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3816 IsMemset, ZeroMemset, MemcpyStrSrc,
3817 DAG.getMachineFunction());
// The target declined to pick a type: choose one from the destination
// alignment ('AS' is declared on an elided line).
3819 if (VT == MVT::Other) {
3821 if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
3822 TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) {
3823 VT = TLI.getPointerTy();
3825 switch (DstAlign & 7) {
3826 case 0: VT = MVT::i64; break;
3827 case 4: VT = MVT::i32; break;
3828 case 2: VT = MVT::i16; break;
3829 default: VT = MVT::i8; break;
// Shrink to the largest legal integer type ('LVT' initialized on an
// elided line).
3834 while (!TLI.isTypeLegal(LVT))
3835 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3836 assert(LVT.isInteger());
// Emit ops largest-first, shrinking VT as the remaining Size gets smaller.
3842 unsigned NumMemOps = 0;
3844 unsigned VTSize = VT.getSizeInBits() / 8;
3845 while (VTSize > Size) {
3846 // For now, only use non-vector load / store's for the left-over pieces.
3851 if (VT.isVector() || VT.isFloatingPoint()) {
3852 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3853 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3854 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3856 else if (NewVT == MVT::i64 &&
3857 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3858 TLI.isSafeMemOpType(MVT::f64)) {
3859 // i64 is usually not legal on 32-bit targets, but f64 may be.
3867 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3868 if (NewVT == MVT::i8)
3870 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3872 NewVTSize = NewVT.getSizeInBits() / 8;
3874 // If the new VT cannot cover all of the remaining bits, then consider
3875 // issuing a (or a pair of) unaligned and overlapping load / store.
3876 // FIXME: Only does this for 64-bit or more since we don't have proper
3877 // cost model for unaligned load / store.
3880 if (NumMemOps && AllowOverlap &&
3881 VTSize >= 8 && NewVTSize < Size &&
3882 TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign, &Fast) && Fast)
// Respect the op-count budget; exceeding Limit fails the lowering.
3890 if (++NumMemOps > Limit)
3893 MemOps.push_back(VT);
// getMemcpyLoadsAndStores - Lower a small memcpy into an inline sequence
// of loads and stores, chosen by FindOptimalMemOpLowering; returns the
// TokenFactor of all emitted store chains.
// NOTE(review): stray leading line numbers from chunk mangling are kept
// byte-identical; some parameters (AlwaysInline) and interior lines
// (early returns, 'Str'/'OptSize' declarations, braces) are elided.
3900 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3901 SDValue Chain, SDValue Dst,
3902 SDValue Src, uint64_t Size,
3903 unsigned Align, bool isVol,
3905 MachinePointerInfo DstPtrInfo,
3906 MachinePointerInfo SrcPtrInfo) {
3907 // Turn a memcpy of undef to nop.
3908 if (Src.getOpcode() == ISD::UNDEF)
3911 // Expand memcpy to a series of load and store ops if the size operand falls
3912 // below a certain threshold.
3913 // TODO: In the AlwaysInline case, if the size is big then generate a loop
3914 // rather than maybe a humongous number of loads and stores.
3915 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3916 std::vector<EVT> MemOps;
3917 bool DstAlignCanChange = false;
3918 MachineFunction &MF = DAG.getMachineFunction();
3919 MachineFrameInfo *MFI = MF.getFrameInfo();
3921 MF.getFunction()->getAttributes().
3922 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
// A non-fixed frame-index destination lets us raise its alignment below.
3923 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3924 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3925 DstAlignCanChange = true;
3926 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3927 if (Align > SrcAlign)
3930 bool CopyFromStr = isMemSrcFromString(Src, Str);
3931 bool isZeroStr = CopyFromStr && Str.empty();
3932 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
3934 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3935 (DstAlignCanChange ? 0 : Align),
3936 (isZeroStr ? 0 : SrcAlign),
3937 false, false, CopyFromStr, true, DAG, TLI))
3940 if (DstAlignCanChange) {
3941 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3942 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3944 // Don't promote to an alignment that would require dynamic stack
// (elided) ...realignment.
3946 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
3947 if (!TRI->needsStackRealignment(MF))
3948 while (NewAlign > Align &&
3949 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
3952 if (NewAlign > Align) {
3953 // Give the stack frame object a larger alignment if needed.
3954 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3955 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
// Emit one load/store (or immediate store) per entry in MemOps.
3960 SmallVector<SDValue, 8> OutChains;
3961 unsigned NumMemOps = MemOps.size();
3962 uint64_t SrcOff = 0, DstOff = 0;
3963 for (unsigned i = 0; i != NumMemOps; ++i) {
3965 unsigned VTSize = VT.getSizeInBits() / 8;
3966 SDValue Value, Store;
3968 if (VTSize > Size) {
3969 // Issuing an unaligned load / store pair that overlaps with the previous
3970 // pair. Adjust the offset accordingly.
3971 assert(i == NumMemOps-1 && i != 0);
3972 SrcOff -= VTSize - Size;
3973 DstOff -= VTSize - Size;
// Constant-string source: try to store an immediate instead of loading.
3977 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
3978 // It's unlikely a store of a vector immediate can be done in a single
3979 // instruction. It would require a load from a constantpool first.
3980 // We only handle zero vectors here.
3981 // FIXME: Handle other cases where store of vector immediate is done in
3982 // a single instruction.
3983 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
3984 if (Value.getNode())
3985 Store = DAG.getStore(Chain, dl, Value,
3986 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3987 DstPtrInfo.getWithOffset(DstOff), isVol,
3991 if (!Store.getNode()) {
3992 // The type might not be legal for the target. This should only happen
3993 // if the type is smaller than a legal type, as on PPC, so the right
3994 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
3995 // to Load/Store if NVT==VT.
3996 // FIXME does the case above also need this?
3997 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3998 assert(NVT.bitsGE(VT));
3999 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
4000 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
4001 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
4002 false, MinAlign(SrcAlign, SrcOff));
4003 Store = DAG.getTruncStore(Chain, dl, Value,
4004 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4005 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
4008 OutChains.push_back(Store);
// Chain all emitted stores together so ordering is preserved.
4014 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
/// Lower a memmove into an explicit sequence of loads and stores.
///
/// Unlike the memcpy expansion, every load is issued before any store and the
/// load chains are merged with a TokenFactor; the stores then all depend on
/// that merged chain.  This ordering is what keeps the expansion correct when
/// the source and destination ranges overlap.
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
                                        SDValue Chain, SDValue Dst,
                                        SDValue Src, uint64_t Size,
                                        unsigned Align, bool isVol,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
  // Turn a memmove of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)

  // Expand memmove to a series of load and store ops if the size operand falls
  // below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;  // Value types chosen for the individual copies.
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize = MF.getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
  // A non-fixed stack object's alignment may be raised below to enable wider
  // memory operations.
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);

  // Ask the target for the best sequence of value types to copy with; bail
  // out of inline expansion if it declines.
  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align), SrcAlign,
                                false, false, false, false, DAG, TLI))

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);

  uint64_t SrcOff = 0, DstOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  // First pass: issue all of the loads.
  for (unsigned i = 0; i < NumMemOps; i++) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    Value = DAG.getLoad(VT, dl, Chain,
                        getMemBasePlusOffset(Src, SrcOff, dl, DAG),
                        SrcPtrInfo.getWithOffset(SrcOff), isVol,
                        false, false, SrcAlign);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
  // Merge the load chains so every store depends on every load having run.
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  // Second pass: issue all of the stores.
  for (unsigned i = 0; i < NumMemOps; i++) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    Store = DAG.getStore(Chain, dl, LoadValues[i],
                         getMemBasePlusOffset(Dst, DstOff, dl, DAG),
                         DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
    OutChains.push_back(Store);

  // The TokenFactor over the store chains is the new control dependence root.
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4096 /// \brief Lower the call to 'memset' intrinsic function into a series of store
4099 /// \param DAG Selection DAG where lowered code is placed.
4100 /// \param dl Link to corresponding IR location.
4101 /// \param Chain Control flow dependency.
4102 /// \param Dst Pointer to destination memory location.
4103 /// \param Src Value of byte to write into the memory.
4104 /// \param Size Number of bytes to write.
4105 /// \param Align Alignment of the destination in bytes.
4106 /// \param isVol True if destination is volatile.
4107 /// \param DstPtrInfo IR information on the memory pointer.
4108 /// \returns New head in the control flow, if lowering was successful, empty
4109 /// SDValue otherwise.
4111 /// The function tries to replace 'llvm.memset' intrinsic with several store
4112 /// operations and value calculation code. This is usually profitable for small
static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
                               SDValue Chain, SDValue Dst,
                               SDValue Src, uint64_t Size,
                               unsigned Align, bool isVol,
                               MachinePointerInfo DstPtrInfo) {
  // Turn a memset of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)

  // Expand memset to a series of load/store ops if the size operand
  // falls below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;  // Value types chosen for the individual stores.
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize = MF.getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
  // A non-fixed stack object's alignment may be raised below.
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  // Zero fills can use wider (including vector) stores cheaply.
  isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
  if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
                                Size, (DstAlignCanChange ? 0 : Align), 0,
                                true, IsZeroVal, false, true, DAG, TLI))

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);

  SmallVector<SDValue, 8> OutChains;
  uint64_t DstOff = 0;
  unsigned NumMemOps = MemOps.size();

  // Find the largest store and generate the bit pattern for it.
  EVT LargestVT = MemOps[0];
  for (unsigned i = 1; i < NumMemOps; i++)
    if (MemOps[i].bitsGT(LargestVT))
      LargestVT = MemOps[i];
  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);

  for (unsigned i = 0; i < NumMemOps; i++) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      DstOff -= VTSize - Size;

    // If this store is smaller than the largest store see whether we can get
    // the smaller value for free with a truncate.
    SDValue Value = MemSetValue;
    if (VT.bitsLT(LargestVT)) {
      if (!LargestVT.isVector() && !VT.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
      // Otherwise recompute the fill pattern at the smaller width.
      Value = getMemsetValue(Src, VT, DAG, dl);
    assert(Value.getValueType() == VT && "Value with wrong type.");
    SDValue Store = DAG.getStore(Chain, dl, Value,
                                 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
                                 DstPtrInfo.getWithOffset(DstOff),
                                 isVol, false, Align);
    OutChains.push_back(Store);
    DstOff += VT.getSizeInBits() / 8;

  // Combine all store chains into the returned control dependence.
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
/// Emit code for a memcpy, trying in order: inline load/store expansion
/// within the target-specified limits, target-specific lowering, and
/// finally a call to the libc memcpy.
SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
                                SDValue Src, SDValue Size,
                                unsigned Align, bool isVol, bool AlwaysInline,
                                MachinePointerInfo DstPtrInfo,
                                MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memcpy to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  // Memcpy with size zero? Just return the original chain.
  if (ConstantSize->isNullValue())

  SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                           ConstantSize->getZExtValue(),Align,
                                           isVol, false, DstPtrInfo, SrcPtrInfo);
  if (Result.getNode())

  // Then check to see if we should lower the memcpy with target-specific
  // code. If the target chooses to do this, this is the next best.
  TSI->EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
                               isVol, AlwaysInline, DstPtrInfo, SrcPtrInfo);
  if (Result.getNode())

  // If we really need inline code and the target declined to provide it,
  // use a (potentially long) sequence of loads and stores.
  assert(ConstantSize && "AlwaysInline requires a constant size!");
  return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                 ConstantSize->getZExtValue(), Align, isVol,
                                 true, DstPtrInfo, SrcPtrInfo);

  // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
  // memcpy is not guaranteed to be safe. libc memcpys aren't required to
  // respect volatile, so they may do things like read or write memory
  // beyond the given memory regions. But fixing this isn't easy, and most
  // people don't care.

  // Emit a library call.  Both pointer arguments and the size are passed as
  // target intptr-sized values; the result is discarded.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
               Type::getVoidTy(*getContext()),
               getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
                                 TLI->getPointerTy()), std::move(Args), 0)
    .setDiscardResult();
  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);

  // Only the chain result of the call is relevant.
  return CallResult.second;
/// Emit code for a memmove, trying in order: inline load/store expansion,
/// target-specific lowering, and finally a call to the libc memmove.
SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
                                 SDValue Src, SDValue Size,
                                 unsigned Align, bool isVol,
                                 MachinePointerInfo DstPtrInfo,
                                 MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memmove to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  // Memmove with size zero? Just return the original chain.
  if (ConstantSize->isNullValue())

  getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
                           ConstantSize->getZExtValue(), Align, isVol,
                           false, DstPtrInfo, SrcPtrInfo);
  if (Result.getNode())

  // Then check to see if we should lower the memmove with target-specific
  // code. If the target chooses to do this, this is the next best.
  SDValue Result = TSI->EmitTargetCodeForMemmove(
      *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
  if (Result.getNode())

  // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
  // not be safe. See memcpy above for more details.

  // Emit a library call.  Arguments are passed as intptr-sized values and the
  // call result is discarded.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
               Type::getVoidTy(*getContext()),
               getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
                                 TLI->getPointerTy()), std::move(Args), 0)
    .setDiscardResult();
  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);

  // Only the chain result of the call is relevant.
  return CallResult.second;
/// Emit code for a memset, trying in order: inline store expansion,
/// target-specific lowering, and finally a call to the libc memset.
SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
                                SDValue Src, SDValue Size,
                                unsigned Align, bool isVol,
                                MachinePointerInfo DstPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memset to stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  // Memset with size zero? Just return the original chain.
  if (ConstantSize->isNullValue())

  getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
                  Align, isVol, DstPtrInfo);

  if (Result.getNode())

  // Then check to see if we should lower the memset with target-specific
  // code. If the target chooses to do this, this is the next best.
  SDValue Result = TSI->EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src,
                                                Size, Align, isVol, DstPtrInfo);
  if (Result.getNode())

  // Emit a library call.  Note that unlike memcpy/memmove the second
  // argument (the fill value) keeps its own type rather than intptr.
  Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Dst; Entry.Ty = IntPtrTy;
  Args.push_back(Entry);
  Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
  Args.push_back(Entry);
  Entry.Ty = IntPtrTy;
  Args.push_back(Entry);

  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
               Type::getVoidTy(*getContext()),
               getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
                                 TLI->getPointerTy()), std::move(Args), 0)
    .setDiscardResult();

  // Only the chain result of the call is relevant.
  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
/// Core builder for all atomic nodes: CSE-uniques the node on opcode, value
/// types, operands, memory VT, and address space, then constructs an
/// AtomicSDNode if no equivalent node exists yet.
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO,
                                AtomicOrdering SuccessOrdering,
                                AtomicOrdering FailureOrdering,
                                SynchronizationScope SynchScope) {
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  AddNodeIDNode(ID, Opcode, VTList, Ops);
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    // Reuse the existing node; keep the best-known alignment on its MMO.
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);

  // Allocate the operands array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the allocator is released.
  // If the number of operands is less than 5 we use AtomicSDNode's internal
  unsigned NumOps = Ops.size();
  SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps)
  SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
                                               dl.getDebugLoc(), VTList, MemVT,
                                               Ops.data(), DynOps, NumOps, MMO,
                                               SuccessOrdering, FailureOrdering,
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
4405 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4406 SDVTList VTList, ArrayRef<SDValue> Ops,
4407 MachineMemOperand *MMO,
4408 AtomicOrdering Ordering,
4409 SynchronizationScope SynchScope) {
4410 return getAtomic(Opcode, dl, MemVT, VTList, Ops, MMO, Ordering,
4411 Ordering, SynchScope);
4414 SDValue SelectionDAG::getAtomicCmpSwap(
4415 unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs, SDValue Chain,
4416 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
4417 unsigned Alignment, AtomicOrdering SuccessOrdering,
4418 AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
4419 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
4420 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
4421 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4423 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4424 Alignment = getEVTAlignment(MemVT);
4426 MachineFunction &MF = getMachineFunction();
4428 // FIXME: Volatile isn't really correct; we should keep track of atomic
4429 // orderings in the memoperand.
4430 unsigned Flags = MachineMemOperand::MOVolatile;
4431 Flags |= MachineMemOperand::MOLoad;
4432 Flags |= MachineMemOperand::MOStore;
4434 MachineMemOperand *MMO =
4435 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4437 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO,
4438 SuccessOrdering, FailureOrdering, SynchScope);
4441 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT,
4442 SDVTList VTs, SDValue Chain, SDValue Ptr,
4443 SDValue Cmp, SDValue Swp,
4444 MachineMemOperand *MMO,
4445 AtomicOrdering SuccessOrdering,
4446 AtomicOrdering FailureOrdering,
4447 SynchronizationScope SynchScope) {
4448 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
4449 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
4450 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4452 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4453 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO,
4454 SuccessOrdering, FailureOrdering, SynchScope);
/// Build an atomic node, synthesizing a MachineMemOperand from the given IR
/// pointer value and alignment.
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDValue Ptr, SDValue Val,
                                const Value* PtrVal,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  // An atomic store does not load. An atomic load does not store.
  // (An atomicrmw obviously both loads and stores.)
  // For now, atomics are considered to be volatile always, and they are
  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  unsigned Flags = MachineMemOperand::MOVolatile;
  if (Opcode != ISD::ATOMIC_STORE)
    Flags |= MachineMemOperand::MOLoad;
  if (Opcode != ISD::ATOMIC_LOAD)
    Flags |= MachineMemOperand::MOStore;

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
                            MemVT.getStoreSize(), Alignment);

  return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
                   Ordering, SynchScope);
/// Build an atomic read-modify-write (or atomic store) node from a
/// preconstructed MachineMemOperand.
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDValue Ptr, SDValue Val,
                                MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
          Opcode == ISD::ATOMIC_LOAD_SUB ||
          Opcode == ISD::ATOMIC_LOAD_AND ||
          Opcode == ISD::ATOMIC_LOAD_OR ||
          Opcode == ISD::ATOMIC_LOAD_XOR ||
          Opcode == ISD::ATOMIC_LOAD_NAND ||
          Opcode == ISD::ATOMIC_LOAD_MIN ||
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");

  EVT VT = Val.getValueType();

  // An atomic store produces only a chain; every other atomic op also
  // produces the loaded/previous value.
  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
                                               getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Val};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
/// Build an ATOMIC_LOAD node (chain + address operands only).
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                EVT VT, SDValue Chain,
                                MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

  // Results: the loaded value plus the output chain.
  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl) {
  // A single operand needs no wrapper node.
  if (Ops.size() == 1)

  // Collect the result type of every operand for the node's VT list.
  SmallVector<EVT, 4> VTs;
  VTs.reserve(Ops.size());
  for (unsigned i = 0; i < Ops.size(); ++i)
    VTs.push_back(Ops[i].getValueType());
  return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
// Build a memory-accessing intrinsic node, synthesizing the MachineMemOperand
// from pointer info, alignment, and the Read/Write/Vol flags.
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
                                  ArrayRef<SDValue> Ops,
                                  EVT MemVT, MachinePointerInfo PtrInfo,
                                  unsigned Align, bool Vol,
                                  bool ReadMem, bool WriteMem, unsigned Size) {
  if (Align == 0) // Ensure that codegen never sees alignment 0
    Align = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  // Translate the boolean access kinds into memoperand flags.
    Flags |= MachineMemOperand::MOStore;
    Flags |= MachineMemOperand::MOLoad;
    Flags |= MachineMemOperand::MOVolatile;
  // A Size of zero means "use the memory VT's store size".
    Size = MemVT.getStoreSize();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);

  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
// Build a memory-accessing intrinsic node from a preconstructed
// MachineMemOperand, CSE-uniquing it unless it produces glue.
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
                                  ArrayRef<SDValue> Ops, EVT MemVT,
                                  MachineMemOperand *MMO) {
  assert((Opcode == ISD::INTRINSIC_VOID ||
          Opcode == ISD::INTRINSIC_W_CHAIN ||
          Opcode == ISD::PREFETCH ||
          Opcode == ISD::LIFETIME_START ||
          Opcode == ISD::LIFETIME_END ||
          (Opcode <= INT_MAX &&
           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
         "Opcode is not a memory-accessing opcode!");

  // Memoize the node unless it returns a flag.
  MemIntrinsicSDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
      // Reuse the existing node; keep the best-known alignment on its MMO.
      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      return SDValue(E, 0);

    N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
                                               dl.getDebugLoc(), VTList, Ops,
    CSEMap.InsertNode(N, IP);
    // Glue-producing nodes are never CSE'd.
    N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
                                               dl.getDebugLoc(), VTList, Ops,
  return SDValue(N, 0);
4604 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4605 /// MachinePointerInfo record from it. This is particularly useful because the
4606 /// code generator has many cases where it doesn't bother passing in a
4607 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4608 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4609 // If this is FI+Offset, we can model it.
4610 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4611 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4613 // If this is (FI+Offset1)+Offset2, we can model it.
4614 if (Ptr.getOpcode() != ISD::ADD ||
4615 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4616 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4617 return MachinePointerInfo();
4619 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4620 return MachinePointerInfo::getFixedStack(FI, Offset+
4621 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
4624 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4625 /// MachinePointerInfo record from it. This is particularly useful because the
4626 /// code generator has many cases where it doesn't bother passing in a
4627 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4628 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4629 // If the 'Offset' value isn't a constant, we can't handle this.
4630 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4631 return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4632 if (OffsetOp.getOpcode() == ISD::UNDEF)
4633 return InferPointerInfo(Ptr);
4634 return MachinePointerInfo();
// Fully general load builder: synthesizes the MachineMemOperand from the
// given pointer info, alignment, and access flags, then delegates to the
// MMO-taking overload.
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                      EVT VT, SDLoc dl, SDValue Chain,
                      SDValue Ptr, SDValue Offset,
                      MachinePointerInfo PtrInfo, EVT MemVT,
                      bool isVolatile, bool isNonTemporal, bool isInvariant,
                      unsigned Alignment, const AAMDNodes &AAInfo,
                      const MDNode *Ranges) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(VT);

  // Translate the boolean attributes into memoperand flags.
  unsigned Flags = MachineMemOperand::MOLoad;
    Flags |= MachineMemOperand::MOVolatile;
    Flags |= MachineMemOperand::MONonTemporal;
    Flags |= MachineMemOperand::MOInvariant;

  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(Ptr, Offset);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
  return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
// Core load builder: validates the extension kind against VT/MemVT,
// CSE-uniques the node, and constructs a LoadSDNode if none exists.
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                      EVT VT, SDLoc dl, SDValue Chain,
                      SDValue Ptr, SDValue Offset, EVT MemVT,
                      MachineMemOperand *MMO) {
    // Loading the full width is never an extension.
    ExtType = ISD::NON_EXTLOAD;
  } else if (ExtType == ISD::NON_EXTLOAD) {
    assert(VT == MemVT && "Non-extending load from different memory type!");
    // Extending load: the in-memory type must be strictly narrower, agree in
    // FP/int-ness, vector-ness, and element count.
    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
           "Should only be an extending load, not truncating!");
    assert(VT.isInteger() == MemVT.isInteger() &&
           "Cannot convert from FP to Int or Int -> FP!");
    assert(VT.isVector() == MemVT.isVector() &&
           "Cannot use trunc store to convert to or from a vector!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
           "Cannot use trunc store to change the number of vector elements!");

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
         "Unindexed load with an offset!");

  // Indexed loads produce the updated base pointer as an extra result.
  SDVTList VTs = Indexed ?
    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
                                     MMO->isNonTemporal(),
                                     MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    // Reuse the existing node; keep the best-known alignment on its MMO.
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
                                             dl.getDebugLoc(), VTs, AM, ExtType,
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
// Simple unindexed, non-extending load; the offset operand is undef.
SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachinePointerInfo PtrInfo,
                              bool isVolatile, bool isNonTemporal,
                              bool isInvariant, unsigned Alignment,
                              const AAMDNodes &AAInfo,
                              const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
// Simple unindexed, non-extending load from a preconstructed memoperand.
SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
// Unindexed extending load; the offset operand is undef.
SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 bool isVolatile, bool isNonTemporal,
                                 bool isInvariant, unsigned Alignment,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, MemVT, isVolatile, isNonTemporal, isInvariant,
// Unindexed extending load from a preconstructed memoperand.
SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
// Rebuild an existing (unindexed) load as an indexed load with the given
// base, offset, and addressing mode, preserving its memory attributes.
SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
                             SDValue Offset, ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
         "Load is already a indexed load!");
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
                 false, LD->getAlignment());
// Store builder that synthesizes the MachineMemOperand from pointer info,
// alignment, and access flags, then delegates to the MMO-taking overload.
SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
                               SDValue Ptr, MachinePointerInfo PtrInfo,
                               bool isVolatile, bool isNonTemporal,
                               unsigned Alignment, const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(Val.getValueType());

  // Translate the boolean attributes into memoperand flags.
  unsigned Flags = MachineMemOperand::MOStore;
    Flags |= MachineMemOperand::MOVolatile;
    Flags |= MachineMemOperand::MONonTemporal;

  // Recover frame-index pointer info when the caller did not supply any.
  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags,
                            Val.getValueType().getStoreSize(), Alignment,
  return getStore(Chain, dl, Val, Ptr, MMO);
// Core (non-truncating, unindexed) store builder: CSE-uniques the node and
// constructs a StoreSDNode if no equivalent node exists.
SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
                               SDValue Ptr, MachineMemOperand *MMO) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  // Unindexed stores carry an undef offset operand.
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
                                     MMO->isNonTemporal(), MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    // Reuse the existing node; keep the best-known alignment on its MMO.
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
                                              dl.getDebugLoc(), VTs,
                                              ISD::UNINDEXED, false, VT, MMO);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
// Truncating-store builder that synthesizes the MachineMemOperand from
// pointer info and flags, then delegates to the MMO-taking overload.
SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
                                    SDValue Ptr, MachinePointerInfo PtrInfo,
                                    EVT SVT,bool isVolatile, bool isNonTemporal,
                                    const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(SVT);

  // Translate the boolean attributes into memoperand flags.
  unsigned Flags = MachineMemOperand::MOStore;
    Flags |= MachineMemOperand::MOVolatile;
    Flags |= MachineMemOperand::MONonTemporal;

  // Recover frame-index pointer info when the caller did not supply any.
  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
// Core truncating-store builder: validates SVT against the value type,
// degrades to a plain store when no truncation is needed, and CSE-uniques
// the resulting StoreSDNode.
SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
                                    SDValue Ptr, EVT SVT,
                                    MachineMemOperand *MMO) {
  EVT VT = Val.getValueType();

  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
    // Same width: a truncating store degenerates to a normal store.
    return getStore(Chain, dl, Val, Ptr, MMO);

  // Genuine truncation: the in-memory type must be strictly narrower and
  // agree in FP/int-ness, vector-ness, and element count.
  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be a truncating store, not extending!");
  assert(VT.isInteger() == SVT.isInteger() &&
         "Can't do FP-INT conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  // Unindexed stores carry an undef offset operand.
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
                                     MMO->isNonTemporal(), MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    // Reuse the existing node; keep the best-known alignment on its MMO.
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
                                              dl.getDebugLoc(), VTs,
                                              ISD::UNINDEXED, true, SVT, MMO);
  CSEMap.InsertNode(N, IP);
  return SDValue(N, 0);
// getIndexedStore - Convert an existing unindexed store into a pre/post-
// indexed store (addressing mode AM) with the given Base and Offset, reusing
// the original store's chain, value, memory VT, and MMO. The result has two
// values: the updated base pointer and the output chain.
4898 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
4899 SDValue Offset, ISD::MemIndexedMode AM) {
4900 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
// Only an unindexed store (UNDEF offset) may be re-indexed.
4901 assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4902 "Store is already a indexed store!");
4903 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4904 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4905 FoldingSetNodeID ID;
4906 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
4907 ID.AddInteger(ST->getMemoryVT().getRawBits());
// Raw subclass data folds in the original store's flags/extension kind.
4908 ID.AddInteger(ST->getRawSubclassData());
4909 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4911 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4912 return SDValue(E, 0);
4914 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4915 dl.getDebugLoc(), VTs, AM,
4916 ST->isTruncatingStore(),
4918 ST->getMemOperand());
4919 CSEMap.InsertNode(N, IP);
4921 return SDValue(N, 0);
// getMaskedLoad - Build (or CSE-reuse) an ISD::MLOAD node: a load of VT from
// Ptr under lane Mask, with Src0 supplying the masked-off lanes.
4925 SelectionDAG::getMaskedLoad(EVT VT, SDLoc dl, SDValue Chain,
4926 SDValue Ptr, SDValue Mask, SDValue Src0,
4927 MachineMemOperand *MMO) {
4929 SDVTList VTs = getVTList(VT, MVT::Other);
4930 SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
4931 FoldingSetNodeID ID;
4932 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
4933 ID.AddInteger(VT.getRawBits());
4934 ID.AddInteger(encodeMemSDNodeFlags(ISD::NON_EXTLOAD, ISD::UNINDEXED,
4936 MMO->isNonTemporal(),
4937 MMO->isInvariant()));
4938 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
// Reuse an identical masked load if one exists, refining its alignment.
4940 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4941 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
4942 return SDValue(E, 0);
// The '4' is the operand count (Chain, Ptr, Mask, Src0).
4944 SDNode *N = new (NodeAllocator) MaskedLoadSDNode(dl.getIROrder(),
4945 dl.getDebugLoc(), Ops, 4, VTs,
4947 CSEMap.InsertNode(N, IP);
4949 return SDValue(N, 0);
// getMaskedStore - Build (or CSE-reuse) an ISD::MSTORE node: store Val
// through Ptr, writing only the lanes selected by Mask.
4952 SDValue SelectionDAG::getMaskedStore(SDValue Chain, SDLoc dl, SDValue Val,
4953 SDValue Ptr, SDValue Mask, MachineMemOperand *MMO) {
4954 assert(Chain.getValueType() == MVT::Other &&
4955 "Invalid chain type");
4956 EVT VT = Val.getValueType();
4957 SDVTList VTs = getVTList(MVT::Other);
// Note the operand order: the stored value comes last, unlike ISD::STORE.
4958 SDValue Ops[] = { Chain, Ptr, Mask, Val };
4959 FoldingSetNodeID ID;
4960 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
4961 ID.AddInteger(VT.getRawBits());
4962 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4963 MMO->isNonTemporal(), MMO->isInvariant()));
4964 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4966 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4967 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
4968 return SDValue(E, 0);
// The '4' is the operand count (Chain, Ptr, Mask, Val).
4970 SDNode *N = new (NodeAllocator) MaskedStoreSDNode(dl.getIROrder(),
4971 dl.getDebugLoc(), Ops, 4,
4973 CSEMap.InsertNode(N, IP);
4975 return SDValue(N, 0);
// getVAArg - Build an ISD::VAARG node reading a VT value from the va_list at
// Ptr; the alignment is encoded as a target constant i32 operand.
4978 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
4979 SDValue Chain, SDValue Ptr,
4982 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4983 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
// getNode (SDUse-array form) - Dispatch small operand counts to the fixed-
// arity overloads; otherwise copy the SDUses into SDValues and delegate.
4986 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4987 ArrayRef<SDUse> Ops) {
4988 switch (Ops.size()) {
4989 case 0: return getNode(Opcode, DL, VT);
4990 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
4991 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4992 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4996 // Copy from an SDUse array into an SDValue array for use with
4997 // the regular getNode logic.
4998 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
4999 return getNode(Opcode, DL, VT, NewOps);
// getNode (single-result, variadic form) - Fast-path 0..3 operands to the
// fixed-arity overloads, sanity-check opcode-specific operand contracts,
// then CSE and allocate the node.
// NOTE(review): several structural lines are missing from this chunk (the
// switch headers on NumOps/Opcode, the BR_CC case label, "SDNode *N", the
// "void *IP" declaration, and closing braces) — comments reflect only the
// visible logic.
5002 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
5003 ArrayRef<SDValue> Ops) {
5004 unsigned NumOps = Ops.size();
5006 case 0: return getNode(Opcode, DL, VT);
5007 case 1: return getNode(Opcode, DL, VT, Ops[0]);
5008 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
5009 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
// SELECT_CC: (lhs, rhs, true, false, cc) — the compared pair and the
// selected pair must each agree in type, and the result type matches the
// selected values.
5015 case ISD::SELECT_CC: {
5016 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
5017 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
5018 "LHS and RHS of condition must have same type!");
5019 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5020 "True and False arms of SelectCC must have same type!");
5021 assert(Ops[2].getValueType() == VT &&
5022 "select_cc node must be of same type as true and false value!");
5026 assert(NumOps == 5 && "BR_CC takes 5 operands!");
5027 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5028 "LHS/RHS of comparison should match types!");
5035 SDVTList VTs = getVTList(VT);
// Nodes producing MVT::Glue are never CSE'd.
5037 if (VT != MVT::Glue) {
5038 FoldingSetNodeID ID;
5039 AddNodeIDNode(ID, Opcode, VTs, Ops);
5042 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5043 return SDValue(E, 0);
5045 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5047 CSEMap.InsertNode(N, IP);
5049 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5054 return SDValue(N, 0);
// getNode (result-type-list form) - Intern the result types into an SDVTList
// and delegate.
5057 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
5058 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
5059 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
// getNode (multi-result SDVTList form) - Core node builder for nodes with
// more than one result. Folds *_PARTS shifts whose amount is redundantly
// masked/extended, then CSEs (unless the last result is Glue) and allocates
// an arity-specialized node.
// NOTE(review): this chunk is missing lines: the switch header and the
// N1/N2/N3 bindings used by the *_PARTS folding, the "void *IP" and
// "SDNode *N" declarations, the "if (NumOps == 1)" headers on both
// allocation ladders, and closing braces. Comments cover visible logic only.
5062 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5063 ArrayRef<SDValue> Ops) {
// Single-result lists go through the simpler EVT-based path.
5064 if (VTList.NumVTs == 1)
5065 return getNode(Opcode, DL, VTList.VTs[0], Ops);
5069 // FIXME: figure out how to safely handle things like
5070 // int foo(int x) { return 1 << (x & 255); }
5071 // int bar() { return foo(256); }
5072 case ISD::SRA_PARTS:
5073 case ISD::SRL_PARTS:
5074 case ISD::SHL_PARTS:
// A sign_extend_inreg of the shift amount (wider than i1) can't change the
// bits the shifter looks at, so look through it.
5075 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
5076 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
5077 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5078 else if (N3.getOpcode() == ISD::AND)
5079 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
5080 // If the and is only masking out bits that cannot effect the shift,
5081 // eliminate the and.
// *2 because a PARTS shift operates on a double-width value.
5082 unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
5083 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
5084 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5090 // Memoize the node unless it returns a flag.
5092 unsigned NumOps = Ops.size();
5093 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5094 FoldingSetNodeID ID;
5095 AddNodeIDNode(ID, Opcode, VTList, Ops);
5097 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5098 return SDValue(E, 0);
// Arity-specialized node classes keep small operand lists inline.
5101 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
5102 DL.getDebugLoc(), VTList, Ops[0]);
5103 } else if (NumOps == 2) {
5104 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
5105 DL.getDebugLoc(), VTList, Ops[0],
5107 } else if (NumOps == 3) {
5108 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
5109 DL.getDebugLoc(), VTList, Ops[0],
5112 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5115 CSEMap.InsertNode(N, IP);
// Glue-producing nodes: same allocation ladder, but never memoized.
5118 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
5119 DL.getDebugLoc(), VTList, Ops[0]);
5120 } else if (NumOps == 2) {
5121 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
5122 DL.getDebugLoc(), VTList, Ops[0],
5124 } else if (NumOps == 3) {
5125 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
5126 DL.getDebugLoc(), VTList, Ops[0],
5129 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
5134 return SDValue(N, 0);
// getNode (SDVTList fixed-arity wrappers) - Thin forwarding overloads that
// pack 0..5 operands into an array and delegate to the ArrayRef form.
5137 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
5138 return getNode(Opcode, DL, VTList, None);
5141 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5143 SDValue Ops[] = { N1 };
5144 return getNode(Opcode, DL, VTList, Ops);
5147 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5148 SDValue N1, SDValue N2) {
5149 SDValue Ops[] = { N1, N2 };
5150 return getNode(Opcode, DL, VTList, Ops);
5153 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5154 SDValue N1, SDValue N2, SDValue N3) {
5155 SDValue Ops[] = { N1, N2, N3 };
5156 return getNode(Opcode, DL, VTList, Ops);
5159 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5160 SDValue N1, SDValue N2, SDValue N3,
5162 SDValue Ops[] = { N1, N2, N3, N4 };
5163 return getNode(Opcode, DL, VTList, Ops);
5166 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5167 SDValue N1, SDValue N2, SDValue N3,
5168 SDValue N4, SDValue N5) {
5169 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5170 return getNode(Opcode, DL, VTList, Ops);
// getVTList overloads - Intern value-type lists so identical lists share one
// allocation. 2/3/4-type and ArrayRef forms hash the raw type bits into a
// FoldingSetNodeID and memoize through VTListMap.
// NOTE(review): lines are missing in this chunk (the "void *IP" declarations,
// the "if (!Result)" guards, and the Array[i] = VTi assignments between
// Allocate<EVT>() and the SDVTListNode construction).
5173 SDVTList SelectionDAG::getVTList(EVT VT) {
// Single-type lists use the static per-type cache, no map lookup needed.
5174 return makeVTList(SDNode::getValueTypeList(VT), 1);
5177 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
5178 FoldingSetNodeID ID;
5180 ID.AddInteger(VT1.getRawBits());
5181 ID.AddInteger(VT2.getRawBits());
5184 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5186 EVT *Array = Allocator.Allocate<EVT>(2);
5189 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
5190 VTListMap.InsertNode(Result, IP);
5192 return Result->getSDVTList();
5195 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
5196 FoldingSetNodeID ID;
5198 ID.AddInteger(VT1.getRawBits());
5199 ID.AddInteger(VT2.getRawBits());
5200 ID.AddInteger(VT3.getRawBits());
5203 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5205 EVT *Array = Allocator.Allocate<EVT>(3);
5209 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5210 VTListMap.InsertNode(Result, IP);
5212 return Result->getSDVTList();
5215 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
5216 FoldingSetNodeID ID;
5218 ID.AddInteger(VT1.getRawBits());
5219 ID.AddInteger(VT2.getRawBits());
5220 ID.AddInteger(VT3.getRawBits());
5221 ID.AddInteger(VT4.getRawBits());
5224 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5226 EVT *Array = Allocator.Allocate<EVT>(4);
5231 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5232 VTListMap.InsertNode(Result, IP);
5234 return Result->getSDVTList();
// General form: hash the count plus every type's raw bits.
5237 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
5238 unsigned NumVTs = VTs.size();
5239 FoldingSetNodeID ID;
5240 ID.AddInteger(NumVTs);
5241 for (unsigned index = 0; index < NumVTs; index++) {
5242 ID.AddInteger(VTs[index].getRawBits());
5246 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5248 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5249 std::copy(VTs.begin(), VTs.end(), Array);
5250 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5251 VTListMap.InsertNode(Result, IP);
5253 return Result->getSDVTList();
5257 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5258 /// specified operands. If the resultant node already exists in the DAG,
5259 /// this does not modify the specified node, instead it returns the node that
5260 /// already exists. If the resultant node does not exist in the DAG, the
5261 /// input node is returned. As a degenerate case, if you specify the same
5262 /// input operands as the node already has, the input node is returned.
// NOTE(review): the "return Existing;" body of the if at 5271 and the final
// "return N;" appear to be on lines missing from this chunk.
5263 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
5264 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5266 // Check to see if there is no change.
5267 if (Op == N->getOperand(0)) return N;
5269 // See if the modified node already exists.
5270 void *InsertPos = nullptr;
5271 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
5274 // Nope it doesn't. Remove the node from its current place in the maps.
// If removal fails the node was never CSE'd (e.g. glue result), so don't
// try to re-insert it afterwards.
5276 if (!RemoveNodeFromCSEMaps(N))
5277 InsertPos = nullptr;
5279 // Now we update the operands.
5280 N->OperandList[0].set(Op);
5282 // If this gets put into a CSE map, add it.
5283 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
// UpdateNodeOperands (two-operand form) - Same contract as the one-operand
// overload: mutate N in place, or return a pre-existing equivalent node.
5287 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
5288 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5290 // Check to see if there is no change.
5291 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5292 return N; // No operands changed, just return the input node.
5294 // See if the modified node already exists.
5295 void *InsertPos = nullptr;
5296 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
5299 // Nope it doesn't. Remove the node from its current place in the maps.
5301 if (!RemoveNodeFromCSEMaps(N))
5302 InsertPos = nullptr;
5304 // Now we update the operands.
// Only touch operands that actually changed, to avoid needless use-list
// churn.
5305 if (N->OperandList[0] != Op1)
5306 N->OperandList[0].set(Op1)
5307 if (N->OperandList[1] != Op2)
5308 N->OperandList[1].set(Op2);
5310 // If this gets put into a CSE map, add it.
5311 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
// UpdateNodeOperands (3/4/5-operand wrappers) - Pack the operands into an
// array and delegate to the ArrayRef form.
5315 SDNode *SelectionDAG::
5316 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
5317 SDValue Ops[] = { Op1, Op2, Op3 };
5318 return UpdateNodeOperands(N, Ops);
5321 SDNode *SelectionDAG::
5322 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5323 SDValue Op3, SDValue Op4) {
5324 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
5325 return UpdateNodeOperands(N, Ops);
5328 SDNode *SelectionDAG::
5329 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5330 SDValue Op3, SDValue Op4, SDValue Op5) {
5331 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
5332 return UpdateNodeOperands(N, Ops);
// UpdateNodeOperands (general ArrayRef form) - In-place operand replacement
// with CSE: returns an existing equivalent node if one is found, otherwise
// mutates and (when possible) re-inserts N into the CSE map.
5335 SDNode *SelectionDAG::
5336 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
5337 unsigned NumOps = Ops.size();
5338 assert(N->getNumOperands() == NumOps &&
5339 "Update with wrong number of operands");
5341 // Check to see if there is no change.
5342 bool AnyChange = false;
5343 for (unsigned i = 0; i != NumOps; ++i) {
5344 if (Ops[i] != N->getOperand(i)) {
5350 // No operands changed, just return the input node.
5351 if (!AnyChange) return N;
5353 // See if the modified node already exists.
5354 void *InsertPos = nullptr;
5355 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
5358 // Nope it doesn't. Remove the node from its current place in the maps.
5360 if (!RemoveNodeFromCSEMaps(N))
5361 InsertPos = nullptr;
5363 // Now we update the operands.
// Set only the operands that changed to minimize use-list updates.
5364 for (unsigned i = 0; i != NumOps; ++i)
5365 if (N->OperandList[i] != Ops[i])
5366 N->OperandList[i].set(Ops[i]);
5368 // If this gets put into a CSE map, add it.
5369 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5373 /// DropOperands - Release the operands and set this node to have
// NOTE(review): the loop body (the SDUse::set(SDValue()) that actually drops
// each use) is on lines missing from this chunk.
5375 void SDNode::DropOperands() {
5376 // Unlike the code in MorphNodeTo that does this, we don't need to
5377 // watch for dead nodes here.
5378 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5384 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
// All overloads below intern an SDVTList and/or pack operands into an array,
// then funnel into the final SDVTList+ArrayRef form, which morphs N into a
// machine node (note the ~MachineOpc encoding for target opcodes).
5387 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5389 SDVTList VTs = getVTList(VT);
5390 return SelectNodeTo(N, MachineOpc, VTs, None);
5393 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5394 EVT VT, SDValue Op1) {
5395 SDVTList VTs = getVTList(VT);
5396 SDValue Ops[] = { Op1 };
5397 return SelectNodeTo(N, MachineOpc, VTs, Ops);
5400 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5401 EVT VT, SDValue Op1,
5403 SDVTList VTs = getVTList(VT);
5404 SDValue Ops[] = { Op1, Op2 };
5405 return SelectNodeTo(N, MachineOpc, VTs, Ops);
5408 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5409 EVT VT, SDValue Op1,
5410 SDValue Op2, SDValue Op3) {
5411 SDVTList VTs = getVTList(VT);
5412 SDValue Ops[] = { Op1, Op2, Op3 };
5413 return SelectNodeTo(N, MachineOpc, VTs, Ops);
5416 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5417 EVT VT, ArrayRef<SDValue> Ops) {
5418 SDVTList VTs = getVTList(VT);
5419 return SelectNodeTo(N, MachineOpc, VTs, Ops);
5422 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5423 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
5424 SDVTList VTs = getVTList(VT1, VT2);
5425 return SelectNodeTo(N, MachineOpc, VTs, Ops);
5428 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5430 SDVTList VTs = getVTList(VT1, VT2);
5431 return SelectNodeTo(N, MachineOpc, VTs, None);
5434 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5435 EVT VT1, EVT VT2, EVT VT3,
5436 ArrayRef<SDValue> Ops) {
5437 SDVTList VTs = getVTList(VT1, VT2, VT3);
5438 return SelectNodeTo(N, MachineOpc, VTs, Ops);
5441 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5442 EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5443 ArrayRef<SDValue> Ops) {
5444 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5445 return SelectNodeTo(N, MachineOpc, VTs, Ops);
5448 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5451 SDVTList VTs = getVTList(VT1, VT2);
5452 SDValue Ops[] = { Op1 };
5453 return SelectNodeTo(N, MachineOpc, VTs, Ops);
5456 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5458 SDValue Op1, SDValue Op2) {
5459 SDVTList VTs = getVTList(VT1, VT2);
5460 SDValue Ops[] = { Op1, Op2 };
5461 return SelectNodeTo(N, MachineOpc, VTs, Ops);
5464 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5466 SDValue Op1, SDValue Op2,
5468 SDVTList VTs = getVTList(VT1, VT2);
5469 SDValue Ops[] = { Op1, Op2, Op3 };
5470 return SelectNodeTo(N, MachineOpc, VTs, Ops);
5473 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5474 EVT VT1, EVT VT2, EVT VT3,
5475 SDValue Op1, SDValue Op2,
5477 SDVTList VTs = getVTList(VT1, VT2, VT3);
5478 SDValue Ops[] = { Op1, Op2, Op3 };
5479 return SelectNodeTo(N, MachineOpc, VTs, Ops);
// Final form: ~MachineOpc marks the opcode as a target (machine) opcode for
// MorphNodeTo. NOTE(review): the "N->setNodeId(-1);" and "return N;" lines
// appear to be missing from this chunk.
5482 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5483 SDVTList VTs,ArrayRef<SDValue> Ops) {
5484 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
5485 // Reset the NodeID to -1.
5490 /// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
5491 /// the line number information on the merged node since it is not possible to
5492 /// preserve the information that operation is associated with multiple lines.
5493 /// This will make the debugger working better at -O0, were there is a higher
5494 /// probability having other instructions associated with that line.
5496 /// For IROrder, we keep the smaller of the two
// (Note: "Updade" is a historical typo in the function name; renaming it
// would break callers, so it is kept as-is.)
5497 SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
5498 DebugLoc NLoc = N->getDebugLoc();
// Only at -O0, and only when the two locations disagree, drop the DebugLoc.
5499 if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
5500 (OLoc.getDebugLoc() != NLoc)) {
5501 N->setDebugLoc(DebugLoc());
// Keep the earlier IR order so scheduling heuristics see the first origin.
5503 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
5504 N->setIROrder(Order);
5508 /// MorphNodeTo - This *mutates* the specified node to have the specified
5509 /// return type, opcode, and operands.
5511 /// Note that MorphNodeTo returns the resultant node. If there is already a
5512 /// node of the specified opcode and operands, it returns that node instead of
5513 /// the current one. Note that the SDLoc need not be the same.
5515 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5516 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5517 /// node, and because it doesn't require CSE recalculation for any of
5518 /// the node's users.
5520 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
5521 /// As a consequence it isn't appropriate to use from within the DAG combiner or
5522 /// the legalizer which maintain worklists that would need to be updated when
5523 /// deleting things.
// NOTE(review): this chunk is missing lines (e.g. the "void *IP" declaration,
// "N->NodeType = Opc;", the Use.set(SDValue()) in the operand-clearing loop,
// the else-branch braces, and the final "return N;"). Comments cover only
// the visible logic.
5524 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5525 SDVTList VTs, ArrayRef<SDValue> Ops) {
5526 unsigned NumOps = Ops.size();
5527 // If an identical node already exists, use it.
5529 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5530 FoldingSetNodeID ID;
5531 AddNodeIDNode(ID, Opc, VTs, Ops);
// Merge into the pre-existing node, reconciling debug location/IR order.
5532 if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5533 return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
5536 if (!RemoveNodeFromCSEMaps(N))
5539 // Start the morphing.
5541 N->ValueList = VTs.VTs;
5542 N->NumValues = VTs.NumVTs;
5544 // Clear the operands list, updating used nodes to remove this from their
5545 // use list. Keep track of any operands that become dead as a result.
5546 SmallPtrSet<SDNode*, 16> DeadNodeSet;
5547 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5549 SDNode *Used = Use.getNode();
5551 if (Used->use_empty())
5552 DeadNodeSet.insert(Used);
5555 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5556 // Initialize the memory references information.
5557 MN->setMemRefs(nullptr, nullptr);
5558 // If NumOps is larger than the # of operands we can have in a
5559 // MachineSDNode, reallocate the operand list.
5560 if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5561 if (MN->OperandsNeedDelete)
5562 delete[] MN->OperandList;
5563 if (NumOps > array_lengthof(MN->LocalOperands))
5564 // We're creating a final node that will live unmorphed for the
5565 // remainder of the current SelectionDAG iteration, so we can allocate
5566 // the operands directly out of a pool with no recycling metadata.
5567 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5568 Ops.data(), NumOps);
5570 MN->InitOperands(MN->LocalOperands, Ops.data(), NumOps);
5571 MN->OperandsNeedDelete = false;
5573 MN->InitOperands(MN->OperandList, Ops.data(), NumOps);
5575 // If NumOps is larger than the # of operands we currently have, reallocate
5576 // the operand list.
5577 if (NumOps > N->NumOperands) {
5578 if (N->OperandsNeedDelete)
5579 delete[] N->OperandList;
5580 N->InitOperands(new SDUse[NumOps], Ops.data(), NumOps);
5581 N->OperandsNeedDelete = true;
5583 N->InitOperands(N->OperandList, Ops.data(), NumOps);
5586 // Delete any nodes that are still dead after adding the uses for the
// Re-check use_empty: a node in DeadNodeSet may have been resurrected by
// becoming an operand of the morphed node.
5588 if (!DeadNodeSet.empty()) {
5589 SmallVector<SDNode *, 16> DeadNodes;
5590 for (SDNode *N : DeadNodeSet)
5592 DeadNodes.push_back(N);
5593 RemoveDeadNodes(DeadNodes);
5597 CSEMap.InsertNode(N, IP); // Memoize the new node.
5602 /// getMachineNode - These are used for target selectors to create a new node
5603 /// with specified return type(s), MachineInstr opcode, and operands.
5605 /// Note that getMachineNode returns the resultant node. If there is already a
5606 /// node of the specified opcode and operands, it returns that node instead of
5607 /// the current one.
// All overloads below intern the result types / pack the operands and funnel
// into the SDVTList+ArrayRef form at the end of this family.
5609 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
5610 SDVTList VTs = getVTList(VT);
5611 return getMachineNode(Opcode, dl, VTs, None);
5615 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
5616 SDVTList VTs = getVTList(VT);
5617 SDValue Ops[] = { Op1 };
5618 return getMachineNode(Opcode, dl, VTs, Ops);
5622 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5623 SDValue Op1, SDValue Op2) {
5624 SDVTList VTs = getVTList(VT);
5625 SDValue Ops[] = { Op1, Op2 };
5626 return getMachineNode(Opcode, dl, VTs, Ops);
5630 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5631 SDValue Op1, SDValue Op2, SDValue Op3) {
5632 SDVTList VTs = getVTList(VT);
5633 SDValue Ops[] = { Op1, Op2, Op3 };
5634 return getMachineNode(Opcode, dl, VTs, Ops);
5638 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5639 ArrayRef<SDValue> Ops) {
5640 SDVTList VTs = getVTList(VT);
5641 return getMachineNode(Opcode, dl, VTs, Ops);
5645 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
5646 SDVTList VTs = getVTList(VT1, VT2);
5647 return getMachineNode(Opcode, dl, VTs, None);
5651 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5652 EVT VT1, EVT VT2, SDValue Op1) {
5653 SDVTList VTs = getVTList(VT1, VT2);
5654 SDValue Ops[] = { Op1 };
5655 return getMachineNode(Opcode, dl, VTs, Ops);
5659 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5660 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
5661 SDVTList VTs = getVTList(VT1, VT2);
5662 SDValue Ops[] = { Op1, Op2 };
5663 return getMachineNode(Opcode, dl, VTs, Ops);
5667 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5668 EVT VT1, EVT VT2, SDValue Op1,
5669 SDValue Op2, SDValue Op3) {
5670 SDVTList VTs = getVTList(VT1, VT2);
5671 SDValue Ops[] = { Op1, Op2, Op3 };
5672 return getMachineNode(Opcode, dl, VTs, Ops);
5676 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5678 ArrayRef<SDValue> Ops) {
5679 SDVTList VTs = getVTList(VT1, VT2);
5680 return getMachineNode(Opcode, dl, VTs, Ops);
5684 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5685 EVT VT1, EVT VT2, EVT VT3,
5686 SDValue Op1, SDValue Op2) {
5687 SDVTList VTs = getVTList(VT1, VT2, VT3);
5688 SDValue Ops[] = { Op1, Op2 };
5689 return getMachineNode(Opcode, dl, VTs, Ops);
5693 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5694 EVT VT1, EVT VT2, EVT VT3,
5695 SDValue Op1, SDValue Op2, SDValue Op3) {
5696 SDVTList VTs = getVTList(VT1, VT2, VT3);
5697 SDValue Ops[] = { Op1, Op2, Op3 };
5698 return getMachineNode(Opcode, dl, VTs, Ops);
5702 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5703 EVT VT1, EVT VT2, EVT VT3,
5704 ArrayRef<SDValue> Ops) {
5705 SDVTList VTs = getVTList(VT1, VT2, VT3);
5706 return getMachineNode(Opcode, dl, VTs, Ops);
5710 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
5711 EVT VT2, EVT VT3, EVT VT4,
5712 ArrayRef<SDValue> Ops) {
5713 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5714 return getMachineNode(Opcode, dl, VTs, Ops);
5718 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5719 ArrayRef<EVT> ResultTys,
5720 ArrayRef<SDValue> Ops) {
5721 SDVTList VTs = getVTList(ResultTys);
5722 return getMachineNode(Opcode, dl, VTs, Ops);
// getMachineNode (core form) - Allocate or CSE-reuse a MachineSDNode. Target
// opcodes are stored complemented (~Opcode) to distinguish them from ISD
// opcodes. Glue-producing nodes are never CSE'd (DoCSE).
// NOTE(review): some lines are missing from this chunk (the "void *IP" /
// "MachineSDNode *N" declarations, the "if (DoCSE)" guards, the Ops argument
// of the pooled InitOperands call, and the final "return N;").
5726 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
5727 ArrayRef<SDValue> OpsArray) {
5728 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
5731 const SDValue *Ops = OpsArray.data();
5732 unsigned NumOps = OpsArray.size();
5735 FoldingSetNodeID ID;
5736 AddNodeIDNode(ID, ~Opcode, VTs, OpsArray);
// On a CSE hit, reconcile debug info and return the existing machine node.
5738 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5739 return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
5743 // Allocate a new MachineSDNode.
5744 N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
5745 DL.getDebugLoc(), VTs);
5747 // Initialize the operands list.
5748 if (NumOps > array_lengthof(N->LocalOperands))
5749 // We're creating a final node that will live unmorphed for the
5750 // remainder of the current SelectionDAG iteration, so we can allocate
5751 // the operands directly out of a pool with no recycling metadata.
5752 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5755 N->InitOperands(N->LocalOperands, Ops, NumOps);
5756 N->OperandsNeedDelete = false;
5759 CSEMap.InsertNode(N, IP);
5765 /// getTargetExtractSubreg - A convenience function for creating
5766 /// TargetOpcode::EXTRACT_SUBREG nodes.
// The subregister index is passed as a target-constant i32 operand.
5768 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
5770 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5771 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5772 VT, Operand, SRIdxVal);
5773 return SDValue(Subreg, 0);
5776 /// getTargetInsertSubreg - A convenience function for creating
5777 /// TargetOpcode::INSERT_SUBREG nodes.
// Inserts Subreg into Operand at subregister index SRIdx (target-constant
// i32 operand), producing a VT-typed value.
5779 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
5780 SDValue Operand, SDValue Subreg) {
5781 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5782 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5783 VT, Operand, Subreg, SRIdxVal);
5784 return SDValue(Result, 0);
5787 /// getNodeIfExists - Get the specified node if it's already available, or
5788 /// else return NULL.
// Looks up the CSE map only; never allocates. nuw/nsw/exact are folded into
// the ID for opcodes that carry binary flags so flag-differing nodes don't
// collide. NOTE(review): the "void *IP" declaration, "return E;" body, and
// final "return nullptr;" appear to be on lines missing from this chunk.
5789 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5790 ArrayRef<SDValue> Ops, bool nuw, bool nsw,
5792 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
5793 FoldingSetNodeID ID;
5794 AddNodeIDNode(ID, Opcode, VTList, Ops);
5795 if (isBinOpWithFlags(Opcode))
5796 AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
5798 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5804 /// getDbgValue - Creates a SDDbgValue node.
// Three factory overloads: one attached to an SDNode result R, one for a
// constant Value, and one for a frame index. All allocate out of the DAG's
// allocator, so SDDbgValues live as long as the DAG.
5807 SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N,
5808 unsigned R, bool IsIndirect, uint64_t Off,
5809 DebugLoc DL, unsigned O) {
5810 return new (Allocator) SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O);
// Constant form: describes a variable whose value is the constant C.
5814 SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr,
5815 const Value *C, uint64_t Off,
5816 DebugLoc DL, unsigned O) {
5817 return new (Allocator) SDDbgValue(Var, Expr, C, Off, DL, O);
// FrameIndex form: describes a variable living in stack slot FI.
5821 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr,
5822 unsigned FI, uint64_t Off,
5823 DebugLoc DL, unsigned O) {
5824 return new (Allocator) SDDbgValue(Var, Expr, FI, Off, DL, O);
5829 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5830 /// pointed to by a use iterator is deleted, increment the use iterator
5831 /// so that it doesn't dangle.
// Registered as a DAGUpdateListener for the duration of a RAUW so that
// recursive CSE deletions can't invalidate the active use iterators.
5833 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5834 SDNode::use_iterator &UI;
5835 SDNode::use_iterator &UE;
5837 void NodeDeleted(SDNode *N, SDNode *E) override {
5838 // Increment the iterator as needed.
// Skip past every use record owned by the node being deleted.
5839 while (UI != UE && N == *UI)
5844 RAUWUpdateListener(SelectionDAG &d,
5845 SDNode::use_iterator &ui,
5846 SDNode::use_iterator &ue)
5847 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5852 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5853 /// This can cause recursive merging of nodes in the DAG.
5855 /// This version assumes From has a single result value.
// NOTE(review): the outer "while (UI != UE)" loop header, the inner do-loop
// header with Use.set(To), and the closing setRoot call appear to be on
// lines missing from this chunk.
5857 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5858 SDNode *From = FromN.getNode();
5859 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5860 "Cannot replace with this method!");
5861 assert(From != To.getNode() && "Cannot replace uses of with self");
5863 // Iterate over all the existing uses of From. New uses will be added
5864 // to the beginning of the use list, which we avoid visiting.
5865 // This specifically avoids visiting uses of From that arise while the
5866 // replacement is happening, because any such uses would be the result
5867 // of CSE: If an existing node looks like From after one of its operands
5868 // is replaced by To, we don't want to replace of all its users with To
5869 // too. See PR3018 for more info.
5870 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
// The listener keeps UI/UE valid across recursive node deletions.
5871 RAUWUpdateListener Listener(*this, UI, UE);
5875 // This node is about to morph, remove its old self from the CSE maps.
5876 RemoveNodeFromCSEMaps(User);
5878 // A user can appear in a use list multiple times, and when this
5879 // happens the uses are usually next to each other in the list.
5880 // To help reduce the number of CSE recomputations, process all
5881 // the uses of this user that we can find this way.
5883 SDUse &Use = UI.getUse();
5886 } while (UI != UE && *UI == User);
5888 // Now that we have modified User, add it back to the CSE maps. If it
5889 // already exists there, recursively merge the results together.
5890 AddModifiedNodeToCSEMaps(User);
5893 // If we just RAUW'd the root, take note.
5894 if (FromN == getRoot())
// NOTE(review): embedded numbering jumps (5912->5916, 5919->5923, 5931->5934)
// indicate the trivial-case early return, the outer use-walk loop header, and
// the per-use rewrite statements were elided from this excerpt. Not compilable
// as shown.
5898 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5899 /// This can cause recursive merging of nodes in the DAG.
5901 /// This version assumes that for each value of From, there is a
5902 /// corresponding value in To in the same position with the same type.
5904 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
5906 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
5907 assert((!From->hasAnyUseOfValue(i) ||
5908 From->getValueType(i) == To->getValueType(i)) &&
5909 "Cannot use this version of ReplaceAllUsesWith!");
5912 // Handle the trivial case.
5916 // Iterate over just the existing users of From. See the comments in
5917 // the ReplaceAllUsesWith above.
5918 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5919 RAUWUpdateListener Listener(*this, UI, UE);
5923 // This node is about to morph, remove its old self from the CSE maps.
5924 RemoveNodeFromCSEMaps(User);
5926 // A user can appear in a use list multiple times, and when this
5927 // happens the uses are usually next to each other in the list.
5928 // To help reduce the number of CSE recomputations, process all
5929 // the uses of this user that we can find this way.
5931 SDUse &Use = UI.getUse();
5934 } while (UI != UE && *UI == User);
5936 // Now that we have modified User, add it back to the CSE maps. If it
5937 // already exists there, recursively merge the results together.
5938 AddModifiedNodeToCSEMaps(User);
5941 // If we just RAUW'd the root, take note.
5942 if (From == getRoot().getNode())
5943 setRoot(SDValue(To, getRoot().getResNo()));
// NOTE(review): embedded numbering jumps (5958->5962, 5971->5974) show the
// outer use-walk loop header and the statement applying `ToOp` to the use were
// elided from this excerpt. Not compilable as shown.
5946 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5947 /// This can cause recursive merging of nodes in the DAG.
5949 /// This version can replace From with any result values. To must match the
5950 /// number and types of values returned by From.
5951 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
5952 if (From->getNumValues() == 1) // Handle the simple case efficiently.
5953 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
5955 // Iterate over just the existing users of From. See the comments in
5956 // the ReplaceAllUsesWith above.
5957 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5958 RAUWUpdateListener Listener(*this, UI, UE);
5962 // This node is about to morph, remove its old self from the CSE maps.
5963 RemoveNodeFromCSEMaps(User);
5965 // A user can appear in a use list multiple times, and when this
5966 // happens the uses are usually next to each other in the list.
5967 // To help reduce the number of CSE recomputations, process all
5968 // the uses of this user that we can find this way.
5970 SDUse &Use = UI.getUse();
5971 const SDValue &ToOp = To[Use.getResNo()];
5974 } while (UI != UE && *UI == User);
5976 // Now that we have modified User, add it back to the CSE maps. If it
5977 // already exists there, recursively merge the results together.
5978 AddModifiedNodeToCSEMaps(User);
5981 // If we just RAUW'd the root, take note.
5982 if (From == getRoot().getNode())
5983 setRoot(SDValue(To[getRoot().getResNo()]));
// NOTE(review): embedded numbering jumps (5995->5999, 6003->6006, 6016->6021,
// 6025->6030, 6034->6037, 6043->end) indicate the early-return after the
// trivial-case call, the outer use-walk loop header, the skip-use increment,
// the use rewrite, the `continue` path, and the root update were elided from
// this excerpt. Not compilable as shown.
5986 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
5987 /// uses of other values produced by From.getNode() alone. The Deleted
5988 /// vector is handled the same way as for ReplaceAllUsesWith.
5989 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
5990 // Handle the really simple, really trivial case efficiently.
5991 if (From == To) return;
5993 // Handle the simple, trivial, case efficiently.
5994 if (From.getNode()->getNumValues() == 1) {
5995 ReplaceAllUsesWith(From, To);
5999 // Iterate over just the existing users of From. See the comments in
6000 // the ReplaceAllUsesWith above.
6001 SDNode::use_iterator UI = From.getNode()->use_begin(),
6002 UE = From.getNode()->use_end();
6003 RAUWUpdateListener Listener(*this, UI, UE);
6006 bool UserRemovedFromCSEMaps = false;
6008 // A user can appear in a use list multiple times, and when this
6009 // happens the uses are usually next to each other in the list.
6010 // To help reduce the number of CSE recomputations, process all
6011 // the uses of this user that we can find this way.
6013 SDUse &Use = UI.getUse();
6015 // Skip uses of different values from the same node.
6016 if (Use.getResNo() != From.getResNo()) {
6021 // If this node hasn't been modified yet, it's still in the CSE maps,
6022 // so remove its old self from the CSE maps.
6023 if (!UserRemovedFromCSEMaps) {
6024 RemoveNodeFromCSEMaps(User);
6025 UserRemovedFromCSEMaps = true;
6030 } while (UI != UE && *UI == User);
6032 // We are iterating over all uses of the From node, so if a use
6033 // doesn't use the specific value, no changes are made.
6034 if (!UserRemovedFromCSEMaps)
6037 // Now that we have modified User, add it back to the CSE maps. If it
6038 // already exists there, recursively merge the results together.
6039 AddModifiedNodeToCSEMaps(User);
6042 // If we just RAUW'd the root, take note.
6043 if (From == getRoot())
// NOTE(review): the UseMemo struct body (embedded lines 6050-6055) and the
// closing brace of operator< were elided from this excerpt. From the visible
// code, UseMemo has at least a `User` pointer member; ordering is by the
// User pointer's address so memos for the same user sort adjacently.
6048 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
6049 /// to record information about a use.
6056 /// operator< - Sort Memos by User.
6057 bool operator<(const UseMemo &L, const UseMemo &R) {
6058 return (intptr_t)L.User < (intptr_t)R.User;
// NOTE(review): embedded numbering jumps (6066->6069, 6108->6112, 6116->end)
// show the remaining parameters of the signature (the To array and Num count
// are referenced below but not declared in the visible lines), the per-use
// rewrite in the inner loop, and the closing lines were elided from this
// excerpt. Not compilable as shown.
6062 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
6063 /// uses of other values produced by From.getNode() alone. The same value
6064 /// may appear in both the From and To list. The Deleted vector is
6065 /// handled the same way as for ReplaceAllUsesWith.
6066 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
6069 // Handle the simple, trivial case efficiently.
6071 return ReplaceAllUsesOfValueWith(*From, *To);
6073 // Read up all the uses and make records of them. This helps
6074 // processing new uses that are introduced during the
6075 // replacement process.
6076 SmallVector<UseMemo, 4> Uses;
6077 for (unsigned i = 0; i != Num; ++i) {
6078 unsigned FromResNo = From[i].getResNo();
6079 SDNode *FromNode = From[i].getNode();
6080 for (SDNode::use_iterator UI = FromNode->use_begin(),
6081 E = FromNode->use_end(); UI != E; ++UI) {
6082 SDUse &Use = UI.getUse();
6083 if (Use.getResNo() == FromResNo) {
6084 UseMemo Memo = { *UI, i, &Use };
6085 Uses.push_back(Memo);
6090 // Sort the uses, so that all the uses from a given User are together.
6091 std::sort(Uses.begin(), Uses.end());
6093 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
6094 UseIndex != UseIndexEnd; ) {
6095 // We know that this user uses some value of From. If it is the right
6096 // value, update it.
6097 SDNode *User = Uses[UseIndex].User;
6099 // This node is about to morph, remove its old self from the CSE maps.
6100 RemoveNodeFromCSEMaps(User);
6102 // The Uses array is sorted, so all the uses for a given User
6103 // are next to each other in the list.
6104 // To help reduce the number of CSE recomputations, process all
6105 // the uses of this user that we can find this way.
6107 unsigned i = Uses[UseIndex].Index;
6108 SDUse &Use = *Uses[UseIndex].Use;
6112 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
6114 // Now that we have modified User, add it back to the CSE maps. If it
6115 // already exists there, recursively merge the results together.
6116 AddModifiedNodeToCSEMaps(User);
// NOTE(review): embedded numbering jumps throughout (6139->6141, 6142->6144,
// 6146->6148, 6164->6167, 6168->6171, 6182->6185, 6190->6194) show the
// `SDNode *N = I++;` steps, zero-degree checks, degree decrement, and the
// failure-path braces were elided from this excerpt. Not compilable as shown.
// Visible algorithm: two passes over AllNodes — first seed zero-operand nodes
// at the front and stash operand counts in the NodeId field, then repeatedly
// move nodes whose operands are all sorted, asserting a full topological order
// with the EntryToken first at the end.
6120 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
6121 /// based on their topological order. It returns the maximum id and a vector
6122 /// of the SDNodes* in assigned order by reference.
6123 unsigned SelectionDAG::AssignTopologicalOrder() {
6125 unsigned DAGSize = 0;
6127 // SortedPos tracks the progress of the algorithm. Nodes before it are
6128 // sorted, nodes after it are unsorted. When the algorithm completes
6129 // it is at the end of the list.
6130 allnodes_iterator SortedPos = allnodes_begin();
6132 // Visit all the nodes. Move nodes with no operands to the front of
6133 // the list immediately. Annotate nodes that do have operands with their
6134 // operand count. Before we do this, the Node Id fields of the nodes
6135 // may contain arbitrary values. After, the Node Id fields for nodes
6136 // before SortedPos will contain the topological sort index, and the
6137 // Node Id fields for nodes At SortedPos and after will contain the
6138 // count of outstanding operands.
6139 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
6141 checkForCycles(N, this);
6142 unsigned Degree = N->getNumOperands();
6144 // A node with no uses, add it to the result array immediately.
6145 N->setNodeId(DAGSize++);
6146 allnodes_iterator Q = N;
6148 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
6149 assert(SortedPos != AllNodes.end() && "Overran node list");
6152 // Temporarily use the Node Id as scratch space for the degree count.
6153 N->setNodeId(Degree);
6157 // Visit all the nodes. As we iterate, move nodes into sorted order,
6158 // such that by the time the end is reached all nodes will be sorted.
6159 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
6161 checkForCycles(N, this);
6162 // N is in sorted position, so all its uses have one less operand
6163 // that needs to be sorted.
6164 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
6167 unsigned Degree = P->getNodeId();
6168 assert(Degree != 0 && "Invalid node degree");
6171 // All of P's operands are sorted, so P may sorted now.
6172 P->setNodeId(DAGSize++);
6174 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
6175 assert(SortedPos != AllNodes.end() && "Overran node list");
6178 // Update P's outstanding operand count.
6179 P->setNodeId(Degree);
6182 if (I == SortedPos) {
6185 dbgs() << "Overran sorted position:\n";
6186 S->dumprFull(this); dbgs() << "\n";
6187 dbgs() << "Checking if this is due to cycles\n";
6188 checkForCycles(this, true);
6190 llvm_unreachable(nullptr);
6194 assert(SortedPos == AllNodes.end() &&
6195 "Topological sort incomplete!");
6196 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
6197 "First node in topological sort is not the entry token!");
6198 assert(AllNodes.front().getNodeId() == 0 &&
6199 "First node in topological sort has non-zero id!");
6200 assert(AllNodes.front().getNumOperands() == 0 &&
6201 "First node in topological sort has operands!");
6202 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
6203 "Last node in topologic sort has unexpected id!");
6204 assert(AllNodes.back().use_empty() &&
6205 "Last node in topologic sort has users!");
6206 assert(DAGSize == allnodes_size() && "Node count mismatch!");
// NOTE(review): embedded lines 6213 and 6216 (presumably the `if (SD)` guard
// around the SD-specific statements — confirm against upstream) and the
// closing brace were elided from this excerpt.
6210 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
6211 /// value is produced by SD.
6212 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
6214 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
6215 SD->setHasDebugValue(true);
6217 DbgInfo->add(DB, SD, isParameter);
// NOTE(review): embedded numbering jumps (6222->6224, 6228->6230, 6231->6233)
// show the early `return;`, the loop-increment line, and the `SDDbgValue
// *Clone =` declaration were elided; the call starting at 6233 has no visible
// receiver. Not compilable as shown.
// Visible behavior: clone every SDNODE-kind debug value attached to From's
// node, retargeted at To's node/result, then attach the clones to To.
6220 /// TransferDbgValues - Transfer SDDbgValues.
6221 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
6222 if (From == To || !From.getNode()->getHasDebugValue())
6224 SDNode *FromNode = From.getNode();
6225 SDNode *ToNode = To.getNode();
6226 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
6227 SmallVector<SDDbgValue *, 2> ClonedDVs;
6228 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
6230 SDDbgValue *Dbg = *I;
6231 if (Dbg->getKind() == SDDbgValue::SDNODE) {
6233 getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
6234 To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
6235 Dbg->getDebugLoc(), Dbg->getOrder());
6236 ClonedDVs.push_back(Clone);
6239 for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
6240 E = ClonedDVs.end(); I != E; ++I)
6241 AddDbgValue(*I, ToNode, false);
// NOTE(review): the section title line (embedded 6245, "SDNode Class" in
// upstream — confirm), the HandleSDNode destructor body, and the tail of the
// GlobalAddressSDNode constructor initializer were elided from this excerpt.
6244 //===----------------------------------------------------------------------===//
6246 //===----------------------------------------------------------------------===//
6248 HandleSDNode::~HandleSDNode() {
6252 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
6253 DebugLoc DL, const GlobalValue *GA,
6254 EVT VT, int64_t o, unsigned char TF)
6255 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
6259 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
6260 SDValue X, unsigned SrcAS,
6262 : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
6263 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
// Constructors for memory-referencing nodes: encode volatility/non-temporal/
// invariant flags from the MachineMemOperand into SubclassData, and assert
// the declared memory VT fits within the MMO's size.
// NOTE(review): closing braces (embedded 6277, 6287) were elided from this
// excerpt.
6265 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6266 EVT memvt, MachineMemOperand *mmo)
6267 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
6268 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6269 MMO->isNonTemporal(), MMO->isInvariant());
6270 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6271 assert(isNonTemporal() == MMO->isNonTemporal() &&
6272 "Non-temporal encoding error!");
6273 // We check here that the size of the memory operand fits within the size of
6274 // the MMO. This is because the MMO might indicate only a possible address
6275 // range instead of specifying the affected memory addresses precisely.
6276 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
6279 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6280 ArrayRef<SDValue> Ops, EVT memvt, MachineMemOperand *mmo)
6281 : SDNode(Opc, Order, dl, VTs, Ops),
6282 MemoryVT(memvt), MMO(mmo) {
6283 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6284 MMO->isNonTemporal(), MMO->isInvariant());
6285 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6286 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
// NOTE(review): heavily elided region. The struct header that owns the
// `std::vector<EVT> VTs;` member and its constructor (embedded 6296-6299,
// presumably a local `EVTArray` helper — confirm against upstream) is missing,
// as is Profile's closing brace. The three ManagedStatics lazily hold extended
// EVTs, the simple-VT array, and the mutex guarding the EVT set.
6289 /// Profile - Gather unique data for the node.
6291 void SDNode::Profile(FoldingSetNodeID &ID) const {
6292 AddNodeIDNode(ID, this);
6297 std::vector<EVT> VTs;
6300 VTs.reserve(MVT::LAST_VALUETYPE);
6301 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
6302 VTs.push_back(MVT((MVT::SimpleValueType)i));
6307 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
6308 static ManagedStatic<EVTArray> SimpleVTArray;
6309 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
// Returns a stable pointer for a value type: extended EVTs are interned in
// the mutex-guarded global set; simple MVTs index directly into the static
// per-MVT array. NOTE(review): the closing brace of the `if` (embedded 6317)
// and of the function were elided from this excerpt.
6311 /// getValueTypeList - Return a pointer to the specified value type.
6313 const EVT *SDNode::getValueTypeList(EVT VT) {
6314 if (VT.isExtended()) {
6315 sys::SmartScopedLock<true> Lock(*VTMutex);
6316 return &(*EVTs->insert(VT).first);
6318 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
6319 "Value type out of range!");
6320 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
// NOTE(review): the loop body that decrements/checks NUses (embedded
// 6333-6338) and the final return comparing against zero (after 6339) were
// elided from this excerpt. Not compilable as shown.
6324 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
6325 /// indicated value. This method ignores uses of other values defined by this
6327 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
6328 assert(Value < getNumValues() && "Bad value!");
6330 // TODO: Only iterate over uses of a given value of the node
6331 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
6332 if (UI.getUse().getResNo() == Value) {
6339 // Found exactly the right number of uses?
// NOTE(review): hasAnyUseOfValue's `return true;`/`return false;` tail and
// all of isOnlyUserOf's loop body and return (embedded 6362-6371) were elided
// from this excerpt. Not compilable as shown.
6344 /// hasAnyUseOfValue - Return true if there are any use of the indicated
6345 /// value. This method ignores uses of other values defined by this operation.
6346 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
6347 assert(Value < getNumValues() && "Bad value!");
6349 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
6350 if (UI.getUse().getResNo() == Value)
6357 /// isOnlyUserOf - Return true if this node is the only use of N.
6359 bool SDNode::isOnlyUserOf(SDNode *N) const {
6361 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
// Linear scans of N's operand list: the SDValue overload compares whole
// (node, resno) values, the SDNode overload compares only the node pointer.
// NOTE(review): the `return true;`/`return false;` tails of both overloads
// (embedded 6377-6380, 6384-6386) were elided from this excerpt.
6372 /// isOperand - Return true if this node is an operand of N.
6374 bool SDValue::isOperandOf(SDNode *N) const {
6375 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6376 if (*this == N->getOperand(i))
6381 bool SDNode::isOperandOf(SDNode *N) const {
6382 for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6383 if (this == N->OperandList[i].getNode())
// NOTE(review): the TokenFactor success return (embedded 6406-6409) and the
// final `return false;` after the load case were elided from this excerpt.
// Not compilable as shown.
6388 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6389 /// be a chain) reaches the specified operand without crossing any
6390 /// side-effecting instructions on any chain path. In practice, this looks
6391 /// through token factors and non-volatile loads. In order to remain efficient,
6392 /// this only looks a couple of nodes in, it does not do an exhaustive search.
6393 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6394 unsigned Depth) const {
6395 if (*this == Dest) return true;
6397 // Don't search too deeply, we just want to be able to see through
6398 // TokenFactor's etc.
6399 if (Depth == 0) return false;
6401 // If this is a token factor, all inputs to the TF happen in parallel. If any
6402 // of the operands of the TF does not reach dest, then we cannot do the xform.
6403 if (getOpcode() == ISD::TokenFactor) {
6404 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6405 if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6410 // Loads don't have side effects, look through them.
6411 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6412 if (!Ld->isVolatile())
6413 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
// NOTE(review): hasPredecessorHelper's return type line (embedded ~6428, a
// `bool` on its own line — confirm), the early `return true;` when N is
// already visited, and the `return false;` tail (after 6447) were elided from
// this excerpt. Not compilable as shown.
// Visible algorithm: iterative DFS up the operand edges using caller-provided
// Visited/Worklist state so repeated queries can share work.
6418 /// hasPredecessor - Return true if N is a predecessor of this node.
6419 /// N is either an operand of this node, or can be reached by recursively
6420 /// traversing up the operands.
6421 /// NOTE: This is an expensive method. Use it carefully.
6422 bool SDNode::hasPredecessor(const SDNode *N) const {
6423 SmallPtrSet<const SDNode *, 32> Visited;
6424 SmallVector<const SDNode *, 16> Worklist;
6425 return hasPredecessorHelper(N, Visited, Worklist);
6429 SDNode::hasPredecessorHelper(const SDNode *N,
6430 SmallPtrSetImpl<const SDNode *> &Visited,
6431 SmallVectorImpl<const SDNode *> &Worklist) const {
6432 if (Visited.empty()) {
6433 Worklist.push_back(this);
6435 // Take a look in the visited set. If we've already encountered this node
6436 // we needn't search further.
6437 if (Visited.count(N))
6441 // Haven't visited N yet. Continue the search.
6442 while (!Worklist.empty()) {
6443 const SDNode *M = Worklist.pop_back_val();
6444 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6445 SDNode *Op = M->getOperand(i).getNode();
6446 if (Visited.insert(Op).second)
6447 Worklist.push_back(Op);
// Returns operand Num's value as a zero-extended integer; asserts the index
// is in range and (via the cast) that the operand is a ConstantSDNode.
// NOTE(review): the closing brace was elided from this excerpt.
6456 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6457 assert(Num < NumOperands && "Invalid child # of SDNode!");
6458 return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
// NOTE(review): many lines elided (embedded 6464, 6468-6469 `SDLoc dl`/`i`
// declarations, 6474-6479 ResNE clamping, 6488-6489 extract operands,
// 6498-6512 the switch's default/SELECT/shift cases, 6519-6522 closers).
// `dl`, `i`, and the switch labels referenced below have no visible
// declarations. Not compilable as shown.
// Visible behavior: scalarize an N-wide vector op into up-to-ResNE per-element
// ops (extracting from vector operands, passing scalars through), padding the
// tail with UNDEF, and rebuild the result with BUILD_VECTOR.
6461 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6462 assert(N->getNumValues() == 1 &&
6463 "Can't unroll a vector with multiple results!");
6465 EVT VT = N->getValueType(0);
6466 unsigned NE = VT.getVectorNumElements();
6467 EVT EltVT = VT.getVectorElementType();
6470 SmallVector<SDValue, 8> Scalars;
6471 SmallVector<SDValue, 4> Operands(N->getNumOperands());
6473 // If ResNE is 0, fully unroll the vector op.
6476 else if (NE > ResNE)
6480 for (i= 0; i != NE; ++i) {
6481 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6482 SDValue Operand = N->getOperand(j);
6483 EVT OperandVT = Operand.getValueType();
6484 if (OperandVT.isVector()) {
6485 // A vector operand; extract a single element.
6486 EVT OperandEltVT = OperandVT.getVectorElementType();
6487 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6490 getConstant(i, TLI->getVectorIdxTy()));
6492 // A scalar operand; just use it as is.
6493 Operands[j] = Operand;
6497 switch (N->getOpcode()) {
6499 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands));
6502 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
6509 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6510 getShiftAmountOperand(Operands[0].getValueType(),
6513 case ISD::SIGN_EXTEND_INREG:
6514 case ISD::FP_ROUND_INREG: {
6515 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6516 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6518 getValueType(ExtVT)));
6523 for (; i < ResNE; ++i)
6524 Scalars.push_back(getUNDEF(EltVT));
6526 return getNode(ISD::BUILD_VECTOR, dl,
6527 EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
// NOTE(review): several `return false;`/`return true;` lines and closing
// braces (embedded 6537, 6540-6541, 6554-6556, 6563, 6567, 6571-6574,
// 6583-6585) were elided from this excerpt. Not compilable as shown.
// Visible strategy: prove Loc == BaseLoc + Dist*Bytes via (1) frame-index
// objects with matching sizes, (2) base+constant-offset address arithmetic,
// or (3) matching global values with constant offsets.
6531 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
6532 /// location that is 'Dist' units away from the location that the 'Base' load
6533 /// is loading from.
6534 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
6535 unsigned Bytes, int Dist) const {
6536 if (LD->getChain() != Base->getChain())
6538 EVT VT = LD->getValueType(0);
6539 if (VT.getSizeInBits() / 8 != Bytes)
6542 SDValue Loc = LD->getOperand(1);
6543 SDValue BaseLoc = Base->getOperand(1);
6544 if (Loc.getOpcode() == ISD::FrameIndex) {
6545 if (BaseLoc.getOpcode() != ISD::FrameIndex)
6547 const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
6548 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
6549 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6550 int FS = MFI->getObjectSize(FI);
6551 int BFS = MFI->getObjectSize(BFI);
6552 if (FS != BFS || FS != (int)Bytes) return false;
6553 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6557 if (isBaseWithConstantOffset(Loc)) {
6558 int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
6559 if (Loc.getOperand(0) == BaseLoc) {
6560 // If the base location is a simple address with no offset itself, then
6561 // the second load's first add operand should be the base address.
6562 if (LocOffset == Dist * (int)Bytes)
6564 } else if (isBaseWithConstantOffset(BaseLoc)) {
6565 // The base location itself has an offset, so subtract that value from the
6566 // second load's offset before comparing to distance * size.
6568 cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
6569 if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
6570 if ((LocOffset - BOffset) == Dist * (int)Bytes)
6575 const GlobalValue *GV1 = nullptr;
6576 const GlobalValue *GV2 = nullptr;
6577 int64_t Offset1 = 0;
6578 int64_t Offset2 = 0;
6579 bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6580 bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6581 if (isGA1 && isGA2 && GV1 == GV2)
6582 return Offset1 == (Offset2 + Dist*Bytes);
// NOTE(review): the guard around the GlobalValue early return (embedded 6600,
// 6602-6603), the tail of the MinAlign call (after 6619), and the final
// `return 0;` were elided from this excerpt. Not compilable as shown.
// Visible strategy: (1) for global+offset, derive alignment from known-zero
// low bits of the address; (2) for (possibly offset) frame indices, derive it
// from the stack object's alignment.
6587 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
6588 /// it cannot be inferred.
6589 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
6590 // If this is a GlobalAddress + cst, return the alignment.
6591 const GlobalValue *GV;
6592 int64_t GVOffset = 0;
6593 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
6594 unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
6595 APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
6596 llvm::computeKnownBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
6597 TLI->getDataLayout());
6598 unsigned AlignBits = KnownZero.countTrailingOnes();
6599 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
6601 return MinAlign(Align, GVOffset);
6604 // If this is a direct reference to a stack slot, use information about the
6605 // stack slot's alignment.
6606 int FrameIdx = 1 << 31;
6607 int64_t FrameOffset = 0;
6608 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6609 FrameIdx = FI->getIndex();
6610 } else if (isBaseWithConstantOffset(Ptr) &&
6611 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6613 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6614 FrameOffset = Ptr.getConstantOperandVal(1);
6617 if (FrameIdx != (1 << 31)) {
6618 const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6619 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
// NOTE(review): elided lines include GetSplitDestVTs' `EVT LoVT, HiVT;`
// declaration (embedded 6631) and else-branch brace, SplitVector's trailing
// HiVT parameter and the `SDValue Lo, Hi;` declaration (6647, 6651),
// ExtractVectorElements' `if (Count == 0)` guard and `SDLoc SL` (6663, 6668),
// and the closing braces. Not compilable as shown.
6627 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
6628 /// which is split (or expanded) into two not necessarily identical pieces.
6629 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
6630 // Currently all types are split in half.
6632 if (!VT.isVector()) {
6633 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
6635 unsigned NumElements = VT.getVectorNumElements();
6636 assert(!(NumElements & 1) && "Splitting vector, but not in half!");
6637 LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
6640 return std::make_pair(LoVT, HiVT);
6643 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
6645 std::pair<SDValue, SDValue>
6646 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
6648 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
6649 N.getValueType().getVectorNumElements() &&
6650 "More vector elements requested than available!");
6652 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
6653 getConstant(0, TLI->getVectorIdxTy()));
6654 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
6655 getConstant(LoVT.getVectorNumElements(), TLI->getVectorIdxTy()));
6656 return std::make_pair(Lo, Hi);
6659 void SelectionDAG::ExtractVectorElements(SDValue Op,
6660 SmallVectorImpl<SDValue> &Args,
6661 unsigned Start, unsigned Count) {
6662 EVT VT = Op.getValueType();
6664 Count = VT.getVectorNumElements();
6666 EVT EltVT = VT.getVectorElementType();
6667 EVT IdxTy = TLI->getVectorIdxTy();
6669 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
6670 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
6671 Op, getConstant(i, IdxTy)));
// Two small accessors. ConstantPoolSDNode::getType dispatches on whether the
// entry is a target-specific machine constant-pool value or a plain Constant.
// NOTE(review): closing braces (embedded 6678, 6685) were elided from this
// excerpt.
6675 // getAddressSpace - Return the address space this GlobalAddress belongs to.
6676 unsigned GlobalAddressSDNode::getAddressSpace() const {
6677 return getGlobal()->getType()->getAddressSpace();
6681 Type *ConstantPoolSDNode::getType() const {
6682 if (isMachineConstantPoolEntry())
6683 return Val.MachineCPVal->getType();
6684 return Val.ConstVal->getType();
// NOTE(review): elided lines include parameters at embedded 6688/6690
// (SplatUndef and HasAnyUndefs by the names used below — confirm against
// upstream), the `return false;` on MinSplatBits > sz and non-constant
// element (6697-6698, 6722-6725), the halving `while` loop header and
// SplatBitSize updates (6728-6731, 6741-6752). Not compilable as shown.
// Visible algorithm: concatenate all element bits (endian-aware), then
// repeatedly fold the value in half while both halves agree modulo undef
// bits, yielding the smallest splat element size.
6687 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6689 unsigned &SplatBitSize,
6691 unsigned MinSplatBits,
6692 bool isBigEndian) const {
6693 EVT VT = getValueType(0);
6694 assert(VT.isVector() && "Expected a vector type");
6695 unsigned sz = VT.getSizeInBits();
6696 if (MinSplatBits > sz)
6699 SplatValue = APInt(sz, 0);
6700 SplatUndef = APInt(sz, 0);
6702 // Get the bits. Bits with undefined values (when the corresponding element
6703 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6704 // in SplatValue. If any of the values are not constant, give up and return
6706 unsigned int nOps = getNumOperands();
6707 assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6708 unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6710 for (unsigned j = 0; j < nOps; ++j) {
6711 unsigned i = isBigEndian ? nOps-1-j : j;
6712 SDValue OpVal = getOperand(i);
6713 unsigned BitPos = j * EltBitSize;
6715 if (OpVal.getOpcode() == ISD::UNDEF)
6716 SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6717 else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6718 SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6719 zextOrTrunc(sz) << BitPos;
6720 else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6721 SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
6726 // The build_vector is all constants or undefs. Find the smallest element
6727 // size that splats the vector.
6729 HasAnyUndefs = (SplatUndef != 0);
6732 unsigned HalfSize = sz / 2;
6733 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6734 APInt LowValue = SplatValue.trunc(HalfSize);
6735 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6736 APInt LowUndef = SplatUndef.trunc(HalfSize);
6738 // If the two halves do not match (ignoring undef bits), stop here.
6739 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6740 MinSplatBits > HalfSize)
6743 SplatValue = HighValue | LowValue;
6744 SplatUndef = HighUndef & LowUndef;
// NOTE(review): elided lines include getSplatValue's `SDValue Splatted;`
// declaration (embedded ~6758), the `Splatted = Op;` assignment (6765), the
// `return SDValue();` on mismatch (6767-6768), the `if (!Splatted)` guard
// before the assert (6771), its final return, and the `return true;` /
// `return false;` tails of isConstant. Not compilable as shown.
6753 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
6754 if (UndefElements) {
6755 UndefElements->clear();
6756 UndefElements->resize(getNumOperands());
6759 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
6760 SDValue Op = getOperand(i);
6761 if (Op.getOpcode() == ISD::UNDEF) {
6763 (*UndefElements)[i] = true;
6764 } else if (!Splatted) {
6766 } else if (Splatted != Op) {
6772 assert(getOperand(0).getOpcode() == ISD::UNDEF &&
6773 "Can only have a splat without a constant for all undefs.");
6774 return getOperand(0);
// Thin wrappers: downcast the splat value (if any) to the requested
// constant node kind, returning null when there is no uniform splat.
6781 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
6782 return dyn_cast_or_null<ConstantSDNode>(
6783 getSplatValue(UndefElements).getNode());
6787 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
6788 return dyn_cast_or_null<ConstantFPSDNode>(
6789 getSplatValue(UndefElements).getNode());
6792 bool BuildVectorSDNode::isConstant() const {
6793 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
6794 unsigned Opc = getOperand(i).getOpcode();
6795 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
// NOTE(review): the `unsigned i, e;` declarations (embedded ~6803), the empty
// loop body terminator, the `return false;` / `return true;` tails, and the
// closing brace were elided from this excerpt. Not compilable as shown.
// Visible check: skip leading undef (<0) entries, then require every
// remaining non-undef entry to equal the first defined index.
6801 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6802 // Find the first non-undef value in the shuffle mask.
6804 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6807 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6809 // Make sure all remaining elements are either undef or the same as the first
6811 for (int Idx = Mask[i]; i != e; ++i)
6812 if (Mask[i] >= 0 && Mask[i] != Idx)
// NOTE(review): elided lines include the early `return;` after the Checked
// test, the abort()/brace after the cycle diagnostic (embedded 6832-6834),
// the `Checked.insert(N);` bookkeeping after the recursion, and the remaining
// parameters of the public checkForCycles overload (6845-6851, presumably
// including the `force` flag and NDEBUG conditionals — confirm against
// upstream). Not compilable as shown.
6818 static void checkForCyclesHelper(const SDNode *N,
6819 SmallPtrSetImpl<const SDNode*> &Visited,
6820 SmallPtrSetImpl<const SDNode*> &Checked,
6821 const llvm::SelectionDAG *DAG) {
6822 // If this node has already been checked, don't check it again.
6823 if (Checked.count(N))
6826 // If a node has already been visited on this depth-first walk, reject it as
6828 if (!Visited.insert(N).second) {
6829 errs() << "Detected cycle in SelectionDAG\n";
6830 dbgs() << "Offending node:\n";
6831 N->dumprFull(DAG); dbgs() << "\n";
6835 for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6836 checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked, DAG);
6843 void llvm::checkForCycles(const llvm::SDNode *N,
6844 const llvm::SelectionDAG *DAG,
6852 assert(N && "Checking nonexistent SDNode");
6853 SmallPtrSet<const SDNode*, 32> visited;
6854 SmallPtrSet<const SDNode*, 32> checked;
6855 checkForCyclesHelper(N, visited, checked, DAG);
6860 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
6861 checkForCycles(DAG->getRoot().getNode(), DAG, force);