//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>

using namespace llvm;
/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}
// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//
/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
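
// Note: bitwiseIsEqual distinguishes -0.0 from +0.0 and compares NaNs by
// their exact bit pattern, which operator== on double cannot do.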
bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//
/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
    if (!CN->isNullValue())
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
    if (!CFPN->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantSDNode or undef.
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }

  return true;
}
/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}
/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}
ISD::NodeType ISD::getExtForLoadExtType(ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}
/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
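
// Example: SETLT has only the L bit set, so swapping the L and G bits yields
// SETGT; (X < Y) is exactly (Y > X).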
/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
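
// Example: for integers, SETEQ ^ 7 sets the L and G bits and clears E,
// producing SETNE (i.e. less-than or greater-than).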
/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if the result is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}
/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns zero if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//
/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
  ID.AddInteger(OpC);
}
/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}
/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}
static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}
/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leafs with special info.
  AddNodeIDCustom(ID, N);
}
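
// Note: this overload must produce exactly the same FoldingSet profile as the
// piecewise AddNodeIDNode used at node-creation time, or CSE lookups would
// miss structurally identical nodes.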
/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}
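
// Resulting bit layout: [7] invariant, [6] non-temporal, [5] volatile,
// [4:2] indexed addressing mode, [1:0] extension/truncation type.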
//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//
/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}
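
// Note: glue (MVT::Glue) results tie a node to its immediate neighbors in the
// scheduled output, so two glue-producing nodes are never interchangeable and
// must not be unified by CSE.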
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}
/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}
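
// Note: operands are detached one SDUse at a time so each operand's use count
// is updated immediately; any operand whose last use disappears joins the
// worklist and is reclaimed in the same pass.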
void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}
void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}
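
// Note: DeleteNode assumes the caller already knows N is dead (use_empty);
// RemoveDeadNode additionally walks N's operands and reclaims anything that
// becomes unreachable as a result.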
void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}
void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}
/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}
/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops, unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}
#ifndef NDEBUG
/// VerifyNodeCommon - Sanity check the given node. Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that are
  // not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}
/// VerifyMachineNode - Sanity check the given MachineNode. Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}
#endif // NDEBUG
/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
}
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TSI(*tm.getSelectionDAGInfo()), TLI(nullptr), OptLevel(OL),
    EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
    Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
    UpdateListeners(nullptr) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}
void SelectionDAG::init(MachineFunction &mf, const TargetLowering *tli) {
  MF = &mf;
  TLI = tli;
  Context = &mf.getFunction()->getContext();
}
SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}
void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}
void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}
SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}
SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}
SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}
SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}
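
// Example: getNOT(DL, V, MVT::i32) builds (xor V, 0xFFFFFFFF); for a vector
// VT the all-ones constant is splatted across every element.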
SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT, isO);
}
SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT, bool isO)
{
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT, isO);
}
SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT,
                                  bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  const TargetLowering *TLI = TM.getTargetLowering();

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
           TargetLowering::TypeExpandInteger) {
    APInt NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                           .trunc(ViaEltSizeInBits),
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (TLI->isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
                             getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
                                     Ops));
    return Result;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), nullptr, 0);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
  }
  return Result;
}
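
// Note: for vector types the scalar ConstantSDNode is CSE'd first and then
// splatted via BUILD_VECTOR, so identical splats share one scalar node.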
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
}
SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}
SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), nullptr, 0);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME SDLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
  }
  return Result;
}
SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
           EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}
SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");
  const TargetLowering *TLI = TM.getTargetLowering();

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->getAliasedGlobal());
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
    TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
    TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), nullptr, 0);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), nullptr, 0);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}
// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}
SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If Identity shuffle return that node.
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
  if (N2Undef && N1.getOpcode() == ISD::BUILD_VECTOR)
    if (cast<BuildVectorSDNode>(N1)->getConstantSplatValue())
      return N1;

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
                                            dl.getDebugLoc(), N1, N2,
                                            MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
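
// Example: shuffle<4,5,6,7>(W, V) reads only from V, so the operands are
// commuted to shuffle<0,1,2,3>(V, undef), an identity mask that folds to V.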
SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void* IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
                                                           dl.getDebugLoc(),
                                                           Ops, 5, Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), nullptr, 0);
  ID.AddInteger(RegNo);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), nullptr, 0);
  ID.AddPointer(RegMask);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
                                                dl.getDebugLoc(), Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), nullptr, 0);
  ID.AddPointer(V);

  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), nullptr, 0);
  ID.AddPointer(MD);

  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
/// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), &Ops[0], 1);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
                                                      dl.getDebugLoc(),
                                                      VT, Ptr, SrcAS, DestAS);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, SDLoc(Op), ShTy, Op);
}
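
// Example: an i64 shift amount is truncated when the target's shift-amount
// type is narrower (e.g. i32), and zero-extended when it is wider; vector
// shift amounts are passed through unchanged.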
/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned StackAlign =
  std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}
/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  const DataLayout *TD = TLI->getDataLayout();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}
1663 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
1664 SDValue N2, ISD::CondCode Cond, SDLoc dl) {
1665 // These setcc operations always fold.
1669 case ISD::SETFALSE2: return getConstant(0, VT);
1671 case ISD::SETTRUE2: {
1672 const TargetLowering *TLI = TM.getTargetLowering();
1673 TargetLowering::BooleanContent Cnt = TLI->getBooleanContents(VT.isVector());
1675 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
1688 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1692 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
1693 const APInt &C2 = N2C->getAPIntValue();
1694 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1695 const APInt &C1 = N1C->getAPIntValue();
1698 default: llvm_unreachable("Unknown integer setcc!");
1699 case ISD::SETEQ: return getConstant(C1 == C2, VT);
1700 case ISD::SETNE: return getConstant(C1 != C2, VT);
1701 case ISD::SETULT: return getConstant(C1.ult(C2), VT);
1702 case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
1703 case ISD::SETULE: return getConstant(C1.ule(C2), VT);
1704 case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
1705 case ISD::SETLT: return getConstant(C1.slt(C2), VT);
1706 case ISD::SETGT: return getConstant(C1.sgt(C2), VT);
1707 case ISD::SETLE: return getConstant(C1.sle(C2), VT);
1708 case ISD::SETGE: return getConstant(C1.sge(C2), VT);
1712 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
1713 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
1714 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
      MVT CompVT = N1.getValueType().getSimpleVT();
      if (!TM.getTargetLowering()->isCondCodeLegal(SwappedCond, CompVT))
        return SDValue();

      return getSetCC(dl, VT, N2, N1, SwappedCond);
    }
  }

  // Could not fold it.
  return SDValue();
}
/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
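/// For example, for an i32 value this asks whether bit 31 is provably zero.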
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  // This predicate is not safe for vector operations.
  if (Op.getValueType().isVector())
    return false;

  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
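/// For example, a Mask of 0xFF00 asks whether bits 15..8 of V are all
/// provably zero.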
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
                                     unsigned Depth) const {
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bitsets. This code only analyzes bits in Mask, in order to short-circuit
/// processing.
void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, unsigned Depth) const {
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();

  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
  if (Depth == 6)
    return;  // Limit search depth.

  APInt KnownZero2, KnownOne2;

  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
    KnownZero = ~KnownOne;
    break;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR: {
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    break;
  }
  case ISD::MUL: {
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
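    // Trailing zero bits add across a multiply: e.g. a multiple of 4 (two
    // trailing zeros) times a multiple of 8 (three trailing zeros) is always
    // a multiple of 32 (five trailing zeros).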
    KnownOne.clearAllBits();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                              KnownZero2.countLeadingOnes(),
                              BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
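    // e.g. dividing by a value known to be at least 16 is, for leading-zero
    // purposes, at least as good as a logical right shift by 4.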
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case ISD::SELECT:
    ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents. Fall through.
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
        TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::SHL:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
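    // e.g. the low four bits of (X << 4) are always zero.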
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShAmt;
      KnownOne  <<= ShAmt;
      // low bits known zero.
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
    }
    break;
  case ISD::SRL:
    // (ushr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      KnownZero |= HighBits;  // High bits known zero.
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      // If any of the demanded bits are produced by the sign extension, we
      // also demand the input sign bit.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      // Handle the sign bits.
      APInt SignBit = APInt::getSignBit(BitWidth);
      SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.

      if (KnownZero.intersects(SignBit)) {
        KnownZero |= HighBits;  // New bits are known zero.
      } else if (KnownOne.intersects(SignBit)) {
        KnownOne  |= HighBits;  // New bits are known one.
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = EVT.getScalarType().getSizeInBits();

    // Sign extension. Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignBit = APInt::getSignBit(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignBit = InSignBit.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownOne &= InputDemandedBits;
    KnownZero &= InputDemandedBits;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero.intersects(InSignBit)) {        // Input sign bit known clear
      KnownZero |= NewBits;
      KnownOne  &= ~NewBits;
    } else if (KnownOne.intersects(InSignBit)) {  // Input sign bit known set
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {                                      // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTPOP: {
    unsigned LowBits = Log2_32(BitWidth)+1;
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
    KnownOne.clearAllBits();
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If this is a ZEXTLoad and we are looking at the loaded value.
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarType().getSizeInBits();
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      computeMaskedBitsLoad(*Ranges, KnownZero);
    }
    break;
  }
  case ISD::ZERO_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);

    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

    // Note if the sign bit is known to be zero or one.
    bool SignBitKnownZero = KnownZero.isNegative();
    bool SignBitKnownOne  = KnownOne.isNegative();
    assert(!(SignBitKnownZero && SignBitKnownOne) &&
           "Sign bit can't be known to be both zero and one!");

    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit is known zero or one, the top bits match.
    if (SignBitKnownZero)
      KnownZero |= NewBits;
    else if (SignBitKnownOne)
      KnownOne  |= NewBits;
    break;
  }
  case ISD::ANY_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.zext(InBits);
    KnownOne = KnownOne.zext(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero |= (~InMask);
    KnownOne  &= (~KnownZero);
    break;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;

  case ISD::SUB: {
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (CLHS->getAPIntValue().isNonNegative()) {
        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // non-negative.
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }
  // fall through
  case ISD::ADD:
  case ISD::ADDE: {
    // Output known-0 bits are known if clear or set in both the low clear bits
    // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
    // low 3 bits clear.
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    unsigned KnownZeroOut = KnownZero2.countTrailingOnes();

    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    KnownZeroOut = std::min(KnownZeroOut,
                            KnownZero2.countTrailingOnes());

    if (Op.getOpcode() == ISD::ADD) {
      KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
      break;
    }

    // With ADDE, a carry bit may be added in, so we can only use this
    // information if we know (at least) that the low two bits are clear. We
    // then return to the caller that the low bit is unknown but that other bits
    // are known zero.
    if (KnownZeroOut >= 2) // ADDE
      KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
    break;
  }
  case ISD::SREM:
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
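        // A remainder by a power of two keeps only the low bits: e.g. for
        // RA == 8 the result's magnitude fits in LowBits == 0b111.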
        APInt LowBits = RA - 1;
        ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }
    break;
  case ISD::UREM: {
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        KnownZero |= ~LowBits;
        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    if (unsigned Align = InferPtrAlignment(Op)) {
      // The low bits are known zero if the pointer is aligned.
      KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
      return;
    }
    break;
  default:
    if (Op.getOpcode() < ISD::BUILTIN_OP_END)
      break;
    // Fallthrough
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Allow the target to implement this method for its nodes.
    TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
    break;
  }
}
/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
  const TargetLowering *TLI = TM.getTargetLowering();
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && "Invalid VT!");
  unsigned VTBits = VT.getScalarType().getSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  if (Depth == 6)
    return 1;  // Limit search depth.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::AssertSext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp+1;
  case ISD::AssertZext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp;

  case ISD::Constant: {
    const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
    return Val.getNumSignBits();
  }
  case ISD::SIGN_EXTEND:
    Tmp =
      VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;

  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp =
      cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
    Tmp = VTBits-Tmp+1;

    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    return std::max(Tmp, Tmp2);
  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    // SRA X, C   -> adds C sign bits.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Tmp += C->getZExtValue();
      if (Tmp > VTBits) Tmp = VTBits;
    }
    return Tmp;
  case ISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (C->getZExtValue() >= VTBits ||      // Bad shift.
          C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
      return Tmp - C->getZExtValue();
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;
  case ISD::SELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
    return std::min(Tmp, Tmp2);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents. Fall through.
  case ISD::SETCC:
    // If setcc returns 0/-1, all bits are sign bits.
    if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned RotAmt = C->getZExtValue() & (VTBits-1);

      // Handle rotate right by N like a rotate left by 32-N.
      if (Op.getOpcode() == ISD::ROTR)
        RotAmt = (VTBits-RotAmt) & (VTBits-1);

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left. This handles rotl(sext(x), 1) for example.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (Tmp > RotAmt+1) return Tmp-RotAmt;
    }
    break;
  case ISD::ADD:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero, KnownOne;
        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;
  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero, KnownOne;
        ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
  case ISD::TRUNCATE:
    // FIXME: it's tricky to do anything useful for this, but it is an important
    // case for targets like X86.
    break;
  }
  // If we are looking at the loaded value of the SDNode.
  if (Op.getResNo() == 0) {
    // Handle LOADX separately here. EXTLOAD case will fallthrough.
    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
      unsigned ExtType = LD->getExtensionType();
      switch (ExtType) {
        default: break;
        case ISD::SEXTLOAD:    // '17' bits known
          Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
          return VTBits-Tmp+1;
        case ISD::ZEXTLOAD:    // '16' bits known
          Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
          return VTBits-Tmp;
      }
    }
  }
  // Allow the target to implement this method for its nodes.
  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_VOID) {
    unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
    if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
  }
  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);

  APInt Mask;
  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-VTBits;
  // Return # leading zeros. We use 'min' here in case Val was zero before
  // shifting. We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
}
/// isBaseWithConstantOffset - Return true if the specified operand is an
/// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
/// ISD::OR with a ConstantSDNode that is guaranteed to have the same
/// semantics as an ADD. This handles the equivalence:
///     X|Cst == X+Cst iff X&Cst == 0.
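/// For example, if X is known 4-byte aligned, X|3 computes the same value
/// as X+3.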
bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
  if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
      !isa<ConstantSDNode>(Op.getOperand(1)))
    return false;

  if (Op.getOpcode() == ISD::OR &&
      !MaskedValueIsZero(Op.getOperand(0),
                         cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
    return false;

  return true;
}
bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
  // If we're told that NaNs won't happen, assume they won't.
  if (getTarget().Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->getValueAPF().isNaN();

  // TODO: Recognize more cases here.

  return false;
}
bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
  // If the value is a constant, we can obviously see if it is a zero or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->isZero();

  // TODO: Recognize more cases here.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::OR:
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      return !C->isNullValue();
    break;
  }

  return false;
}
bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
  // Check the obvious case.
  if (A == B) return true;

  // For negative and positive zero.
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}
/// getNode - Gets or creates the specified node.
///
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, getVTList(VT), nullptr, 0);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), getVTList(VT));
  CSEMap.InsertNode(N, IP);

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
                              EVT VT, SDValue Operand) {
  // Constant fold unary operations with an integer constant operand. Even
  // opaque constants will be folded, because the folding of unary operations
  // doesn't create new constants with different values. Nevertheless, the
  // opaque flag is preserved during folding to prevent future folding with
  // other constants.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
    const APInt &Val = C->getAPIntValue();
    switch (Opcode) {
    default: break;
    case ISD::SIGN_EXTEND:
      return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT,
                         C->isTargetOpcode(), C->isOpaque());
    case ISD::ANY_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::TRUNCATE:
      return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT,
                         C->isTargetOpcode(), C->isOpaque());
    case ISD::UINT_TO_FP:
    case ISD::SINT_TO_FP: {
      APFloat apf(EVTToAPFloatSemantics(VT),
                  APInt::getNullValue(VT.getSizeInBits()));
      (void)apf.convertFromAPInt(Val,
                                 Opcode==ISD::SINT_TO_FP,
                                 APFloat::rmNearestTiesToEven);
      return getConstantFP(apf, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
        return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
      else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
        return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
      break;
    case ISD::BSWAP:
      return getConstant(Val.byteSwap(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTPOP:
      return getConstant(Val.countPopulation(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTLZ:
    case ISD::CTLZ_ZERO_UNDEF:
      return getConstant(Val.countLeadingZeros(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTTZ:
    case ISD::CTTZ_ZERO_UNDEF:
      return getConstant(Val.countTrailingZeros(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    }
  }
  // Constant fold unary operations with a floating point constant operand.
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
    APFloat V = C->getValueAPF();    // make copy
    switch (Opcode) {
    case ISD::FNEG:
      V.changeSign();
      return getConstantFP(V, VT);
    case ISD::FABS:
      V.clearSign();
      return getConstantFP(V, VT);
    case ISD::FCEIL: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FTRUNC: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FFLOOR: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FP_EXTEND: {
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, VT);
    }
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT: {
      integerPart x[2];
      bool ignored;
      assert(integerPartWidth >= 64);
      // FIXME need to be more flexible about rounding mode.
      APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
                            Opcode==ISD::FP_TO_SINT,
                            APFloat::rmTowardZero, &ignored);
      if (s==APFloat::opInvalidOp)     // inexact is OK, in fact usual
        break;
      APInt api(VT.getSizeInBits(), x);
      return getConstant(api, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
        return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
      else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
        return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
      break;
    }
  }
  unsigned OpOpcode = Operand.getNode()->getOpcode();
  switch (Opcode) {
  case ISD::TokenFactor:
  case ISD::MERGE_VALUES:
  case ISD::CONCAT_VECTORS:
    return Operand;         // Factor, merge or concat of one node? No need.
  case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
  case ISD::FP_EXTEND:
    assert(VT.isFloatingPoint() &&
           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
    if (Operand.getValueType() == VT) return Operand;  // noop conversion.
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (Operand.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SIGN_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid sext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // sext(undef) = 0, because the top bits will all be the same.
      return getConstant(0, VT);
    break;
  case ISD::ZERO_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ZERO_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid zext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
      return getNode(ISD::ZERO_EXTEND, DL, VT,
                     Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // zext(undef) = 0, because the top bits will be zero.
      return getConstant(0, VT);
    break;
  case ISD::ANY_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ANY_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid anyext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");

    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND)
      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    // (ext (trunc x)) -> x
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = Operand.getNode()->getOperand(0);
      if (OpOp.getValueType() == VT)
        return OpOp;
    }
    break;
  case ISD::TRUNCATE:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid TRUNCATE!");
    if (Operand.getValueType() == VT) return Operand;   // noop truncate
    assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
           "Invalid truncate node, src < dst!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::TRUNCATE)
      return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
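      // Three sub-cases for (trunc (ext X)):
      //   X narrower than VT -> still need an extend of X
      //   X wider than VT    -> truncate X directly
      //   X same size as VT  -> X itself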
      // If the source is smaller than the dest, we still need an extend.
      if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
            .bitsLT(VT.getScalarType()))
        return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
      if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
        return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
      return Operand.getNode()->getOperand(0);
    }
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BITCAST:
    // Basic sanity checking.
    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits() &&
           "Cannot BITCAST between types of different sizes!");
    if (VT == Operand.getValueType()) return Operand;  // noop conversion.
    if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
      return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SCALAR_TO_VECTOR:
    assert(VT.isVector() && !Operand.getValueType().isVector() &&
           (VT.getVectorElementType() == Operand.getValueType() ||
            (VT.getVectorElementType().isInteger() &&
             Operand.getValueType().isInteger() &&
             VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
           "Illegal SCALAR_TO_VECTOR node!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
        isa<ConstantSDNode>(Operand.getOperand(1)) &&
        Operand.getConstantOperandVal(1) == 0 &&
        Operand.getOperand(0).getValueType() == VT)
      return Operand.getOperand(0);
    break;
  case ISD::FNEG:
    // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
    if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
      return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
                     Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::FNEG)  // --X -> X
      return Operand.getNode()->getOperand(0);
    break;
  case ISD::FABS:
    if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
      return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
    break;
  }
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) { // Don't CSE flag producing nodes
    FoldingSetNodeID ID;
    SDValue Ops[1] = { Operand };
    AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
    void *IP = nullptr;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
                                             SDNode *Cst1, SDNode *Cst2) {
  // If the opcode is a target-specific ISD node, there's nothing we can
  // do here and the operand rules may not line up with the below, so
  // bail early.
  if (Opcode >= ISD::BUILTIN_OP_END)
    return SDValue();

  SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
  SmallVector<SDValue, 4> Outputs;
  EVT SVT = VT.getScalarType();

  ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
  ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
  if (Scalar1 && Scalar2 && (Scalar1->isOpaque() || Scalar2->isOpaque()))
    return SDValue();

  if (Scalar1 && Scalar2)
    // Scalar instruction.
    Inputs.push_back(std::make_pair(Scalar1, Scalar2));
  else {
    // For vectors extract each constant element into Inputs so we can constant
    // fold them individually.
    BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
    BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
    if (!BV1 || !BV2)
      return SDValue();

    assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");

    for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
      ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
      ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
      if (!V1 || !V2) // Not a constant, bail.
        return SDValue();

      if (V1->isOpaque() || V2->isOpaque())
        return SDValue();

      // Avoid BUILD_VECTOR nodes that perform implicit truncation.
      // FIXME: This is valid and could be handled by truncating the APInts.
      if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
        return SDValue();

      Inputs.push_back(std::make_pair(V1, V2));
    }
  }
  // We have a number of constant values, constant fold them element by element.
  for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
    const APInt &C1 = Inputs[I].first->getAPIntValue();
    const APInt &C2 = Inputs[I].second->getAPIntValue();

    switch (Opcode) {
    case ISD::ADD:
      Outputs.push_back(getConstant(C1 + C2, SVT));
      break;
    case ISD::SUB:
      Outputs.push_back(getConstant(C1 - C2, SVT));
      break;
    case ISD::MUL:
      Outputs.push_back(getConstant(C1 * C2, SVT));
      break;
    case ISD::UDIV:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.udiv(C2), SVT));
      break;
    case ISD::UREM:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.urem(C2), SVT));
      break;
    case ISD::SDIV:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
      break;
    case ISD::SREM:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.srem(C2), SVT));
      break;
    case ISD::AND:
      Outputs.push_back(getConstant(C1 & C2, SVT));
      break;
    case ISD::OR:
      Outputs.push_back(getConstant(C1 | C2, SVT));
      break;
    case ISD::XOR:
      Outputs.push_back(getConstant(C1 ^ C2, SVT));
      break;
    case ISD::SHL:
      Outputs.push_back(getConstant(C1 << C2, SVT));
      break;
    case ISD::SRL:
      Outputs.push_back(getConstant(C1.lshr(C2), SVT));
      break;
    case ISD::SRA:
      Outputs.push_back(getConstant(C1.ashr(C2), SVT));
      break;
    case ISD::ROTL:
      Outputs.push_back(getConstant(C1.rotl(C2), SVT));
      break;
    case ISD::ROTR:
      Outputs.push_back(getConstant(C1.rotr(C2), SVT));
      break;
    default:
      return SDValue();
    }
  }
  // Handle the scalar case first.
  if (Scalar1 && Scalar2)
    return Outputs.back();

  // Otherwise build a big vector out of the scalar elements we generated.
  return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
                              SDValue N2) {
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  switch (Opcode) {
  default: break;
  case ISD::TokenFactor:
    assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
           N2.getValueType() == MVT::Other && "Invalid token factor!");
    // Fold trivial token factors.
    if (N1.getOpcode() == ISD::EntryToken) return N2;
    if (N2.getOpcode() == ISD::EntryToken) return N1;
    if (N1 == N2) return N1;
    break;
  case ISD::CONCAT_VECTORS:
    // Concat of UNDEFs is UNDEF.
    if (N1.getOpcode() == ISD::UNDEF &&
        N2.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // A CONCAT_VECTORS with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
    }
    break;
  case ISD::AND:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
    // worth handling here.
    if (N2C && N2C->isNullValue())
      return N2;
    if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
      return N1;
    break;
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
    // it's worth handling here.
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::SREM:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    if (getTarget().Options.UnsafeFPMath) {
      if (Opcode == ISD::FADD) {
        // 0+x --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
          if (CFP->getValueAPF().isZero())
            return N2;
        // x+0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FSUB) {
        // x-0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FMUL) {
        ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
        SDValue V = N2;

        // If the first operand isn't the constant, try the second
        if (!CFP) {
          CFP = dyn_cast<ConstantFPSDNode>(N2);
          V = N1;
        }

        if (CFP) {
          // 0*x --> 0
          if (CFP->isZero())
            return SDValue(CFP,0);
          // 1*x --> x
          if (CFP->isExactlyValue(1.0))
            return V;
        }
      }
    }
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FCOPYSIGN:   // N1 and result must match. N1/N2 need not match.
    assert(N1.getValueType() == VT &&
           N1.getValueType().isFloatingPoint() &&
           N2.getValueType().isFloatingPoint() &&
           "Invalid FCOPYSIGN!");
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    assert(VT == N1.getValueType() &&
           "Shift operators' return type must be the same as their first arg");
    assert(VT.isInteger() && N2.getValueType().isInteger() &&
           "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must have the same type as their first arg");
    // Verify that the shift amount VT is big enough to hold valid shift
    // amounts. This catches things like trying to shift an i1024 value by an
    // i8, which is easy to fall into in generic code that uses
    // TLI.getShiftAmount().
    assert(N2.getValueType().getSizeInBits() >=
           Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
           "Invalid use of small shift amount with oversized value!");

    // Always fold shifts of i1 values so the code generator doesn't need to
    // handle them. Since we know the size of the shift has to be less than the
    // size of the value, the shift/rotate count is guaranteed to be zero.
    if (VT == MVT::i1)
      return N1;
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::FP_ROUND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg round!");
    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
           "Cannot FP_ROUND_INREG integer types");
    assert(EVT.isVector() == VT.isVector() &&
           "FP_ROUND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in FP_ROUND_INREG");
    assert(EVT.bitsLE(VT) && "Not rounding down!");
    (void)EVT;
    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
    break;
  }
  case ISD::FP_ROUND:
    assert(VT.isFloatingPoint() &&
           N1.getValueType().isFloatingPoint() &&
           VT.bitsLE(N1.getValueType()) &&
           isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
    if (N1.getValueType() == VT) return N1;  // noop conversion.
    break;
  case ISD::AssertSext:
  case ISD::AssertZext: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(!EVT.isVector() &&
           "AssertSExt/AssertZExt type should be the vector element type "
           "rather than the vector type!");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (VT == EVT) return N1; // noop assertion.
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(EVT.isVector() == VT.isVector() &&
           "SIGN_EXTEND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in SIGN_EXTEND_INREG");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (EVT == VT) return N1;  // Not actually extending
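    // Constant fold by shifting the value left so the source sign bit becomes
    // the container's sign bit, then arithmetic-shifting it back down to
    // replicate that bit: e.g. sext_inreg(0x00FF, i8) in i16 yields 0xFFFF.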
    if (N1C) {
      APInt Val = N1C->getAPIntValue();
      unsigned FromBits = EVT.getScalarType().getSizeInBits();
      Val <<= Val.getBitWidth()-FromBits;
      Val = Val.ashr(Val.getBitWidth()-FromBits);
      return getConstant(Val, VT);
    }
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
    if (N1.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
    // expanding copies of large vectors from registers.
    if (N2C &&
        N1.getOpcode() == ISD::CONCAT_VECTORS &&
        N1.getNumOperands() > 0) {
      unsigned Factor =
        N1.getOperand(0).getValueType().getVectorNumElements();
      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                     N1.getOperand(N2C->getZExtValue() / Factor),
                     getConstant(N2C->getZExtValue() % Factor,
                                 N2.getValueType()));
    }
    // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
    // expanding large vector constants.
    if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue Elt = N1.getOperand(N2C->getZExtValue());

      if (VT != Elt.getValueType())
        // If the vector element type is not legal, the BUILD_VECTOR operands
        // are promoted and implicitly truncated, and the result implicitly
        // extended. Make that explicit here.
        Elt = getAnyExtOrTrunc(Elt, DL, VT);

      return Elt;
    }
    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
    // operations are lowered to scalars.
    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      // If the indices are the same, return the inserted element else
      // if the indices are known different, extract the element from
      // the original vector.
      SDValue N1Op2 = N1.getOperand(2);
      ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());

      if (N1Op2C && N2C) {
        if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
          if (VT == N1.getOperand(1).getValueType())
            return N1.getOperand(1);
          else
            return getSExtOrTrunc(N1.getOperand(1), DL, VT);
        }

        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
      }
    }
    break;
  case ISD::EXTRACT_ELEMENT:
    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
    assert(!N1.getValueType().isVector() && !VT.isVector() &&
           (N1.getValueType().isInteger() == VT.isInteger()) &&
           N1.getValueType() != VT &&
           "Wrong types for EXTRACT_ELEMENT!");

    // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
    // 64-bit integers into 32-bit parts. Instead of building the extract of
    // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
    if (N1.getOpcode() == ISD::BUILD_PAIR)
      return N1.getOperand(N2C->getZExtValue());

    // EXTRACT_ELEMENT of a constant int is also very common.
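    // e.g. extracting element 1 (the high half) of the i64 constant
    // 0x1111222233334444 as an i32 yields 0x11112222.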
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      unsigned ElementSize = VT.getSizeInBits();
      unsigned Shift = ElementSize * N2C->getZExtValue();
      APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
      return getConstant(ShiftedVal.trunc(ElementSize), VT);
    }
    break;
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue Index = N2;
    if (VT.isSimple() && N1.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             "Extract subvector VTs must be vectors!");
      assert(VT.getVectorElementType() ==
             N1.getValueType().getVectorElementType() &&
             "Extract subvector VTs must have the same element type!");
      assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
             "Extract subvector must be from larger vector to smaller vector!");

      if (isa<ConstantSDNode>(Index.getNode())) {
        assert((VT.getVectorNumElements() +
                cast<ConstantSDNode>(Index.getNode())->getZExtValue()
                <= N1.getValueType().getVectorNumElements())
               && "Extract subvector overflow!");
      }

      // Trivial extraction.
      if (VT.getSimpleVT() == N1.getSimpleValueType())
        return N1;
    }
    break;
  }
  }
  // Perform trivial constant folding.
  SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
  if (SV.getNode()) return SV;

  // Canonicalize constant to RHS if commutative.
  if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
    std::swap(N1C, N2C);
    std::swap(N1, N2);
  }

  // Constant fold FP operations.
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
  ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
  if (N1CFP) {
    if (!N2CFP && isCommutativeBinOp(Opcode)) {
      // Canonicalize constant to RHS if commutative.
      std::swap(N1CFP, N2CFP);
      std::swap(N1, N2);
    } else if (N2CFP) {
      APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
      APFloat::opStatus s;
      switch (Opcode) {
      case ISD::FADD:
        s = V1.add(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FSUB:
        s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FMUL:
        s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FDIV:
        s = V1.divide(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp && s != APFloat::opDivByZero)
          return getConstantFP(V1, VT);
        break;
      case ISD::FREM:
        s = V1.mod(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp && s != APFloat::opDivByZero)
          return getConstantFP(V1, VT);
        break;
      case ISD::FCOPYSIGN:
        V1.copySign(V2);
        return getConstantFP(V1, VT);
      default: break;
      }
    }
    if (Opcode == ISD::FP_ROUND) {
      APFloat V = N1CFP->getValueAPF();    // make copy
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, VT);
    }
  }
  // Canonicalize an UNDEF to the RHS, even over a constant.
  if (N1.getOpcode() == ISD::UNDEF) {
    if (isCommutativeBinOp(Opcode)) {
      std::swap(N1, N2);
    } else {
      switch (Opcode) {
      case ISD::FP_ROUND_INREG:
      case ISD::SIGN_EXTEND_INREG:
      case ISD::SUB:
      case ISD::FSUB:
      case ISD::FDIV:
      case ISD::FREM:
      case ISD::SRA:
        return N1;     // fold op(undef, arg2) -> undef
      case ISD::UDIV:
      case ISD::SDIV:
      case ISD::UREM:
      case ISD::SREM:
      case ISD::SRL:
      case ISD::SHL:
        if (!VT.isVector())
          return getConstant(0, VT);    // fold op(undef, arg2) -> 0
        // For vectors, we can't easily build an all zero vector, just return
        // the LHS.
        return N2;
      }
    }
  }
  // Fold a bunch of operators when the RHS is undef.
  if (N2.getOpcode() == ISD::UNDEF) {
    switch (Opcode) {
    case ISD::XOR:
      if (N1.getOpcode() == ISD::UNDEF)
        // Handle undef ^ undef -> 0 special case. This is a common
        // idiom (misuse).
        return getConstant(0, VT);
      // fallthrough
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::SUB:
    case ISD::UDIV:
    case ISD::SDIV:
    case ISD::UREM:
    case ISD::SREM:
      return N2;       // fold op(arg1, undef) -> undef
    case ISD::FADD:
    case ISD::FSUB:
    case ISD::FMUL:
    case ISD::FDIV:
    case ISD::FREM:
      if (getTarget().Options.UnsafeFPMath)
        return N2;
      break;
    case ISD::MUL:
    case ISD::AND:
    case ISD::SRL:
    case ISD::SHL:
      if (!VT.isVector())
        return getConstant(0, VT);  // fold op(arg1, undef) -> 0
      // For vectors, we can't easily build an all zero vector, just return
      // the LHS.
      return N1;
    case ISD::OR:
      if (!VT.isVector())
        return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
      // For vectors, we can't easily build an all one vector, just return
      // the LHS.
      return N1;
    case ISD::SRA:
      return N1;
    }
  }
  // Memoize this node if possible.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) {
    SDValue Ops[] = { N1, N2 };
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
    void *IP = nullptr;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), VTs, N1, N2);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), VTs, N1, N2);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3) {
  // Perform various simplifications.
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  switch (Opcode) {
  case ISD::FMA: {
    ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
    ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
    ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
    if (N1CFP && N2CFP && N3CFP) {
      APFloat V1 = N1CFP->getValueAPF();
      const APFloat &V2 = N2CFP->getValueAPF();
      const APFloat &V3 = N3CFP->getValueAPF();
      APFloat::opStatus s =
        V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
      if (s != APFloat::opInvalidOp)
        return getConstantFP(V1, VT);
    }
    break;
  }
  case ISD::CONCAT_VECTORS:
    // A CONCAT_VECTORS with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR &&
        N3.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
    }
    break;
  case ISD::SETCC: {
    // Use FoldSetCC to simplify SETCC's.
    SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
    if (Simp.getNode()) return Simp;
    break;
  }
  case ISD::SELECT:
    if (N1C) {
      if (N1C->getZExtValue())
        return N2;             // select true, X, Y -> X
      return N3;               // select false, X, Y -> Y
    }

    if (N2 == N3) return N2;   // select C, X, X -> X
    break;
  case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
  case ISD::INSERT_SUBVECTOR: {
    SDValue Index = N3;
    if (VT.isSimple() && N1.getValueType().isSimple()
        && N2.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             N2.getValueType().isVector() &&
             "Insert subvector VTs must be vectors!");
      assert(VT == N1.getValueType() &&
             "Dest and insert subvector source types must match!");
      assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
             "Insert subvector must be from smaller vector to larger vector!");
      if (isa<ConstantSDNode>(Index.getNode())) {
        assert((N2.getValueType().getVectorNumElements() +
                cast<ConstantSDNode>(Index.getNode())->getZExtValue()
                <= VT.getVectorNumElements())
               && "Insert subvector overflow!");
      }

      // Trivial insertion.
      if (VT.getSimpleVT() == N2.getSimpleValueType())
        return N2;
    }
    break;
  }
3478 // Fold bit_convert nodes from a type to themselves.
3479 if (N1.getValueType() == VT)
3484 // Memoize node if it doesn't produce a flag.
3486 SDVTList VTs = getVTList(VT);
3487 if (VT != MVT::Glue) {
3488 SDValue Ops[] = { N1, N2, N3 };
3489 FoldingSetNodeID ID;
3490 AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
3492 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3493 return SDValue(E, 0);
3495 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3496 DL.getDebugLoc(), VTs, N1, N2, N3);
3497 CSEMap.InsertNode(N, IP);
3499 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3500 DL.getDebugLoc(), VTs, N1, N2, N3);
3503 AllNodes.push_back(N);
3507 return SDValue(N, 0);
3510 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3511 SDValue N1, SDValue N2, SDValue N3,
3513 SDValue Ops[] = { N1, N2, N3, N4 };
3514 return getNode(Opcode, DL, VT, Ops);
3517 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3518 SDValue N1, SDValue N2, SDValue N3,
3519 SDValue N4, SDValue N5) {
3520 SDValue Ops[] = { N1, N2, N3, N4, N5 };
3521 return getNode(Opcode, DL, VT, Ops);
3524 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3525 /// the incoming stack arguments to be loaded from the stack.
3526 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3527 SmallVector<SDValue, 8> ArgChains;
3529 // Include the original chain at the beginning of the list. When this is
3530 // used by target LowerCall hooks, this helps the legalizer find the
3531 // CALLSEQ_BEGIN node.
3532 ArgChains.push_back(Chain);
3534 // Add a chain value for each stack argument.
3535 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3536 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3537 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3538 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3539 if (FI->getIndex() < 0)
3540 ArgChains.push_back(SDValue(L, 1));
3542 // Build a tokenfactor for all the chains.
3543 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
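// Illustrative use (a sketch, not from this file): a target's LowerCall hook
// that must adjust the stack before a tail call might write
//   Chain = DAG.getStackArgumentTokenFactor(Chain);
// and chain its outgoing-argument stores on the result, guaranteeing every
// incoming stack argument is loaded before the outgoing frame is written.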
3546 /// getMemsetValue - Vectorized representation of the memset value
3547 /// operand.
3548 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3550 assert(Value.getOpcode() != ISD::UNDEF);
3552 unsigned NumBits = VT.getScalarType().getSizeInBits();
3553 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3554 assert(C->getAPIntValue().getBitWidth() == 8);
3555 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3557 return DAG.getConstant(Val, VT);
3558 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3561 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3563 // Use a multiplication with 0x010101... to extend the input to the
3564 // appropriate bit width.
3565 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3566 Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
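// Worked example (illustrative): widening a memset byte of 0xAB to i32 gives
// NumBits = 32, so Magic = 0x01010101 and
//   0x000000AB * 0x01010101 = 0xABABABAB
// i.e. a single multiply splats the byte across the wider type.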
3572 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
3573 /// used when a memcpy is turned into a memset because the source is a
3574 /// constant string pointer.
3575 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3576 const TargetLowering &TLI, StringRef Str) {
3577 // Handle the case where the string is all zeros.
3580 return DAG.getConstant(0, VT);
3581 else if (VT == MVT::f32 || VT == MVT::f64)
3582 return DAG.getConstantFP(0.0, VT);
3583 else if (VT.isVector()) {
3584 unsigned NumElts = VT.getVectorNumElements();
3585 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3586 return DAG.getNode(ISD::BITCAST, dl, VT,
3587 DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3590 llvm_unreachable("Expected type!");
3593 assert(!VT.isVector() && "Can't handle vector type here!");
3594 unsigned NumVTBits = VT.getSizeInBits();
3595 unsigned NumVTBytes = NumVTBits / 8;
3596 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3598 APInt Val(NumVTBits, 0);
3599 if (TLI.isLittleEndian()) {
3600 for (unsigned i = 0; i != NumBytes; ++i)
3601 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3603 for (unsigned i = 0; i != NumBytes; ++i)
3604 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
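// Worked example (illustrative): packing Str = "abcd" into an i32 yields
//   little-endian: Val = 0x64636261  ('a' in the lowest byte)
//   big-endian:    Val = 0x61626364  ('a' in the highest byte)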
3607 // If the "cost" of materializing the integer immediate is less than the cost
3608 // of a load, then it is cost effective to turn the load into the immediate.
3609 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
3610 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
3611 return DAG.getConstant(Val, VT);
3612 return SDValue(nullptr, 0);
3615 /// getMemBasePlusOffset - Returns a node computing Base plus the given
3616 /// byte Offset.
3617 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3618 SelectionDAG &DAG) {
3619 EVT VT = Base.getValueType();
3620 return DAG.getNode(ISD::ADD, dl,
3621 VT, Base, DAG.getConstant(Offset, VT));
3624 /// isMemSrcFromString - Returns true if the memcpy source is a constant
3625 /// string.
3626 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3627 unsigned SrcDelta = 0;
3628 GlobalAddressSDNode *G = nullptr;
3629 if (Src.getOpcode() == ISD::GlobalAddress)
3630 G = cast<GlobalAddressSDNode>(Src);
3631 else if (Src.getOpcode() == ISD::ADD &&
3632 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3633 Src.getOperand(1).getOpcode() == ISD::Constant) {
3634 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3635 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3640 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
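// For example (illustrative): this matches both (GlobalAddress @str) and
// (add (GlobalAddress @str), (Constant 4)); in the latter case the constant
// string is queried starting at byte offset 4.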
3643 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3644 /// to replace the memset / memcpy. Returns true if the number of memory ops
3645 /// is below the threshold. The types of the sequence of memory ops needed
3646 /// to perform the memset / memcpy are returned by reference in MemOps.
3647 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3648 unsigned Limit, uint64_t Size,
3649 unsigned DstAlign, unsigned SrcAlign,
3655 const TargetLowering &TLI) {
3656 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3657 "Expecting memcpy / memset source to meet alignment requirement!");
3658 // If 'SrcAlign' is zero, that means the memory operation does not need to
3659 // load the value, i.e. memset or memcpy from constant string. Otherwise,
3660 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3661 // is the specified alignment of the memory operation. If it is zero, that
3662 // means it's possible to change the alignment of the destination.
3663 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3664 // not need to be loaded.
3665 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3666 IsMemset, ZeroMemset, MemcpyStrSrc,
3667 DAG.getMachineFunction());
3669 if (VT == MVT::Other) {
3671 if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
3672 TLI.allowsUnalignedMemoryAccesses(VT, AS)) {
3673 VT = TLI.getPointerTy();
3675 switch (DstAlign & 7) {
3676 case 0: VT = MVT::i64; break;
3677 case 4: VT = MVT::i32; break;
3678 case 2: VT = MVT::i16; break;
3679 default: VT = MVT::i8; break;
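// For example (illustrative): DstAlign == 8 or 16 gives (DstAlign & 7) == 0
// and selects MVT::i64, DstAlign == 4 selects MVT::i32, DstAlign == 2
// selects MVT::i16, and any other alignment falls through to MVT::i8.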
3684 while (!TLI.isTypeLegal(LVT))
3685 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3686 assert(LVT.isInteger());
3692 unsigned NumMemOps = 0;
3694 unsigned VTSize = VT.getSizeInBits() / 8;
3695 while (VTSize > Size) {
3696 // For now, only use non-vector loads / stores for the left-over pieces.
3701 if (VT.isVector() || VT.isFloatingPoint()) {
3702 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3703 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3704 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3706 else if (NewVT == MVT::i64 &&
3707 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3708 TLI.isSafeMemOpType(MVT::f64)) {
3709 // i64 is usually not legal on 32-bit targets, but f64 may be.
3717 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3718 if (NewVT == MVT::i8)
3720 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3722 NewVTSize = NewVT.getSizeInBits() / 8;
3724 // If the new VT cannot cover all of the remaining bits, then consider
3725 // issuing one (or a pair of) unaligned and overlapping loads / stores.
3726 // FIXME: Only do this for 64 bits or more, since we don't have a proper
3727 // cost model for unaligned loads / stores.
3730 if (NumMemOps && AllowOverlap &&
3731 VTSize >= 8 && NewVTSize < Size &&
3732 TLI.allowsUnalignedMemoryAccesses(VT, AS, &Fast) && Fast)
3740 if (++NumMemOps > Limit)
3743 MemOps.push_back(VT);
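// Worked example (illustrative): a 15-byte operation starting from MVT::i64
// typically yields MemOps == { i64, i32, i16, i8 } (8 + 4 + 2 + 1 bytes), or
// MemOps == { i64, i64 } with the second op overlapping the first by one
// byte when fast unaligned accesses are allowed.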
3750 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3751 SDValue Chain, SDValue Dst,
3752 SDValue Src, uint64_t Size,
3753 unsigned Align, bool isVol,
3755 MachinePointerInfo DstPtrInfo,
3756 MachinePointerInfo SrcPtrInfo) {
3757 // Turn a memcpy of undef into a nop.
3758 if (Src.getOpcode() == ISD::UNDEF)
3761 // Expand memcpy to a series of load and store ops if the size operand falls
3762 // below a certain threshold.
3763 // TODO: In the AlwaysInline case, if the size is big, generate a loop
3764 // rather than a potentially humongous number of loads and stores.
3765 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3766 std::vector<EVT> MemOps;
3767 bool DstAlignCanChange = false;
3768 MachineFunction &MF = DAG.getMachineFunction();
3769 MachineFrameInfo *MFI = MF.getFrameInfo();
3771 MF.getFunction()->getAttributes().
3772 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3773 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3774 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3775 DstAlignCanChange = true;
3776 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3777 if (Align > SrcAlign)
3780 bool CopyFromStr = isMemSrcFromString(Src, Str);
3781 bool isZeroStr = CopyFromStr && Str.empty();
3782 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
3784 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3785 (DstAlignCanChange ? 0 : Align),
3786 (isZeroStr ? 0 : SrcAlign),
3787 false, false, CopyFromStr, true, DAG, TLI))
3790 if (DstAlignCanChange) {
3791 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3792 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3794 // Don't promote to an alignment that would require dynamic stack
3795 // realignment.
3796 const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
3797 if (!TRI->needsStackRealignment(MF))
3798 while (NewAlign > Align &&
3799 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
3802 if (NewAlign > Align) {
3803 // Give the stack frame object a larger alignment if needed.
3804 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3805 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3810 SmallVector<SDValue, 8> OutChains;
3811 unsigned NumMemOps = MemOps.size();
3812 uint64_t SrcOff = 0, DstOff = 0;
3813 for (unsigned i = 0; i != NumMemOps; ++i) {
3815 unsigned VTSize = VT.getSizeInBits() / 8;
3816 SDValue Value, Store;
3818 if (VTSize > Size) {
3819 // We are issuing an unaligned load / store pair that overlaps with the
3820 // previous pair; adjust the offsets accordingly.
3821 assert(i == NumMemOps-1 && i != 0);
3822 SrcOff -= VTSize - Size;
3823 DstOff -= VTSize - Size;
3827 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
3828 // It's unlikely a store of a vector immediate can be done in a single
3829 // instruction. It would require a load from a constant pool first.
3830 // We only handle zero vectors here.
3831 // FIXME: Handle other cases where a store of a vector immediate can be
3832 // done in a single instruction.
3833 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
3834 if (Value.getNode())
3835 Store = DAG.getStore(Chain, dl, Value,
3836 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3837 DstPtrInfo.getWithOffset(DstOff), isVol,
3841 if (!Store.getNode()) {
3842 // The type might not be legal for the target. This should only happen
3843 // if the type is smaller than a legal type, as on PPC, so the right
3844 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
3845 // to Load/Store if NVT==VT.
3846 // FIXME: does the case above also need this?
3847 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3848 assert(NVT.bitsGE(VT));
3849 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3850 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3851 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
3852 MinAlign(SrcAlign, SrcOff));
3853 Store = DAG.getTruncStore(Chain, dl, Value,
3854 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3855 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
3858 OutChains.push_back(Store);
3864 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3867 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3868 SDValue Chain, SDValue Dst,
3869 SDValue Src, uint64_t Size,
3870 unsigned Align, bool isVol,
3872 MachinePointerInfo DstPtrInfo,
3873 MachinePointerInfo SrcPtrInfo) {
3874 // Turn a memmove of undef into a nop.
3875 if (Src.getOpcode() == ISD::UNDEF)
3878 // Expand memmove to a series of load and store ops if the size operand falls
3879 // below a certain threshold.
3880 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3881 std::vector<EVT> MemOps;
3882 bool DstAlignCanChange = false;
3883 MachineFunction &MF = DAG.getMachineFunction();
3884 MachineFrameInfo *MFI = MF.getFrameInfo();
3885 bool OptSize = MF.getFunction()->getAttributes().
3886 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3887 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3888 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3889 DstAlignCanChange = true;
3890 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3891 if (Align > SrcAlign)
3893 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
3895 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3896 (DstAlignCanChange ? 0 : Align), SrcAlign,
3897 false, false, false, false, DAG, TLI))
3900 if (DstAlignCanChange) {
3901 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3902 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3903 if (NewAlign > Align) {
3904 // Give the stack frame object a larger alignment if needed.
3905 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3906 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3911 uint64_t SrcOff = 0, DstOff = 0;
3912 SmallVector<SDValue, 8> LoadValues;
3913 SmallVector<SDValue, 8> LoadChains;
3914 SmallVector<SDValue, 8> OutChains;
3915 unsigned NumMemOps = MemOps.size();
3916 for (unsigned i = 0; i < NumMemOps; i++) {
3918 unsigned VTSize = VT.getSizeInBits() / 8;
3921 Value = DAG.getLoad(VT, dl, Chain,
3922 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3923 SrcPtrInfo.getWithOffset(SrcOff), isVol,
3924 false, false, SrcAlign);
3925 LoadValues.push_back(Value);
3926 LoadChains.push_back(Value.getValue(1));
3929 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
3931 for (unsigned i = 0; i < NumMemOps; i++) {
3933 unsigned VTSize = VT.getSizeInBits() / 8;
3936 Store = DAG.getStore(Chain, dl, LoadValues[i],
3937 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3938 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
3939 OutChains.push_back(Store);
3943 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
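// Design note with a sketch (illustrative): unlike the memcpy expansion
// above, which may interleave each load with its store, memmove emits *all*
// loads before *any* store, so an overlapping destination cannot clobber
// source bytes that have not been read yet. Roughly:
//   t1: i64,ch = load t0, Src          t2: i64,ch = load t0, Src+8
//   t3: ch = TokenFactor t1:1, t2:1
//   t4: ch = store t3, t1, Dst         t5: ch = store t3, t2, Dst+8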
3946 /// \brief Lower the call to the 'memset' intrinsic function into a series
3947 /// of store operations.
3949 /// \param DAG Selection DAG where lowered code is placed.
3950 /// \param dl Link to corresponding IR location.
3951 /// \param Chain Control flow dependency.
3952 /// \param Dst Pointer to destination memory location.
3953 /// \param Src Value of byte to write into the memory.
3954 /// \param Size Number of bytes to write.
3955 /// \param Align Alignment of the destination in bytes.
3956 /// \param isVol True if destination is volatile.
3957 /// \param DstPtrInfo IR information on the memory pointer.
3958 /// \returns The new head of the control flow if lowering was successful,
3959 /// an empty SDValue otherwise.
3961 /// The function tries to replace 'llvm.memset' intrinsic with several store
3962 /// operations and value calculation code. This is usually profitable for
3963 /// small memory sizes.
3964 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
3965 SDValue Chain, SDValue Dst,
3966 SDValue Src, uint64_t Size,
3967 unsigned Align, bool isVol,
3968 MachinePointerInfo DstPtrInfo) {
3970 // Turn a memset of undef into a nop.
3970 if (Src.getOpcode() == ISD::UNDEF)
3973 // Expand memset to a series of load/store ops if the size operand
3974 // falls below a certain threshold.
3975 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3976 std::vector<EVT> MemOps;
3977 bool DstAlignCanChange = false;
3978 MachineFunction &MF = DAG.getMachineFunction();
3979 MachineFrameInfo *MFI = MF.getFrameInfo();
3980 bool OptSize = MF.getFunction()->getAttributes().
3981 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3982 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3983 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3984 DstAlignCanChange = true;
3986 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
3987 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
3988 Size, (DstAlignCanChange ? 0 : Align), 0,
3989 true, IsZeroVal, false, true, DAG, TLI))
3992 if (DstAlignCanChange) {
3993 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3994 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3995 if (NewAlign > Align) {
3996 // Give the stack frame object a larger alignment if needed.
3997 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3998 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
4003 SmallVector<SDValue, 8> OutChains;
4004 uint64_t DstOff = 0;
4005 unsigned NumMemOps = MemOps.size();
4007 // Find the largest store and generate the bit pattern for it.
4008 EVT LargestVT = MemOps[0];
4009 for (unsigned i = 1; i < NumMemOps; i++)
4010 if (MemOps[i].bitsGT(LargestVT))
4011 LargestVT = MemOps[i];
4012 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
4014 for (unsigned i = 0; i < NumMemOps; i++) {
4016 unsigned VTSize = VT.getSizeInBits() / 8;
4017 if (VTSize > Size) {
4018 // We are issuing an unaligned store that overlaps with the previous
4019 // store; adjust the offset accordingly.
4020 assert(i == NumMemOps-1 && i != 0);
4021 DstOff -= VTSize - Size;
4024 // If this store is smaller than the largest store, see whether we can get
4025 // the smaller value for free with a truncate.
4026 SDValue Value = MemSetValue;
4027 if (VT.bitsLT(LargestVT)) {
4028 if (!LargestVT.isVector() && !VT.isVector() &&
4029 TLI.isTruncateFree(LargestVT, VT))
4030 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
4032 Value = getMemsetValue(Src, VT, DAG, dl);
4034 assert(Value.getValueType() == VT && "Value with wrong type.");
4035 SDValue Store = DAG.getStore(Chain, dl, Value,
4036 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4037 DstPtrInfo.getWithOffset(DstOff),
4038 isVol, false, Align);
4039 OutChains.push_back(Store);
4040 DstOff += VT.getSizeInBits() / 8;
4044 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
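// Worked example (illustrative): memset(p, 0xAB, 12) with MemOps == { i64,
// i32 } stores the splat 0xABABABABABABABAB at p, then either truncates that
// value or re-splats it to 0xABABABAB for the store at p + 8.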
4047 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
4048 SDValue Src, SDValue Size,
4049 unsigned Align, bool isVol, bool AlwaysInline,
4050 MachinePointerInfo DstPtrInfo,
4051 MachinePointerInfo SrcPtrInfo) {
4052 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4054 // Check to see if we should lower the memcpy to loads and stores first.
4055 // For cases within the target-specified limits, this is the best choice.
4056 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4058 // Memcpy with size zero? Just return the original chain.
4059 if (ConstantSize->isNullValue())
4062 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4063 ConstantSize->getZExtValue(),Align,
4064 isVol, false, DstPtrInfo, SrcPtrInfo);
4065 if (Result.getNode())
4069 // Then check to see if we should lower the memcpy with target-specific
4070 // code. If the target chooses to do this, this is the next best.
4072 TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
4073 isVol, AlwaysInline,
4074 DstPtrInfo, SrcPtrInfo);
4075 if (Result.getNode())
4078 // If we really need inline code and the target declined to provide it,
4079 // use a (potentially long) sequence of loads and stores.
4081 assert(ConstantSize && "AlwaysInline requires a constant size!");
4082 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4083 ConstantSize->getZExtValue(), Align, isVol,
4084 true, DstPtrInfo, SrcPtrInfo);
4087 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
4088 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
4089 // respect volatile, so they may do things like read or write memory
4090 // beyond the given memory regions. But fixing this isn't easy, and most
4091 // people don't care.
4093 const TargetLowering *TLI = TM.getTargetLowering();
4095 // Emit a library call.
4096 TargetLowering::ArgListTy Args;
4097 TargetLowering::ArgListEntry Entry;
4098 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4099 Entry.Node = Dst; Args.push_back(Entry);
4100 Entry.Node = Src; Args.push_back(Entry);
4101 Entry.Node = Size; Args.push_back(Entry);
4102 // FIXME: pass in SDLoc
4104 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4105 false, false, false, false, 0,
4106 TLI->getLibcallCallingConv(RTLIB::MEMCPY),
4107 /*isTailCall=*/false,
4108 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4109 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
4110 TLI->getPointerTy()),
4112 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4114 return CallResult.second;
4117 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
4118 SDValue Src, SDValue Size,
4119 unsigned Align, bool isVol,
4120 MachinePointerInfo DstPtrInfo,
4121 MachinePointerInfo SrcPtrInfo) {
4122 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4124 // Check to see if we should lower the memmove to loads and stores first.
4125 // For cases within the target-specified limits, this is the best choice.
4126 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4128 // Memmove with size zero? Just return the original chain.
4129 if (ConstantSize->isNullValue())
4133 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
4134 ConstantSize->getZExtValue(), Align, isVol,
4135 false, DstPtrInfo, SrcPtrInfo);
4136 if (Result.getNode())
4140 // Then check to see if we should lower the memmove with target-specific
4141 // code. If the target chooses to do this, this is the next best.
4143 TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4144 DstPtrInfo, SrcPtrInfo);
4145 if (Result.getNode())
4148 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
4149 // not be safe. See memcpy above for more details.
4151 const TargetLowering *TLI = TM.getTargetLowering();
4153 // Emit a library call.
4154 TargetLowering::ArgListTy Args;
4155 TargetLowering::ArgListEntry Entry;
4156 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4157 Entry.Node = Dst; Args.push_back(Entry);
4158 Entry.Node = Src; Args.push_back(Entry);
4159 Entry.Node = Size; Args.push_back(Entry);
4160 // FIXME: pass in SDLoc
4162 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4163 false, false, false, false, 0,
4164 TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
4165 /*isTailCall=*/false,
4166 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4167 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
4168 TLI->getPointerTy()),
4170 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4172 return CallResult.second;
4175 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
4176 SDValue Src, SDValue Size,
4177 unsigned Align, bool isVol,
4178 MachinePointerInfo DstPtrInfo) {
4179 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4181 // Check to see if we should lower the memset to stores first.
4182 // For cases within the target-specified limits, this is the best choice.
4183 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4185 // Memset with size zero? Just return the original chain.
4186 if (ConstantSize->isNullValue())
4190 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4191 Align, isVol, DstPtrInfo);
4193 if (Result.getNode())
4197 // Then check to see if we should lower the memset with target-specific
4198 // code. If the target chooses to do this, this is the next best.
4200 TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4202 if (Result.getNode())
4205 // Emit a library call.
4206 const TargetLowering *TLI = TM.getTargetLowering();
4207 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
4208 TargetLowering::ArgListTy Args;
4209 TargetLowering::ArgListEntry Entry;
4210 Entry.Node = Dst; Entry.Ty = IntPtrTy;
4211 Args.push_back(Entry);
4212 // Extend or truncate the argument to be an i32 value for the call.
4213 if (Src.getValueType().bitsGT(MVT::i32))
4214 Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4216 Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4218 Entry.Ty = Type::getInt32Ty(*getContext());
4219 Entry.isSExt = true;
4220 Args.push_back(Entry);
4222 Entry.Ty = IntPtrTy;
4223 Entry.isSExt = false;
4224 Args.push_back(Entry);
4225 // FIXME: pass in SDLoc
4227 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4228 false, false, false, false, 0,
4229 TLI->getLibcallCallingConv(RTLIB::MEMSET),
4230 /*isTailCall=*/false,
4231 /*doesNotReturn*/false, /*isReturnValueUsed=*/false,
4232 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4233 TLI->getPointerTy()),
4235 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4237 return CallResult.second;
4240 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4241 SDVTList VTList, const SDValue *Ops, unsigned NumOps,
4242 MachineMemOperand *MMO,
4243 AtomicOrdering SuccessOrdering,
4244 AtomicOrdering FailureOrdering,
4245 SynchronizationScope SynchScope) {
4246 FoldingSetNodeID ID;
4247 ID.AddInteger(MemVT.getRawBits());
4248 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4249 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4251 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4252 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4253 return SDValue(E, 0);
4256 // Allocate the operands array for the node out of the BumpPtrAllocator, since
4257 // SDNode doesn't have access to it. This memory will be "leaked" when
4258 // the node is deallocated, but recovered when the allocator is released.
4259 // If the number of operands is less than 5 we use AtomicSDNode's internal
4260 // storage.
4261 SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps) : nullptr;
4263 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
4264 dl.getDebugLoc(), VTList, MemVT,
4265 Ops, DynOps, NumOps, MMO,
4266 SuccessOrdering, FailureOrdering,
4268 CSEMap.InsertNode(N, IP);
4269 AllNodes.push_back(N);
4270 return SDValue(N, 0);
4273 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4274 SDVTList VTList, const SDValue *Ops, unsigned NumOps,
4275 MachineMemOperand *MMO,
4276 AtomicOrdering Ordering,
4277 SynchronizationScope SynchScope) {
4278 return getAtomic(Opcode, dl, MemVT, VTList, Ops, NumOps, MMO, Ordering,
4279 Ordering, SynchScope);
4282 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4283 SDValue Chain, SDValue Ptr, SDValue Cmp,
4284 SDValue Swp, MachinePointerInfo PtrInfo,
4286 AtomicOrdering SuccessOrdering,
4287 AtomicOrdering FailureOrdering,
4288 SynchronizationScope SynchScope) {
4289 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4290 Alignment = getEVTAlignment(MemVT);
4292 MachineFunction &MF = getMachineFunction();
4294 // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
4295 // For now, atomics are considered to be volatile always.
4296 // FIXME: Volatile isn't really correct; we should keep track of atomic
4297 // orderings in the memoperand.
4298 unsigned Flags = MachineMemOperand::MOVolatile;
4299 if (Opcode != ISD::ATOMIC_STORE)
4300 Flags |= MachineMemOperand::MOLoad;
4301 if (Opcode != ISD::ATOMIC_LOAD)
4302 Flags |= MachineMemOperand::MOStore;
4304 MachineMemOperand *MMO =
4305 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4307 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
4308 SuccessOrdering, FailureOrdering, SynchScope);
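// For example (illustrative): under the rules above, an ATOMIC_CMP_SWAP is
// marked MOVolatile | MOLoad | MOStore, an ATOMIC_LOAD only
// MOVolatile | MOLoad, and an ATOMIC_STORE only MOVolatile | MOStore.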
4311 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4313 SDValue Ptr, SDValue Cmp,
4314 SDValue Swp, MachineMemOperand *MMO,
4315 AtomicOrdering SuccessOrdering,
4316 AtomicOrdering FailureOrdering,
4317 SynchronizationScope SynchScope) {
4318 assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
4319 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4321 EVT VT = Cmp.getValueType();
4323 SDVTList VTs = getVTList(VT, MVT::Other);
4324 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4325 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 4, MMO, SuccessOrdering,
4326 FailureOrdering, SynchScope);
4329 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4331 SDValue Ptr, SDValue Val,
4332 const Value* PtrVal,
4334 AtomicOrdering Ordering,
4335 SynchronizationScope SynchScope) {
4336 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4337 Alignment = getEVTAlignment(MemVT);
4339 MachineFunction &MF = getMachineFunction();
4340 // An atomic store does not load. An atomic load does not store.
4341 // (An atomicrmw obviously both loads and stores.)
4342 // For now, atomics are considered to be volatile always, and they are
4343 // chained as such.
4344 // FIXME: Volatile isn't really correct; we should keep track of atomic
4345 // orderings in the memoperand.
4346 unsigned Flags = MachineMemOperand::MOVolatile;
4347 if (Opcode != ISD::ATOMIC_STORE)
4348 Flags |= MachineMemOperand::MOLoad;
4349 if (Opcode != ISD::ATOMIC_LOAD)
4350 Flags |= MachineMemOperand::MOStore;
4352 MachineMemOperand *MMO =
4353 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4354 MemVT.getStoreSize(), Alignment);
4356 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4357 Ordering, SynchScope);
4360 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4362 SDValue Ptr, SDValue Val,
4363 MachineMemOperand *MMO,
4364 AtomicOrdering Ordering,
4365 SynchronizationScope SynchScope) {
4366 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4367 Opcode == ISD::ATOMIC_LOAD_SUB ||
4368 Opcode == ISD::ATOMIC_LOAD_AND ||
4369 Opcode == ISD::ATOMIC_LOAD_OR ||
4370 Opcode == ISD::ATOMIC_LOAD_XOR ||
4371 Opcode == ISD::ATOMIC_LOAD_NAND ||
4372 Opcode == ISD::ATOMIC_LOAD_MIN ||
4373 Opcode == ISD::ATOMIC_LOAD_MAX ||
4374 Opcode == ISD::ATOMIC_LOAD_UMIN ||
4375 Opcode == ISD::ATOMIC_LOAD_UMAX ||
4376 Opcode == ISD::ATOMIC_SWAP ||
4377 Opcode == ISD::ATOMIC_STORE) &&
4378 "Invalid Atomic Op");
4380 EVT VT = Val.getValueType();
4382 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4383 getVTList(VT, MVT::Other);
4384 SDValue Ops[] = {Chain, Ptr, Val};
4385 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 3, MMO, Ordering, SynchScope);
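// For example (illustrative): an ISD::ATOMIC_LOAD_ADD produces the old value
// plus a chain, so it uses getVTList(VT, MVT::Other), while ISD::ATOMIC_STORE
// produces only a chain and uses getVTList(MVT::Other).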
4388 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4389 EVT VT, SDValue Chain,
4391 MachineMemOperand *MMO,
4392 AtomicOrdering Ordering,
4393 SynchronizationScope SynchScope) {
4394 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4396 SDVTList VTs = getVTList(VT, MVT::Other);
4397 SDValue Ops[] = {Chain, Ptr};
4398 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 2, MMO, Ordering, SynchScope);
4401 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
4402 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl) {
4403 if (Ops.size() == 1)
4406 SmallVector<EVT, 4> VTs;
4407 VTs.reserve(Ops.size());
4408 for (unsigned i = 0; i < Ops.size(); ++i)
4409 VTs.push_back(Ops[i].getValueType());
4410 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
4414 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4415 ArrayRef<SDValue> Ops,
4416 EVT MemVT, MachinePointerInfo PtrInfo,
4417 unsigned Align, bool Vol,
4418 bool ReadMem, bool WriteMem) {
4419 if (Align == 0) // Ensure that codegen never sees alignment 0
4420 Align = getEVTAlignment(MemVT);
4422 MachineFunction &MF = getMachineFunction();
4425 Flags |= MachineMemOperand::MOStore;
4427 Flags |= MachineMemOperand::MOLoad;
4429 Flags |= MachineMemOperand::MOVolatile;
4430 MachineMemOperand *MMO =
4431 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
4433 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
4437 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4438 ArrayRef<SDValue> Ops, EVT MemVT,
4439 MachineMemOperand *MMO) {
4440 assert((Opcode == ISD::INTRINSIC_VOID ||
4441 Opcode == ISD::INTRINSIC_W_CHAIN ||
4442 Opcode == ISD::PREFETCH ||
4443 Opcode == ISD::LIFETIME_START ||
4444 Opcode == ISD::LIFETIME_END ||
4445 (Opcode <= INT_MAX &&
4446 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4447 "Opcode is not a memory-accessing opcode!");
4449 // Memoize the node unless it returns a flag.
4450 MemIntrinsicSDNode *N;
4451 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4452 FoldingSetNodeID ID;
4453 AddNodeIDNode(ID, Opcode, VTList, Ops.data(), Ops.size());
4454 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4456 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4457 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4458 return SDValue(E, 0);
4461 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4462 dl.getDebugLoc(), VTList, Ops,
4464 CSEMap.InsertNode(N, IP);
4466 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4467 dl.getDebugLoc(), VTList, Ops,
4470 AllNodes.push_back(N);
4471 return SDValue(N, 0);
4474 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4475 /// MachinePointerInfo record from it. This is particularly useful because the
4476 /// code generator has many cases where it doesn't bother passing in a
4477 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4478 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4479 // If this is FI+Offset, we can model it.
4480 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4481 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4483 // If this is (FI+Offset1)+Offset2, we can model it.
4484 if (Ptr.getOpcode() != ISD::ADD ||
4485 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4486 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4487 return MachinePointerInfo();
4489 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4490 return MachinePointerInfo::getFixedStack(FI, Offset+
4491 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
4494 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4495 /// MachinePointerInfo record from it. This is particularly useful because the
4496 /// code generator has many cases where it doesn't bother passing in a
4497 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4498 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4499 // If the 'Offset' value isn't a constant, we can't handle this.
4500 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4501 return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4502 if (OffsetOp.getOpcode() == ISD::UNDEF)
4503 return InferPointerInfo(Ptr);
4504 return MachinePointerInfo();
4509 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4510 EVT VT, SDLoc dl, SDValue Chain,
4511 SDValue Ptr, SDValue Offset,
4512 MachinePointerInfo PtrInfo, EVT MemVT,
4513 bool isVolatile, bool isNonTemporal, bool isInvariant,
4514 unsigned Alignment, const MDNode *TBAAInfo,
4515 const MDNode *Ranges) {
4516 assert(Chain.getValueType() == MVT::Other &&
4517 "Invalid chain type");
4518 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4519 Alignment = getEVTAlignment(VT);
4521 unsigned Flags = MachineMemOperand::MOLoad;
4523 Flags |= MachineMemOperand::MOVolatile;
4525 Flags |= MachineMemOperand::MONonTemporal;
4527 Flags |= MachineMemOperand::MOInvariant;
4529 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
4530 // the code.
4531 if (PtrInfo.V.isNull())
4532 PtrInfo = InferPointerInfo(Ptr, Offset);
4534 MachineFunction &MF = getMachineFunction();
4535 MachineMemOperand *MMO =
4536 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
4538 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
4542 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4543 EVT VT, SDLoc dl, SDValue Chain,
4544 SDValue Ptr, SDValue Offset, EVT MemVT,
4545 MachineMemOperand *MMO) {
4547 ExtType = ISD::NON_EXTLOAD;
4548 } else if (ExtType == ISD::NON_EXTLOAD) {
4549 assert(VT == MemVT && "Non-extending load from different memory type!");
4552 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
4553 "Should only be an extending load, not truncating!");
4554 assert(VT.isInteger() == MemVT.isInteger() &&
4555 "Cannot convert from FP to Int or Int -> FP!");
4556 assert(VT.isVector() == MemVT.isVector() &&
4557 "Cannot use trunc store to convert to or from a vector!");
4558 assert((!VT.isVector() ||
4559 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
4560 "Cannot use trunc store to change the number of vector elements!");
4563 bool Indexed = AM != ISD::UNINDEXED;
4564 assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
4565 "Unindexed load with an offset!");
4567 SDVTList VTs = Indexed ?
4568 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4569 SDValue Ops[] = { Chain, Ptr, Offset };
4570 FoldingSetNodeID ID;
4571 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
4572 ID.AddInteger(MemVT.getRawBits());
4573 ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4574 MMO->isNonTemporal(),
4575 MMO->isInvariant()));
4576 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4578 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4579 cast<LoadSDNode>(E)->refineAlignment(MMO);
4580 return SDValue(E, 0);
4582 SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
4583 dl.getDebugLoc(), VTs, AM, ExtType,
4585 CSEMap.InsertNode(N, IP);
4586 AllNodes.push_back(N);
4587 return SDValue(N, 0);
4590 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4591 SDValue Chain, SDValue Ptr,
4592 MachinePointerInfo PtrInfo,
4593 bool isVolatile, bool isNonTemporal,
4594 bool isInvariant, unsigned Alignment,
4595 const MDNode *TBAAInfo,
4596 const MDNode *Ranges) {
4597 SDValue Undef = getUNDEF(Ptr.getValueType());
4598 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4599 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4603 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4604 SDValue Chain, SDValue Ptr,
4605 MachineMemOperand *MMO) {
4606 SDValue Undef = getUNDEF(Ptr.getValueType());
4607 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4611 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4612 SDValue Chain, SDValue Ptr,
4613 MachinePointerInfo PtrInfo, EVT MemVT,
4614 bool isVolatile, bool isNonTemporal,
4615 unsigned Alignment, const MDNode *TBAAInfo) {
4616 SDValue Undef = getUNDEF(Ptr.getValueType());
4617 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4618 PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
4623 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4624 SDValue Chain, SDValue Ptr, EVT MemVT,
4625 MachineMemOperand *MMO) {
4626 SDValue Undef = getUNDEF(Ptr.getValueType());
4627 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4632 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
4633 SDValue Offset, ISD::MemIndexedMode AM) {
4634 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4635 assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4636 "Load is already a indexed load!");
4637 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4638 LD->getChain(), Base, Offset, LD->getPointerInfo(),
4639 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4640 false, LD->getAlignment());
4643 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4644 SDValue Ptr, MachinePointerInfo PtrInfo,
4645 bool isVolatile, bool isNonTemporal,
4646 unsigned Alignment, const MDNode *TBAAInfo) {
4647 assert(Chain.getValueType() == MVT::Other &&
4648 "Invalid chain type");
4649 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4650 Alignment = getEVTAlignment(Val.getValueType());
4652 unsigned Flags = MachineMemOperand::MOStore;
4654 Flags |= MachineMemOperand::MOVolatile;
4656 Flags |= MachineMemOperand::MONonTemporal;
4658 if (PtrInfo.V.isNull())
4659 PtrInfo = InferPointerInfo(Ptr);
4661 MachineFunction &MF = getMachineFunction();
4662 MachineMemOperand *MMO =
4663 MF.getMachineMemOperand(PtrInfo, Flags,
4664 Val.getValueType().getStoreSize(), Alignment,
4667 return getStore(Chain, dl, Val, Ptr, MMO);
4670 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4671 SDValue Ptr, MachineMemOperand *MMO) {
4672 assert(Chain.getValueType() == MVT::Other &&
4673 "Invalid chain type");
4674 EVT VT = Val.getValueType();
4675 SDVTList VTs = getVTList(MVT::Other);
4676 SDValue Undef = getUNDEF(Ptr.getValueType());
4677 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4678 FoldingSetNodeID ID;
4679 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4680 ID.AddInteger(VT.getRawBits());
4681 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4682 MMO->isNonTemporal(), MMO->isInvariant()));
4683 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4685 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4686 cast<StoreSDNode>(E)->refineAlignment(MMO);
4687 return SDValue(E, 0);
4689 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4690 dl.getDebugLoc(), VTs,
4691 ISD::UNINDEXED, false, VT, MMO);
4692 CSEMap.InsertNode(N, IP);
4693 AllNodes.push_back(N);
4694 return SDValue(N, 0);
4697 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4698 SDValue Ptr, MachinePointerInfo PtrInfo,
4699 EVT SVT,bool isVolatile, bool isNonTemporal,
4701 const MDNode *TBAAInfo) {
4702 assert(Chain.getValueType() == MVT::Other &&
4703 "Invalid chain type");
4704 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4705 Alignment = getEVTAlignment(SVT);
4707 unsigned Flags = MachineMemOperand::MOStore;
4709 Flags |= MachineMemOperand::MOVolatile;
4711 Flags |= MachineMemOperand::MONonTemporal;
4713 if (PtrInfo.V.isNull())
4714 PtrInfo = InferPointerInfo(Ptr);
4716 MachineFunction &MF = getMachineFunction();
4717 MachineMemOperand *MMO =
4718 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
4721 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
4724 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4725 SDValue Ptr, EVT SVT,
4726 MachineMemOperand *MMO) {
4727 EVT VT = Val.getValueType();
4729 assert(Chain.getValueType() == MVT::Other &&
4730 "Invalid chain type");
4732 return getStore(Chain, dl, Val, Ptr, MMO);
4734 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
4735 "Should only be a truncating store, not extending!");
4736 assert(VT.isInteger() == SVT.isInteger() &&
4737 "Can't do FP-INT conversion!");
4738 assert(VT.isVector() == SVT.isVector() &&
4739 "Cannot use trunc store to convert to or from a vector!");
4740 assert((!VT.isVector() ||
4741 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
4742 "Cannot use trunc store to change the number of vector elements!");
4744 SDVTList VTs = getVTList(MVT::Other);
4745 SDValue Undef = getUNDEF(Ptr.getValueType());
4746 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4747 FoldingSetNodeID ID;
4748 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4749 ID.AddInteger(SVT.getRawBits());
4750 ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
4751 MMO->isNonTemporal(), MMO->isInvariant()));
4752 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4754 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4755 cast<StoreSDNode>(E)->refineAlignment(MMO);
4756 return SDValue(E, 0);
4758 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4759 dl.getDebugLoc(), VTs,
4760 ISD::UNINDEXED, true, SVT, MMO);
4761 CSEMap.InsertNode(N, IP);
4762 AllNodes.push_back(N);
4763 return SDValue(N, 0);
4767 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
4768 SDValue Offset, ISD::MemIndexedMode AM) {
4769 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
4770 assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4771 "Store is already a indexed store!");
4772 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4773 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4774 FoldingSetNodeID ID;
4775 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4776 ID.AddInteger(ST->getMemoryVT().getRawBits());
4777 ID.AddInteger(ST->getRawSubclassData());
4778 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4780 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4781 return SDValue(E, 0);
4783 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4784 dl.getDebugLoc(), VTs, AM,
4785 ST->isTruncatingStore(),
4787 ST->getMemOperand());
4788 CSEMap.InsertNode(N, IP);
4789 AllNodes.push_back(N);
4790 return SDValue(N, 0);
4793 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
4794 SDValue Chain, SDValue Ptr,
4797 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4798 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
4801 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4802 ArrayRef<SDUse> Ops) {
4803 switch (Ops.size()) {
4804 case 0: return getNode(Opcode, DL, VT);
4805 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
4806 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4807 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4811 // Copy from an SDUse array into an SDValue array for use with
4812 // the regular getNode logic.
4813 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
4814 return getNode(Opcode, DL, VT, NewOps);
4817 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4818 ArrayRef<SDValue> Ops) {
4819 unsigned NumOps = Ops.size();
4821 case 0: return getNode(Opcode, DL, VT);
4822 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4823 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4824 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4830 case ISD::SELECT_CC: {
4831 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4832 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4833 "LHS and RHS of condition must have same type!");
4834 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4835 "True and False arms of SelectCC must have same type!");
4836 assert(Ops[2].getValueType() == VT &&
4837 "select_cc node must be of same type as true and false value!");
4841 assert(NumOps == 5 && "BR_CC takes 5 operands!");
4842 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4843 "LHS/RHS of comparison should match types!");
4850 SDVTList VTs = getVTList(VT);
4852 if (VT != MVT::Glue) {
4853 FoldingSetNodeID ID;
4854 AddNodeIDNode(ID, Opcode, VTs, Ops.data(), NumOps);
4857 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4858 return SDValue(E, 0);
4860 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4862 CSEMap.InsertNode(N, IP);
4864 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4868 AllNodes.push_back(N);
4872 return SDValue(N, 0);
4875 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4876 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
4877 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
4880 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4881 ArrayRef<SDValue> Ops) {
4882 if (VTList.NumVTs == 1)
4883 return getNode(Opcode, DL, VTList.VTs[0], Ops);
4887 // FIXME: figure out how to safely handle things like
4888 // int foo(int x) { return 1 << (x & 255); }
4889 // int bar() { return foo(256); }
4890 case ISD::SRA_PARTS:
4891 case ISD::SRL_PARTS:
4892 case ISD::SHL_PARTS:
4893 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4894 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
4895 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4896 else if (N3.getOpcode() == ISD::AND)
4897 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
4898 // If the and is only masking out bits that cannot affect the shift
4899 // amount, eliminate the and.
4900 unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
4901 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
4902 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
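// Worked example (illustrative): expanding a 64-bit shift into 32-bit
// *_PARTS gives NumBits == 64, so a shift amount of the form (x & 63)
// masks nothing the expansion can observe, and the AND is dropped in
// favor of x itself.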
4908 // Memoize the node unless it returns a flag.
4910 unsigned NumOps = Ops.size();
4911 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4912 FoldingSetNodeID ID;
4913 AddNodeIDNode(ID, Opcode, VTList, Ops.data(), NumOps);
4915 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4916 return SDValue(E, 0);
4919 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
4920 DL.getDebugLoc(), VTList, Ops[0]);
4921 } else if (NumOps == 2) {
4922 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
4923 DL.getDebugLoc(), VTList, Ops[0],
4925 } else if (NumOps == 3) {
4926 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
4927 DL.getDebugLoc(), VTList, Ops[0],
4930 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4933 CSEMap.InsertNode(N, IP);
4936 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
4937 DL.getDebugLoc(), VTList, Ops[0]);
4938 } else if (NumOps == 2) {
4939 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
4940 DL.getDebugLoc(), VTList, Ops[0],
4942 } else if (NumOps == 3) {
4943 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
4944 DL.getDebugLoc(), VTList, Ops[0],
4947 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4951 AllNodes.push_back(N);
4955 return SDValue(N, 0);
4958 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
4959 return getNode(Opcode, DL, VTList, ArrayRef<SDValue>());
4962 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4964 SDValue Ops[] = { N1 };
4965 return getNode(Opcode, DL, VTList, Ops);
4968 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4969 SDValue N1, SDValue N2) {
4970 SDValue Ops[] = { N1, N2 };
4971 return getNode(Opcode, DL, VTList, Ops);
4974 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4975 SDValue N1, SDValue N2, SDValue N3) {
4976 SDValue Ops[] = { N1, N2, N3 };
4977 return getNode(Opcode, DL, VTList, Ops);
4980 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4981 SDValue N1, SDValue N2, SDValue N3,
4983 SDValue Ops[] = { N1, N2, N3, N4 };
4984 return getNode(Opcode, DL, VTList, Ops);
4987 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4988 SDValue N1, SDValue N2, SDValue N3,
4989 SDValue N4, SDValue N5) {
4990 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4991 return getNode(Opcode, DL, VTList, Ops);
4994 SDVTList SelectionDAG::getVTList(EVT VT) {
4995 return makeVTList(SDNode::getValueTypeList(VT), 1);
4998 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
4999 FoldingSetNodeID ID;
5001 ID.AddInteger(VT1.getRawBits());
5002 ID.AddInteger(VT2.getRawBits());
5005 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5007 EVT *Array = Allocator.Allocate<EVT>(2);
5010 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
5011 VTListMap.InsertNode(Result, IP);
5013 return Result->getSDVTList();
5016 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
5017 FoldingSetNodeID ID;
5019 ID.AddInteger(VT1.getRawBits());
5020 ID.AddInteger(VT2.getRawBits());
5021 ID.AddInteger(VT3.getRawBits());
5024 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5026 EVT *Array = Allocator.Allocate<EVT>(3);
5030 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5031 VTListMap.InsertNode(Result, IP);
5033 return Result->getSDVTList();
5036 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
5037 FoldingSetNodeID ID;
5039 ID.AddInteger(VT1.getRawBits());
5040 ID.AddInteger(VT2.getRawBits());
5041 ID.AddInteger(VT3.getRawBits());
5042 ID.AddInteger(VT4.getRawBits());
5045 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5047 EVT *Array = Allocator.Allocate<EVT>(4);
5052 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5053 VTListMap.InsertNode(Result, IP);
5055 return Result->getSDVTList();
5058 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
5059 unsigned NumVTs = VTs.size();
5060 FoldingSetNodeID ID;
5061 ID.AddInteger(NumVTs);
5062 for (unsigned index = 0; index < NumVTs; index++) {
5063 ID.AddInteger(VTs[index].getRawBits());
5067 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5069 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5070 std::copy(VTs.begin(), VTs.end(), Array);
5071 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5072 VTListMap.InsertNode(Result, IP);
5074 return Result->getSDVTList();
5078 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5079 /// specified operands. If the resultant node already exists in the DAG,
5080 /// this does not modify the specified node, instead it returns the node that
5081 /// already exists. If the resultant node does not exist in the DAG, the
5082 /// input node is returned. As a degenerate case, if you specify the same
5083 /// input operands as the node already has, the input node is returned.
5084 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
5085 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5087 // Check to see if there is no change.
5088 if (Op == N->getOperand(0)) return N;
5090 // See if the modified node already exists.
5091 void *InsertPos = nullptr;
5092 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
5095 // Nope, it doesn't. Remove the node from its current place in the maps.
5097 if (!RemoveNodeFromCSEMaps(N))
5098 InsertPos = nullptr;
5100 // Now we update the operands.
5101 N->OperandList[0].set(Op);
5103 // If this gets put into a CSE map, add it.
5104 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
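// Illustrative consequence (not from the source): if the DAG already holds
// (add t1, t2) and a caller updates some other ADD node's operands to make
// it (add t1, t2) as well, the pre-existing node is returned; callers must
// use the returned SDNode* rather than assume N itself was mutated.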
5108 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
5109 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5111 // Check to see if there is no change.
5112 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5113 return N; // No operands changed, just return the input node.
5115 // See if the modified node already exists.
5116 void *InsertPos = nullptr;
5117 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
5120 // Nope, it doesn't. Remove the node from its current place in the maps.
5122 if (!RemoveNodeFromCSEMaps(N))
5123 InsertPos = nullptr;
5125 // Now we update the operands.
5126 if (N->OperandList[0] != Op1)
5127 N->OperandList[0].set(Op1);
5128 if (N->OperandList[1] != Op2)
5129 N->OperandList[1].set(Op2);
5131 // If this gets put into a CSE map, add it.
5132 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5136 SDNode *SelectionDAG::
5137 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
5138 SDValue Ops[] = { Op1, Op2, Op3 };
5139 return UpdateNodeOperands(N, Ops, 3);
5142 SDNode *SelectionDAG::
5143 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5144 SDValue Op3, SDValue Op4) {
5145 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
5146 return UpdateNodeOperands(N, Ops, 4);
5149 SDNode *SelectionDAG::
5150 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5151 SDValue Op3, SDValue Op4, SDValue Op5) {
5152 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
5153 return UpdateNodeOperands(N, Ops, 5);
5156 SDNode *SelectionDAG::
5157 UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
5158 assert(N->getNumOperands() == NumOps &&
5159 "Update with wrong number of operands");
5161 // Check to see if there is no change.
5162 bool AnyChange = false;
5163 for (unsigned i = 0; i != NumOps; ++i) {
5164     if (Ops[i] != N->getOperand(i)) {
5165       AnyChange = true;
5166       break;
5167     }
5168   }
5170 // No operands changed, just return the input node.
5171 if (!AnyChange) return N;
5173 // See if the modified node already exists.
5174 void *InsertPos = nullptr;
5175   if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
5176     return Existing;
5178 // Nope it doesn't. Remove the node from its current place in the maps.
5180 if (!RemoveNodeFromCSEMaps(N))
5181 InsertPos = nullptr;
5183 // Now we update the operands.
5184 for (unsigned i = 0; i != NumOps; ++i)
5185 if (N->OperandList[i] != Ops[i])
5186 N->OperandList[i].set(Ops[i]);
5188 // If this gets put into a CSE map, add it.
5189   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5190   return N;
5191 }
5193 /// DropOperands - Release the operands and set this node to have
5194 /// zero operands.
5195 void SDNode::DropOperands() {
5196 // Unlike the code in MorphNodeTo that does this, we don't need to
5197 // watch for dead nodes here.
5198   for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5199     SDUse &Use = *I++;
5200     Use.set(SDValue());
5201   }
5202 }
5204 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
5205 /// machine opcode.
5207 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5209 SDVTList VTs = getVTList(VT);
5210 return SelectNodeTo(N, MachineOpc, VTs, nullptr, 0);
5213 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5214 EVT VT, SDValue Op1) {
5215 SDVTList VTs = getVTList(VT);
5216 SDValue Ops[] = { Op1 };
5217 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5220 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5221 EVT VT, SDValue Op1,
5223 SDVTList VTs = getVTList(VT);
5224 SDValue Ops[] = { Op1, Op2 };
5225 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5228 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5229 EVT VT, SDValue Op1,
5230 SDValue Op2, SDValue Op3) {
5231 SDVTList VTs = getVTList(VT);
5232 SDValue Ops[] = { Op1, Op2, Op3 };
5233 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5236 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5237 EVT VT, const SDValue *Ops,
5239 SDVTList VTs = getVTList(VT);
5240 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5243 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5244 EVT VT1, EVT VT2, const SDValue *Ops,
5246 SDVTList VTs = getVTList(VT1, VT2);
5247 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5250 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5252 SDVTList VTs = getVTList(VT1, VT2);
5253 return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)nullptr, 0);
5256 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5257 EVT VT1, EVT VT2, EVT VT3,
5258 const SDValue *Ops, unsigned NumOps) {
5259 SDVTList VTs = getVTList(VT1, VT2, VT3);
5260 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5263 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5264 EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5265 const SDValue *Ops, unsigned NumOps) {
5266 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5267 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5270 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5273 SDVTList VTs = getVTList(VT1, VT2);
5274 SDValue Ops[] = { Op1 };
5275 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5278 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5280 SDValue Op1, SDValue Op2) {
5281 SDVTList VTs = getVTList(VT1, VT2);
5282 SDValue Ops[] = { Op1, Op2 };
5283 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5286 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5288 SDValue Op1, SDValue Op2,
5290 SDVTList VTs = getVTList(VT1, VT2);
5291 SDValue Ops[] = { Op1, Op2, Op3 };
5292 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5295 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5296 EVT VT1, EVT VT2, EVT VT3,
5297 SDValue Op1, SDValue Op2,
5299 SDVTList VTs = getVTList(VT1, VT2, VT3);
5300 SDValue Ops[] = { Op1, Op2, Op3 };
5301 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5304 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5305 SDVTList VTs, const SDValue *Ops,
5307 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
5308   // Reset the NodeID to -1.
5309   N->setNodeId(-1);
5310   return N;
5311 }
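// For illustration only: a sketch of how instruction selection typically
// invokes these wrappers. TargetOpc stands in for a real, target-specific
// machine opcode; everything else follows the overloads defined above.
LLVM_ATTRIBUTE_UNUSED static SDNode *exampleSelectNodeTo(SelectionDAG &DAG,
                                                         SDNode *N,
                                                         unsigned TargetOpc,
                                                         SDValue LHS,
                                                         SDValue RHS) {
  // Note that the implementation above hands MorphNodeTo the *complemented*
  // opcode, which is how machine opcodes are distinguished from ISD opcodes.
  return DAG.SelectNodeTo(N, TargetOpc, N->getValueType(0), LHS, RHS);
}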
5313 /// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
5314 /// the line number information on the merged node since it is not possible to
5315 /// preserve the information that the operation is associated with multiple
5316 /// lines. This will make the debugger work better at -O0, where there is a
5317 /// higher probability of having other instructions associated with that line.
5319 /// For IROrder, we keep the smaller of the two.
5320 SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
5321 DebugLoc NLoc = N->getDebugLoc();
5322 if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
5323 (OLoc.getDebugLoc() != NLoc)) {
5324     N->setDebugLoc(DebugLoc());
5325   }
5326   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
5327   N->setIROrder(Order);
5328   return N;
5329 }
5331 /// MorphNodeTo - This *mutates* the specified node to have the specified
5332 /// return type, opcode, and operands.
5334 /// Note that MorphNodeTo returns the resultant node. If there is already a
5335 /// node of the specified opcode and operands, it returns that node instead of
5336 /// the current one. Note that the SDLoc need not be the same.
5338 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5339 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5340 /// node, and because it doesn't require CSE recalculation for any of
5341 /// the node's users.
5343 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5344                                   SDVTList VTs, const SDValue *Ops,
5345                                   unsigned NumOps) {
5346   // If an identical node already exists, use it.
5347   void *IP = nullptr;
5348 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5349 FoldingSetNodeID ID;
5350 AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
5351 if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5352       return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
5353   }
5355   if (!RemoveNodeFromCSEMaps(N))
5356     IP = nullptr;
5358 // Start the morphing.
5359   N->NodeType = Opc;
5360   N->ValueList = VTs.VTs;
5361 N->NumValues = VTs.NumVTs;
5363 // Clear the operands list, updating used nodes to remove this from their
5364 // use list. Keep track of any operands that become dead as a result.
5365 SmallPtrSet<SDNode*, 16> DeadNodeSet;
5366   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5367     SDUse &Use = *I++;
5368     SDNode *Used = Use.getNode();
5369     Use.set(SDValue());
5370     if (Used->use_empty())
5371       DeadNodeSet.insert(Used);
5372   }
5374 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5375 // Initialize the memory references information.
5376 MN->setMemRefs(nullptr, nullptr);
5377 // If NumOps is larger than the # of operands we can have in a
5378 // MachineSDNode, reallocate the operand list.
5379 if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5380 if (MN->OperandsNeedDelete)
5381 delete[] MN->OperandList;
5382 if (NumOps > array_lengthof(MN->LocalOperands))
5383 // We're creating a final node that will live unmorphed for the
5384 // remainder of the current SelectionDAG iteration, so we can allocate
5385 // the operands directly out of a pool with no recycling metadata.
5386         MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5387                          Ops, NumOps);
5388       else
5389         MN->InitOperands(MN->LocalOperands, Ops, NumOps);
5390       MN->OperandsNeedDelete = false;
5391     } else
5392       MN->InitOperands(MN->OperandList, Ops, NumOps);
5393   } else {
5394     // If NumOps is larger than the # of operands we currently have,
5395     // reallocate the operand list.
5396 if (NumOps > N->NumOperands) {
5397 if (N->OperandsNeedDelete)
5398 delete[] N->OperandList;
5399 N->InitOperands(new SDUse[NumOps], Ops, NumOps);
5400 N->OperandsNeedDelete = true;
5401     } else
5402       N->InitOperands(N->OperandList, Ops, NumOps);
5403   }
5405   // Delete any nodes that are still dead after adding the uses for the
5406   // new operands.
5407 if (!DeadNodeSet.empty()) {
5408 SmallVector<SDNode *, 16> DeadNodes;
5409 for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
5410 E = DeadNodeSet.end(); I != E; ++I)
5411 if ((*I)->use_empty())
5412 DeadNodes.push_back(*I);
5413 RemoveDeadNodes(DeadNodes);
5414   }
5416   if (IP)
5417     CSEMap.InsertNode(N, IP);   // Memoize the new node.
5418   return N;
5419 }
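// For illustration only: a minimal sketch of morphing a node in place,
// assuming a two-operand replacement; the choice of ISD::ADD is arbitrary.
// Callers must use the returned pointer, since CSE may hand back a
// pre-existing identical node rather than N itself.
LLVM_ATTRIBUTE_UNUSED static SDNode *exampleMorph(SelectionDAG &DAG, SDNode *N,
                                                  SDValue Op0, SDValue Op1) {
  SDVTList VTs = DAG.getVTList(N->getValueType(0));
  SDValue Ops[] = { Op0, Op1 };
  return DAG.MorphNodeTo(N, ISD::ADD, VTs, Ops, 2);
}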
5422 /// getMachineNode - These are used for target selectors to create a new node
5423 /// with specified return type(s), MachineInstr opcode, and operands.
5425 /// Note that getMachineNode returns the resultant node. If there is already a
5426 /// node of the specified opcode and operands, it returns that node instead of
5427 /// the current one.
5429 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
5430 SDVTList VTs = getVTList(VT);
5431 return getMachineNode(Opcode, dl, VTs, None);
5435 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
5436 SDVTList VTs = getVTList(VT);
5437 SDValue Ops[] = { Op1 };
5438 return getMachineNode(Opcode, dl, VTs, Ops);
5442 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5443 SDValue Op1, SDValue Op2) {
5444 SDVTList VTs = getVTList(VT);
5445 SDValue Ops[] = { Op1, Op2 };
5446 return getMachineNode(Opcode, dl, VTs, Ops);
5450 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5451 SDValue Op1, SDValue Op2, SDValue Op3) {
5452 SDVTList VTs = getVTList(VT);
5453 SDValue Ops[] = { Op1, Op2, Op3 };
5454 return getMachineNode(Opcode, dl, VTs, Ops);
5458 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5459 ArrayRef<SDValue> Ops) {
5460 SDVTList VTs = getVTList(VT);
5461 return getMachineNode(Opcode, dl, VTs, Ops);
5465 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
5466 SDVTList VTs = getVTList(VT1, VT2);
5467 return getMachineNode(Opcode, dl, VTs, None);
5471 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5472 EVT VT1, EVT VT2, SDValue Op1) {
5473 SDVTList VTs = getVTList(VT1, VT2);
5474 SDValue Ops[] = { Op1 };
5475 return getMachineNode(Opcode, dl, VTs, Ops);
5479 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5480 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
5481 SDVTList VTs = getVTList(VT1, VT2);
5482 SDValue Ops[] = { Op1, Op2 };
5483 return getMachineNode(Opcode, dl, VTs, Ops);
5487 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5488 EVT VT1, EVT VT2, SDValue Op1,
5489 SDValue Op2, SDValue Op3) {
5490 SDVTList VTs = getVTList(VT1, VT2);
5491 SDValue Ops[] = { Op1, Op2, Op3 };
5492 return getMachineNode(Opcode, dl, VTs, Ops);
5496 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5498 ArrayRef<SDValue> Ops) {
5499 SDVTList VTs = getVTList(VT1, VT2);
5500 return getMachineNode(Opcode, dl, VTs, Ops);
5504 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5505 EVT VT1, EVT VT2, EVT VT3,
5506 SDValue Op1, SDValue Op2) {
5507 SDVTList VTs = getVTList(VT1, VT2, VT3);
5508 SDValue Ops[] = { Op1, Op2 };
5509 return getMachineNode(Opcode, dl, VTs, Ops);
5513 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5514 EVT VT1, EVT VT2, EVT VT3,
5515 SDValue Op1, SDValue Op2, SDValue Op3) {
5516 SDVTList VTs = getVTList(VT1, VT2, VT3);
5517 SDValue Ops[] = { Op1, Op2, Op3 };
5518 return getMachineNode(Opcode, dl, VTs, Ops);
5522 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5523 EVT VT1, EVT VT2, EVT VT3,
5524 ArrayRef<SDValue> Ops) {
5525 SDVTList VTs = getVTList(VT1, VT2, VT3);
5526 return getMachineNode(Opcode, dl, VTs, Ops);
5530 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
5531 EVT VT2, EVT VT3, EVT VT4,
5532 ArrayRef<SDValue> Ops) {
5533 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5534 return getMachineNode(Opcode, dl, VTs, Ops);
5538 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5539 ArrayRef<EVT> ResultTys,
5540 ArrayRef<SDValue> Ops) {
5541 SDVTList VTs = getVTList(ResultTys);
5542 return getMachineNode(Opcode, dl, VTs, Ops);
5545 MachineSDNode *
5546 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
5547 ArrayRef<SDValue> OpsArray) {
5548   bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
5549   MachineSDNode *N;
5550   void *IP = nullptr;
5551 const SDValue *Ops = OpsArray.data();
5552 unsigned NumOps = OpsArray.size();
5554   if (DoCSE) {
5555     FoldingSetNodeID ID;
5556 AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
5557     IP = nullptr;
5558     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5559       return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
5560     }
5561   }
5563 // Allocate a new MachineSDNode.
5564 N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
5565 DL.getDebugLoc(), VTs);
5567 // Initialize the operands list.
5568 if (NumOps > array_lengthof(N->LocalOperands))
5569 // We're creating a final node that will live unmorphed for the
5570 // remainder of the current SelectionDAG iteration, so we can allocate
5571 // the operands directly out of a pool with no recycling metadata.
5572     N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5573                     Ops, NumOps);
5574   else
5575     N->InitOperands(N->LocalOperands, Ops, NumOps);
5576   N->OperandsNeedDelete = false;
5578   if (DoCSE)
5579     CSEMap.InsertNode(N, IP);
5581 AllNodes.push_back(N);
5582 #ifndef NDEBUG
5583   VerifyMachineNode(N);
5584 #endif
5585   return N;
5586 }
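// For illustration only: a sketch of creating a machine node from a target's
// selection code. TargetOpcode::COPY_TO_REGCLASS is used as a generic
// stand-in for a real target instruction; RegClassId is hypothetical.
LLVM_ATTRIBUTE_UNUSED static MachineSDNode *
exampleGetMachineNode(SelectionDAG &DAG, SDLoc DL, EVT VT, SDValue Val,
                      SDValue RegClassId) {
  // Identical machine nodes (same opcode, types, and operands) are CSE'd
  // unless the value type list ends in MVT::Glue, as the code above shows.
  return DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT,
                            Val, RegClassId);
}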
5588 /// getTargetExtractSubreg - A convenience function for creating
5589 /// TargetOpcode::EXTRACT_SUBREG nodes.
5590 SDValue
5591 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
5592                                      SDValue Operand) {
5593 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5594 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5595 VT, Operand, SRIdxVal);
5596   return SDValue(Subreg, 0);
5597 }
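// For illustration only: extracting the low 32 bits of a 64-bit value,
// assuming the target defines a suitable sub-register index (SubLo32 is a
// hypothetical stand-in for a target's index such as sub_32).
LLVM_ATTRIBUTE_UNUSED static SDValue exampleExtractLo32(SelectionDAG &DAG,
                                                        SDLoc DL, SDValue V64,
                                                        int SubLo32) {
  return DAG.getTargetExtractSubreg(SubLo32, DL, MVT::i32, V64);
}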
5599 /// getTargetInsertSubreg - A convenience function for creating
5600 /// TargetOpcode::INSERT_SUBREG nodes.
5601 SDValue
5602 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
5603 SDValue Operand, SDValue Subreg) {
5604 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5605 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5606 VT, Operand, Subreg, SRIdxVal);
5607   return SDValue(Result, 0);
5608 }
5610 /// getNodeIfExists - Get the specified node if it's already available, or
5611 /// else return NULL.
5612 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5613 const SDValue *Ops, unsigned NumOps) {
5614 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5615 FoldingSetNodeID ID;
5616 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
5617     void *IP = nullptr;
5618     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5619       return E;
5620   }
5621   return nullptr;
5622 }
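// For illustration only: probing for an existing node without creating one.
// This is useful in folds that should only fire when the desired node is
// already present; A and B are hypothetical values.
LLVM_ATTRIBUTE_UNUSED static SDNode *exampleProbeForAdd(SelectionDAG &DAG,
                                                        SDValue A, SDValue B) {
  SDValue Ops[] = { A, B };
  SDVTList VTs = DAG.getVTList(A.getValueType());
  return DAG.getNodeIfExists(ISD::ADD, VTs, Ops, 2); // null when absent
}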
5624 /// getDbgValue - Creates an SDDbgValue node.
5626 /// SDNode
5627 SDDbgValue *
5628 SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R,
5629 bool IsIndirect, uint64_t Off,
5630 DebugLoc DL, unsigned O) {
5631 return new (Allocator) SDDbgValue(MDPtr, N, R, IsIndirect, Off, DL, O);
5635 SDDbgValue *
5636 SelectionDAG::getConstantDbgValue(MDNode *MDPtr, const Value *C,
5637                                   uint64_t Off,
5638 DebugLoc DL, unsigned O) {
5639 return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
5643 SDDbgValue *
5644 SelectionDAG::getFrameIndexDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
5645 DebugLoc DL, unsigned O) {
5646 return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
5651 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5652 /// pointed to by a use iterator is deleted, increment the use iterator
5653 /// so that it doesn't dangle.
5654 namespace {
5655 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5656 SDNode::use_iterator &UI;
5657 SDNode::use_iterator &UE;
5659 void NodeDeleted(SDNode *N, SDNode *E) override {
5660 // Increment the iterator as needed.
5661     while (UI != UE && N == *UI)
5662       ++UI;
5663   }
5665 public:
5666 RAUWUpdateListener(SelectionDAG &d,
5667 SDNode::use_iterator &ui,
5668 SDNode::use_iterator &ue)
5669     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5670 };
5671 }
5674 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5675 /// This can cause recursive merging of nodes in the DAG.
5677 /// This version assumes From has a single result value.
5679 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5680 SDNode *From = FromN.getNode();
5681 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5682 "Cannot replace with this method!");
5683 assert(From != To.getNode() && "Cannot replace uses of with self");
5685 // Iterate over all the existing uses of From. New uses will be added
5686 // to the beginning of the use list, which we avoid visiting.
5687 // This specifically avoids visiting uses of From that arise while the
5688 // replacement is happening, because any such uses would be the result
5689 // of CSE: If an existing node looks like From after one of its operands
5690 // is replaced by To, we don't want to replace all of its users with To
5691 // too. See PR3018 for more info.
5692 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5693 RAUWUpdateListener Listener(*this, UI, UE);
5695   while (UI != UE) {
5696     SDNode *User = *UI;
5697     // This node is about to morph, remove its old self from the CSE maps.
5698 RemoveNodeFromCSEMaps(User);
5700 // A user can appear in a use list multiple times, and when this
5701 // happens the uses are usually next to each other in the list.
5702 // To help reduce the number of CSE recomputations, process all
5703 // the uses of this user that we can find this way.
5704     do {
5705       SDUse &Use = UI.getUse();
5706       ++UI;
5707       Use.set(To);
5708     } while (UI != UE && *UI == User);
5710 // Now that we have modified User, add it back to the CSE maps. If it
5711 // already exists there, recursively merge the results together.
5712     AddModifiedNodeToCSEMaps(User);
5713   }
5715 // If we just RAUW'd the root, take note.
5716   if (FromN == getRoot())
5717     setRoot(To);
5718 }
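// For illustration only: the typical shape of a fold that simplifies a
// single-result value and rewrites every user; Old and Simplified are
// hypothetical placeholders.
LLVM_ATTRIBUTE_UNUSED static void exampleFold(SelectionDAG &DAG, SDValue Old,
                                              SDValue Simplified) {
  // After this call Old is normally dead; RemoveDeadNodes reclaims it, and
  // merging may recursively CSE Old's former users as described above.
  DAG.ReplaceAllUsesWith(Old, Simplified);
}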
5720 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5721 /// This can cause recursive merging of nodes in the DAG.
5723 /// This version assumes that for each value of From, there is a
5724 /// corresponding value in To in the same position with the same type.
5726 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
5728 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
5729 assert((!From->hasAnyUseOfValue(i) ||
5730 From->getValueType(i) == To->getValueType(i)) &&
5731 "Cannot use this version of ReplaceAllUsesWith!");
5734   // Handle the trivial case.
5735   if (From == To)
5736     return;
5738 // Iterate over just the existing users of From. See the comments in
5739 // the ReplaceAllUsesWith above.
5740 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5741 RAUWUpdateListener Listener(*this, UI, UE);
5742   while (UI != UE) {
5743     SDNode *User = *UI;
5745     // This node is about to morph, remove its old self from the CSE maps.
5746 RemoveNodeFromCSEMaps(User);
5748 // A user can appear in a use list multiple times, and when this
5749 // happens the uses are usually next to each other in the list.
5750 // To help reduce the number of CSE recomputations, process all
5751 // the uses of this user that we can find this way.
5752     do {
5753       SDUse &Use = UI.getUse();
5754       ++UI;
5755       Use.setNode(To);
5756     } while (UI != UE && *UI == User);
5758 // Now that we have modified User, add it back to the CSE maps. If it
5759 // already exists there, recursively merge the results together.
5760     AddModifiedNodeToCSEMaps(User);
5761   }
5763 // If we just RAUW'd the root, take note.
5764 if (From == getRoot().getNode())
5765     setRoot(SDValue(To, getRoot().getResNo()));
5766 }
5768 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5769 /// This can cause recursive merging of nodes in the DAG.
5771 /// This version can replace From with any result values. To must match the
5772 /// number and types of values returned by From.
5773 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
5774 if (From->getNumValues() == 1) // Handle the simple case efficiently.
5775 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
5777 // Iterate over just the existing users of From. See the comments in
5778 // the ReplaceAllUsesWith above.
5779 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5780 RAUWUpdateListener Listener(*this, UI, UE);
5781   while (UI != UE) {
5782     SDNode *User = *UI;
5784     // This node is about to morph, remove its old self from the CSE maps.
5785 RemoveNodeFromCSEMaps(User);
5787 // A user can appear in a use list multiple times, and when this
5788 // happens the uses are usually next to each other in the list.
5789 // To help reduce the number of CSE recomputations, process all
5790 // the uses of this user that we can find this way.
5791     do {
5792       SDUse &Use = UI.getUse();
5793       const SDValue &ToOp = To[Use.getResNo()];
5794       ++UI;
5795       Use.set(ToOp);
5796     } while (UI != UE && *UI == User);
5798 // Now that we have modified User, add it back to the CSE maps. If it
5799 // already exists there, recursively merge the results together.
5800     AddModifiedNodeToCSEMaps(User);
5801   }
5803 // If we just RAUW'd the root, take note.
5804 if (From == getRoot().getNode())
5805     setRoot(SDValue(To[getRoot().getResNo()]));
5806 }
5808 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
5809 /// uses of other values produced by From.getNode() alone. This can cause
5810 /// recursive merging of nodes in the DAG.
5811 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
5812 // Handle the really simple, really trivial case efficiently.
5813 if (From == To) return;
5815 // Handle the simple, trivial, case efficiently.
5816 if (From.getNode()->getNumValues() == 1) {
5817     ReplaceAllUsesWith(From, To);
5818     return;
5819   }
5821 // Iterate over just the existing users of From. See the comments in
5822 // the ReplaceAllUsesWith above.
5823 SDNode::use_iterator UI = From.getNode()->use_begin(),
5824 UE = From.getNode()->use_end();
5825 RAUWUpdateListener Listener(*this, UI, UE);
5826   while (UI != UE) {
5827     SDNode *User = *UI;
5828     bool UserRemovedFromCSEMaps = false;
5830 // A user can appear in a use list multiple times, and when this
5831 // happens the uses are usually next to each other in the list.
5832 // To help reduce the number of CSE recomputations, process all
5833 // the uses of this user that we can find this way.
5834     do {
5835       SDUse &Use = UI.getUse();
5837 // Skip uses of different values from the same node.
5838       if (Use.getResNo() != From.getResNo()) {
5839         ++UI;
5840         continue;
5841       }
5843 // If this node hasn't been modified yet, it's still in the CSE maps,
5844 // so remove its old self from the CSE maps.
5845 if (!UserRemovedFromCSEMaps) {
5846 RemoveNodeFromCSEMaps(User);
5847         UserRemovedFromCSEMaps = true;
5848       }
5850       ++UI;
5851       Use.set(To);
5852     } while (UI != UE && *UI == User);
5854 // We are iterating over all uses of the From node, so if a use
5855 // doesn't use the specific value, no changes are made.
5856     if (!UserRemovedFromCSEMaps)
5857       continue;
5859 // Now that we have modified User, add it back to the CSE maps. If it
5860 // already exists there, recursively merge the results together.
5861     AddModifiedNodeToCSEMaps(User);
5862   }
5864 // If we just RAUW'd the root, take note.
5865   if (From == getRoot())
5866     setRoot(To);
5867 }
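// For illustration only: replacing just the chain result of a load (result 1)
// while leaving its data result (result 0) untouched; LD and NewChain are
// hypothetical placeholders.
LLVM_ATTRIBUTE_UNUSED static void exampleReplaceChain(SelectionDAG &DAG,
                                                      SDNode *LD,
                                                      SDValue NewChain) {
  DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain);
}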
5870 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
5871 /// to record information about a use.
5872 struct UseMemo {
5873   SDNode *User;
5874   unsigned Index;
5875   SDUse *Use;
5876 };
5878 /// operator< - Sort Memos by User.
5879 bool operator<(const UseMemo &L, const UseMemo &R) {
5880   return (intptr_t)L.User < (intptr_t)R.User;
5881 }
5884 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
5885 /// uses of other values produced by From.getNode() alone. The same value
5886 /// may appear in both the From and To list. This can cause recursive
5887 /// merging of nodes in the DAG.
5888 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
5889                                               const SDValue *To,
5890                                               unsigned Num) {
5891   // Handle the simple, trivial case efficiently.
5892   if (Num == 1)
5893     return ReplaceAllUsesOfValueWith(*From, *To);
5895 // Read up all the uses and make records of them. This helps
5896 // processing new uses that are introduced during the
5897 // replacement process.
5898 SmallVector<UseMemo, 4> Uses;
5899 for (unsigned i = 0; i != Num; ++i) {
5900 unsigned FromResNo = From[i].getResNo();
5901 SDNode *FromNode = From[i].getNode();
5902 for (SDNode::use_iterator UI = FromNode->use_begin(),
5903 E = FromNode->use_end(); UI != E; ++UI) {
5904 SDUse &Use = UI.getUse();
5905 if (Use.getResNo() == FromResNo) {
5906 UseMemo Memo = { *UI, i, &Use };
5907         Uses.push_back(Memo);
5908       }
5909     }
5910   }
5912 // Sort the uses, so that all the uses from a given User are together.
5913 std::sort(Uses.begin(), Uses.end());
5915 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
5916 UseIndex != UseIndexEnd; ) {
5917 // We know that this user uses some value of From. If it is the right
5918 // value, update it.
5919 SDNode *User = Uses[UseIndex].User;
5921 // This node is about to morph, remove its old self from the CSE maps.
5922 RemoveNodeFromCSEMaps(User);
5924 // The Uses array is sorted, so all the uses for a given User
5925 // are next to each other in the list.
5926 // To help reduce the number of CSE recomputations, process all
5927 // the uses of this user that we can find this way.
5928     do {
5929       unsigned i = Uses[UseIndex].Index;
5930       SDUse &Use = *Uses[UseIndex].Use;
5931       ++UseIndex;
5933       Use.set(To[i]);
5934     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
5936 // Now that we have modified User, add it back to the CSE maps. If it
5937 // already exists there, recursively merge the results together.
5938     AddModifiedNodeToCSEMaps(User);
5939   }
5940 }
5942 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
5943 /// based on their topological order. It returns the maximum id, and the
5944 /// node list is reordered so that each node follows all of its operands.
5945 unsigned SelectionDAG::AssignTopologicalOrder() {
5947 unsigned DAGSize = 0;
5949 // SortedPos tracks the progress of the algorithm. Nodes before it are
5950 // sorted, nodes after it are unsorted. When the algorithm completes
5951 // it is at the end of the list.
5952 allnodes_iterator SortedPos = allnodes_begin();
5954 // Visit all the nodes. Move nodes with no operands to the front of
5955 // the list immediately. Annotate nodes that do have operands with their
5956 // operand count. Before we do this, the Node Id fields of the nodes
5957 // may contain arbitrary values. After, the Node Id fields for nodes
5958 // before SortedPos will contain the topological sort index, and the
5959 // Node Id fields for nodes at SortedPos and after will contain the
5960 // count of outstanding operands.
5961   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
5962     SDNode *N = I++;
5963     checkForCycles(N);
5964     unsigned Degree = N->getNumOperands();
5965     if (Degree == 0) {
5966       // A node with no operands, add it to the result array immediately.
5967       N->setNodeId(DAGSize++);
5968 allnodes_iterator Q = N;
5969       if (Q != SortedPos)
5970         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
5971       assert(SortedPos != AllNodes.end() && "Overran node list");
5972       ++SortedPos;
5973     } else {
5974       // Temporarily use the Node Id as scratch space for the degree count.
5975       N->setNodeId(Degree);
5976     }
5977   }
5979 // Visit all the nodes. As we iterate, move nodes into sorted order,
5980 // such that by the time the end is reached all nodes will be sorted.
5981   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I) {
5982     SDNode *N = I;
5983     checkForCycles(N);
5984     // N is in sorted position, so all its uses have one less operand
5985 // that needs to be sorted.
5986 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5987          UI != UE; ++UI) {
5988       SDNode *P = *UI;
5989       unsigned Degree = P->getNodeId();
5990       assert(Degree != 0 && "Invalid node degree");
5991       --Degree;
5992       if (Degree == 0) {
5993         // All of P's operands are sorted, so P may be sorted now.
5994         P->setNodeId(DAGSize++);
5995         if (P != SortedPos)
5996           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
5997         assert(SortedPos != AllNodes.end() && "Overran node list");
5998         ++SortedPos;
5999       } else {
6000         // Update P's outstanding operand count.
6001         P->setNodeId(Degree);
6002       }
6003     }
6004     if (I == SortedPos) {
6005 #ifndef NDEBUG
6006       SDNode *S = ++I;
6007       dbgs() << "Overran sorted position:\n";
6008       S->dumprFull();
6009 #endif
6010       llvm_unreachable(nullptr);
6011     }
6012   }
6014 assert(SortedPos == AllNodes.end() &&
6015 "Topological sort incomplete!");
6016 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
6017 "First node in topological sort is not the entry token!");
6018 assert(AllNodes.front().getNodeId() == 0 &&
6019 "First node in topological sort has non-zero id!");
6020 assert(AllNodes.front().getNumOperands() == 0 &&
6021 "First node in topological sort has operands!");
6022   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
6023          "Last node in topological sort has unexpected id!");
6024   assert(AllNodes.back().use_empty() &&
6025          "Last node in topological sort has users!");
6026   assert(DAGSize == allnodes_size() && "Node count mismatch!");
6028   return DAGSize;
6029 }
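// For illustration only: after the sort, walking the node list visits every
// node after all of its operands, and getNodeId() is the topological index.
LLVM_ATTRIBUTE_UNUSED static void exampleTopologicalWalk(SelectionDAG &DAG) {
  unsigned DAGSize = DAG.AssignTopologicalOrder();
  (void)DAGSize;
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = DAG.allnodes_end(); I != E; ++I) {
    SDNode *N = I; // All of N's operands were already visited.
    (void)N;
  }
}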
6030 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
6031 /// value is produced by SD.
6032 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
6033 DbgInfo->add(DB, SD, isParameter);
6034   if (SD)
6035     SD->setHasDebugValue(true);
6036 }
6038 /// TransferDbgValues - Transfer SDDbgValues.
6039 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
6040   if (From == To || !From.getNode()->getHasDebugValue())
6041     return;
6042 SDNode *FromNode = From.getNode();
6043 SDNode *ToNode = To.getNode();
6044 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
6045 SmallVector<SDDbgValue *, 2> ClonedDVs;
6046 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
6047        I != E; ++I) {
6048     SDDbgValue *Dbg = *I;
6049 if (Dbg->getKind() == SDDbgValue::SDNODE) {
6050 SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
6051                                       Dbg->isIndirect(),
6052                                       Dbg->getOffset(), Dbg->getDebugLoc(),
6053                                       Dbg->getOrder());
6054       ClonedDVs.push_back(Clone);
6055     }
6056   }
6057 for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
6058 E = ClonedDVs.end(); I != E; ++I)
6059     AddDbgValue(*I, ToNode, false);
6060 }
6062 //===----------------------------------------------------------------------===//
6063 //                              SDNode Class
6064 //===----------------------------------------------------------------------===//
6066 HandleSDNode::~HandleSDNode() {
6067   DropOperands();
6068 }
6070 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
6071 DebugLoc DL, const GlobalValue *GA,
6072 EVT VT, int64_t o, unsigned char TF)
6073     : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
6074   TheGlobal = GA;
6075 }
6077 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
6078 SDValue X, unsigned SrcAS,
6079                                          unsigned DestAS)
6080     : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
6081 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
6083 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6084 EVT memvt, MachineMemOperand *mmo)
6085 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
6086 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6087 MMO->isNonTemporal(), MMO->isInvariant());
6088 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6089 assert(isNonTemporal() == MMO->isNonTemporal() &&
6090 "Non-temporal encoding error!");
6091   assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
6092 }
6094 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6095 ArrayRef<SDValue> Ops, EVT memvt, MachineMemOperand *mmo)
6096 : SDNode(Opc, Order, dl, VTs, Ops),
6097 MemoryVT(memvt), MMO(mmo) {
6098 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6099 MMO->isNonTemporal(), MMO->isInvariant());
6100 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6101   assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
6102 }
6104 /// Profile - Gather unique data for the node.
6106 void SDNode::Profile(FoldingSetNodeID &ID) const {
6107   AddNodeIDNode(ID, this);
6108 }
6110 namespace {
6111 struct EVTArray {
6112   std::vector<EVT> VTs;
6114   EVTArray() {
6115     VTs.reserve(MVT::LAST_VALUETYPE);
6116     for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
6117       VTs.push_back(MVT((MVT::SimpleValueType)i));
6118   }
6119 };
6120 }
6122 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
6123 static ManagedStatic<EVTArray> SimpleVTArray;
6124 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
6126 /// getValueTypeList - Return a pointer to the specified value type.
6128 const EVT *SDNode::getValueTypeList(EVT VT) {
6129 if (VT.isExtended()) {
6130 sys::SmartScopedLock<true> Lock(*VTMutex);
6131     return &(*EVTs->insert(VT).first);
6132   } else {
6133 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
6134 "Value type out of range!");
6135     return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
6136   }
6137 }
6139 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
6140 /// indicated value. This method ignores uses of other values defined by this
6141 /// operation.
6142 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
6143 assert(Value < getNumValues() && "Bad value!");
6145 // TODO: Only iterate over uses of a given value of the node
6146 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
6147     if (UI.getUse().getResNo() == Value) {
6148       if (NUses == 0)
6149         return false;
6150       --NUses;
6151     }
6152   }
6154   // Found exactly the right number of uses?
6155   return NUses == 0;
6156 }
6159 /// hasAnyUseOfValue - Return true if there are any uses of the indicated
6160 /// value. This method ignores uses of other values defined by this operation.
6161 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
6162 assert(Value < getNumValues() && "Bad value!");
6164 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
6165     if (UI.getUse().getResNo() == Value)
6166       return true;
6168   return false;
6169 }
6172 /// isOnlyUserOf - Return true if this node is the only user of N.
6174 bool SDNode::isOnlyUserOf(SDNode *N) const {
6175   bool Seen = false;
6176   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
6177     SDNode *User = *I;
6178     if (User == this)
6179       Seen = true;
6180     else
6181       return false;
6182   }
6184   return Seen;
6185 }
6187 /// isOperandOf - Return true if this value is an operand of N.
6189 bool SDValue::isOperandOf(SDNode *N) const {
6190 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6191     if (*this == N->getOperand(i))
6192       return true;
6194   return false;
6195 }
6196 bool SDNode::isOperandOf(SDNode *N) const {
6197 for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6198     if (this == N->OperandList[i].getNode())
6199       return true;
6201   return false;
6202 }
6203 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6204 /// be a chain) reaches the specified operand without crossing any
6205 /// side-effecting instructions on any chain path. In practice, this looks
6206 /// through token factors and non-volatile loads. In order to remain efficient,
6207 /// this only looks a couple of nodes in, it does not do an exhaustive search.
6208 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6209 unsigned Depth) const {
6210 if (*this == Dest) return true;
6212 // Don't search too deeply, we just want to be able to see through
6213 // TokenFactor's etc.
6214 if (Depth == 0) return false;
6216 // If this is a token factor, all inputs to the TF happen in parallel. If any
6217 // of the operands of the TF does not reach dest, then we cannot do the xform.
6218 if (getOpcode() == ISD::TokenFactor) {
6219 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6220       if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6221         return false;
6222     return true;
6223   }
6225 // Loads don't have side effects, look through them.
6226 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6227 if (!Ld->isVolatile())
6228       return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
6229   }
6230   return false;
6231 }
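// For illustration only: a combine that wants to hoist a load past a chain
// node can test, within the small default depth budget, that nothing with
// side effects intervenes; both values are hypothetical.
LLVM_ATTRIBUTE_UNUSED static bool exampleChainIsClear(SDValue Chain,
                                                      SDValue Dest) {
  // Depth defaults to 2; larger values see further but cost more.
  return Chain.reachesChainWithoutSideEffects(Dest, 2);
}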
6233 /// hasPredecessor - Return true if N is a predecessor of this node.
6234 /// N is either an operand of this node, or can be reached by recursively
6235 /// traversing up the operands.
6236 /// NOTE: This is an expensive method. Use it carefully.
6237 bool SDNode::hasPredecessor(const SDNode *N) const {
6238 SmallPtrSet<const SDNode *, 32> Visited;
6239 SmallVector<const SDNode *, 16> Worklist;
6240   return hasPredecessorHelper(N, Visited, Worklist);
6241 }
6243 bool
6244 SDNode::hasPredecessorHelper(const SDNode *N,
6245 SmallPtrSet<const SDNode *, 32> &Visited,
6246 SmallVectorImpl<const SDNode *> &Worklist) const {
6247 if (Visited.empty()) {
6248     Worklist.push_back(this);
6249   } else {
6250 // Take a look in the visited set. If we've already encountered this node
6251 // we needn't search further.
6252     if (Visited.count(N))
6253       return true;
6254   }
6256 // Haven't visited N yet. Continue the search.
6257 while (!Worklist.empty()) {
6258 const SDNode *M = Worklist.pop_back_val();
6259 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6260 SDNode *Op = M->getOperand(i).getNode();
6261 if (Visited.insert(Op))
6262         Worklist.push_back(Op);
6263       if (Op == N)
6264         return true;
6265     }
6266   }
6268   return false;
6269 }
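// For illustration only: when testing several candidates against the same
// root, the helper form lets a caller share one visited set and worklist
// across queries instead of restarting the traversal each time.
LLVM_ATTRIBUTE_UNUSED static bool
exampleAnyIsPredecessor(const SDNode *Root, const SDNode *A, const SDNode *B) {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  return Root->hasPredecessorHelper(A, Visited, Worklist) ||
         Root->hasPredecessorHelper(B, Visited, Worklist);
}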
6271 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6272 assert(Num < NumOperands && "Invalid child # of SDNode!");
6273   return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
6274 }
6276 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6277 assert(N->getNumValues() == 1 &&
6278 "Can't unroll a vector with multiple results!");
6280 EVT VT = N->getValueType(0);
6281 unsigned NE = VT.getVectorNumElements();
6282 EVT EltVT = VT.getVectorElementType();
6283   SDLoc dl(N);
6285   SmallVector<SDValue, 8> Scalars;
6286 SmallVector<SDValue, 4> Operands(N->getNumOperands());
6288   // If ResNE is 0, fully unroll the vector op.
6289   if (ResNE == 0)
6290     ResNE = NE;
6291   else if (NE > ResNE)
6292     NE = ResNE;
6294   unsigned i;
6295   for (i = 0; i != NE; ++i) {
6296 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6297 SDValue Operand = N->getOperand(j);
6298 EVT OperandVT = Operand.getValueType();
6299 if (OperandVT.isVector()) {
6300 // A vector operand; extract a single element.
6301 const TargetLowering *TLI = TM.getTargetLowering();
6302 EVT OperandEltVT = OperandVT.getVectorElementType();
6303 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6304                               OperandEltVT,
6305                               Operand,
6306                               getConstant(i, TLI->getVectorIdxTy()));
6307       } else {
6308         // A scalar operand; just use it as is.
6309         Operands[j] = Operand;
6310       }
6311     }
6313     switch (N->getOpcode()) {
6314     default:
6315       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands));
6316       break;
6317     case ISD::VSELECT:
6318       Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
6319       break;
6320     case ISD::SHL:
6321     case ISD::SRA:
6322     case ISD::SRL:
6323     case ISD::ROTL:
6324     case ISD::ROTR:
6325       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6326                              getShiftAmountOperand(Operands[0].getValueType(),
6327                                                    Operands[1])));
6328       break;
6329 case ISD::SIGN_EXTEND_INREG:
6330 case ISD::FP_ROUND_INREG: {
6331 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6332 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6333                                 Operands[0],
6334                                 getValueType(ExtVT)));
6335     }
6336     }
6337   }
6339 for (; i < ResNE; ++i)
6340 Scalars.push_back(getUNDEF(EltVT));
6342 return getNode(ISD::BUILD_VECTOR, dl,
6343                  EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
6344 }
6347 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
6348 /// location that is 'Dist' units away from the location that the 'Base' load
6349 /// is loading from.
6350 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
6351 unsigned Bytes, int Dist) const {
6352   if (LD->getChain() != Base->getChain())
6353     return false;
6354 EVT VT = LD->getValueType(0);
6355   if (VT.getSizeInBits() / 8 != Bytes)
6356     return false;
6358 SDValue Loc = LD->getOperand(1);
6359 SDValue BaseLoc = Base->getOperand(1);
6360 if (Loc.getOpcode() == ISD::FrameIndex) {
6361     if (BaseLoc.getOpcode() != ISD::FrameIndex)
6362       return false;
6363 const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
6364 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
6365 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6366 int FS = MFI->getObjectSize(FI);
6367 int BFS = MFI->getObjectSize(BFI);
6368 if (FS != BFS || FS != (int)Bytes) return false;
6369     return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6370   }
6372   // Handle X + C.
6373 if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
6374       cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
6375     return true;
6377 const GlobalValue *GV1 = nullptr;
6378 const GlobalValue *GV2 = nullptr;
6379 int64_t Offset1 = 0;
6380 int64_t Offset2 = 0;
6381 const TargetLowering *TLI = TM.getTargetLowering();
6382 bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6383 bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6384 if (isGA1 && isGA2 && GV1 == GV2)
6385     return Offset1 == (Offset2 + Dist*Bytes);
6387   return false;
6388 }
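// For illustration only: asking whether Next loads the four bytes directly
// after what Base loads, as a load-merging heuristic might; both load nodes
// are hypothetical.
LLVM_ATTRIBUTE_UNUSED static bool
exampleLoadsAreAdjacent(const SelectionDAG &DAG, LoadSDNode *Next,
                        LoadSDNode *Base) {
  // Dist is measured in units of Bytes, so (Bytes=4, Dist=1) means Next's
  // address equals Base's address plus 4.
  return DAG.isConsecutiveLoad(Next, Base, 4, 1);
}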
6390 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
6391 /// it cannot be inferred.
6392 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
6393 // If this is a GlobalAddress + cst, return the alignment.
6394 const GlobalValue *GV;
6395 int64_t GVOffset = 0;
6396 const TargetLowering *TLI = TM.getTargetLowering();
6397 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
6398 unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
6399 APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
6400 llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
6401 TLI->getDataLayout());
6402 unsigned AlignBits = KnownZero.countTrailingOnes();
6403 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
6404     if (Align)
6405       return MinAlign(Align, GVOffset);
6406   }
6408 // If this is a direct reference to a stack slot, use information about the
6409 // stack slot's alignment.
6410 int FrameIdx = 1 << 31;
6411 int64_t FrameOffset = 0;
6412 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6413 FrameIdx = FI->getIndex();
6414 } else if (isBaseWithConstantOffset(Ptr) &&
6415 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6417 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6418 FrameOffset = Ptr.getConstantOperandVal(1);
6421 if (FrameIdx != (1 << 31)) {
6422 const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6423     unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6424                                     FrameOffset);
6425     return FIInfoAlign;
6426   }
6428   return 0;
6429 }
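// For illustration only: a memory-op lowering can query the inferred
// alignment and fall back to a conservative caller-supplied value when
// nothing is known (InferPtrAlignment returns 0 in that case).
LLVM_ATTRIBUTE_UNUSED static unsigned
exampleAlignOrDefault(const SelectionDAG &DAG, SDValue Ptr, unsigned Fallback) {
  if (unsigned Align = DAG.InferPtrAlignment(Ptr))
    return Align;
  return Fallback;
}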
6431 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
6432 /// which is split (or expanded) into two not necessarily identical pieces.
6433 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
6434 // Currently all types are split in half.
6435   EVT LoVT, HiVT;
6436   if (!VT.isVector()) {
6437 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
6438   } else {
6439     unsigned NumElements = VT.getVectorNumElements();
6440 assert(!(NumElements & 1) && "Splitting vector, but not in half!");
6441     LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
6442                                    NumElements/2);
6443   }
6444   return std::make_pair(LoVT, HiVT);
6445 }
6447 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
6448 /// low/high part.
6449 std::pair<SDValue, SDValue>
6450 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
6451                           const EVT &HiVT) {
6452 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
6453 N.getValueType().getVectorNumElements() &&
6454 "More vector elements requested than available!");
6455   SDValue Lo, Hi;
6456   Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
6457 getConstant(0, TLI->getVectorIdxTy()));
6458 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
6459 getConstant(LoVT.getVectorNumElements(), TLI->getVectorIdxTy()));
6460   return std::make_pair(Lo, Hi);
6461 }
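// For illustration only: tying the two helpers above together to split a
// vector value into its low and high halves; Vec is a hypothetical input.
LLVM_ATTRIBUTE_UNUSED static std::pair<SDValue, SDValue>
exampleSplitInHalf(SelectionDAG &DAG, SDValue Vec, SDLoc DL) {
  // E.g. a v4i32 input yields a (v2i32, v2i32) pair of EXTRACT_SUBVECTORs.
  std::pair<EVT, EVT> VTs = DAG.GetSplitDestVTs(Vec.getValueType());
  return DAG.SplitVector(Vec, DL, VTs.first, VTs.second);
}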
6463 void SelectionDAG::ExtractVectorElements(SDValue Op,
6464 SmallVectorImpl<SDValue> &Args,
6465 unsigned Start, unsigned Count) {
6466 EVT VT = Op.getValueType();
6467   if (Count == 0)
6468     Count = VT.getVectorNumElements();
6470 EVT EltVT = VT.getVectorElementType();
6471 EVT IdxTy = TLI->getVectorIdxTy();
6472   SDLoc SL(Op);
6473   for (unsigned i = Start, e = Start + Count; i != e; ++i) {
6474 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
6475                            Op, getConstant(i, IdxTy)));
6476   }
6477 }
6479 // getAddressSpace - Return the address space this GlobalAddress belongs to.
6480 unsigned GlobalAddressSDNode::getAddressSpace() const {
6481   return getGlobal()->getType()->getAddressSpace();
6482 }
6485 Type *ConstantPoolSDNode::getType() const {
6486 if (isMachineConstantPoolEntry())
6487 return Val.MachineCPVal->getType();
6488   return Val.ConstVal->getType();
6489 }
6491 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6492                                         APInt &SplatUndef,
6493                                         unsigned &SplatBitSize,
6494                                         bool &HasAnyUndefs,
6495                                         unsigned MinSplatBits,
6496                                         bool isBigEndian) const {
6497 EVT VT = getValueType(0);
6498 assert(VT.isVector() && "Expected a vector type");
6499 unsigned sz = VT.getSizeInBits();
6500   if (MinSplatBits > sz)
6501     return false;
6503 SplatValue = APInt(sz, 0);
6504 SplatUndef = APInt(sz, 0);
6506 // Get the bits. Bits with undefined values (when the corresponding element
6507 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6508   // in SplatValue. If any of the values are not constant, give up and return
6509   // false.
6510 unsigned int nOps = getNumOperands();
6511 assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6512 unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6514 for (unsigned j = 0; j < nOps; ++j) {
6515 unsigned i = isBigEndian ? nOps-1-j : j;
6516 SDValue OpVal = getOperand(i);
6517 unsigned BitPos = j * EltBitSize;
6519 if (OpVal.getOpcode() == ISD::UNDEF)
6520 SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6521 else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6522 SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6523 zextOrTrunc(sz) << BitPos;
6524 else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6525       SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
6526     else
6527       return false;
6528   }
6530 // The build_vector is all constants or undefs. Find the smallest element
6531 // size that splats the vector.
6533 HasAnyUndefs = (SplatUndef != 0);
6535   while (sz > 8) {
6536     unsigned HalfSize = sz / 2;
6537 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6538 APInt LowValue = SplatValue.trunc(HalfSize);
6539 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6540 APInt LowUndef = SplatUndef.trunc(HalfSize);
6542 // If the two halves do not match (ignoring undef bits), stop here.
6543     if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6544         MinSplatBits > HalfSize)
6545       break;
6547 SplatValue = HighValue | LowValue;
6548     SplatUndef = HighUndef & LowUndef;
6549     sz = HalfSize;
6550   }
6552   SplatBitSize = sz;
6553   return true;
6554 }
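// For illustration only: the usual way a target checks a BUILD_VECTOR for a
// splatted constant, e.g. to match an 8-bit immediate form; BV is a
// hypothetical input node.
LLVM_ATTRIBUTE_UNUSED static bool exampleIsSplatOfOnes(BuildVectorSDNode *BV,
                                                       bool IsBigEndian) {
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  return BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                             HasAnyUndefs, 8, IsBigEndian) &&
         SplatBitSize == 8 && SplatValue.isAllOnesValue();
}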
6557 ConstantSDNode *BuildVectorSDNode::getConstantSplatValue() const {
6558 SDValue Op0 = getOperand(0);
6559   if (Op0.getOpcode() != ISD::Constant)
6560     return nullptr;
6562 for (unsigned i = 1, e = getNumOperands(); i != e; ++i)
6563     if (getOperand(i) != Op0)
6564       return nullptr;
6566   return cast<ConstantSDNode>(Op0);
6567 }
6569 bool BuildVectorSDNode::isConstant() const {
6570 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
6571 unsigned Opc = getOperand(i).getOpcode();
6572     if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
6573       return false;
6574   }
6576   return true;
6577 }
6578 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6579 // Find the first non-undef value in the shuffle mask.
6580   unsigned i, e;
6581   for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6582     /* search */;
6584 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6586   // Make sure all remaining elements are either undef or the same as the first
6587   // non-undef value.
6588 for (int Idx = Mask[i]; i != e; ++i)
6589     if (Mask[i] >= 0 && Mask[i] != Idx)
6590       return false;
6591   return true;
6592 }
6594 #ifdef XDEBUG
6595 static void checkForCyclesHelper(const SDNode *N,
6596 SmallPtrSet<const SDNode*, 32> &Visited,
6597 SmallPtrSet<const SDNode*, 32> &Checked) {
6598 // If this node has already been checked, don't check it again.
6599   if (Checked.count(N))
6600     return;
6602   // If a node has already been visited on this depth-first walk, reject it as
6603   // a cycle.
6604 if (!Visited.insert(N)) {
6605     dbgs() << "Offending node:\n";
6606     N->dumprFull();
6607     errs() << "Detected cycle in SelectionDAG\n";
6608     abort();
6609   }
6611   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6612     checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
6614   Checked.insert(N);
6615   Visited.erase(N);
6616 }
6617 #endif
6619 void llvm::checkForCycles(const llvm::SDNode *N) {
6620 #ifdef XDEBUG
6621 assert(N && "Checking nonexistent SDNode");
6622 SmallPtrSet<const SDNode*, 32> visited;
6623 SmallPtrSet<const SDNode*, 32> checked;
6624   checkForCyclesHelper(N, visited, checked);
6625 #endif
6626 }
6628 void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
6629   checkForCycles(DAG->getRoot().getNode());
6630 }