//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>

using namespace llvm;
/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
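// DAGUpdateListener subclasses override these hooks to be told when nodes are
// deleted or mutated in place, so that cached SDNode pointers (for example in
// the selector's worklists) can be dropped or refreshed.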
//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // bits are all ones.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
    if (!CN->isNullValue())
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
    if (!CFPN->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}
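// The setcc helpers below work on the bit encoding of ISD::CondCode: bit 0 is
// E (equal), bit 1 is G (greater), bit 2 is L (less), bit 3 is U (unordered),
// and the integer-only codes (SETEQ, SETLT, ...) additionally have bit 4 set.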
/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
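// For example, swapping the operands of (X setlt Y) yields (Y setgt X):
// getSetCCSwappedOperands(ISD::SETLT) == ISD::SETGT, and the unsigned and
// ordered variants are preserved (SETULT -> SETUGT, SETOLE -> SETOGE).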
/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if it is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns SETCC_INVALID if it is not possible to represent the
/// resultant comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}
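// These helpers feed the SelectionDAG's FoldingSet-based CSE map: two nodes
// with the same opcode, value-type list, operands, and opcode-specific data
// hash to the same FoldingSetNodeID, so getNode() can return an existing node
// instead of creating a duplicate.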
/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant:
    ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
    break;
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}
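// Note that every opcode-specific field (offsets, target flags, memory VTs,
// address spaces, ...) must be folded into the ID above; leaving one out would
// allow two semantically different nodes to be CSE'd into one.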
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}
//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}
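// Glue results tie a node to its immediate consumer to express ordering, so
// glue-producing nodes are kept unique rather than shared between users.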
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, 0);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}
void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}
/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}
/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // existed.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops, unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}
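// These overloads are used when a node's operands are about to be replaced in
// place (for example by UpdateNodeOperands): the DAG first checks whether a
// node with the new operand list already exists so the two can be merged.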
#ifndef NDEBUG
/// VerifyNodeCommon - Sanity check the given node. Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that are
  // not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}

/// VerifyMachineNode - Sanity check the given MachineNode. Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}
#endif // NDEBUG
/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TSI(*tm.getSelectionDAGInfo()), TTI(0), OptLevel(OL),
    EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
    Root(getEntryNode()), UpdateListeners(0) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}
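// The EntryNode is embedded in the SelectionDAG object itself and is always
// the first element of AllNodes; allnodes_clear() and clear() below rely on
// that invariant.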
void SelectionDAG::init(MachineFunction &mf, const TargetTransformInfo *tti) {
  MF = &mf;
  TTI = tti;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(0));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(0));

  EntryNode.UseList = 0;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}
SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}
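// For example, getZExtOrTrunc(Val, DL, MVT::i32) zero-extends an i8 value to
// i32, while the same call with an i1 destination truncates instead; the
// comparison against the operand's type picks the direction automatically.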
SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}
SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT) {
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  const TargetLowering *TLI = TM.getTargetLowering();

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(Elt);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}
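// Example: getConstant(1, MVT::v4i32) creates one scalar ConstantSDNode and
// then splats it into a BUILD_VECTOR of four identical operands, as above.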
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&V);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME SDLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
           EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}
SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TM.getTargetLowering()->getPointerTy().getSizeInBits();
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
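// Thread-local globals get the distinct GlobalTLSAddress opcode because their
// addresses are materialized through a TLS access sequence rather than a
// plain relocation against the symbol.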
SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(FI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), 0, 0);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MBB);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (CondCodeNodes[Cond] == 0) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}
// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}
SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If Identity shuffle return that node.
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
  }
  if (Identity && NElts)
    return N1;

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it.  This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
                                            dl.getDebugLoc(), N1, N2,
                                            MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
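// Example: a <4 x i32> shuffle of (N1, N2) with mask {4,5,6,7} selects only
// RHS elements, so the canonicalization above rewrites the mask against N2 as
// {0,1,2,3}; since that is an identity mask, the call simply returns N2.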
SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N =
    new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(), dl.getDebugLoc(),
                                        Ops, 5, Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
  ID.AddInteger(RegNo);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
  ID.AddPointer(RegMask);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
                                                dl.getDebugLoc(), Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(V);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MD);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, SDLoc(Op), ShTy, Op);
}

/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned StackAlign =
    std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  const DataLayout *TD = TLI->getDataLayout();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}
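// CreateStackTemporary(VT1, VT2) is used when a value must be stored as one
// type and reloaded as another (for example an i64 <-> f64 conversion through
// memory); the slot is sized and aligned for the larger of the two types.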
SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, SDLoc dl) {
  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: {
    const TargetLowering *TLI = TM.getTargetLowering();
    TargetLowering::BooleanContent Cnt = TLI->getBooleanContents(VT.isVector());
    return getConstant(
        Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
  }

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
      }
    }
  }
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      return getSetCC(dl, VT, N2, N1, ISD::getSetCCSwappedOperands(Cond));
    }
  }

  // Could not fold it.
  return SDValue();
}
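// Example: FoldSetCC(MVT::i1, getConstant(3, MVT::i32), getConstant(5, MVT::i32),
// ISD::SETULT, dl) folds to getConstant(1, MVT::i1), since both operands are
// ConstantSDNodes and 3 u< 5.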
/// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  // This predicate is not safe for vector operations.
  if (Op.getValueType().isVector())
    return false;

  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
/// this predicate to simplify operations downstream.  Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
                                     unsigned Depth) const {
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
1674 /// ComputeMaskedBits - Determine which of the bits specified in Mask are
1675 /// known to be either zero or one and return them in the KnownZero/KnownOne
1676 /// bitsets. This code only analyzes bits in Mask, in order to short-circuit
1678 void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
1679 APInt &KnownOne, unsigned Depth) const {
1680 const TargetLowering *TLI = TM.getTargetLowering();
1681 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1683 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
1685 return; // Limit search depth.
1687 APInt KnownZero2, KnownOne2;
1689 switch (Op.getOpcode()) {
1691 // We know all of the bits for a constant!
1692 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
1693 KnownZero = ~KnownOne;
1696 // If either the LHS or the RHS are Zero, the result is zero.
1697 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1698 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1699 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1700 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1702 // Output known-1 bits are only known if set in both the LHS & RHS.
1703 KnownOne &= KnownOne2;
1704 // Output known-0 are known to be clear if zero in either the LHS | RHS.
1705 KnownZero |= KnownZero2;
1708 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1709 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1710 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1711 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1713 // Output known-0 bits are only known if clear in both the LHS & RHS.
1714 KnownZero &= KnownZero2;
1715 // Output known-1 are known to be set if set in either the LHS | RHS.
1716 KnownOne |= KnownOne2;
1719 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1720 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1721 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1722 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1724 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1725 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
1727 // Output known-1 bits are known to be set if set in one operand and known clear in the other.
1727 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
1728 KnownZero = KnownZeroOut;
1732 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1733 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1734 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1735 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1737 // If low bits are zero in either operand, output low known-0 bits.
1738 // Also compute a conservative estimate for high known-0 bits.
1739 // More trickiness is possible, but this is sufficient for the
1740 // interesting case of alignment computation.
1741 KnownOne.clearAllBits();
1742 unsigned TrailZ = KnownZero.countTrailingOnes() +
1743 KnownZero2.countTrailingOnes();
1744 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
1745 KnownZero2.countLeadingOnes(),
1746 BitWidth) - BitWidth;
1748 TrailZ = std::min(TrailZ, BitWidth);
1749 LeadZ = std::min(LeadZ, BitWidth);
1750 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
1751 APInt::getHighBitsSet(BitWidth, LeadZ);
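// Worked example for the MUL case above (illustrative): if each operand is
// known to be a multiple of 4 (two known trailing zero bits apiece), TrailZ
// is 2 + 2 = 4, so the product is reported as a multiple of 16. This is the
// property the pointer-alignment computation relies on.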
1755 // For the purposes of computing leading zeros we can conservatively
1756 // treat a udiv as a logical right shift by the power of 2 known to
1757 // be less than the denominator.
1758 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1759 unsigned LeadZ = KnownZero2.countLeadingOnes();
1761 KnownOne2.clearAllBits();
1762 KnownZero2.clearAllBits();
1763 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1764 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
1765 if (RHSUnknownLeadingOnes != BitWidth)
1766 LeadZ = std::min(BitWidth,
1767 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
1769 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
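// Illustrative note on the UDIV case above: if the numerator has N known
// leading zero bits and the denominator is known to have bit k set (so it is
// at least 2^k), the quotient is reported with at least N + k leading zeros,
// capped at BitWidth. E.g. an i32 numerator below 2^16 divided by a value
// with bit 3 known set yields at least 16 + 3 = 19 leading zero bits.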
1773 ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
1774 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1775 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1776 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1778 // Only known if known in both the LHS and RHS.
1779 KnownOne &= KnownOne2;
1780 KnownZero &= KnownZero2;
1782 case ISD::SELECT_CC:
1783 ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
1784 ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
1785 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1786 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1788 // Only known if known in both the LHS and RHS.
1789 KnownOne &= KnownOne2;
1790 KnownZero &= KnownZero2;
1798 if (Op.getResNo() != 1)
1800 // The boolean result conforms to getBooleanContents. Fall through.
1802 // If we know the result of a setcc has the top bits zero, use this info.
1803 if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
1804 TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
1805 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
1808 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
1809 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1810 unsigned ShAmt = SA->getZExtValue();
1812 // If the shift count is an invalid immediate, don't do anything.
1813 if (ShAmt >= BitWidth)
1816 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1817 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1818 KnownZero <<= ShAmt;
1820 // low bits known zero.
1821 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
1825 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1826 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1827 unsigned ShAmt = SA->getZExtValue();
1829 // If the shift count is an invalid immediate, don't do anything.
1830 if (ShAmt >= BitWidth)
1833 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1834 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1835 KnownZero = KnownZero.lshr(ShAmt);
1836 KnownOne = KnownOne.lshr(ShAmt);
1838 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
1839 KnownZero |= HighBits; // High bits known zero.
1843 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1844 unsigned ShAmt = SA->getZExtValue();
1846 // If the shift count is an invalid immediate, don't do anything.
1847 if (ShAmt >= BitWidth)
1850 // If any of the demanded bits are produced by the sign extension, we also
1851 // demand the input sign bit.
1852 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
1854 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1855 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1856 KnownZero = KnownZero.lshr(ShAmt);
1857 KnownOne = KnownOne.lshr(ShAmt);
1859 // Handle the sign bits.
1860 APInt SignBit = APInt::getSignBit(BitWidth);
1861 SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask.
1863 if (KnownZero.intersects(SignBit)) {
1864 KnownZero |= HighBits; // New bits are known zero.
1865 } else if (KnownOne.intersects(SignBit)) {
1866 KnownOne |= HighBits; // New bits are known one.
1870 case ISD::SIGN_EXTEND_INREG: {
1871 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1872 unsigned EBits = EVT.getScalarType().getSizeInBits();
1874 // Sign extension. Compute the demanded bits in the result that are not
1875 // present in the input.
1876 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
1878 APInt InSignBit = APInt::getSignBit(EBits);
1879 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
1881 // If the sign extended bits are demanded, we know that the sign bit is demanded.
1883 InSignBit = InSignBit.zext(BitWidth);
1884 if (NewBits.getBoolValue())
1885 InputDemandedBits |= InSignBit;
1887 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1888 KnownOne &= InputDemandedBits;
1889 KnownZero &= InputDemandedBits;
1890 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1892 // If the sign bit of the input is known set or clear, then we know the
1893 // top bits of the result.
1894 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
1895 KnownZero |= NewBits;
1896 KnownOne &= ~NewBits;
1897 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
1898 KnownOne |= NewBits;
1899 KnownZero &= ~NewBits;
1900 } else { // Input sign bit unknown
1901 KnownZero &= ~NewBits;
1902 KnownOne &= ~NewBits;
1907 case ISD::CTTZ_ZERO_UNDEF:
1909 case ISD::CTLZ_ZERO_UNDEF:
1911 unsigned LowBits = Log2_32(BitWidth)+1;
1912 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1913 KnownOne.clearAllBits();
1917 LoadSDNode *LD = cast<LoadSDNode>(Op);
1918 // If this is a ZEXTLoad and we are looking at the loaded value.
1919 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
1920 EVT VT = LD->getMemoryVT();
1921 unsigned MemBits = VT.getScalarType().getSizeInBits();
1922 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
1923 } else if (const MDNode *Ranges = LD->getRanges()) {
1924 computeMaskedBitsLoad(*Ranges, KnownZero);
1928 case ISD::ZERO_EXTEND: {
1929 EVT InVT = Op.getOperand(0).getValueType();
1930 unsigned InBits = InVT.getScalarType().getSizeInBits();
1931 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
1932 KnownZero = KnownZero.trunc(InBits);
1933 KnownOne = KnownOne.trunc(InBits);
1934 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1935 KnownZero = KnownZero.zext(BitWidth);
1936 KnownOne = KnownOne.zext(BitWidth);
1937 KnownZero |= NewBits;
1940 case ISD::SIGN_EXTEND: {
1941 EVT InVT = Op.getOperand(0).getValueType();
1942 unsigned InBits = InVT.getScalarType().getSizeInBits();
1943 APInt InSignBit = APInt::getSignBit(InBits);
1944 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
1946 KnownZero = KnownZero.trunc(InBits);
1947 KnownOne = KnownOne.trunc(InBits);
1948 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1950 // Note if the sign bit is known to be zero or one.
1951 bool SignBitKnownZero = KnownZero.isNegative();
1952 bool SignBitKnownOne = KnownOne.isNegative();
1953 assert(!(SignBitKnownZero && SignBitKnownOne) &&
1954 "Sign bit can't be known to be both zero and one!");
1956 KnownZero = KnownZero.zext(BitWidth);
1957 KnownOne = KnownOne.zext(BitWidth);
1959 // If the sign bit is known zero or one, the top bits match.
1960 if (SignBitKnownZero)
1961 KnownZero |= NewBits;
1962 else if (SignBitKnownOne)
1963 KnownOne |= NewBits;
1966 case ISD::ANY_EXTEND: {
1967 EVT InVT = Op.getOperand(0).getValueType();
1968 unsigned InBits = InVT.getScalarType().getSizeInBits();
1969 KnownZero = KnownZero.trunc(InBits);
1970 KnownOne = KnownOne.trunc(InBits);
1971 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1972 KnownZero = KnownZero.zext(BitWidth);
1973 KnownOne = KnownOne.zext(BitWidth);
1976 case ISD::TRUNCATE: {
1977 EVT InVT = Op.getOperand(0).getValueType();
1978 unsigned InBits = InVT.getScalarType().getSizeInBits();
1979 KnownZero = KnownZero.zext(InBits);
1980 KnownOne = KnownOne.zext(InBits);
1981 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1982 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1983 KnownZero = KnownZero.trunc(BitWidth);
1984 KnownOne = KnownOne.trunc(BitWidth);
1987 case ISD::AssertZext: {
1988 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1989 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
1990 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1991 KnownZero |= (~InMask);
1992 KnownOne &= (~KnownZero);
1996 // All bits are zero except the low bit.
1997 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2001 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
2002 // We know that the top bits of C-X are clear if X contains fewer bits
2003 // than C (i.e. no wrap-around can happen). For example, 20-X is
2004 // positive if we can prove that X is >= 0 and < 16.
2005 if (CLHS->getAPIntValue().isNonNegative()) {
2006 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2007 // NLZ can't be BitWidth with no sign bit
2008 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2009 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2011 // If all of the MaskV bits are known to be zero, then we know the
2012 // output top bits are zero, because we now know that the output is non-negative.
2014 if ((KnownZero2 & MaskV) == MaskV) {
2015 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2016 // Top bits known zero.
2017 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
2025 // The low output bits are known to be zero up to the minimum number of
2026 // trailing zero bits known in both the LHS and RHS. For example, 8+(X<<3)
2027 // is known to have the low 3 bits clear.
2028 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2029 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
2030 unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
2032 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2033 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
2034 KnownZeroOut = std::min(KnownZeroOut,
2035 KnownZero2.countTrailingOnes());
2037 if (Op.getOpcode() == ISD::ADD) {
2038 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
2042 // With ADDE, a carry bit may be added in, so we can only use this
2043 // information if we know (at least) that the low two bits are clear. We
2044 // then return to the caller that the low bit is unknown but that other bits are known zero.
2046 if (KnownZeroOut >= 2) // ADDE
2047 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
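// Worked example for the ADD/ADDE handling above (illustrative): for
// 8 + (X << 3) both operands have at least three known trailing zeros, so
// KnownZeroOut is 3 and the low three bits of an ADD are known zero. For
// ADDE the incoming carry can set bit 0, which is why only bits
// [1, KnownZeroOut) are claimed via getBitsSet above.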
2051 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2052 const APInt &RA = Rem->getAPIntValue().abs();
2053 if (RA.isPowerOf2()) {
2054 APInt LowBits = RA - 1;
2055 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
2056 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2058 // The low bits of the first operand are unchanged by the srem.
2059 KnownZero = KnownZero2 & LowBits;
2060 KnownOne = KnownOne2 & LowBits;
2062 // If the first operand is non-negative or has all low bits zero, then
2063 // the upper bits are all zero.
2064 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2065 KnownZero |= ~LowBits;
2067 // If the first operand is negative and not all low bits are zero, then
2068 // the upper bits are all one.
2069 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2070 KnownOne |= ~LowBits;
2071 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
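// Illustrative note on the SREM handling above: for "X srem 8" the result has
// exactly the same low three bits as X; if X is additionally known to be
// non-negative, the remainder lies in [0, 7], so every bit above the low
// three is known zero.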
2076 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2077 const APInt &RA = Rem->getAPIntValue();
2078 if (RA.isPowerOf2()) {
2079 APInt LowBits = (RA - 1);
2080 KnownZero |= ~LowBits;
2081 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2082 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2087 // Since the result is less than or equal to either operand, any leading
2088 // zero bits in either operand must also exist in the result.
2089 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2090 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2092 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2093 KnownZero2.countLeadingOnes());
2094 KnownOne.clearAllBits();
2095 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2098 case ISD::FrameIndex:
2099 case ISD::TargetFrameIndex:
2100 if (unsigned Align = InferPtrAlignment(Op)) {
2101 // The low bits are known zero if the pointer is aligned.
2102 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
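// Illustrative: a frame index whose alignment is inferred to be 16 bytes
// yields Log2_32(16) == 4, so the low four bits of the pointer are reported
// as known zero here.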
2108 if (Op.getOpcode() < ISD::BUILTIN_OP_END)
2111 case ISD::INTRINSIC_WO_CHAIN:
2112 case ISD::INTRINSIC_W_CHAIN:
2113 case ISD::INTRINSIC_VOID:
2114 // Allow the target to implement this method for its nodes.
2115 TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
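// Usage sketch (illustrative; assumes a SelectionDAG &DAG and an SDValue V
// are in scope):
//   APInt KnownZero, KnownOne;
//   DAG.ComputeMaskedBits(V, KnownZero, KnownOne, 0);
//   if (KnownZero.isNegative())
//     ; // the sign bit of V is known to be clear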
2120 /// ComputeNumSignBits - Return the number of times the sign bit of the
2121 /// register is replicated into the other bits. We know that at least 1 bit
2122 /// is always equal to the sign bit (itself), but other cases can give us
2123 /// information. For example, immediately after an "SRA X, 2", we know that
2124 /// the top 3 bits are all equal to each other, so we return 3.
2125 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
2126 const TargetLowering *TLI = TM.getTargetLowering();
2127 EVT VT = Op.getValueType();
2128 assert(VT.isInteger() && "Invalid VT!");
2129 unsigned VTBits = VT.getScalarType().getSizeInBits();
2131 unsigned FirstAnswer = 1;
2134 return 1; // Limit search depth.
2136 switch (Op.getOpcode()) {
2138 case ISD::AssertSext:
2139 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2140 return VTBits-Tmp+1;
2141 case ISD::AssertZext:
2142 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2145 case ISD::Constant: {
2146 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2147 return Val.getNumSignBits();
2150 case ISD::SIGN_EXTEND:
2151 Tmp = VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
2152 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2154 case ISD::SIGN_EXTEND_INREG:
2155 // Max of the input and what this extends.
2157 cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
2160 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2161 return std::max(Tmp, Tmp2);
2164 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2165 // SRA X, C -> adds C sign bits.
2166 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2167 Tmp += C->getZExtValue();
2168 if (Tmp > VTBits) Tmp = VTBits;
2172 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2173 // shl destroys sign bits.
2174 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2175 if (C->getZExtValue() >= VTBits || // Bad shift.
2176 C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
2177 return Tmp - C->getZExtValue();
2182 case ISD::XOR: // NOT is handled here.
2183 // Logical binary ops preserve the number of sign bits at the worst.
2184 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2186 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2187 FirstAnswer = std::min(Tmp, Tmp2);
2188 // We computed what we know about the sign bits as our first
2189 // answer. Now proceed to the generic code that uses
2190 // ComputeMaskedBits, and pick whichever answer is better.
2195 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2196 if (Tmp == 1) return 1; // Early out.
2197 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2198 return std::min(Tmp, Tmp2);
2206 if (Op.getResNo() != 1)
2208 // The boolean result conforms to getBooleanContents. Fall through.
2210 // If setcc returns 0/-1, all bits are sign bits.
2211 if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
2212 TargetLowering::ZeroOrNegativeOneBooleanContent)
2217 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2218 unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2220 // Handle rotate right by N like a rotate left by 32-N.
2221 if (Op.getOpcode() == ISD::ROTR)
2222 RotAmt = (VTBits-RotAmt) & (VTBits-1);
2224 // If we aren't rotating out all of the known-in sign bits, return the
2225 // number that are left. This handles rotl(sext(x), 1) for example.
2226 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2227 if (Tmp > RotAmt+1) return Tmp-RotAmt;
2231 // Add can have at most one carry bit. Thus we know that the output
2232 // is, at worst, one more bit than the inputs.
2233 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2234 if (Tmp == 1) return 1; // Early out.
2236 // Special case decrementing a value (ADD X, -1):
2237 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2238 if (CRHS->isAllOnesValue()) {
2239 APInt KnownZero, KnownOne;
2240 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2242 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits.
2244 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2247 // If we are subtracting one from a positive number, there is no carry
2248 // out of the result.
2249 if (KnownZero.isNegative())
2253 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2254 if (Tmp2 == 1) return 1;
2255 return std::min(Tmp, Tmp2)-1;
2258 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2259 if (Tmp2 == 1) return 1;
2262 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2263 if (CLHS->isNullValue()) {
2264 APInt KnownZero, KnownOne;
2265 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2266 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits.
2268 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2271 // If the input is known to be positive (the sign bit is known clear),
2272 // the output of the NEG has the same number of sign bits as the input.
2273 if (KnownZero.isNegative())
2276 // Otherwise, we treat this like a SUB.
2279 // Sub can have at most one carry bit. Thus we know that the output
2280 // is, at worst, one more bit than the inputs.
2281 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2282 if (Tmp == 1) return 1; // Early out.
2283 return std::min(Tmp, Tmp2)-1;
2285 // FIXME: it's tricky to do anything useful for this, but it is an important
2286 // case for targets like X86.
2290 // If we are looking at the loaded value of the SDNode.
2291 if (Op.getResNo() == 0) {
2292 // Handle LOADX separately here. EXTLOAD case will fallthrough.
2293 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2294 unsigned ExtType = LD->getExtensionType();
2297 case ISD::SEXTLOAD: // '17' bits known
2298 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2299 return VTBits-Tmp+1;
2300 case ISD::ZEXTLOAD: // '16' bits known
2301 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2307 // Allow the target to implement this method for its nodes.
2308 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2309 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2310 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2311 Op.getOpcode() == ISD::INTRINSIC_VOID) {
2312 unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, Depth);
2313 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2316 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2317 // use this information.
2318 APInt KnownZero, KnownOne;
2319 ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
2322 if (KnownZero.isNegative()) { // sign bit is 0
2324 } else if (KnownOne.isNegative()) { // sign bit is 1;
2331 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
2332 // the number of identical bits in the top of the input value.
2334 Mask <<= Mask.getBitWidth()-VTBits;
2335 // Return # leading zeros. We use 'min' here in case Val was zero before
2336 // shifting. We don't want to return '64' as for an i32 "0".
2337 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
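// Worked example (illustrative): for an i32 value built as
// (sra (shl X, 24), 24) the top 25 bits all equal the sign bit, so
// ComputeNumSignBits returns at least 25; for the constant -1 every bit
// replicates the sign bit and the result is 32.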
2340 /// isBaseWithConstantOffset - Return true if the specified operand is an
2341 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
2342 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
2343 /// semantics as an ADD. This handles the equivalence:
2344 /// X|Cst == X+Cst iff X&Cst == 0.
2345 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
2346 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
2347 !isa<ConstantSDNode>(Op.getOperand(1)))
2350 if (Op.getOpcode() == ISD::OR &&
2351 !MaskedValueIsZero(Op.getOperand(0),
2352 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
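// Illustrative: (or X, 4) is treated as (add X, 4) only when
// MaskedValueIsZero proves bit 2 of X is clear, since X|4 equals X+4 exactly
// when X & 4 == 0.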
2359 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
2360 // If we're told that NaNs won't happen, assume they won't.
2361 if (getTarget().Options.NoNaNsFPMath)
2364 // If the value is a constant, we can obviously see if it is a NaN or not.
2365 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2366 return !C->getValueAPF().isNaN();
2368 // TODO: Recognize more cases here.
2373 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
2374 // If the value is a constant, we can obviously see if it is a zero or not.
2375 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2376 return !C->isZero();
2378 // TODO: Recognize more cases here.
2379 switch (Op.getOpcode()) {
2382 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2383 return !C->isNullValue();
2390 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
2391 // Check the obvious case.
2392 if (A == B) return true;
2394 // For negative and positive zero.
2395 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
2396 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
2397 if (CA->isZero() && CB->isZero()) return true;
2399 // Otherwise they may not be equal.
2403 /// getNode - Gets or creates the specified node.
2405 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
2406 FoldingSetNodeID ID;
2407 AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
2409 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2410 return SDValue(E, 0);
2412 SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), getVTList(VT));
2413 CSEMap.InsertNode(N, IP);
2415 AllNodes.push_back(N);
2419 return SDValue(N, 0);
2422 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
2423 EVT VT, SDValue Operand) {
2424 // Constant fold unary operations with an integer constant operand.
2425 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
2426 const APInt &Val = C->getAPIntValue();
2429 case ISD::SIGN_EXTEND:
2430 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT);
2431 case ISD::ANY_EXTEND:
2432 case ISD::ZERO_EXTEND:
2434 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
2435 case ISD::UINT_TO_FP:
2436 case ISD::SINT_TO_FP: {
2437 APFloat apf(EVTToAPFloatSemantics(VT),
2438 APInt::getNullValue(VT.getSizeInBits()));
2439 (void)apf.convertFromAPInt(Val,
2440 Opcode==ISD::SINT_TO_FP,
2441 APFloat::rmNearestTiesToEven);
2442 return getConstantFP(apf, VT);
2445 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2446 return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
2447 else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2448 return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
2451 return getConstant(Val.byteSwap(), VT);
2453 return getConstant(Val.countPopulation(), VT);
2455 case ISD::CTLZ_ZERO_UNDEF:
2456 return getConstant(Val.countLeadingZeros(), VT);
2458 case ISD::CTTZ_ZERO_UNDEF:
2459 return getConstant(Val.countTrailingZeros(), VT);
2463 // Constant fold unary operations with a floating point constant operand.
2464 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
2465 APFloat V = C->getValueAPF(); // make copy
2469 return getConstantFP(V, VT);
2472 return getConstantFP(V, VT);
2474 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
2475 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2476 return getConstantFP(V, VT);
2480 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
2481 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2482 return getConstantFP(V, VT);
2486 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
2487 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2488 return getConstantFP(V, VT);
2491 case ISD::FP_EXTEND: {
2493 // This can return overflow, underflow, or inexact; we don't care.
2494 // FIXME need to be more flexible about rounding mode.
2495 (void)V.convert(EVTToAPFloatSemantics(VT),
2496 APFloat::rmNearestTiesToEven, &ignored);
2497 return getConstantFP(V, VT);
2499 case ISD::FP_TO_SINT:
2500 case ISD::FP_TO_UINT: {
2503 assert(integerPartWidth >= 64);
2504 // FIXME need to be more flexible about rounding mode.
2505 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2506 Opcode==ISD::FP_TO_SINT,
2507 APFloat::rmTowardZero, &ignored);
2508 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
2510 APInt api(VT.getSizeInBits(), x);
2511 return getConstant(api, VT);
2514 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2515 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
2516 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2517 return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
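// Illustrative examples of the unary constant folding above (assuming an
// SDLoc DL is in scope): getNode(ISD::BSWAP, DL, MVT::i32,
// getConstant(0x11223344, MVT::i32)) folds to getConstant(0x44332211,
// MVT::i32), and getNode(ISD::CTPOP, DL, MVT::i32,
// getConstant(0xF0, MVT::i32)) folds to a constant 4.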
2522 unsigned OpOpcode = Operand.getNode()->getOpcode();
2524 case ISD::TokenFactor:
2525 case ISD::MERGE_VALUES:
2526 case ISD::CONCAT_VECTORS:
2527 return Operand; // Factor, merge or concat of one node? No need.
2528 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
2529 case ISD::FP_EXTEND:
2530 assert(VT.isFloatingPoint() &&
2531 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
2532 if (Operand.getValueType() == VT) return Operand; // noop conversion.
2533 assert((!VT.isVector() ||
2534 VT.getVectorNumElements() ==
2535 Operand.getValueType().getVectorNumElements()) &&
2536 "Vector element count mismatch!");
2537 if (Operand.getOpcode() == ISD::UNDEF)
2538 return getUNDEF(VT);
2540 case ISD::SIGN_EXTEND:
2541 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2542 "Invalid SIGN_EXTEND!");
2543 if (Operand.getValueType() == VT) return Operand; // noop extension
2544 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2545 "Invalid sext node, dst < src!");
2546 assert((!VT.isVector() ||
2547 VT.getVectorNumElements() ==
2548 Operand.getValueType().getVectorNumElements()) &&
2549 "Vector element count mismatch!");
2550 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
2551 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2552 else if (OpOpcode == ISD::UNDEF)
2553 // sext(undef) = 0, because the top bits will all be the same.
2554 return getConstant(0, VT);
2556 case ISD::ZERO_EXTEND:
2557 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2558 "Invalid ZERO_EXTEND!");
2559 if (Operand.getValueType() == VT) return Operand; // noop extension
2560 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2561 "Invalid zext node, dst < src!");
2562 assert((!VT.isVector() ||
2563 VT.getVectorNumElements() ==
2564 Operand.getValueType().getVectorNumElements()) &&
2565 "Vector element count mismatch!");
2566 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
2567 return getNode(ISD::ZERO_EXTEND, DL, VT,
2568 Operand.getNode()->getOperand(0));
2569 else if (OpOpcode == ISD::UNDEF)
2570 // zext(undef) = 0, because the top bits will be zero.
2571 return getConstant(0, VT);
2573 case ISD::ANY_EXTEND:
2574 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2575 "Invalid ANY_EXTEND!");
2576 if (Operand.getValueType() == VT) return Operand; // noop extension
2577 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2578 "Invalid anyext node, dst < src!");
2579 assert((!VT.isVector() ||
2580 VT.getVectorNumElements() ==
2581 Operand.getValueType().getVectorNumElements()) &&
2582 "Vector element count mismatch!");
2584 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2585 OpOpcode == ISD::ANY_EXTEND)
2586 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
2587 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2588 else if (OpOpcode == ISD::UNDEF)
2589 return getUNDEF(VT);
2591 // (ext (trunc x)) -> x
2592 if (OpOpcode == ISD::TRUNCATE) {
2593 SDValue OpOp = Operand.getNode()->getOperand(0);
2594 if (OpOp.getValueType() == VT)
2599 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2600 "Invalid TRUNCATE!");
2601 if (Operand.getValueType() == VT) return Operand; // noop truncate
2602 assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
2603 "Invalid truncate node, src < dst!");
2604 assert((!VT.isVector() ||
2605 VT.getVectorNumElements() ==
2606 Operand.getValueType().getVectorNumElements()) &&
2607 "Vector element count mismatch!");
2608 if (OpOpcode == ISD::TRUNCATE)
2609 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2610 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2611 OpOpcode == ISD::ANY_EXTEND) {
2612 // If the source is smaller than the dest, we still need an extend.
2613 if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
2614 .bitsLT(VT.getScalarType()))
2615 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2616 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
2617 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2618 return Operand.getNode()->getOperand(0);
2620 if (OpOpcode == ISD::UNDEF)
2621 return getUNDEF(VT);
2624 // Basic sanity checking.
2625 assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
2626 && "Cannot BITCAST between types of different sizes!");
2627 if (VT == Operand.getValueType()) return Operand; // noop conversion.
2628 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
2629 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
2630 if (OpOpcode == ISD::UNDEF)
2631 return getUNDEF(VT);
2633 case ISD::SCALAR_TO_VECTOR:
2634 assert(VT.isVector() && !Operand.getValueType().isVector() &&
2635 (VT.getVectorElementType() == Operand.getValueType() ||
2636 (VT.getVectorElementType().isInteger() &&
2637 Operand.getValueType().isInteger() &&
2638 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
2639 "Illegal SCALAR_TO_VECTOR node!");
2640 if (OpOpcode == ISD::UNDEF)
2641 return getUNDEF(VT);
2642 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
2643 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
2644 isa<ConstantSDNode>(Operand.getOperand(1)) &&
2645 Operand.getConstantOperandVal(1) == 0 &&
2646 Operand.getOperand(0).getValueType() == VT)
2647 return Operand.getOperand(0);
2650 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
2651 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
2652 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
2653 Operand.getNode()->getOperand(0));
2654 if (OpOpcode == ISD::FNEG) // --X -> X
2655 return Operand.getNode()->getOperand(0);
2658 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
2659 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
2664 SDVTList VTs = getVTList(VT);
2665 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
2666 FoldingSetNodeID ID;
2667 SDValue Ops[1] = { Operand };
2668 AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
2670 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2671 return SDValue(E, 0);
2673 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, Operand);
2674 CSEMap.InsertNode(N, IP);
2676 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, Operand);
2679 AllNodes.push_back(N);
2683 return SDValue(N, 0);
2686 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
2687 SDNode *Cst1, SDNode *Cst2) {
2688 SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
2689 SmallVector<SDValue, 4> Outputs;
2690 EVT SVT = VT.getScalarType();
2692 ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
2693 ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
2694 if (Scalar1 && Scalar2) {
2695 // Scalar instruction.
2696 Inputs.push_back(std::make_pair(Scalar1, Scalar2));
2698 // For vectors extract each constant element into Inputs so we can constant
2699 // fold them individually.
2700 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
2701 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
2705 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
2707 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
2708 ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
2709 ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
2710 if (!V1 || !V2) // Not a constant, bail.
2713 // Avoid BUILD_VECTOR nodes that perform implicit truncation.
2714 // FIXME: This is valid and could be handled by truncating the APInts.
2715 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
2718 Inputs.push_back(std::make_pair(V1, V2));
2722 // We have a number of constant values, constant fold them element by element.
2723 for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
2724 const APInt &C1 = Inputs[I].first->getAPIntValue();
2725 const APInt &C2 = Inputs[I].second->getAPIntValue();
2729 Outputs.push_back(getConstant(C1 + C2, SVT));
2732 Outputs.push_back(getConstant(C1 - C2, SVT));
2735 Outputs.push_back(getConstant(C1 * C2, SVT));
2738 if (!C2.getBoolValue())
2740 Outputs.push_back(getConstant(C1.udiv(C2), SVT));
2743 if (!C2.getBoolValue())
2745 Outputs.push_back(getConstant(C1.urem(C2), SVT));
2748 if (!C2.getBoolValue())
2750 Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
2753 if (!C2.getBoolValue())
2755 Outputs.push_back(getConstant(C1.srem(C2), SVT));
2758 Outputs.push_back(getConstant(C1 & C2, SVT));
2761 Outputs.push_back(getConstant(C1 | C2, SVT));
2764 Outputs.push_back(getConstant(C1 ^ C2, SVT));
2767 Outputs.push_back(getConstant(C1 << C2, SVT));
2770 Outputs.push_back(getConstant(C1.lshr(C2), SVT));
2773 Outputs.push_back(getConstant(C1.ashr(C2), SVT));
2776 Outputs.push_back(getConstant(C1.rotl(C2), SVT));
2779 Outputs.push_back(getConstant(C1.rotr(C2), SVT));
2786 // Handle the scalar case first.
2787 if (Scalar1 && Scalar2)
2788 return Outputs.back();
2790 // Otherwise build a big vector out of the scalar elements we generated.
2791 return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs.data(), Outputs.size());
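// Usage sketch (illustrative): the two-operand getNode below calls
// FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode()). For two
// scalar i32 constants 3 and 5 under ISD::ADD this folds straight to
// getConstant(8, MVT::i32); for two BUILD_VECTORs of constants it folds
// element by element and rebuilds the vector with ISD::BUILD_VECTOR.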
2795 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
2797 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
2798 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
2801 case ISD::TokenFactor:
2802 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
2803 N2.getValueType() == MVT::Other && "Invalid token factor!");
2804 // Fold trivial token factors.
2805 if (N1.getOpcode() == ISD::EntryToken) return N2;
2806 if (N2.getOpcode() == ISD::EntryToken) return N1;
2807 if (N1 == N2) return N1;
2809 case ISD::CONCAT_VECTORS:
2810 // Concat of UNDEFs is UNDEF.
2811 if (N1.getOpcode() == ISD::UNDEF &&
2812 N2.getOpcode() == ISD::UNDEF)
2813 return getUNDEF(VT);
2815 // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
2816 // one big BUILD_VECTOR.
2817 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
2818 N2.getOpcode() == ISD::BUILD_VECTOR) {
2819 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
2820 N1.getNode()->op_end());
2821 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
2822 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
2826 assert(VT.isInteger() && "This operator does not apply to FP types!");
2827 assert(N1.getValueType() == N2.getValueType() &&
2828 N1.getValueType() == VT && "Binary operator types must match!");
2829 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
2830 // worth handling here.
2831 if (N2C && N2C->isNullValue())
2833 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
2840 assert(VT.isInteger() && "This operator does not apply to FP types!");
2841 assert(N1.getValueType() == N2.getValueType() &&
2842 N1.getValueType() == VT && "Binary operator types must match!");
2843 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
2844 // it's worth handling here.
2845 if (N2C && N2C->isNullValue())
2855 assert(VT.isInteger() && "This operator does not apply to FP types!");
2856 assert(N1.getValueType() == N2.getValueType() &&
2857 N1.getValueType() == VT && "Binary operator types must match!");
2864 if (getTarget().Options.UnsafeFPMath) {
2865 if (Opcode == ISD::FADD) {
2867 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
2868 if (CFP->getValueAPF().isZero())
2871 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2872 if (CFP->getValueAPF().isZero())
2874 } else if (Opcode == ISD::FSUB) {
2876 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2877 if (CFP->getValueAPF().isZero())
2879 } else if (Opcode == ISD::FMUL) {
2880 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
2883 // If the first operand isn't the constant, try the second
2885 CFP = dyn_cast<ConstantFPSDNode>(N2);
2892 return SDValue(CFP,0);
2894 if (CFP->isExactlyValue(1.0))
2899 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
2900 assert(N1.getValueType() == N2.getValueType() &&
2901 N1.getValueType() == VT && "Binary operator types must match!");
2903 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
2904 assert(N1.getValueType() == VT &&
2905 N1.getValueType().isFloatingPoint() &&
2906 N2.getValueType().isFloatingPoint() &&
2907 "Invalid FCOPYSIGN!");
2914 assert(VT == N1.getValueType() &&
2915 "Shift operators' return type must be the same as their first arg");
2916 assert(VT.isInteger() && N2.getValueType().isInteger() &&
2917 "Shifts only work on integers");
2918 assert((!VT.isVector() || VT == N2.getValueType()) &&
2919 "Vector shift amounts must be the same type as their first arg");
2920 // Verify that the shift amount VT is big enough to hold valid shift
2921 // amounts. This catches things like trying to shift an i1024 value by an
2922 // i8, which is easy to fall into in generic code that uses
2923 // TLI.getShiftAmount().
2924 assert(N2.getValueType().getSizeInBits() >=
2925 Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
2926 "Invalid use of small shift amount with oversized value!");
2928 // Always fold shifts of i1 values so the code generator doesn't need to
2929 // handle them. Since we know the size of the shift has to be less than the
2930 // size of the value, the shift/rotate count is guaranteed to be zero.
2933 if (N2C && N2C->isNullValue())
2936 case ISD::FP_ROUND_INREG: {
2937 EVT EVT = cast<VTSDNode>(N2)->getVT();
2938 assert(VT == N1.getValueType() && "Not an inreg round!");
2939 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
2940 "Cannot FP_ROUND_INREG integer types");
2941 assert(EVT.isVector() == VT.isVector() &&
2942 "FP_ROUND_INREG type should be vector iff the operand type is vector!");
2944 assert((!EVT.isVector() ||
2945 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
2946 "Vector element counts must match in FP_ROUND_INREG");
2947 assert(EVT.bitsLE(VT) && "Not rounding down!");
2949 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
2953 assert(VT.isFloatingPoint() &&
2954 N1.getValueType().isFloatingPoint() &&
2955 VT.bitsLE(N1.getValueType()) &&
2956 isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
2957 if (N1.getValueType() == VT) return N1; // noop conversion.
2959 case ISD::AssertSext:
2960 case ISD::AssertZext: {
2961 EVT EVT = cast<VTSDNode>(N2)->getVT();
2962 assert(VT == N1.getValueType() && "Not an inreg extend!");
2963 assert(VT.isInteger() && EVT.isInteger() &&
2964 "Cannot *_EXTEND_INREG FP types");
2965 assert(!EVT.isVector() &&
2966 "AssertSExt/AssertZExt type should be the vector element type "
2967 "rather than the vector type!");
2968 assert(EVT.bitsLE(VT) && "Not extending!");
2969 if (VT == EVT) return N1; // noop assertion.
2972 case ISD::SIGN_EXTEND_INREG: {
2973 EVT EVT = cast<VTSDNode>(N2)->getVT();
2974 assert(VT == N1.getValueType() && "Not an inreg extend!");
2975 assert(VT.isInteger() && EVT.isInteger() &&
2976 "Cannot *_EXTEND_INREG FP types");
2977 assert(EVT.isVector() == VT.isVector() &&
2978 "SIGN_EXTEND_INREG type should be vector iff the operand type is vector!");
2980 assert((!EVT.isVector() ||
2981 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
2982 "Vector element counts must match in SIGN_EXTEND_INREG");
2983 assert(EVT.bitsLE(VT) && "Not extending!");
2984 if (EVT == VT) return N1; // Not actually extending
2987 APInt Val = N1C->getAPIntValue();
2988 unsigned FromBits = EVT.getScalarType().getSizeInBits();
2989 Val <<= Val.getBitWidth()-FromBits;
2990 Val = Val.ashr(Val.getBitWidth()-FromBits);
2991 return getConstant(Val, VT);
2995 case ISD::EXTRACT_VECTOR_ELT:
2996 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
2997 if (N1.getOpcode() == ISD::UNDEF)
2998 return getUNDEF(VT);
3000 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
3001 // expanding copies of large vectors from registers.
3003 N1.getOpcode() == ISD::CONCAT_VECTORS &&
3004 N1.getNumOperands() > 0) {
3006 N1.getOperand(0).getValueType().getVectorNumElements();
3007 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3008 N1.getOperand(N2C->getZExtValue() / Factor),
3009 getConstant(N2C->getZExtValue() % Factor,
3010 N2.getValueType()));
3013 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3014 // expanding large vector constants.
3015 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3016 SDValue Elt = N1.getOperand(N2C->getZExtValue());
3018 if (VT != Elt.getValueType())
3019 // If the vector element type is not legal, the BUILD_VECTOR operands
3020 // are promoted and implicitly truncated, and the result implicitly
3021 // extended. Make that explicit here.
3022 Elt = getAnyExtOrTrunc(Elt, DL, VT);
3027 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3028 // operations are lowered to scalars.
3029 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3030 // If the indices are the same, return the inserted element else
3031 // if the indices are known different, extract the element from
3032 // the original vector.
3033 SDValue N1Op2 = N1.getOperand(2);
3034 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
3036 if (N1Op2C && N2C) {
3037 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3038 if (VT == N1.getOperand(1).getValueType())
3039 return N1.getOperand(1);
3041 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3044 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3048 case ISD::EXTRACT_ELEMENT:
3049 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3050 assert(!N1.getValueType().isVector() && !VT.isVector() &&
3051 (N1.getValueType().isInteger() == VT.isInteger()) &&
3052 N1.getValueType() != VT &&
3053 "Wrong types for EXTRACT_ELEMENT!");
3055 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3056 // 64-bit integers into 32-bit parts. Instead of building the extract of
3057 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3058 if (N1.getOpcode() == ISD::BUILD_PAIR)
3059 return N1.getOperand(N2C->getZExtValue());
3061 // EXTRACT_ELEMENT of a constant int is also very common.
3062 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
3063 unsigned ElementSize = VT.getSizeInBits();
3064 unsigned Shift = ElementSize * N2C->getZExtValue();
3065 APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
3066 return getConstant(ShiftedVal.trunc(ElementSize), VT);
3069 case ISD::EXTRACT_SUBVECTOR: {
3071 if (VT.isSimple() && N1.getValueType().isSimple()) {
3072 assert(VT.isVector() && N1.getValueType().isVector() &&
3073 "Extract subvector VTs must be vectors!");
3074 assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType() &&
3075 "Extract subvector VTs must have the same element type!");
3076 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
3077 "Extract subvector must be from larger vector to smaller vector!");
3079 if (isa<ConstantSDNode>(Index.getNode())) {
3080 assert((VT.getVectorNumElements() +
3081 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3082 <= N1.getValueType().getVectorNumElements())
3083 && "Extract subvector overflow!");
3086 // Trivial extraction.
3087 if (VT.getSimpleVT() == N1.getSimpleValueType())
3094 // Perform trivial constant folding.
3095 SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
3096 if (SV.getNode()) return SV;
3098 // Canonicalize constant to RHS if commutative.
3099 if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
3100 std::swap(N1C, N2C);
3104 // Constant fold FP operations.
3105 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
3106 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
3108 if (!N2CFP && isCommutativeBinOp(Opcode)) {
3109 // Canonicalize constant to RHS if commutative.
3110 std::swap(N1CFP, N2CFP);
3113 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
3114 APFloat::opStatus s;
3117 s = V1.add(V2, APFloat::rmNearestTiesToEven);
3118 if (s != APFloat::opInvalidOp)
3119 return getConstantFP(V1, VT);
3122 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
3123 if (s!=APFloat::opInvalidOp)
3124 return getConstantFP(V1, VT);
3127 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
3128 if (s!=APFloat::opInvalidOp)
3129 return getConstantFP(V1, VT);
3132 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
3133 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3134 return getConstantFP(V1, VT);
3137 s = V1.mod(V2, APFloat::rmNearestTiesToEven);
3138 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3139 return getConstantFP(V1, VT);
3141 case ISD::FCOPYSIGN:
3143 return getConstantFP(V1, VT);
3148 if (Opcode == ISD::FP_ROUND) {
3149 APFloat V = N1CFP->getValueAPF(); // make copy
3151 // This can return overflow, underflow, or inexact; we don't care.
3152 // FIXME need to be more flexible about rounding mode.
3153 (void)V.convert(EVTToAPFloatSemantics(VT),
3154 APFloat::rmNearestTiesToEven, &ignored);
3155 return getConstantFP(V, VT);
3159 // Canonicalize an UNDEF to the RHS, even over a constant.
3160 if (N1.getOpcode() == ISD::UNDEF) {
3161 if (isCommutativeBinOp(Opcode)) {
3165 case ISD::FP_ROUND_INREG:
3166 case ISD::SIGN_EXTEND_INREG:
3172 return N1; // fold op(undef, arg2) -> undef
3180 return getConstant(0, VT); // fold op(undef, arg2) -> 0
3181 // For vectors, we can't easily build an all zero vector, just return the RHS.
3188 // Fold a bunch of operators when the RHS is undef.
3189 if (N2.getOpcode() == ISD::UNDEF) {
3192 if (N1.getOpcode() == ISD::UNDEF)
3193 // Handle undef ^ undef -> 0 special case. This is a common idiom (misuse).
3195 return getConstant(0, VT);
3205 return N2; // fold op(arg1, undef) -> undef
3211 if (getTarget().Options.UnsafeFPMath)
3219 return getConstant(0, VT); // fold op(arg1, undef) -> 0
3220 // For vectors, we can't easily build an all zero vector, just return the LHS.
3225 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
3226 // For vectors, we can't easily build an all one vector, just return the LHS.
3234 // Memoize this node if possible.
3236 SDVTList VTs = getVTList(VT);
3237 if (VT != MVT::Glue) {
3238 SDValue Ops[] = { N1, N2 };
3239 FoldingSetNodeID ID;
3240 AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
3242 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3243 return SDValue(E, 0);
3245 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
3246 CSEMap.InsertNode(N, IP);
3248 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
3251 AllNodes.push_back(N);
3255 return SDValue(N, 0);
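// Note (illustrative): because nodes that do not produce MVT::Glue are
// memoized in CSEMap above, two getNode(ISD::ADD, DL, MVT::i32, A, B) calls
// with identical operands return SDValues pointing at the same SDNode rather
// than creating a duplicate.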
3258 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3259 SDValue N1, SDValue N2, SDValue N3) {
3260 // Perform various simplifications.
3261 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3264 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3265 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3266 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
3267 if (N1CFP && N2CFP && N3CFP) {
3268 APFloat V1 = N1CFP->getValueAPF();
3269 const APFloat &V2 = N2CFP->getValueAPF();
3270 const APFloat &V3 = N3CFP->getValueAPF();
3271 APFloat::opStatus s =
3272 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
3273 if (s != APFloat::opInvalidOp)
3274 return getConstantFP(V1, VT);
3278 case ISD::CONCAT_VECTORS:
3279 // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
3280 // one big BUILD_VECTOR.
3281 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3282 N2.getOpcode() == ISD::BUILD_VECTOR &&
3283 N3.getOpcode() == ISD::BUILD_VECTOR) {
3284 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3285 N1.getNode()->op_end());
3286 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3287 Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
3288 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
3292 // Use FoldSetCC to simplify SETCC's.
3293 SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
3294 if (Simp.getNode()) return Simp;
3299 if (N1C->getZExtValue())
3300 return N2; // select true, X, Y -> X
3301 return N3; // select false, X, Y -> Y
3304 if (N2 == N3) return N2; // select C, X, X -> X
3306 case ISD::VECTOR_SHUFFLE:
3307 llvm_unreachable("should use getVectorShuffle constructor!");
3308 case ISD::INSERT_SUBVECTOR: {
3310 if (VT.isSimple() && N1.getValueType().isSimple()
3311 && N2.getValueType().isSimple()) {
3312 assert(VT.isVector() && N1.getValueType().isVector() &&
3313 N2.getValueType().isVector() &&
3314 "Insert subvector VTs must be vectors");
3315 assert(VT == N1.getValueType() &&
3316 "Dest and insert subvector source types must match!");
3317 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
3318 "Insert subvector must be from smaller vector to larger vector!");
3319 if (isa<ConstantSDNode>(Index.getNode())) {
3320 assert((N2.getValueType().getVectorNumElements() +
3321 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3322 <= VT.getVectorNumElements())
3323 && "Insert subvector overflow!");
3326 // Trivial insertion.
3327 if (VT.getSimpleVT() == N2.getSimpleValueType())
3333 // Fold bit_convert nodes from a type to themselves.
3334 if (N1.getValueType() == VT)
3339 // Memoize node if it doesn't produce a flag.
3341 SDVTList VTs = getVTList(VT);
3342 if (VT != MVT::Glue) {
3343 SDValue Ops[] = { N1, N2, N3 };
3344 FoldingSetNodeID ID;
3345 AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
3347 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3348 return SDValue(E, 0);
3350 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2, N3);
3351 CSEMap.InsertNode(N, IP);
3353 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2, N3);
3356 AllNodes.push_back(N);
3360 return SDValue(N, 0);
3363 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3364 SDValue N1, SDValue N2, SDValue N3,
3366 SDValue Ops[] = { N1, N2, N3, N4 };
3367 return getNode(Opcode, DL, VT, Ops, 4);
3370 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3371 SDValue N1, SDValue N2, SDValue N3,
3372 SDValue N4, SDValue N5) {
3373 SDValue Ops[] = { N1, N2, N3, N4, N5 };
3374 return getNode(Opcode, DL, VT, Ops, 5);
3377 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3378 /// the incoming stack arguments to be loaded from the stack.
3379 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3380 SmallVector<SDValue, 8> ArgChains;
3382 // Include the original chain at the beginning of the list. When this is
3383 // used by target LowerCall hooks, this helps legalize find the
3384 // CALLSEQ_BEGIN node.
3385 ArgChains.push_back(Chain);
3387 // Add a chain value for each stack argument.
3388 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3389 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3390 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3391 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3392 if (FI->getIndex() < 0)
3393 ArgChains.push_back(SDValue(L, 1));
3395 // Build a tokenfactor for all the chains.
3396 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
3397 &ArgChains[0], ArgChains.size());
3400 /// getMemsetValue - Vectorized representation of the memset value operand.
3402 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3404 assert(Value.getOpcode() != ISD::UNDEF);
3406 unsigned NumBits = VT.getScalarType().getSizeInBits();
3407 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3408 assert(C->getAPIntValue().getBitWidth() == 8);
3409 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3411 return DAG.getConstant(Val, VT);
3412 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3415 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3417 // Use a multiplication with 0x010101... to extend the input to the required length.
3419 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3420 Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
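// Worked example (illustrative): splatting the byte 0xAB into an i32 first
// zero-extends it to 0x000000AB and then multiplies by the 0x01010101 magic
// constant, producing 0xABABABAB; the constant path above reaches the same
// result directly through APInt::getSplat.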
3426 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
3427 /// used when a memcpy is turned into a memset when the source is a constant string.
3429 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3430 const TargetLowering &TLI, StringRef Str) {
3431 // Handle vector with all elements zero.
3434 return DAG.getConstant(0, VT);
3435 else if (VT == MVT::f32 || VT == MVT::f64)
3436 return DAG.getConstantFP(0.0, VT);
3437 else if (VT.isVector()) {
3438 unsigned NumElts = VT.getVectorNumElements();
3439 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3440 return DAG.getNode(ISD::BITCAST, dl, VT,
3441 DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3444 llvm_unreachable("Expected type!");
3447 assert(!VT.isVector() && "Can't handle vector type here!");
3448 unsigned NumVTBits = VT.getSizeInBits();
3449 unsigned NumVTBytes = NumVTBits / 8;
3450 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3452 APInt Val(NumVTBits, 0);
3453 if (TLI.isLittleEndian()) {
3454 for (unsigned i = 0; i != NumBytes; ++i)
3455 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3457 for (unsigned i = 0; i != NumBytes; ++i)
3458 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
3461 // If the "cost" of materializing the integer immediate is 1 or free, then
3462 // it is cost effective to turn the load into the immediate.
3463 const TargetTransformInfo *TTI = DAG.getTargetTransformInfo();
3464 if (TTI->getIntImmCost(Val, VT.getTypeForEVT(*DAG.getContext())) < 2)
3465 return DAG.getConstant(Val, VT);
3466 return SDValue(0, 0);
3469 /// getMemBasePlusOffset - Returns a node representing the given Base
3470 /// pointer plus a constant byte Offset.
3471 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3472 SelectionDAG &DAG) {
3473 EVT VT = Base.getValueType();
3474 return DAG.getNode(ISD::ADD, dl,
3475 VT, Base, DAG.getConstant(Offset, VT));
3478 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
3480 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3481 unsigned SrcDelta = 0;
3482 GlobalAddressSDNode *G = NULL;
3483 if (Src.getOpcode() == ISD::GlobalAddress)
3484 G = cast<GlobalAddressSDNode>(Src);
3485 else if (Src.getOpcode() == ISD::ADD &&
3486 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3487 Src.getOperand(1).getOpcode() == ISD::Constant) {
3488 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3489 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3494 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
3497 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3498 /// to replace the memset / memcpy. Return true if the number of memory ops
3499 /// is below the threshold. It returns the types of the sequence of
3500 /// memory ops to perform memset / memcpy by reference.
3501 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3502 unsigned Limit, uint64_t Size,
3503 unsigned DstAlign, unsigned SrcAlign,
3509 const TargetLowering &TLI) {
3510 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3511 "Expecting memcpy / memset source to meet alignment requirement!");
3512 // If 'SrcAlign' is zero, that means the memory operation does not need to
3513 // load the value, i.e. memset or memcpy from constant string. Otherwise,
3514 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3515 // is the specified alignment of the memory operation. If it is zero, that
3516 // means it's possible to change the alignment of the destination.
3517 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3518 // not need to be loaded.
3519 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3520 IsMemset, ZeroMemset, MemcpyStrSrc,
3521 DAG.getMachineFunction());
3523 if (VT == MVT::Other) {
3524 if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
3525 TLI.allowsUnalignedMemoryAccesses(VT)) {
3526 VT = TLI.getPointerTy();
3528 switch (DstAlign & 7) {
3529 case 0: VT = MVT::i64; break;
3530 case 4: VT = MVT::i32; break;
3531 case 2: VT = MVT::i16; break;
3532 default: VT = MVT::i8; break;
3537 while (!TLI.isTypeLegal(LVT))
3538 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3539 assert(LVT.isInteger());
3545 unsigned NumMemOps = 0;
3547 unsigned VTSize = VT.getSizeInBits() / 8;
3548 while (VTSize > Size) {
3549 // For now, only use non-vector loads / stores for the left-over pieces.
3554 if (VT.isVector() || VT.isFloatingPoint()) {
3555 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3556 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3557 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3559 else if (NewVT == MVT::i64 &&
3560 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3561 TLI.isSafeMemOpType(MVT::f64)) {
3562 // i64 is usually not legal on 32-bit targets, but f64 may be.
3570 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3571 if (NewVT == MVT::i8)
3573 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3575 NewVTSize = NewVT.getSizeInBits() / 8;
3577 // If the new VT cannot cover all of the remaining bits, then consider
3578 // issuing a (or a pair of) unaligned and overlapping load / store.
3579 // FIXME: Only does this for 64-bit or more since we don't have proper
3580 // cost model for unaligned load / store.
3582 if (NumMemOps && AllowOverlap &&
3583 VTSize >= 8 && NewVTSize < Size &&
3584 TLI.allowsUnalignedMemoryAccesses(VT, &Fast) && Fast)
3592 if (++NumMemOps > Limit)
3595 MemOps.push_back(VT);
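/// getMemcpyLoadsAndStores - Lower a memcpy of a known constant size into an
/// inline sequence of loads and stores, using the sequence of types chosen by
/// FindOptimalMemOpLowering. Returns a null SDValue if the copy should be
/// lowered some other way (e.g. as a libcall).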
3602 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3603 SDValue Chain, SDValue Dst,
3604 SDValue Src, uint64_t Size,
3605 unsigned Align, bool isVol,
3607 MachinePointerInfo DstPtrInfo,
3608 MachinePointerInfo SrcPtrInfo) {
3609 // Turn a memcpy of undef to nop.
3610 if (Src.getOpcode() == ISD::UNDEF)
3613 // Expand memcpy to a series of load and store ops if the size operand falls
3614 // below a certain threshold.
3615 // TODO: In the AlwaysInline case, if the size is big then generate a loop
3616 // rather than maybe a humongous number of loads and stores.
3617 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3618 std::vector<EVT> MemOps;
3619 bool DstAlignCanChange = false;
3620 MachineFunction &MF = DAG.getMachineFunction();
3621 MachineFrameInfo *MFI = MF.getFrameInfo();
3622 bool OptSize =
3623 MF.getFunction()->getAttributes().
3624 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3625 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3626 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3627 DstAlignCanChange = true;
3628 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3629 if (Align > SrcAlign)
3630 SrcAlign = Align;
3631 StringRef Str;
3632 bool CopyFromStr = isMemSrcFromString(Src, Str);
3633 bool isZeroStr = CopyFromStr && Str.empty();
3634 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
3636 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3637 (DstAlignCanChange ? 0 : Align),
3638 (isZeroStr ? 0 : SrcAlign),
3639 false, false, CopyFromStr, true, DAG, TLI))
3642 if (DstAlignCanChange) {
3643 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3644 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3646 // Don't promote to an alignment that would require dynamic stack realignment.
3648 const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
3649 if (!TRI->needsStackRealignment(MF))
3650 while (NewAlign > Align &&
3651 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
3654 if (NewAlign > Align) {
3655 // Give the stack frame object a larger alignment if needed.
3656 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3657 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3662 SmallVector<SDValue, 8> OutChains;
3663 unsigned NumMemOps = MemOps.size();
3664 uint64_t SrcOff = 0, DstOff = 0;
3665 for (unsigned i = 0; i != NumMemOps; ++i) {
3667 unsigned VTSize = VT.getSizeInBits() / 8;
3668 SDValue Value, Store;
3670 if (VTSize > Size) {
3671 // Issuing an unaligned load / store pair that overlaps with the previous
3672 // pair. Adjust the offset accordingly.
3673 assert(i == NumMemOps-1 && i != 0);
3674 SrcOff -= VTSize - Size;
3675 DstOff -= VTSize - Size;
3679 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
3680 // It's unlikely a store of a vector immediate can be done in a single
3681 // instruction. It would require a load from a constantpool first.
3682 // We only handle zero vectors here.
3683 // FIXME: Handle other cases where store of vector immediate is done in
3684 // a single instruction.
3685 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
3686 if (Value.getNode())
3687 Store = DAG.getStore(Chain, dl, Value,
3688 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3689 DstPtrInfo.getWithOffset(DstOff), isVol,
3693 if (!Store.getNode()) {
3694 // The type might not be legal for the target. This should only happen
3695 // if the type is smaller than a legal type, as on PPC, so the right
3696 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
3697 // to Load/Store if NVT==VT.
3698 // FIXME does the case above also need this?
3699 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3700 assert(NVT.bitsGE(VT));
3701 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3702 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3703 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
3704 MinAlign(SrcAlign, SrcOff));
3705 Store = DAG.getTruncStore(Chain, dl, Value,
3706 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3707 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
3710 OutChains.push_back(Store);
3716 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3717 &OutChains[0], OutChains.size());
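/// getMemmoveLoadsAndStores - Lower a memmove of a known constant size by
/// emitting all of the loads before any of the stores, so the expansion stays
/// correct even when the source and destination ranges overlap.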
3720 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3721 SDValue Chain, SDValue Dst,
3722 SDValue Src, uint64_t Size,
3723 unsigned Align, bool isVol,
3725 MachinePointerInfo DstPtrInfo,
3726 MachinePointerInfo SrcPtrInfo) {
3727 // Turn a memmove of undef to nop.
3728 if (Src.getOpcode() == ISD::UNDEF)
3731 // Expand memmove to a series of load and store ops if the size operand falls
3732 // below a certain threshold.
3733 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3734 std::vector<EVT> MemOps;
3735 bool DstAlignCanChange = false;
3736 MachineFunction &MF = DAG.getMachineFunction();
3737 MachineFrameInfo *MFI = MF.getFrameInfo();
3738 bool OptSize = MF.getFunction()->getAttributes().
3739 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3740 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3741 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3742 DstAlignCanChange = true;
3743 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3744 if (Align > SrcAlign)
3745 SrcAlign = Align;
3746 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
3748 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3749 (DstAlignCanChange ? 0 : Align), SrcAlign,
3750 false, false, false, false, DAG, TLI))
3753 if (DstAlignCanChange) {
3754 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3755 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3756 if (NewAlign > Align) {
3757 // Give the stack frame object a larger alignment if needed.
3758 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3759 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3764 uint64_t SrcOff = 0, DstOff = 0;
3765 SmallVector<SDValue, 8> LoadValues;
3766 SmallVector<SDValue, 8> LoadChains;
3767 SmallVector<SDValue, 8> OutChains;
3768 unsigned NumMemOps = MemOps.size();
3769 for (unsigned i = 0; i < NumMemOps; i++) {
3771 unsigned VTSize = VT.getSizeInBits() / 8;
3772 SDValue Value, Store;
3774 Value = DAG.getLoad(VT, dl, Chain,
3775 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3776 SrcPtrInfo.getWithOffset(SrcOff), isVol,
3777 false, false, SrcAlign);
3778 LoadValues.push_back(Value);
3779 LoadChains.push_back(Value.getValue(1));
3782 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3783 &LoadChains[0], LoadChains.size());
3785 for (unsigned i = 0; i < NumMemOps; i++) {
3787 unsigned VTSize = VT.getSizeInBits() / 8;
3788 SDValue Value, Store;
3790 Store = DAG.getStore(Chain, dl, LoadValues[i],
3791 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3792 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
3793 OutChains.push_back(Store);
3797 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3798 &OutChains[0], OutChains.size());
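/// getMemsetStores - Lower a memset of a known constant size into an inline
/// sequence of stores, materializing the splatted value once for the largest
/// store type and truncating it for any smaller stores.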
3801 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
3802 SDValue Chain, SDValue Dst,
3803 SDValue Src, uint64_t Size,
3804 unsigned Align, bool isVol,
3805 MachinePointerInfo DstPtrInfo) {
3806 // Turn a memset of undef to nop.
3807 if (Src.getOpcode() == ISD::UNDEF)
3810 // Expand memset to a series of load/store ops if the size operand
3811 // falls below a certain threshold.
3812 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3813 std::vector<EVT> MemOps;
3814 bool DstAlignCanChange = false;
3815 MachineFunction &MF = DAG.getMachineFunction();
3816 MachineFrameInfo *MFI = MF.getFrameInfo();
3817 bool OptSize = MF.getFunction()->getAttributes().
3818 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3819 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3820 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3821 DstAlignCanChange = true;
3822 bool IsZeroVal =
3823 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
3824 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
3825 Size, (DstAlignCanChange ? 0 : Align), 0,
3826 true, IsZeroVal, false, true, DAG, TLI))
3829 if (DstAlignCanChange) {
3830 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3831 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3832 if (NewAlign > Align) {
3833 // Give the stack frame object a larger alignment if needed.
3834 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3835 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3840 SmallVector<SDValue, 8> OutChains;
3841 uint64_t DstOff = 0;
3842 unsigned NumMemOps = MemOps.size();
3844 // Find the largest store and generate the bit pattern for it.
3845 EVT LargestVT = MemOps[0];
3846 for (unsigned i = 1; i < NumMemOps; i++)
3847 if (MemOps[i].bitsGT(LargestVT))
3848 LargestVT = MemOps[i];
3849 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
3851 for (unsigned i = 0; i < NumMemOps; i++) {
3853 unsigned VTSize = VT.getSizeInBits() / 8;
3854 if (VTSize > Size) {
3855 // Issuing an unaligned load / store pair that overlaps with the previous
3856 // pair. Adjust the offset accordingly.
3857 assert(i == NumMemOps-1 && i != 0);
3858 DstOff -= VTSize - Size;
3861 // If this store is smaller than the largest store, see whether we can get
3862 // the smaller value for free with a truncate.
3863 SDValue Value = MemSetValue;
3864 if (VT.bitsLT(LargestVT)) {
3865 if (!LargestVT.isVector() && !VT.isVector() &&
3866 TLI.isTruncateFree(LargestVT, VT))
3867 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
3869 Value = getMemsetValue(Src, VT, DAG, dl);
3871 assert(Value.getValueType() == VT && "Value with wrong type.");
3872 SDValue Store = DAG.getStore(Chain, dl, Value,
3873 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3874 DstPtrInfo.getWithOffset(DstOff),
3875 isVol, false, Align);
3876 OutChains.push_back(Store);
3877 DstOff += VT.getSizeInBits() / 8;
3881 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3882 &OutChains[0], OutChains.size());
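/// getMemcpy - Build the DAG for a memcpy. The lowering is attempted in
/// order: an inline sequence of loads and stores, target-specific code via
/// EmitTargetCodeForMemcpy, and finally a call to the memcpy library function.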
3885 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
3886 SDValue Src, SDValue Size,
3887 unsigned Align, bool isVol, bool AlwaysInline,
3888 MachinePointerInfo DstPtrInfo,
3889 MachinePointerInfo SrcPtrInfo) {
3890 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
3892 // Check to see if we should lower the memcpy to loads and stores first.
3893 // For cases within the target-specified limits, this is the best choice.
3894 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3896 // Memcpy with size zero? Just return the original chain.
3897 if (ConstantSize->isNullValue())
3900 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3901 ConstantSize->getZExtValue(),Align,
3902 isVol, false, DstPtrInfo, SrcPtrInfo);
3903 if (Result.getNode())
3907 // Then check to see if we should lower the memcpy with target-specific
3908 // code. If the target chooses to do this, this is the next best.
3910 TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
3911 isVol, AlwaysInline,
3912 DstPtrInfo, SrcPtrInfo);
3913 if (Result.getNode())
3916 // If we really need inline code and the target declined to provide it,
3917 // use a (potentially long) sequence of loads and stores.
3919 assert(ConstantSize && "AlwaysInline requires a constant size!");
3920 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3921 ConstantSize->getZExtValue(), Align, isVol,
3922 true, DstPtrInfo, SrcPtrInfo);
3925 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
3926 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
3927 // respect volatile, so they may do things like read or write memory
3928 // beyond the given memory regions. But fixing this isn't easy, and most
3929 // people don't care.
3931 const TargetLowering *TLI = TM.getTargetLowering();
3933 // Emit a library call.
3934 TargetLowering::ArgListTy Args;
3935 TargetLowering::ArgListEntry Entry;
3936 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
3937 Entry.Node = Dst; Args.push_back(Entry);
3938 Entry.Node = Src; Args.push_back(Entry);
3939 Entry.Node = Size; Args.push_back(Entry);
3940 // FIXME: pass in SDLoc
3942 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
3943 false, false, false, false, 0,
3944 TLI->getLibcallCallingConv(RTLIB::MEMCPY),
3945 /*isTailCall=*/false,
3946 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
3947 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
3948 TLI->getPointerTy()),
3950 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
3952 return CallResult.second;
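/// getMemmove - Build the DAG for a memmove: try an inline expansion for
/// small constant sizes, then target-specific lowering, and fall back to a
/// call to the memmove library function.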
3955 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
3956 SDValue Src, SDValue Size,
3957 unsigned Align, bool isVol,
3958 MachinePointerInfo DstPtrInfo,
3959 MachinePointerInfo SrcPtrInfo) {
3960 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
3962 // Check to see if we should lower the memmove to loads and stores first.
3963 // For cases within the target-specified limits, this is the best choice.
3964 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3966 // Memmove with size zero? Just return the original chain.
3967 if (ConstantSize->isNullValue())
3971 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
3972 ConstantSize->getZExtValue(), Align, isVol,
3973 false, DstPtrInfo, SrcPtrInfo);
3974 if (Result.getNode())
3978 // Then check to see if we should lower the memmove with target-specific
3979 // code. If the target chooses to do this, this is the next best.
3981 TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
3982 DstPtrInfo, SrcPtrInfo);
3983 if (Result.getNode())
3986 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
3987 // not be safe. See memcpy above for more details.
3989 const TargetLowering *TLI = TM.getTargetLowering();
3991 // Emit a library call.
3992 TargetLowering::ArgListTy Args;
3993 TargetLowering::ArgListEntry Entry;
3994 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
3995 Entry.Node = Dst; Args.push_back(Entry);
3996 Entry.Node = Src; Args.push_back(Entry);
3997 Entry.Node = Size; Args.push_back(Entry);
3998 // FIXME: pass in SDLoc
4000 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4001 false, false, false, false, 0,
4002 TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
4003 /*isTailCall=*/false,
4004 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4005 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
4006 TLI->getPointerTy()),
4008 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4010 return CallResult.second;
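/// getMemset - Build the DAG for a memset: try an inline sequence of stores
/// for small constant sizes, then target-specific lowering, and fall back to
/// a call to the memset library function.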
4013 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
4014 SDValue Src, SDValue Size,
4015 unsigned Align, bool isVol,
4016 MachinePointerInfo DstPtrInfo) {
4017 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4019 // Check to see if we should lower the memset to stores first.
4020 // For cases within the target-specified limits, this is the best choice.
4021 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4023 // Memset with size zero? Just return the original chain.
4024 if (ConstantSize->isNullValue())
4028 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4029 Align, isVol, DstPtrInfo);
4031 if (Result.getNode())
4035 // Then check to see if we should lower the memset with target-specific
4036 // code. If the target chooses to do this, this is the next best.
4038 TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4040 if (Result.getNode())
4043 // Emit a library call.
4044 const TargetLowering *TLI = TM.getTargetLowering();
4045 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
4046 TargetLowering::ArgListTy Args;
4047 TargetLowering::ArgListEntry Entry;
4048 Entry.Node = Dst; Entry.Ty = IntPtrTy;
4049 Args.push_back(Entry);
4050 // Extend or truncate the argument to be an i32 value for the call.
4051 if (Src.getValueType().bitsGT(MVT::i32))
4052 Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4054 Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4056 Entry.Ty = Type::getInt32Ty(*getContext());
4057 Entry.isSExt = true;
4058 Args.push_back(Entry);
4060 Entry.Ty = IntPtrTy;
4061 Entry.isSExt = false;
4062 Args.push_back(Entry);
4063 // FIXME: pass in SDLoc
4065 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4066 false, false, false, false, 0,
4067 TLI->getLibcallCallingConv(RTLIB::MEMSET),
4068 /*isTailCall=*/false,
4069 /*doesNotReturn*/false, /*isReturnValueUsed=*/false,
4070 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4071 TLI->getPointerTy()),
4073 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4075 return CallResult.second;
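/// getAtomic - Construct an atomic operation node. The overloads taking a
/// MachinePointerInfo or a Value* first build a MachineMemOperand (currently
/// always marked volatile) and then defer to the MMO-based overloads, which
/// perform CSE and create the AtomicSDNode.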
4078 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4079 SDValue Chain, SDValue Ptr, SDValue Cmp,
4080 SDValue Swp, MachinePointerInfo PtrInfo,
4082 AtomicOrdering Ordering,
4083 SynchronizationScope SynchScope) {
4084 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4085 Alignment = getEVTAlignment(MemVT);
4087 MachineFunction &MF = getMachineFunction();
4089 // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
4090 // For now, atomics are considered to be volatile always.
4091 // FIXME: Volatile isn't really correct; we should keep track of atomic
4092 // orderings in the memoperand.
4093 unsigned Flags = MachineMemOperand::MOVolatile;
4094 if (Opcode != ISD::ATOMIC_STORE)
4095 Flags |= MachineMemOperand::MOLoad;
4096 if (Opcode != ISD::ATOMIC_LOAD)
4097 Flags |= MachineMemOperand::MOStore;
4099 MachineMemOperand *MMO =
4100 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4102 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
4103 Ordering, SynchScope);
4106 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4108 SDValue Ptr, SDValue Cmp,
4109 SDValue Swp, MachineMemOperand *MMO,
4110 AtomicOrdering Ordering,
4111 SynchronizationScope SynchScope) {
4112 assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
4113 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4115 EVT VT = Cmp.getValueType();
4117 SDVTList VTs = getVTList(VT, MVT::Other);
4118 FoldingSetNodeID ID;
4119 ID.AddInteger(MemVT.getRawBits());
4120 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4121 AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
4122 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4124 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4125 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4126 return SDValue(E, 0);
4128 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, MemVT, Chain,
4129 Ptr, Cmp, Swp, MMO, Ordering,
4131 CSEMap.InsertNode(N, IP);
4132 AllNodes.push_back(N);
4133 return SDValue(N, 0);
4136 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4138 SDValue Ptr, SDValue Val,
4139 const Value* PtrVal,
4141 AtomicOrdering Ordering,
4142 SynchronizationScope SynchScope) {
4143 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4144 Alignment = getEVTAlignment(MemVT);
4146 MachineFunction &MF = getMachineFunction();
4147 // An atomic store does not load. An atomic load does not store.
4148 // (An atomicrmw obviously both loads and stores.)
4149 // For now, atomics are considered to be volatile always, and they are chained as such.
4151 // FIXME: Volatile isn't really correct; we should keep track of atomic
4152 // orderings in the memoperand.
4153 unsigned Flags = MachineMemOperand::MOVolatile;
4154 if (Opcode != ISD::ATOMIC_STORE)
4155 Flags |= MachineMemOperand::MOLoad;
4156 if (Opcode != ISD::ATOMIC_LOAD)
4157 Flags |= MachineMemOperand::MOStore;
4159 MachineMemOperand *MMO =
4160 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4161 MemVT.getStoreSize(), Alignment);
4163 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4164 Ordering, SynchScope);
4167 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4169 SDValue Ptr, SDValue Val,
4170 MachineMemOperand *MMO,
4171 AtomicOrdering Ordering,
4172 SynchronizationScope SynchScope) {
4173 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4174 Opcode == ISD::ATOMIC_LOAD_SUB ||
4175 Opcode == ISD::ATOMIC_LOAD_AND ||
4176 Opcode == ISD::ATOMIC_LOAD_OR ||
4177 Opcode == ISD::ATOMIC_LOAD_XOR ||
4178 Opcode == ISD::ATOMIC_LOAD_NAND ||
4179 Opcode == ISD::ATOMIC_LOAD_MIN ||
4180 Opcode == ISD::ATOMIC_LOAD_MAX ||
4181 Opcode == ISD::ATOMIC_LOAD_UMIN ||
4182 Opcode == ISD::ATOMIC_LOAD_UMAX ||
4183 Opcode == ISD::ATOMIC_SWAP ||
4184 Opcode == ISD::ATOMIC_STORE) &&
4185 "Invalid Atomic Op");
4187 EVT VT = Val.getValueType();
4189 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4190 getVTList(VT, MVT::Other);
4191 FoldingSetNodeID ID;
4192 ID.AddInteger(MemVT.getRawBits());
4193 SDValue Ops[] = {Chain, Ptr, Val};
4194 AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
4195 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4197 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4198 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4199 return SDValue(E, 0);
4201 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, MemVT, Chain,
4203 Ordering, SynchScope);
4204 CSEMap.InsertNode(N, IP);
4205 AllNodes.push_back(N);
4206 return SDValue(N, 0);
4209 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4210 EVT VT, SDValue Chain,
4212 const Value* PtrVal,
4214 AtomicOrdering Ordering,
4215 SynchronizationScope SynchScope) {
4216 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4217 Alignment = getEVTAlignment(MemVT);
4219 MachineFunction &MF = getMachineFunction();
4220 // An atomic store does not load. An atomic load does not store.
4221 // (An atomicrmw obviously both loads and stores.)
4222 // For now, atomics are considered to be volatile always, and they are chained as such.
4224 // FIXME: Volatile isn't really correct; we should keep track of atomic
4225 // orderings in the memoperand.
4226 unsigned Flags = MachineMemOperand::MOVolatile;
4227 if (Opcode != ISD::ATOMIC_STORE)
4228 Flags |= MachineMemOperand::MOLoad;
4229 if (Opcode != ISD::ATOMIC_LOAD)
4230 Flags |= MachineMemOperand::MOStore;
4232 MachineMemOperand *MMO =
4233 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4234 MemVT.getStoreSize(), Alignment);
4236 return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO,
4237 Ordering, SynchScope);
4240 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4241 EVT VT, SDValue Chain,
4243 MachineMemOperand *MMO,
4244 AtomicOrdering Ordering,
4245 SynchronizationScope SynchScope) {
4246 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4248 SDVTList VTs = getVTList(VT, MVT::Other);
4249 FoldingSetNodeID ID;
4250 ID.AddInteger(MemVT.getRawBits());
4251 SDValue Ops[] = {Chain, Ptr};
4252 AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
4253 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4255 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4256 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4257 return SDValue(E, 0);
4259 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, MemVT, Chain,
4260 Ptr, MMO, Ordering, SynchScope);
4261 CSEMap.InsertNode(N, IP);
4262 AllNodes.push_back(N);
4263 return SDValue(N, 0);
4266 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
4267 SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
4272 SmallVector<EVT, 4> VTs;
4273 VTs.reserve(NumOps);
4274 for (unsigned i = 0; i < NumOps; ++i)
4275 VTs.push_back(Ops[i].getValueType());
4276 return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps),
4281 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl,
4282 const EVT *VTs, unsigned NumVTs,
4283 const SDValue *Ops, unsigned NumOps,
4284 EVT MemVT, MachinePointerInfo PtrInfo,
4285 unsigned Align, bool Vol,
4286 bool ReadMem, bool WriteMem) {
4287 return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
4288 MemVT, PtrInfo, Align, Vol,
4293 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4294 const SDValue *Ops, unsigned NumOps,
4295 EVT MemVT, MachinePointerInfo PtrInfo,
4296 unsigned Align, bool Vol,
4297 bool ReadMem, bool WriteMem) {
4298 if (Align == 0) // Ensure that codegen never sees alignment 0
4299 Align = getEVTAlignment(MemVT);
4301 MachineFunction &MF = getMachineFunction();
4304 Flags |= MachineMemOperand::MOStore;
4306 Flags |= MachineMemOperand::MOLoad;
4308 Flags |= MachineMemOperand::MOVolatile;
4309 MachineMemOperand *MMO =
4310 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
4312 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
4316 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4317 const SDValue *Ops, unsigned NumOps,
4318 EVT MemVT, MachineMemOperand *MMO) {
4319 assert((Opcode == ISD::INTRINSIC_VOID ||
4320 Opcode == ISD::INTRINSIC_W_CHAIN ||
4321 Opcode == ISD::PREFETCH ||
4322 Opcode == ISD::LIFETIME_START ||
4323 Opcode == ISD::LIFETIME_END ||
4324 (Opcode <= INT_MAX &&
4325 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4326 "Opcode is not a memory-accessing opcode!");
4328 // Memoize the node unless it returns a flag.
4329 MemIntrinsicSDNode *N;
4330 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4331 FoldingSetNodeID ID;
4332 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4333 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4335 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4336 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4337 return SDValue(E, 0);
4340 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTList, Ops, NumOps,
4342 CSEMap.InsertNode(N, IP);
4344 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTList, Ops, NumOps,
4347 AllNodes.push_back(N);
4348 return SDValue(N, 0);
4351 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4352 /// MachinePointerInfo record from it. This is particularly useful because the
4353 /// code generator has many cases where it doesn't bother passing in a
4354 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4355 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4356 // If this is FI+Offset, we can model it.
4357 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4358 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4360 // If this is (FI+Offset1)+Offset2, we can model it.
4361 if (Ptr.getOpcode() != ISD::ADD ||
4362 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4363 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4364 return MachinePointerInfo();
4366 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4367 return MachinePointerInfo::getFixedStack(FI, Offset+
4368 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
4371 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4372 /// MachinePointerInfo record from it. This is particularly useful because the
4373 /// code generator has many cases where it doesn't bother passing in a
4374 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4375 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4376 // If the 'Offset' value isn't a constant, we can't handle this.
4377 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4378 return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4379 if (OffsetOp.getOpcode() == ISD::UNDEF)
4380 return InferPointerInfo(Ptr);
4381 return MachinePointerInfo();
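/// getLoad - Common implementation for indexed and extending loads: build a
/// MachineMemOperand from the pointer info, alignment, and flags, then defer
/// to the MMO-based form below.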
4386 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4387 EVT VT, SDLoc dl, SDValue Chain,
4388 SDValue Ptr, SDValue Offset,
4389 MachinePointerInfo PtrInfo, EVT MemVT,
4390 bool isVolatile, bool isNonTemporal, bool isInvariant,
4391 unsigned Alignment, const MDNode *TBAAInfo,
4392 const MDNode *Ranges) {
4393 assert(Chain.getValueType() == MVT::Other &&
4394 "Invalid chain type");
4395 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4396 Alignment = getEVTAlignment(VT);
4398 unsigned Flags = MachineMemOperand::MOLoad;
4400 Flags |= MachineMemOperand::MOVolatile;
4402 Flags |= MachineMemOperand::MONonTemporal;
4404 Flags |= MachineMemOperand::MOInvariant;
4406 // If we don't have a PtrInfo, infer the trivial frame index case to simplify the code below.
4409 PtrInfo = InferPointerInfo(Ptr, Offset);
4411 MachineFunction &MF = getMachineFunction();
4412 MachineMemOperand *MMO =
4413 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
4415 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
4419 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4420 EVT VT, SDLoc dl, SDValue Chain,
4421 SDValue Ptr, SDValue Offset, EVT MemVT,
4422 MachineMemOperand *MMO) {
4424 ExtType = ISD::NON_EXTLOAD;
4425 } else if (ExtType == ISD::NON_EXTLOAD) {
4426 assert(VT == MemVT && "Non-extending load from different memory type!");
4429 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
4430 "Should only be an extending load, not truncating!");
4431 assert(VT.isInteger() == MemVT.isInteger() &&
4432 "Cannot convert from FP to Int or Int -> FP!");
4433 assert(VT.isVector() == MemVT.isVector() &&
4434 "Cannot use trunc store to convert to or from a vector!");
4435 assert((!VT.isVector() ||
4436 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
4437 "Cannot use trunc store to change the number of vector elements!");
4440 bool Indexed = AM != ISD::UNINDEXED;
4441 assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
4442 "Unindexed load with an offset!");
4444 SDVTList VTs = Indexed ?
4445 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4446 SDValue Ops[] = { Chain, Ptr, Offset };
4447 FoldingSetNodeID ID;
4448 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
4449 ID.AddInteger(MemVT.getRawBits());
4450 ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4451 MMO->isNonTemporal(),
4452 MMO->isInvariant()));
4453 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4455 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4456 cast<LoadSDNode>(E)->refineAlignment(MMO);
4457 return SDValue(E, 0);
4459 SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(), dl.getDebugLoc(), VTs, AM, ExtType,
4461 CSEMap.InsertNode(N, IP);
4462 AllNodes.push_back(N);
4463 return SDValue(N, 0);
4466 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4467 SDValue Chain, SDValue Ptr,
4468 MachinePointerInfo PtrInfo,
4469 bool isVolatile, bool isNonTemporal,
4470 bool isInvariant, unsigned Alignment,
4471 const MDNode *TBAAInfo,
4472 const MDNode *Ranges) {
4473 SDValue Undef = getUNDEF(Ptr.getValueType());
4474 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4475 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4479 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4480 SDValue Chain, SDValue Ptr,
4481 MachinePointerInfo PtrInfo, EVT MemVT,
4482 bool isVolatile, bool isNonTemporal,
4483 unsigned Alignment, const MDNode *TBAAInfo) {
4484 SDValue Undef = getUNDEF(Ptr.getValueType());
4485 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4486 PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
4492 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
4493 SDValue Offset, ISD::MemIndexedMode AM) {
4494 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4495 assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4496 "Load is already a indexed load!");
4497 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4498 LD->getChain(), Base, Offset, LD->getPointerInfo(),
4499 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4500 false, LD->getAlignment());
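/// getStore - Create an unindexed, non-truncating store, building the
/// MachineMemOperand from the pointer info and flags before deferring to the
/// MMO-based form below.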
4503 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4504 SDValue Ptr, MachinePointerInfo PtrInfo,
4505 bool isVolatile, bool isNonTemporal,
4506 unsigned Alignment, const MDNode *TBAAInfo) {
4507 assert(Chain.getValueType() == MVT::Other &&
4508 "Invalid chain type");
4509 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4510 Alignment = getEVTAlignment(Val.getValueType());
4512 unsigned Flags = MachineMemOperand::MOStore;
4514 Flags |= MachineMemOperand::MOVolatile;
4516 Flags |= MachineMemOperand::MONonTemporal;
4519 PtrInfo = InferPointerInfo(Ptr);
4521 MachineFunction &MF = getMachineFunction();
4522 MachineMemOperand *MMO =
4523 MF.getMachineMemOperand(PtrInfo, Flags,
4524 Val.getValueType().getStoreSize(), Alignment,
4527 return getStore(Chain, dl, Val, Ptr, MMO);
4530 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4531 SDValue Ptr, MachineMemOperand *MMO) {
4532 assert(Chain.getValueType() == MVT::Other &&
4533 "Invalid chain type");
4534 EVT VT = Val.getValueType();
4535 SDVTList VTs = getVTList(MVT::Other);
4536 SDValue Undef = getUNDEF(Ptr.getValueType());
4537 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4538 FoldingSetNodeID ID;
4539 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4540 ID.AddInteger(VT.getRawBits());
4541 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4542 MMO->isNonTemporal(), MMO->isInvariant()));
4543 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4545 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4546 cast<StoreSDNode>(E)->refineAlignment(MMO);
4547 return SDValue(E, 0);
4549 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(), dl.getDebugLoc(), VTs, ISD::UNINDEXED,
4551 CSEMap.InsertNode(N, IP);
4552 AllNodes.push_back(N);
4553 return SDValue(N, 0);
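/// getTruncStore - Create a truncating store, building the MachineMemOperand
/// from the pointer info and flags before deferring to the MMO-based form
/// below.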
4556 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4557 SDValue Ptr, MachinePointerInfo PtrInfo,
4558 EVT SVT,bool isVolatile, bool isNonTemporal,
4560 const MDNode *TBAAInfo) {
4561 assert(Chain.getValueType() == MVT::Other &&
4562 "Invalid chain type");
4563 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4564 Alignment = getEVTAlignment(SVT);
4566 unsigned Flags = MachineMemOperand::MOStore;
4568 Flags |= MachineMemOperand::MOVolatile;
4570 Flags |= MachineMemOperand::MONonTemporal;
4573 PtrInfo = InferPointerInfo(Ptr);
4575 MachineFunction &MF = getMachineFunction();
4576 MachineMemOperand *MMO =
4577 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
4580 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
4583 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4584 SDValue Ptr, EVT SVT,
4585 MachineMemOperand *MMO) {
4586 EVT VT = Val.getValueType();
4588 assert(Chain.getValueType() == MVT::Other &&
4589 "Invalid chain type");
4591 return getStore(Chain, dl, Val, Ptr, MMO);
4593 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
4594 "Should only be a truncating store, not extending!");
4595 assert(VT.isInteger() == SVT.isInteger() &&
4596 "Can't do FP-INT conversion!");
4597 assert(VT.isVector() == SVT.isVector() &&
4598 "Cannot use trunc store to convert to or from a vector!");
4599 assert((!VT.isVector() ||
4600 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
4601 "Cannot use trunc store to change the number of vector elements!");
4603 SDVTList VTs = getVTList(MVT::Other);
4604 SDValue Undef = getUNDEF(Ptr.getValueType());
4605 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4606 FoldingSetNodeID ID;
4607 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4608 ID.AddInteger(SVT.getRawBits());
4609 ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
4610 MMO->isNonTemporal(), MMO->isInvariant()));
4611 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4613 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4614 cast<StoreSDNode>(E)->refineAlignment(MMO);
4615 return SDValue(E, 0);
4617 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(), dl.getDebugLoc(), VTs, ISD::UNINDEXED,
4619 CSEMap.InsertNode(N, IP);
4620 AllNodes.push_back(N);
4621 return SDValue(N, 0);
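/// getIndexedStore - Convert an existing unindexed store into a pre- or
/// post-indexed store with the given base and offset.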
4625 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
4626 SDValue Offset, ISD::MemIndexedMode AM) {
4627 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
4628 assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4629 "Store is already a indexed store!");
4630 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4631 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4632 FoldingSetNodeID ID;
4633 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4634 ID.AddInteger(ST->getMemoryVT().getRawBits());
4635 ID.AddInteger(ST->getRawSubclassData());
4636 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4638 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4639 return SDValue(E, 0);
4641 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
4642 ST->isTruncatingStore(),
4644 ST->getMemOperand());
4645 CSEMap.InsertNode(N, IP);
4646 AllNodes.push_back(N);
4647 return SDValue(N, 0);
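/// getVAArg - Create a VAARG node with the given value type, va_list pointer,
/// source value, and alignment operand.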
4650 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
4651 SDValue Chain, SDValue Ptr,
4654 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4655 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
4658 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4659 const SDUse *Ops, unsigned NumOps) {
4661 case 0: return getNode(Opcode, DL, VT);
4662 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4663 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4664 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4668 // Copy from an SDUse array into an SDValue array for use with
4669 // the regular getNode logic.
4670 SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
4671 return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
4674 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4675 const SDValue *Ops, unsigned NumOps) {
4677 case 0: return getNode(Opcode, DL, VT);
4678 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4679 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4680 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4686 case ISD::SELECT_CC: {
4687 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4688 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4689 "LHS and RHS of condition must have same type!");
4690 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4691 "True and False arms of SelectCC must have same type!");
4692 assert(Ops[2].getValueType() == VT &&
4693 "select_cc node must be of same type as true and false value!");
4697 assert(NumOps == 5 && "BR_CC takes 5 operands!");
4698 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4699 "LHS/RHS of comparison should match types!");
4706 SDVTList VTs = getVTList(VT);
4708 if (VT != MVT::Glue) {
4709 FoldingSetNodeID ID;
4710 AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
4713 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4714 return SDValue(E, 0);
4716 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, Ops, NumOps);
4717 CSEMap.InsertNode(N, IP);
4719 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, Ops, NumOps);
4722 AllNodes.push_back(N);
4726 return SDValue(N, 0);
4729 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4730 ArrayRef<EVT> ResultTys,
4731 const SDValue *Ops, unsigned NumOps) {
4732 return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
4736 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4737 const EVT *VTs, unsigned NumVTs,
4738 const SDValue *Ops, unsigned NumOps) {
4740 return getNode(Opcode, DL, VTs[0], Ops, NumOps);
4741 return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
4744 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4745 const SDValue *Ops, unsigned NumOps) {
4746 if (VTList.NumVTs == 1)
4747 return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
4751 // FIXME: figure out how to safely handle things like
4752 // int foo(int x) { return 1 << (x & 255); }
4753 // int bar() { return foo(256); }
4754 case ISD::SRA_PARTS:
4755 case ISD::SRL_PARTS:
4756 case ISD::SHL_PARTS:
4757 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4758 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
4759 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4760 else if (N3.getOpcode() == ISD::AND)
4761 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
4762 // If the and is only masking out bits that cannot affect the shift,
4763 // eliminate the and.
4764 unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
4765 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
4766 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4772 // Memoize the node unless it returns a flag.
4774 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4775 FoldingSetNodeID ID;
4776 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4778 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4779 return SDValue(E, 0);
4782 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0]);
4783 } else if (NumOps == 2) {
4784 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0], Ops[1]);
4785 } else if (NumOps == 3) {
4786 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0], Ops[1],
4789 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops, NumOps);
4791 CSEMap.InsertNode(N, IP);
4794 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0]);
4795 } else if (NumOps == 2) {
4796 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0], Ops[1]);
4797 } else if (NumOps == 3) {
4798 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0], Ops[1],
4801 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops, NumOps);
4804 AllNodes.push_back(N);
4808 return SDValue(N, 0);
4811 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
4812 return getNode(Opcode, DL, VTList, 0, 0);
4815 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4817 SDValue Ops[] = { N1 };
4818 return getNode(Opcode, DL, VTList, Ops, 1);
4821 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4822 SDValue N1, SDValue N2) {
4823 SDValue Ops[] = { N1, N2 };
4824 return getNode(Opcode, DL, VTList, Ops, 2);
4827 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4828 SDValue N1, SDValue N2, SDValue N3) {
4829 SDValue Ops[] = { N1, N2, N3 };
4830 return getNode(Opcode, DL, VTList, Ops, 3);
4833 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4834 SDValue N1, SDValue N2, SDValue N3,
4836 SDValue Ops[] = { N1, N2, N3, N4 };
4837 return getNode(Opcode, DL, VTList, Ops, 4);
4840 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4841 SDValue N1, SDValue N2, SDValue N3,
4842 SDValue N4, SDValue N5) {
4843 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4844 return getNode(Opcode, DL, VTList, Ops, 5);
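/// getVTList - Return an SDVTList for the given value types. Lists with more
/// than one type are uniqued so that identical lists share a single array.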
4847 SDVTList SelectionDAG::getVTList(EVT VT) {
4848 return makeVTList(SDNode::getValueTypeList(VT), 1);
4851 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
4852 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4853 E = VTList.rend(); I != E; ++I)
4854 if (I->NumVTs == 2 && I->VTs[0] == VT1 && I->VTs[1] == VT2)
4857 EVT *Array = Allocator.Allocate<EVT>(2);
4860 SDVTList Result = makeVTList(Array, 2);
4861 VTList.push_back(Result);
4865 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
4866 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4867 E = VTList.rend(); I != E; ++I)
4868 if (I->NumVTs == 3 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4872 EVT *Array = Allocator.Allocate<EVT>(3);
4876 SDVTList Result = makeVTList(Array, 3);
4877 VTList.push_back(Result);
4881 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
4882 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4883 E = VTList.rend(); I != E; ++I)
4884 if (I->NumVTs == 4 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4885 I->VTs[2] == VT3 && I->VTs[3] == VT4)
4888 EVT *Array = Allocator.Allocate<EVT>(4);
4893 SDVTList Result = makeVTList(Array, 4);
4894 VTList.push_back(Result);
4898 SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
4900 case 0: llvm_unreachable("Cannot have nodes without results!");
4901 case 1: return getVTList(VTs[0]);
4902 case 2: return getVTList(VTs[0], VTs[1]);
4903 case 3: return getVTList(VTs[0], VTs[1], VTs[2]);
4904 case 4: return getVTList(VTs[0], VTs[1], VTs[2], VTs[3]);
4908 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4909 E = VTList.rend(); I != E; ++I) {
4910 if (I->NumVTs != NumVTs || VTs[0] != I->VTs[0] || VTs[1] != I->VTs[1])
4913 if (std::equal(&VTs[2], &VTs[NumVTs], &I->VTs[2]))
4917 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
4918 std::copy(VTs, VTs+NumVTs, Array);
4919 SDVTList Result = makeVTList(Array, NumVTs);
4920 VTList.push_back(Result);
4925 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
4926 /// specified operands. If the resultant node already exists in the DAG,
4927 /// this does not modify the specified node, instead it returns the node that
4928 /// already exists. If the resultant node does not exist in the DAG, the
4929 /// input node is returned. As a degenerate case, if you specify the same
4930 /// input operands as the node already has, the input node is returned.
4931 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
4932 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
4934 // Check to see if there is no change.
4935 if (Op == N->getOperand(0)) return N;
4937 // See if the modified node already exists.
4938 void *InsertPos = 0;
4939 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
4942 // Nope it doesn't. Remove the node from its current place in the maps.
4944 if (!RemoveNodeFromCSEMaps(N))
4947 // Now we update the operands.
4948 N->OperandList[0].set(Op);
4950 // If this gets put into a CSE map, add it.
4951 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4955 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
4956 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
4958 // Check to see if there is no change.
4959 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
4960 return N; // No operands changed, just return the input node.
4962 // See if the modified node already exists.
4963 void *InsertPos = 0;
4964 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
4967 // Nope it doesn't. Remove the node from its current place in the maps.
4969 if (!RemoveNodeFromCSEMaps(N))
4972 // Now we update the operands.
4973 if (N->OperandList[0] != Op1)
4974 N->OperandList[0].set(Op1);
4975 if (N->OperandList[1] != Op2)
4976 N->OperandList[1].set(Op2);
4978 // If this gets put into a CSE map, add it.
4979 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4983 SDNode *SelectionDAG::
4984 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
4985 SDValue Ops[] = { Op1, Op2, Op3 };
4986 return UpdateNodeOperands(N, Ops, 3);
4989 SDNode *SelectionDAG::
4990 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
4991 SDValue Op3, SDValue Op4) {
4992 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
4993 return UpdateNodeOperands(N, Ops, 4);
4996 SDNode *SelectionDAG::
4997 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
4998 SDValue Op3, SDValue Op4, SDValue Op5) {
4999 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
5000 return UpdateNodeOperands(N, Ops, 5);
5003 SDNode *SelectionDAG::
5004 UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
5005 assert(N->getNumOperands() == NumOps &&
5006 "Update with wrong number of operands");
5008 // Check to see if there is no change.
5009 bool AnyChange = false;
5010 for (unsigned i = 0; i != NumOps; ++i) {
5011 if (Ops[i] != N->getOperand(i)) {
5017 // No operands changed, just return the input node.
5018 if (!AnyChange) return N;
5020 // See if the modified node already exists.
5021 void *InsertPos = 0;
5022 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
5025 // Nope it doesn't. Remove the node from its current place in the maps.
5027 if (!RemoveNodeFromCSEMaps(N))
5030 // Now we update the operands.
5031 for (unsigned i = 0; i != NumOps; ++i)
5032 if (N->OperandList[i] != Ops[i])
5033 N->OperandList[i].set(Ops[i]);
5035 // If this gets put into a CSE map, add it.
5036 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5040 /// DropOperands - Release the operands and set this node to have
5041 /// zero operands.
5042 void SDNode::DropOperands() {
5043 // Unlike the code in MorphNodeTo that does this, we don't need to
5044 // watch for dead nodes here.
5045 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5051 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
5052 /// machine opcode.
5054 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5056 SDVTList VTs = getVTList(VT);
5057 return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
5060 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5061 EVT VT, SDValue Op1) {
5062 SDVTList VTs = getVTList(VT);
5063 SDValue Ops[] = { Op1 };
5064 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5067 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5068 EVT VT, SDValue Op1,
5070 SDVTList VTs = getVTList(VT);
5071 SDValue Ops[] = { Op1, Op2 };
5072 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5075 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5076 EVT VT, SDValue Op1,
5077 SDValue Op2, SDValue Op3) {
5078 SDVTList VTs = getVTList(VT);
5079 SDValue Ops[] = { Op1, Op2, Op3 };
5080 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5083 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5084 EVT VT, const SDValue *Ops,
5086 SDVTList VTs = getVTList(VT);
5087 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5090 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5091 EVT VT1, EVT VT2, const SDValue *Ops,
5093 SDVTList VTs = getVTList(VT1, VT2);
5094 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5097 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5099 SDVTList VTs = getVTList(VT1, VT2);
5100 return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
5103 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5104 EVT VT1, EVT VT2, EVT VT3,
5105 const SDValue *Ops, unsigned NumOps) {
5106 SDVTList VTs = getVTList(VT1, VT2, VT3);
5107 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5110 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5111 EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5112 const SDValue *Ops, unsigned NumOps) {
5113 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5114 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5117 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5120 SDVTList VTs = getVTList(VT1, VT2);
5121 SDValue Ops[] = { Op1 };
5122 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5125 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5127 SDValue Op1, SDValue Op2) {
5128 SDVTList VTs = getVTList(VT1, VT2);
5129 SDValue Ops[] = { Op1, Op2 };
5130 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5133 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5135 SDValue Op1, SDValue Op2,
5137 SDVTList VTs = getVTList(VT1, VT2);
5138 SDValue Ops[] = { Op1, Op2, Op3 };
5139 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5142 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5143 EVT VT1, EVT VT2, EVT VT3,
5144 SDValue Op1, SDValue Op2,
5146 SDVTList VTs = getVTList(VT1, VT2, VT3);
5147 SDValue Ops[] = { Op1, Op2, Op3 };
5148 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5151 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5152 SDVTList VTs, const SDValue *Ops,
5154 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
5155 // Reset the NodeID to -1.
5160 /// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
5161 /// the line number information on the merged node, since it is not possible to
5162 /// preserve that information when an operation is associated with multiple lines.
5163 /// This will make the debugger work better at -O0, where there is a higher
5164 /// probability of having other instructions associated with that line.
5166 /// For IROrder, we keep the smaller of the two
5167 SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
5168 DebugLoc NLoc = N->getDebugLoc();
5169 if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
5170 (OLoc.getDebugLoc() != NLoc)) {
5171 N->setDebugLoc(DebugLoc());
5173 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
5174 N->setIROrder(Order);
5178 /// MorphNodeTo - This *mutates* the specified node to have the specified
5179 /// return type, opcode, and operands.
5181 /// Note that MorphNodeTo returns the resultant node. If there is already a
5182 /// node of the specified opcode and operands, it returns that node instead of
5183 /// the current one. Note that the SDLoc need not be the same.
5185 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5186 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5187 /// node, and because it doesn't require CSE recalculation for any of
5188 /// the node's users.
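///
/// A minimal usage sketch (illustrative only; the opcode and operands are
/// assumptions, and 'CurDAG' is assumed to be a SelectionDAG*, as in a
/// target's instruction selector). The caller must honor the return value,
/// because an equivalent node may already exist:
/// \code
///   // N is assumed to produce a single i32 result.
///   SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
///   SDVTList VTs = CurDAG->getVTList(MVT::i32);
///   SDNode *Res = CurDAG->MorphNodeTo(N, ISD::ADD, VTs, Ops, 2);
///   if (Res != N)
///     // An identical node already existed; redirect N's users to it.
///     CurDAG->ReplaceAllUsesWith(N, Res);
/// \endcode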
5190 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5191 SDVTList VTs, const SDValue *Ops,
5193 // If an identical node already exists, use it.
5195 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5196 FoldingSetNodeID ID;
5197 AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
5198 if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5199 return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
5202 if (!RemoveNodeFromCSEMaps(N))
5205 // Start the morphing.
5207 N->ValueList = VTs.VTs;
5208 N->NumValues = VTs.NumVTs;
5210 // Clear the operands list, updating used nodes to remove this from their
5211 // use list. Keep track of any operands that become dead as a result.
5212 SmallPtrSet<SDNode*, 16> DeadNodeSet;
5213 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5215 SDNode *Used = Use.getNode();
5217 if (Used->use_empty())
5218 DeadNodeSet.insert(Used);
5221 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5222 // Initialize the memory references information.
5223 MN->setMemRefs(0, 0);
5224 // If NumOps is larger than the # of operands we can have in a
5225 // MachineSDNode, reallocate the operand list.
5226 if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5227 if (MN->OperandsNeedDelete)
5228 delete[] MN->OperandList;
5229 if (NumOps > array_lengthof(MN->LocalOperands))
5230 // We're creating a final node that will live unmorphed for the
5231 // remainder of the current SelectionDAG iteration, so we can allocate
5232 // the operands directly out of a pool with no recycling metadata.
5233 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5236 MN->InitOperands(MN->LocalOperands, Ops, NumOps);
5237 MN->OperandsNeedDelete = false;
5239 MN->InitOperands(MN->OperandList, Ops, NumOps);
5241 // If NumOps is larger than the # of operands we currently have, reallocate
5242 // the operand list.
5243 if (NumOps > N->NumOperands) {
5244 if (N->OperandsNeedDelete)
5245 delete[] N->OperandList;
5246 N->InitOperands(new SDUse[NumOps], Ops, NumOps);
5247 N->OperandsNeedDelete = true;
5249 N->InitOperands(N->OperandList, Ops, NumOps);
5252 // Delete any nodes that are still dead after adding the uses for the
5254 if (!DeadNodeSet.empty()) {
5255 SmallVector<SDNode *, 16> DeadNodes;
5256 for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
5257 E = DeadNodeSet.end(); I != E; ++I)
5258 if ((*I)->use_empty())
5259 DeadNodes.push_back(*I);
5260 RemoveDeadNodes(DeadNodes);
5264 CSEMap.InsertNode(N, IP); // Memoize the new node.
5269 /// getMachineNode - These are used for target selectors to create a new node
5270 /// with specified return type(s), MachineInstr opcode, and operands.
5272 /// Note that getMachineNode returns the resultant node. If there is already a
5273 /// node of the specified opcode and operands, it returns that node instead of
5274 /// the current one.
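///
/// A minimal usage sketch (illustrative; 'MyTarget::ADDrr' is a hypothetical
/// machine opcode and 'CurDAG' is assumed to be the selector's SelectionDAG):
/// \code
///   SDValue LHS = N->getOperand(0), RHS = N->getOperand(1);
///   MachineSDNode *Add =
///     CurDAG->getMachineNode(MyTarget::ADDrr, SDLoc(N), MVT::i32, LHS, RHS);
///   SDValue Result(Add, 0);
/// \endcode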
5276 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
5277 SDVTList VTs = getVTList(VT);
5278 return getMachineNode(Opcode, dl, VTs, None);
5282 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
5283 SDVTList VTs = getVTList(VT);
5284 SDValue Ops[] = { Op1 };
5285 return getMachineNode(Opcode, dl, VTs, Ops);
5289 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5290 SDValue Op1, SDValue Op2) {
5291 SDVTList VTs = getVTList(VT);
5292 SDValue Ops[] = { Op1, Op2 };
5293 return getMachineNode(Opcode, dl, VTs, Ops);
5297 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5298 SDValue Op1, SDValue Op2, SDValue Op3) {
5299 SDVTList VTs = getVTList(VT);
5300 SDValue Ops[] = { Op1, Op2, Op3 };
5301 return getMachineNode(Opcode, dl, VTs, Ops);
5305 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5306 ArrayRef<SDValue> Ops) {
5307 SDVTList VTs = getVTList(VT);
5308 return getMachineNode(Opcode, dl, VTs, Ops);
5312 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
5313 SDVTList VTs = getVTList(VT1, VT2);
5314 return getMachineNode(Opcode, dl, VTs, None);
5318 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5319 EVT VT1, EVT VT2, SDValue Op1) {
5320 SDVTList VTs = getVTList(VT1, VT2);
5321 SDValue Ops[] = { Op1 };
5322 return getMachineNode(Opcode, dl, VTs, Ops);
5326 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5327 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
5328 SDVTList VTs = getVTList(VT1, VT2);
5329 SDValue Ops[] = { Op1, Op2 };
5330 return getMachineNode(Opcode, dl, VTs, Ops);
5334 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5335 EVT VT1, EVT VT2, SDValue Op1,
5336 SDValue Op2, SDValue Op3) {
5337 SDVTList VTs = getVTList(VT1, VT2);
5338 SDValue Ops[] = { Op1, Op2, Op3 };
5339 return getMachineNode(Opcode, dl, VTs, Ops);
5343 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5345 ArrayRef<SDValue> Ops) {
5346 SDVTList VTs = getVTList(VT1, VT2);
5347 return getMachineNode(Opcode, dl, VTs, Ops);
5351 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5352 EVT VT1, EVT VT2, EVT VT3,
5353 SDValue Op1, SDValue Op2) {
5354 SDVTList VTs = getVTList(VT1, VT2, VT3);
5355 SDValue Ops[] = { Op1, Op2 };
5356 return getMachineNode(Opcode, dl, VTs, Ops);
5360 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5361 EVT VT1, EVT VT2, EVT VT3,
5362 SDValue Op1, SDValue Op2, SDValue Op3) {
5363 SDVTList VTs = getVTList(VT1, VT2, VT3);
5364 SDValue Ops[] = { Op1, Op2, Op3 };
5365 return getMachineNode(Opcode, dl, VTs, Ops);
5369 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5370 EVT VT1, EVT VT2, EVT VT3,
5371 ArrayRef<SDValue> Ops) {
5372 SDVTList VTs = getVTList(VT1, VT2, VT3);
5373 return getMachineNode(Opcode, dl, VTs, Ops);
5377 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
5378 EVT VT2, EVT VT3, EVT VT4,
5379 ArrayRef<SDValue> Ops) {
5380 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5381 return getMachineNode(Opcode, dl, VTs, Ops);
5385 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5386 ArrayRef<EVT> ResultTys,
5387 ArrayRef<SDValue> Ops) {
5388 SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
5389 return getMachineNode(Opcode, dl, VTs, Ops);
5393 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
5394 ArrayRef<SDValue> OpsArray) {
5395 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
5398 const SDValue *Ops = OpsArray.data();
5399 unsigned NumOps = OpsArray.size();
5402 FoldingSetNodeID ID;
5403 AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
5405 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5406 return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
5410 // Allocate a new MachineSDNode.
5411 N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5413 // Initialize the operands list.
5414 if (NumOps > array_lengthof(N->LocalOperands))
5415 // We're creating a final node that will live unmorphed for the
5416 // remainder of the current SelectionDAG iteration, so we can allocate
5417 // the operands directly out of a pool with no recycling metadata.
5418 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5421 N->InitOperands(N->LocalOperands, Ops, NumOps);
5422 N->OperandsNeedDelete = false;
5425 CSEMap.InsertNode(N, IP);
5427 AllNodes.push_back(N);
5429 VerifyMachineNode(N);
5434 /// getTargetExtractSubreg - A convenience function for creating
5435 /// TargetOpcode::EXTRACT_SUBREG nodes.
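///
/// For example (sketch only; 'MyTarget::sub_lo32' is a hypothetical
/// subregister index), extracting the low 32 bits of a 64-bit value:
/// \code
///   SDValue Lo = CurDAG->getTargetExtractSubreg(MyTarget::sub_lo32, SDLoc(N),
///                                               MVT::i32, Val64);
/// \endcode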
5437 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
5439 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5440 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5441 VT, Operand, SRIdxVal);
5442 return SDValue(Subreg, 0);
5445 /// getTargetInsertSubreg - A convenience function for creating
5446 /// TargetOpcode::INSERT_SUBREG nodes.
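///
/// For example (sketch only; 'MyTarget::sub_lo32' is again a hypothetical
/// subregister index), placing a 32-bit value into the low half of a
/// 64-bit value:
/// \code
///   SDValue Wide = CurDAG->getTargetInsertSubreg(MyTarget::sub_lo32, SDLoc(N),
///                                                MVT::i64, Val64, Lo32);
/// \endcode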
5448 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
5449 SDValue Operand, SDValue Subreg) {
5450 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5451 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5452 VT, Operand, Subreg, SRIdxVal);
5453 return SDValue(Result, 0);
5456 /// getNodeIfExists - Get the specified node if it's already available, or
5457 /// else return NULL.
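///
/// A small sketch of the intended query pattern (illustrative; 'DAG', 'LHS'
/// and 'RHS' are assumed to exist in the caller):
/// \code
///   SDValue Ops[] = { LHS, RHS };
///   if (SDNode *Existing =
///         DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(MVT::i32), Ops, 2))
///     return SDValue(Existing, 0);  // Reuse the already-CSE'd node.
/// \endcode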
5458 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5459 const SDValue *Ops, unsigned NumOps) {
5460 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5461 FoldingSetNodeID ID;
5462 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
5464 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5470 /// getDbgValue - Creates a SDDbgValue node.
5473 SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
5474 DebugLoc DL, unsigned O) {
5475 return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
5479 SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
5480 DebugLoc DL, unsigned O) {
5481 return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
5485 SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
5486 DebugLoc DL, unsigned O) {
5487 return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
5492 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5493 /// pointed to by a use iterator is deleted, increment the use iterator
5494 /// so that it doesn't dangle.
5496 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5497 SDNode::use_iterator &UI;
5498 SDNode::use_iterator &UE;
5500 virtual void NodeDeleted(SDNode *N, SDNode *E) {
5501 // Increment the iterator as needed.
5502 while (UI != UE && N == *UI)
5507 RAUWUpdateListener(SelectionDAG &d,
5508 SDNode::use_iterator &ui,
5509 SDNode::use_iterator &ue)
5510 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5515 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5516 /// This can cause recursive merging of nodes in the DAG.
5518 /// This version assumes From has a single result value.
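///
/// A typical combine-style use (sketch; 'N' is assumed to be a single-result
/// node that is being folded to its first operand):
/// \code
///   SDValue Folded = N->getOperand(0);
///   DAG.ReplaceAllUsesWith(SDValue(N, 0), Folded);
/// \endcode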
5520 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5521 SDNode *From = FromN.getNode();
5522 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5523 "Cannot replace with this method!");
5524 assert(From != To.getNode() && "Cannot replace uses of with self");
5526 // Iterate over all the existing uses of From. New uses will be added
5527 // to the beginning of the use list, which we avoid visiting.
5528 // This specifically avoids visiting uses of From that arise while the
5529 // replacement is happening, because any such uses would be the result
5530 // of CSE: If an existing node looks like From after one of its operands
5531 // is replaced by To, we don't want to replace all of its users with To
5532 // too. See PR3018 for more info.
5533 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5534 RAUWUpdateListener Listener(*this, UI, UE);
5538 // This node is about to morph, remove its old self from the CSE maps.
5539 RemoveNodeFromCSEMaps(User);
5541 // A user can appear in a use list multiple times, and when this
5542 // happens the uses are usually next to each other in the list.
5543 // To help reduce the number of CSE recomputations, process all
5544 // the uses of this user that we can find this way.
5546 SDUse &Use = UI.getUse();
5549 } while (UI != UE && *UI == User);
5551 // Now that we have modified User, add it back to the CSE maps. If it
5552 // already exists there, recursively merge the results together.
5553 AddModifiedNodeToCSEMaps(User);
5556 // If we just RAUW'd the root, take note.
5557 if (FromN == getRoot())
5561 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5562 /// This can cause recursive merging of nodes in the DAG.
5564 /// This version assumes that for each value of From, there is a
5565 /// corresponding value in To in the same position with the same type.
5567 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
5569 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
5570 assert((!From->hasAnyUseOfValue(i) ||
5571 From->getValueType(i) == To->getValueType(i)) &&
5572 "Cannot use this version of ReplaceAllUsesWith!");
5575 // Handle the trivial case.
5579 // Iterate over just the existing users of From. See the comments in
5580 // the ReplaceAllUsesWith above.
5581 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5582 RAUWUpdateListener Listener(*this, UI, UE);
5586 // This node is about to morph, remove its old self from the CSE maps.
5587 RemoveNodeFromCSEMaps(User);
5589 // A user can appear in a use list multiple times, and when this
5590 // happens the uses are usually next to each other in the list.
5591 // To help reduce the number of CSE recomputations, process all
5592 // the uses of this user that we can find this way.
5594 SDUse &Use = UI.getUse();
5597 } while (UI != UE && *UI == User);
5599 // Now that we have modified User, add it back to the CSE maps. If it
5600 // already exists there, recursively merge the results together.
5601 AddModifiedNodeToCSEMaps(User);
5604 // If we just RAUW'd the root, take note.
5605 if (From == getRoot().getNode())
5606 setRoot(SDValue(To, getRoot().getResNo()));
5609 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5610 /// This can cause recursive merging of nodes in the DAG.
5612 /// This version can replace From with any result values. To must match the
5613 /// number and types of values returned by From.
5614 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
5615 if (From->getNumValues() == 1) // Handle the simple case efficiently.
5616 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
5618 // Iterate over just the existing users of From. See the comments in
5619 // the ReplaceAllUsesWith above.
5620 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5621 RAUWUpdateListener Listener(*this, UI, UE);
5625 // This node is about to morph, remove its old self from the CSE maps.
5626 RemoveNodeFromCSEMaps(User);
5628 // A user can appear in a use list multiple times, and when this
5629 // happens the uses are usually next to each other in the list.
5630 // To help reduce the number of CSE recomputations, process all
5631 // the uses of this user that we can find this way.
5633 SDUse &Use = UI.getUse();
5634 const SDValue &ToOp = To[Use.getResNo()];
5637 } while (UI != UE && *UI == User);
5639 // Now that we have modified User, add it back to the CSE maps. If it
5640 // already exists there, recursively merge the results together.
5641 AddModifiedNodeToCSEMaps(User);
5644 // If we just RAUW'd the root, take note.
5645 if (From == getRoot().getNode())
5646 setRoot(SDValue(To[getRoot().getResNo()]));
5649 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
5650 /// uses of other values produced by From.getNode() alone. The Deleted
5651 /// vector is handled the same way as for ReplaceAllUsesWith.
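///
/// Sketch (illustrative; 'Ld' and 'NewChain' are assumed to exist): redirect
/// only the chain result of a load, leaving the loaded value untouched:
/// \code
///   DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
/// \endcode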
5652 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
5653 // Handle the really simple, really trivial case efficiently.
5654 if (From == To) return;
5656 // Handle the simple, trivial case efficiently.
5657 if (From.getNode()->getNumValues() == 1) {
5658 ReplaceAllUsesWith(From, To);
5662 // Iterate over just the existing users of From. See the comments in
5663 // the ReplaceAllUsesWith above.
5664 SDNode::use_iterator UI = From.getNode()->use_begin(),
5665 UE = From.getNode()->use_end();
5666 RAUWUpdateListener Listener(*this, UI, UE);
5669 bool UserRemovedFromCSEMaps = false;
5671 // A user can appear in a use list multiple times, and when this
5672 // happens the uses are usually next to each other in the list.
5673 // To help reduce the number of CSE recomputations, process all
5674 // the uses of this user that we can find this way.
5676 SDUse &Use = UI.getUse();
5678 // Skip uses of different values from the same node.
5679 if (Use.getResNo() != From.getResNo()) {
5684 // If this node hasn't been modified yet, it's still in the CSE maps,
5685 // so remove its old self from the CSE maps.
5686 if (!UserRemovedFromCSEMaps) {
5687 RemoveNodeFromCSEMaps(User);
5688 UserRemovedFromCSEMaps = true;
5693 } while (UI != UE && *UI == User);
5695 // We are iterating over all uses of the From node, so if a use
5696 // doesn't use the specific value, no changes are made.
5697 if (!UserRemovedFromCSEMaps)
5700 // Now that we have modified User, add it back to the CSE maps. If it
5701 // already exists there, recursively merge the results together.
5702 AddModifiedNodeToCSEMaps(User);
5705 // If we just RAUW'd the root, take note.
5706 if (From == getRoot())
5711 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
5712 /// to record information about a use.
5719 /// operator< - Sort Memos by User.
5720 bool operator<(const UseMemo &L, const UseMemo &R) {
5721 return (intptr_t)L.User < (intptr_t)R.User;
5725 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
5726 /// uses of other values produced by From.getNode() alone. The same value
5727 /// may appear in both the From and To list. The Deleted vector is
5728 /// handled the same way as for ReplaceAllUsesWith.
5729 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
5732 // Handle the simple, trivial case efficiently.
5734 return ReplaceAllUsesOfValueWith(*From, *To);
5736 // Read up all the uses and make records of them. This helps
5737 // processing new uses that are introduced during the
5738 // replacement process.
5739 SmallVector<UseMemo, 4> Uses;
5740 for (unsigned i = 0; i != Num; ++i) {
5741 unsigned FromResNo = From[i].getResNo();
5742 SDNode *FromNode = From[i].getNode();
5743 for (SDNode::use_iterator UI = FromNode->use_begin(),
5744 E = FromNode->use_end(); UI != E; ++UI) {
5745 SDUse &Use = UI.getUse();
5746 if (Use.getResNo() == FromResNo) {
5747 UseMemo Memo = { *UI, i, &Use };
5748 Uses.push_back(Memo);
5753 // Sort the uses, so that all the uses from a given User are together.
5754 std::sort(Uses.begin(), Uses.end());
5756 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
5757 UseIndex != UseIndexEnd; ) {
5758 // We know that this user uses some value of From. If it is the right
5759 // value, update it.
5760 SDNode *User = Uses[UseIndex].User;
5762 // This node is about to morph, remove its old self from the CSE maps.
5763 RemoveNodeFromCSEMaps(User);
5765 // The Uses array is sorted, so all the uses for a given User
5766 // are next to each other in the list.
5767 // To help reduce the number of CSE recomputations, process all
5768 // the uses of this user that we can find this way.
5770 unsigned i = Uses[UseIndex].Index;
5771 SDUse &Use = *Uses[UseIndex].Use;
5775 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
5777 // Now that we have modified User, add it back to the CSE maps. If it
5778 // already exists there, recursively merge the results together.
5779 AddModifiedNodeToCSEMaps(User);
5783 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
5784 /// based on their topological order. It returns the maximum id, and re-links
5785 /// the nodes in the AllNodes list so that they appear in topological order.
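///
/// After this runs, a forward walk over the node list visits each node after
/// all of its operands and before any of its users (sketch; 'visit' is a
/// hypothetical per-node callback and 'DAG' the current SelectionDAG):
/// \code
///   DAG.AssignTopologicalOrder();
///   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
///        E = DAG.allnodes_end(); I != E; ++I)
///     visit(&*I);
/// \endcode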
5786 unsigned SelectionDAG::AssignTopologicalOrder() {
5788 unsigned DAGSize = 0;
5790 // SortedPos tracks the progress of the algorithm. Nodes before it are
5791 // sorted, nodes after it are unsorted. When the algorithm completes
5792 // it is at the end of the list.
5793 allnodes_iterator SortedPos = allnodes_begin();
5795 // Visit all the nodes. Move nodes with no operands to the front of
5796 // the list immediately. Annotate nodes that do have operands with their
5797 // operand count. Before we do this, the Node Id fields of the nodes
5798 // may contain arbitrary values. After, the Node Id fields for nodes
5799 // before SortedPos will contain the topological sort index, and the
5800 // Node Id fields for nodes at SortedPos and after will contain the
5801 // count of outstanding operands.
5802 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
5805 unsigned Degree = N->getNumOperands();
5807 // A node with no operands; add it to the result array immediately.
5808 N->setNodeId(DAGSize++);
5809 allnodes_iterator Q = N;
5811 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
5812 assert(SortedPos != AllNodes.end() && "Overran node list");
5815 // Temporarily use the Node Id as scratch space for the degree count.
5816 N->setNodeId(Degree);
5820 // Visit all the nodes. As we iterate, move nodes into sorted order,
5821 // such that by the time the end is reached all nodes will be sorted.
5822 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
5825 // N is in sorted position, so all its uses have one less operand
5826 // that needs to be sorted.
5827 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5830 unsigned Degree = P->getNodeId();
5831 assert(Degree != 0 && "Invalid node degree");
5834 // All of P's operands are sorted, so P may be sorted now.
5835 P->setNodeId(DAGSize++);
5837 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
5838 assert(SortedPos != AllNodes.end() && "Overran node list");
5841 // Update P's outstanding operand count.
5842 P->setNodeId(Degree);
5845 if (I == SortedPos) {
5848 dbgs() << "Overran sorted position:\n";
5851 llvm_unreachable(0);
5855 assert(SortedPos == AllNodes.end() &&
5856 "Topological sort incomplete!");
5857 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
5858 "First node in topological sort is not the entry token!");
5859 assert(AllNodes.front().getNodeId() == 0 &&
5860 "First node in topological sort has non-zero id!");
5861 assert(AllNodes.front().getNumOperands() == 0 &&
5862 "First node in topological sort has operands!");
5863 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
5864 "Last node in topologic sort has unexpected id!");
5865 assert(AllNodes.back().use_empty() &&
5866 "Last node in topologic sort has users!");
5867 assert(DAGSize == allnodes_size() && "Node count mismatch!");
5871 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
5872 /// value is produced by SD.
5873 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
5874 DbgInfo->add(DB, SD, isParameter);
5876 SD->setHasDebugValue(true);
5879 /// TransferDbgValues - Transfer SDDbgValues.
5880 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
5881 if (From == To || !From.getNode()->getHasDebugValue())
5883 SDNode *FromNode = From.getNode();
5884 SDNode *ToNode = To.getNode();
5885 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
5886 SmallVector<SDDbgValue *, 2> ClonedDVs;
5887 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
5889 SDDbgValue *Dbg = *I;
5890 if (Dbg->getKind() == SDDbgValue::SDNODE) {
5891 SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
5892 Dbg->getOffset(), Dbg->getDebugLoc(),
5894 ClonedDVs.push_back(Clone);
5897 for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
5898 E = ClonedDVs.end(); I != E; ++I)
5899 AddDbgValue(*I, ToNode, false);
5902 //===----------------------------------------------------------------------===//
5904 //===----------------------------------------------------------------------===//
5906 HandleSDNode::~HandleSDNode() {
5910 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
5911 DebugLoc DL, const GlobalValue *GA,
5912 EVT VT, int64_t o, unsigned char TF)
5913 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
5917 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
5918 EVT memvt, MachineMemOperand *mmo)
5919 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
5920 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
5921 MMO->isNonTemporal(), MMO->isInvariant());
5922 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
5923 assert(isNonTemporal() == MMO->isNonTemporal() &&
5924 "Non-temporal encoding error!");
5925 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
5928 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
5929 const SDValue *Ops, unsigned NumOps, EVT memvt,
5930 MachineMemOperand *mmo)
5931 : SDNode(Opc, Order, dl, VTs, Ops, NumOps),
5932 MemoryVT(memvt), MMO(mmo) {
5933 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
5934 MMO->isNonTemporal(), MMO->isInvariant());
5935 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
5936 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
5939 /// Profile - Gather unique data for the node.
5941 void SDNode::Profile(FoldingSetNodeID &ID) const {
5942 AddNodeIDNode(ID, this);
5947 std::vector<EVT> VTs;
5950 VTs.reserve(MVT::LAST_VALUETYPE);
5951 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
5952 VTs.push_back(MVT((MVT::SimpleValueType)i));
5957 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
5958 static ManagedStatic<EVTArray> SimpleVTArray;
5959 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
5961 /// getValueTypeList - Return a pointer to the specified value type.
5963 const EVT *SDNode::getValueTypeList(EVT VT) {
5964 if (VT.isExtended()) {
5965 sys::SmartScopedLock<true> Lock(*VTMutex);
5966 return &(*EVTs->insert(VT).first);
5968 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
5969 "Value type out of range!");
5970 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
5974 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
5975 /// indicated value. This method ignores uses of other values defined by this
5977 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
5978 assert(Value < getNumValues() && "Bad value!");
5980 // TODO: Only iterate over uses of a given value of the node
5981 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
5982 if (UI.getUse().getResNo() == Value) {
5989 // Found exactly the right number of uses?
5994 /// hasAnyUseOfValue - Return true if there is any use of the indicated
5995 /// value. This method ignores uses of other values defined by this operation.
5996 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
5997 assert(Value < getNumValues() && "Bad value!");
5999 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
6000 if (UI.getUse().getResNo() == Value)
6007 /// isOnlyUserOf - Return true if this node is the only user of N.
6009 bool SDNode::isOnlyUserOf(SDNode *N) const {
6011 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
6022 /// isOperandOf - Return true if this node is an operand of N.
6024 bool SDValue::isOperandOf(SDNode *N) const {
6025 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6026 if (*this == N->getOperand(i))
6031 bool SDNode::isOperandOf(SDNode *N) const {
6032 for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6033 if (this == N->OperandList[i].getNode())
6038 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6039 /// be a chain) reaches the specified operand without crossing any
6040 /// side-effecting instructions on any chain path. In practice, this looks
6041 /// through token factors and non-volatile loads. In order to remain efficient,
6042 /// this only looks a couple of nodes in; it does not do an exhaustive search.
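///
/// Sketch of a typical query (illustrative; 'St' and 'Ld' are assumed to be
/// a StoreSDNode* and a LoadSDNode* already in hand): check whether the
/// store's incoming chain can see the load's output chain without crossing a
/// side-effecting node:
/// \code
///   bool Safe = St->getChain().reachesChainWithoutSideEffects(SDValue(Ld, 1));
/// \endcode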
6043 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6044 unsigned Depth) const {
6045 if (*this == Dest) return true;
6047 // Don't search too deeply, we just want to be able to see through
6048 // TokenFactor's etc.
6049 if (Depth == 0) return false;
6051 // If this is a token factor, all inputs to the TF happen in parallel. If any
6052 // of the operands of the TF does not reach dest, then we cannot do the xform.
6053 if (getOpcode() == ISD::TokenFactor) {
6054 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6055 if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6060 // Loads don't have side effects, look through them.
6061 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6062 if (!Ld->isVolatile())
6063 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
6068 /// hasPredecessor - Return true if N is a predecessor of this node.
6069 /// N is either an operand of this node, or can be reached by recursively
6070 /// traversing up the operands.
6071 /// NOTE: This is an expensive method. Use it carefully.
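///
/// A common use is a legality check before adding an operand edge, to avoid
/// creating a cycle (sketch; 'User' and 'Op' are assumed to exist):
/// \code
///   // If User is already reachable from Op through operand edges, making Op
///   // an operand of User would form a cycle.
///   if (!Op.getNode()->hasPredecessor(User))
///     /* safe to use Op as an operand of User */;
/// \endcode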
6072 bool SDNode::hasPredecessor(const SDNode *N) const {
6073 SmallPtrSet<const SDNode *, 32> Visited;
6074 SmallVector<const SDNode *, 16> Worklist;
6075 return hasPredecessorHelper(N, Visited, Worklist);
6079 SDNode::hasPredecessorHelper(const SDNode *N,
6080 SmallPtrSet<const SDNode *, 32> &Visited,
6081 SmallVectorImpl<const SDNode *> &Worklist) const {
6082 if (Visited.empty()) {
6083 Worklist.push_back(this);
6085 // Take a look in the visited set. If we've already encountered this node
6086 // we needn't search further.
6087 if (Visited.count(N))
6091 // Haven't visited N yet. Continue the search.
6092 while (!Worklist.empty()) {
6093 const SDNode *M = Worklist.pop_back_val();
6094 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6095 SDNode *Op = M->getOperand(i).getNode();
6096 if (Visited.insert(Op))
6097 Worklist.push_back(Op);
6106 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6107 assert(Num < NumOperands && "Invalid child # of SDNode!");
6108 return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
6111 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6112 assert(N->getNumValues() == 1 &&
6113 "Can't unroll a vector with multiple results!");
6115 EVT VT = N->getValueType(0);
6116 unsigned NE = VT.getVectorNumElements();
6117 EVT EltVT = VT.getVectorElementType();
6120 SmallVector<SDValue, 8> Scalars;
6121 SmallVector<SDValue, 4> Operands(N->getNumOperands());
6123 // If ResNE is 0, fully unroll the vector op.
6126 else if (NE > ResNE)
6130 for (i= 0; i != NE; ++i) {
6131 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6132 SDValue Operand = N->getOperand(j);
6133 EVT OperandVT = Operand.getValueType();
6134 if (OperandVT.isVector()) {
6135 // A vector operand; extract a single element.
6136 const TargetLowering *TLI = TM.getTargetLowering();
6137 EVT OperandEltVT = OperandVT.getVectorElementType();
6138 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6141 getConstant(i, TLI->getVectorIdxTy()));
6143 // A scalar operand; just use it as is.
6144 Operands[j] = Operand;
6148 switch (N->getOpcode()) {
6150 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6151 &Operands[0], Operands.size()));
6154 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT,
6155 &Operands[0], Operands.size()));
6162 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6163 getShiftAmountOperand(Operands[0].getValueType(),
6166 case ISD::SIGN_EXTEND_INREG:
6167 case ISD::FP_ROUND_INREG: {
6168 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6169 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6171 getValueType(ExtVT)));
6176 for (; i < ResNE; ++i)
6177 Scalars.push_back(getUNDEF(EltVT));
6179 return getNode(ISD::BUILD_VECTOR, dl,
6180 EVT::getVectorVT(*getContext(), EltVT, ResNE),
6181 &Scalars[0], Scalars.size());
6185 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
6186 /// location that is 'Dist' units away from the location that the 'Base' load
6187 /// is loading from.
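///
/// Sketch (illustrative; 'Other' and 'Base' are assumed to be LoadSDNode*):
/// with two i32 loads, this asks whether 'Other' reads the four bytes
/// immediately following the bytes read by 'Base':
/// \code
///   bool Adjacent = DAG.isConsecutiveLoad(Other, Base, /*Bytes=*/4, /*Dist=*/1);
/// \endcode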
6188 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
6189 unsigned Bytes, int Dist) const {
6190 if (LD->getChain() != Base->getChain())
6192 EVT VT = LD->getValueType(0);
6193 if (VT.getSizeInBits() / 8 != Bytes)
6196 SDValue Loc = LD->getOperand(1);
6197 SDValue BaseLoc = Base->getOperand(1);
6198 if (Loc.getOpcode() == ISD::FrameIndex) {
6199 if (BaseLoc.getOpcode() != ISD::FrameIndex)
6201 const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
6202 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
6203 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6204 int FS = MFI->getObjectSize(FI);
6205 int BFS = MFI->getObjectSize(BFI);
6206 if (FS != BFS || FS != (int)Bytes) return false;
6207 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6211 if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
6212 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
6215 const GlobalValue *GV1 = NULL;
6216 const GlobalValue *GV2 = NULL;
6217 int64_t Offset1 = 0;
6218 int64_t Offset2 = 0;
6219 const TargetLowering *TLI = TM.getTargetLowering();
6220 bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6221 bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6222 if (isGA1 && isGA2 && GV1 == GV2)
6223 return Offset1 == (Offset2 + Dist*Bytes);
6228 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
6229 /// it cannot be inferred.
6230 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
6231 // If this is a GlobalAddress + cst, return the alignment.
6232 const GlobalValue *GV;
6233 int64_t GVOffset = 0;
6234 const TargetLowering *TLI = TM.getTargetLowering();
6235 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
6236 unsigned PtrWidth = TLI->getPointerTy().getSizeInBits();
6237 APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
6238 llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
6239 TLI->getDataLayout());
6240 unsigned AlignBits = KnownZero.countTrailingOnes();
6241 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
6243 return MinAlign(Align, GVOffset);
6246 // If this is a direct reference to a stack slot, use information about the
6247 // stack slot's alignment.
6248 int FrameIdx = 1 << 31;
6249 int64_t FrameOffset = 0;
6250 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6251 FrameIdx = FI->getIndex();
6252 } else if (isBaseWithConstantOffset(Ptr) &&
6253 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6255 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6256 FrameOffset = Ptr.getConstantOperandVal(1);
6259 if (FrameIdx != (1 << 31)) {
6260 const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6261 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6269 // getAddressSpace - Return the address space this GlobalAddress belongs to.
6270 unsigned GlobalAddressSDNode::getAddressSpace() const {
6271 return getGlobal()->getType()->getAddressSpace();
6275 Type *ConstantPoolSDNode::getType() const {
6276 if (isMachineConstantPoolEntry())
6277 return Val.MachineCPVal->getType();
6278 return Val.ConstVal->getType();
6281 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6283 unsigned &SplatBitSize,
6285 unsigned MinSplatBits,
6287 EVT VT = getValueType(0);
6288 assert(VT.isVector() && "Expected a vector type");
6289 unsigned sz = VT.getSizeInBits();
6290 if (MinSplatBits > sz)
6293 SplatValue = APInt(sz, 0);
6294 SplatUndef = APInt(sz, 0);
6296 // Get the bits. Bits with undefined values (when the corresponding element
6297 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6298 // in SplatValue. If any of the values are not constant, give up and return
6300 unsigned int nOps = getNumOperands();
6301 assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6302 unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6304 for (unsigned j = 0; j < nOps; ++j) {
6305 unsigned i = isBigEndian ? nOps-1-j : j;
6306 SDValue OpVal = getOperand(i);
6307 unsigned BitPos = j * EltBitSize;
6309 if (OpVal.getOpcode() == ISD::UNDEF)
6310 SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6311 else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6312 SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6313 zextOrTrunc(sz) << BitPos;
6314 else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6315 SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
6320 // The build_vector is all constants or undefs. Find the smallest element
6321 // size that splats the vector.
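// For example (illustrative), a <4 x i8> build_vector of <1, 1, 1, 1> packs
// into the 32-bit value 0x01010101; each halving step below still matches,
// so it is reported as an 8-bit splat of value 1.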
6323 HasAnyUndefs = (SplatUndef != 0);
6326 unsigned HalfSize = sz / 2;
6327 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6328 APInt LowValue = SplatValue.trunc(HalfSize);
6329 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6330 APInt LowUndef = SplatUndef.trunc(HalfSize);
6332 // If the two halves do not match (ignoring undef bits), stop here.
6333 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6334 MinSplatBits > HalfSize)
6337 SplatValue = HighValue | LowValue;
6338 SplatUndef = HighUndef & LowUndef;
6347 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6348 // Find the first non-undef value in the shuffle mask.
6350 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6353 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6355 // Make sure all remaining elements are either undef or the same as the first
6357 for (int Idx = Mask[i]; i != e; ++i)
6358 if (Mask[i] >= 0 && Mask[i] != Idx)
6364 static void checkForCyclesHelper(const SDNode *N,
6365 SmallPtrSet<const SDNode*, 32> &Visited,
6366 SmallPtrSet<const SDNode*, 32> &Checked) {
6367 // If this node has already been checked, don't check it again.
6368 if (Checked.count(N))
6371 // If a node has already been visited on this depth-first walk, reject it as
6373 if (!Visited.insert(N)) {
6374 dbgs() << "Offending node:\n";
6376 errs() << "Detected cycle in SelectionDAG\n";
6380 for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6381 checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
6388 void llvm::checkForCycles(const llvm::SDNode *N) {
6390 assert(N && "Checking nonexistent SDNode");
6391 SmallPtrSet<const SDNode*, 32> visited;
6392 SmallPtrSet<const SDNode*, 32> checked;
6393 checkForCyclesHelper(N, visited, checked);
6397 void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
6398 checkForCycles(DAG->getRoot().getNode());