//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // bits are the same.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
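
// Worked example: given a legalized (v4i32 (BUILD_VECTOR undef, -1, -1, -1)),
// the scan above skips the leading undef, takes NotZero = -1 (whose 32
// trailing ones satisfy the element-size check), and accepts the node because
// every remaining operand is either that same constant or undef.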

/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
    if (!CN->isNullValue())
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
    if (!CFPN->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
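
// Worked example, using the CondCode bit encoding from ISDOpcodes.h
// (E = bit 0, G = bit 1, L = bit 2): swapping the operands of SETOGT (G)
// yields SETOLT (L), and SETULE (U|L|E) yields SETUGE (U|G|E), while codes
// such as SETOEQ (E only) and SETONE (L|G) map to themselves.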

/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
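
// Worked example: for an integer compare, SETEQ ^ 7 flips the L, G and E bits
// and yields SETNE; for a floating-point compare, SETOLT ^ 15 flips all four
// condition bits and yields SETUGE, the IEEE inverse (which must also be true
// on unordered inputs).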

/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if the result is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
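
// Worked example: SETOLT | SETOGT ORs the L and G bits into SETONE ("ordered
// and unequal"), while the integer pair SETUGT | SETULT ORs to SETUNE, which
// the canonicalization above rewrites to SETNE.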

/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns zero if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
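
// Worked example: SETOLE & SETOGE keeps only the shared E bit, giving SETOEQ,
// while the integer pair SETUGE & SETULE ANDs to SETUEQ, which the switch
// above canonicalizes to SETEQ.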

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant:
    ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
    break;
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leafs with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}
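
// The returned value packs its fields as: bits 1..0 = ConvType,
// bits 4..2 = AM, bit 5 = volatile, bit 6 = non-temporal, bit 7 = invariant.
// For example, a volatile access with AM == 1 and ConvType == 0 encodes as
// (1 << 2) | (1 << 5) == 0x24.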

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, 0);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops, unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

#ifndef NDEBUG
/// VerifyNodeCommon - Sanity check the given node. Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}

/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that are
  // not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}

/// VerifyMachineNode - Sanity check the given MachineNode. Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}
#endif // NDEBUG

/// getEVTAlignment - Compute the default alignment value for the
/// given value type.
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TSI(*tm.getSelectionDAGInfo()), TTI(0), OptLevel(OL),
    EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
    Root(getEntryNode()), UpdateListeners(0) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf, const TargetTransformInfo *tti) {
  MF = &mf;
  TTI = tti;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(0));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(0));

  EntryNode.UseList = 0;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}
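
// Worked example: zero-extending in-register from i8 within an i32 value
// builds (AND Op, 0xFF), i.e. the mask keeps the low VT.getSizeInBits() bits
// of Op and clears the rest.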

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
}
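
// The assert above accepts exactly the values that survive a round trip
// through EltVT: for an i8 type, 0xFF and 0xFFFFFFFFFFFFFF80 (-128) pass
// because all bits above bit 7 are a pure zero- or sign-extension, while
// 0x1FF fails since ((int64_t)0x1FF >> 8) + 1 == 2.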

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT) {
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  const TargetLowering *TLI = TM.getTargetLowering();

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(Elt);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&V);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME SDLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
           EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TM.getTargetLowering()->getPointerTy().getSizeInBits();
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(FI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), 0, 0);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MBB);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (CondCodeNodes[Cond] == 0) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}
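
// Worked example: with NElts == 4 and M == {0, 5, 2, 7}, commuting produces
// M == {4, 1, 6, 3}; indices that pointed into the old N1 now point into N2
// (plus NElts) and vice versa, while negative (undef) entries are left alone.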

SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If Identity shuffle return that node.
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
  }
  if (Identity && NElts)
    return N1;

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
                                            dl.getDebugLoc(), N1, N2,
                                            MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
                                                           dl.getDebugLoc(),
                                                           Ops, 5, Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
  ID.AddInteger(RegNo);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
  ID.AddPointer(RegMask);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
                                                dl.getDebugLoc(), Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(V);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MD);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, SDLoc(Op), ShTy, Op);
}
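
// For example, if the target declares an i8 shift-amount type for this LHS
// and Op is an i32 amount, the amount is truncated to i8; an amount narrower
// than the desired type is zero-extended instead. Vector shift amounts are
// returned unchanged.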

/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned StackAlign =
    std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  const DataLayout *TD = TLI->getDataLayout();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, SDLoc dl) {
  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: {
    const TargetLowering *TLI = TM.getTargetLowering();
    TargetLowering::BooleanContent Cnt = TLI->getBooleanContents(VT.isVector());
    return getConstant(
        Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
  }

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
      }
    }
  }
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
      MVT CompVT = N1.getValueType().getSimpleVT();
      if (!TM.getTargetLowering()->isCondCodeLegal(SwappedCond, CompVT))
        return SDValue();

      return getSetCC(dl, VT, N2, N1, SwappedCond);
    }
  }

  // Could not fold it.
  return SDValue();
}

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  // This predicate is not safe for vector operations.
  if (Op.getValueType().isVector())
    return false;

  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
                                     unsigned Depth) const {
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
1683 /// ComputeMaskedBits - Determine which of the bits specified in Mask are
1684 /// known to be either zero or one and return them in the KnownZero/KnownOne
1685 /// bitsets. This code only analyzes bits in Mask, in order to short-circuit
1687 void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
1688 APInt &KnownOne, unsigned Depth) const {
1689 const TargetLowering *TLI = TM.getTargetLowering();
1690 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1692 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
1694 return; // Limit search depth.
1696 APInt KnownZero2, KnownOne2;
1698 switch (Op.getOpcode()) {
1700 // We know all of the bits for a constant!
1701 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
1702 KnownZero = ~KnownOne;
1705 // If either the LHS or the RHS are Zero, the result is zero.
1706 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1707 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1708 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1709 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1711 // Output known-1 bits are only known if set in both the LHS & RHS.
1712 KnownOne &= KnownOne2;
1713 // Output known-0 are known to be clear if zero in either the LHS | RHS.
1714 KnownZero |= KnownZero2;
1717 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1718 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1719 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1720 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1722 // Output known-0 bits are only known if clear in both the LHS & RHS.
1723 KnownZero &= KnownZero2;
1724 // Output known-1 are known to be set if set in either the LHS | RHS.
1725 KnownOne |= KnownOne2;
1728 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1729 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1730 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1731 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1733 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1734 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
1735 // Output known-1 bits are known to be set if set in exactly one of the LHS and RHS.
1736 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
1737 KnownZero = KnownZeroOut;
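// Worked example (illustrative): if the LHS is known to be 0b?100 and the
// RHS is known to be 0b??10 ('?' = unknown), then bit 0 is 0^0 (known
// zero), bit 1 is 0^1 (known one), and bits 2-3 involve an unknown input,
// so the formulas above yield KnownZero = 0b0001 and KnownOne = 0b0010.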
1741 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1742 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1743 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1744 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1746 // If low bits are zero in either operand, output low known-0 bits.
1747 // Also compute a conservative estimate for high known-0 bits.
1748 // More trickiness is possible, but this is sufficient for the
1749 // interesting case of alignment computation.
1750 KnownOne.clearAllBits();
1751 unsigned TrailZ = KnownZero.countTrailingOnes() +
1752 KnownZero2.countTrailingOnes();
1753 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
1754 KnownZero2.countLeadingOnes(),
1755 BitWidth) - BitWidth;
1757 TrailZ = std::min(TrailZ, BitWidth);
1758 LeadZ = std::min(LeadZ, BitWidth);
1759 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
1760 APInt::getHighBitsSet(BitWidth, LeadZ);
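// Worked example (illustrative): if the LHS is known to have 3 trailing
// zero bits and the RHS 2, the product has at least 5 trailing zeros;
// this is the alignment-style fact the TrailZ computation above captures.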
1764 // For the purposes of computing leading zeros we can conservatively
1765 // treat a udiv as a logical right shift by the power of 2 known to
1766 // be less than the denominator.
1767 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1768 unsigned LeadZ = KnownZero2.countLeadingOnes();
1770 KnownOne2.clearAllBits();
1771 KnownZero2.clearAllBits();
1772 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1773 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
1774 if (RHSUnknownLeadingOnes != BitWidth)
1775 LeadZ = std::min(BitWidth,
1776 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
1778 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
1782 ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
1783 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1784 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1785 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1787 // Only known if known in both the LHS and RHS.
1788 KnownOne &= KnownOne2;
1789 KnownZero &= KnownZero2;
1791 case ISD::SELECT_CC:
1792 ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
1793 ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
1794 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1795 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1797 // Only known if known in both the LHS and RHS.
1798 KnownOne &= KnownOne2;
1799 KnownZero &= KnownZero2;
1807 if (Op.getResNo() != 1)
1809 // The boolean result conforms to getBooleanContents. Fall through.
1811 // If we know the result of a setcc has the top bits zero, use this info.
1812 if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
1813 TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
1814 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
1817 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
1818 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1819 unsigned ShAmt = SA->getZExtValue();
1821 // If the shift count is an invalid immediate, don't do anything.
1822 if (ShAmt >= BitWidth)
1825 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1826 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1827 KnownZero <<= ShAmt;
1829 // low bits known zero.
1830 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
1834 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1835 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1836 unsigned ShAmt = SA->getZExtValue();
1838 // If the shift count is an invalid immediate, don't do anything.
1839 if (ShAmt >= BitWidth)
1842 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1843 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1844 KnownZero = KnownZero.lshr(ShAmt);
1845 KnownOne = KnownOne.lshr(ShAmt);
1847 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
1848 KnownZero |= HighBits; // High bits known zero.
1852 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1853 unsigned ShAmt = SA->getZExtValue();
1855 // If the shift count is an invalid immediate, don't do anything.
1856 if (ShAmt >= BitWidth)
1859 // If any of the demanded bits are produced by the sign extension, we also
1860 // demand the input sign bit.
1861 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
1863 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1864 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1865 KnownZero = KnownZero.lshr(ShAmt);
1866 KnownOne = KnownOne.lshr(ShAmt);
1868 // Handle the sign bits.
1869 APInt SignBit = APInt::getSignBit(BitWidth);
1870 SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask.
1872 if (KnownZero.intersects(SignBit)) {
1873 KnownZero |= HighBits; // New bits are known zero.
1874 } else if (KnownOne.intersects(SignBit)) {
1875 KnownOne |= HighBits; // New bits are known one.
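// Worked example (illustrative): for (sra i8 X, 4) where X's sign bit is
// known zero, the four replicated high bits are also known zero, so
// KnownZero gains HighBits = 0b11110000 via the first branch above.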
1879 case ISD::SIGN_EXTEND_INREG: {
1880 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1881 unsigned EBits = EVT.getScalarType().getSizeInBits();
1883 // Sign extension. Compute the demanded bits in the result that are not
1884 // present in the input.
1885 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
1887 APInt InSignBit = APInt::getSignBit(EBits);
1888 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
1890 // If the sign extended bits are demanded, we know that the sign bit is demanded.
1892 InSignBit = InSignBit.zext(BitWidth);
1893 if (NewBits.getBoolValue())
1894 InputDemandedBits |= InSignBit;
1896 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1897 KnownOne &= InputDemandedBits;
1898 KnownZero &= InputDemandedBits;
1899 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1901 // If the sign bit of the input is known set or clear, then we know the
1902 // top bits of the result.
1903 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
1904 KnownZero |= NewBits;
1905 KnownOne &= ~NewBits;
1906 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
1907 KnownOne |= NewBits;
1908 KnownZero &= ~NewBits;
1909 } else { // Input sign bit unknown
1910 KnownZero &= ~NewBits;
1911 KnownOne &= ~NewBits;
1916 case ISD::CTTZ_ZERO_UNDEF:
1918 case ISD::CTLZ_ZERO_UNDEF:
1920 unsigned LowBits = Log2_32(BitWidth)+1;
1921 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1922 KnownOne.clearAllBits();
1926 LoadSDNode *LD = cast<LoadSDNode>(Op);
1927 // If this is a ZEXTLoad and we are looking at the loaded value.
1928 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
1929 EVT VT = LD->getMemoryVT();
1930 unsigned MemBits = VT.getScalarType().getSizeInBits();
1931 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
1932 } else if (const MDNode *Ranges = LD->getRanges()) {
1933 computeMaskedBitsLoad(*Ranges, KnownZero);
1937 case ISD::ZERO_EXTEND: {
1938 EVT InVT = Op.getOperand(0).getValueType();
1939 unsigned InBits = InVT.getScalarType().getSizeInBits();
1940 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
1941 KnownZero = KnownZero.trunc(InBits);
1942 KnownOne = KnownOne.trunc(InBits);
1943 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1944 KnownZero = KnownZero.zext(BitWidth);
1945 KnownOne = KnownOne.zext(BitWidth);
1946 KnownZero |= NewBits;
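// Worked example (illustrative): zero-extending i8 to i32 makes bits 31..8
// (NewBits) known zero, while whatever was known about the original 8 bits
// survives the trunc/zext resizing of the APInts above.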
1949 case ISD::SIGN_EXTEND: {
1950 EVT InVT = Op.getOperand(0).getValueType();
1951 unsigned InBits = InVT.getScalarType().getSizeInBits();
1952 APInt InSignBit = APInt::getSignBit(InBits);
1953 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
1955 KnownZero = KnownZero.trunc(InBits);
1956 KnownOne = KnownOne.trunc(InBits);
1957 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1959 // Note if the sign bit is known to be zero or one.
1960 bool SignBitKnownZero = KnownZero.isNegative();
1961 bool SignBitKnownOne = KnownOne.isNegative();
1962 assert(!(SignBitKnownZero && SignBitKnownOne) &&
1963 "Sign bit can't be known to be both zero and one!");
1965 KnownZero = KnownZero.zext(BitWidth);
1966 KnownOne = KnownOne.zext(BitWidth);
1968 // If the sign bit is known zero or one, the top bits match.
1969 if (SignBitKnownZero)
1970 KnownZero |= NewBits;
1971 else if (SignBitKnownOne)
1972 KnownOne |= NewBits;
1975 case ISD::ANY_EXTEND: {
1976 EVT InVT = Op.getOperand(0).getValueType();
1977 unsigned InBits = InVT.getScalarType().getSizeInBits();
1978 KnownZero = KnownZero.trunc(InBits);
1979 KnownOne = KnownOne.trunc(InBits);
1980 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1981 KnownZero = KnownZero.zext(BitWidth);
1982 KnownOne = KnownOne.zext(BitWidth);
1985 case ISD::TRUNCATE: {
1986 EVT InVT = Op.getOperand(0).getValueType();
1987 unsigned InBits = InVT.getScalarType().getSizeInBits();
1988 KnownZero = KnownZero.zext(InBits);
1989 KnownOne = KnownOne.zext(InBits);
1990 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1991 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1992 KnownZero = KnownZero.trunc(BitWidth);
1993 KnownOne = KnownOne.trunc(BitWidth);
1996 case ISD::AssertZext: {
1997 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1998 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
1999 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2000 KnownZero |= (~InMask);
2001 KnownOne &= (~KnownZero);
2005 // All bits are zero except the low bit.
2006 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2010 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
2011 // We know that the top bits of C-X are clear if X contains fewer bits
2012 // than C (i.e. no wrap-around can happen). For example, 20-X is
2013 // positive if we can prove that X is >= 0 and < 16.
2014 if (CLHS->getAPIntValue().isNonNegative()) {
2015 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2016 // NLZ can't be BitWidth with no sign bit
2017 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2018 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2020 // If all of the MaskV bits are known to be zero, then we know the
2021 // output top bits are zero, because we now know that the output is positive.
2023 if ((KnownZero2 & MaskV) == MaskV) {
2024 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2025 // Top bits known zero.
2026 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
2034 // Output low known-0 bits are the low bits known to be clear in both the
2035 // LHS and the RHS. For example, 8+(X<<3) is known to have the
2036 // low 3 bits clear.
2037 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2038 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
2039 unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
2041 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2042 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
2043 KnownZeroOut = std::min(KnownZeroOut,
2044 KnownZero2.countTrailingOnes());
2046 if (Op.getOpcode() == ISD::ADD) {
2047 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
2051 // With ADDE, a carry bit may be added in, so we can only use this
2052 // information if we know (at least) that the low two bits are clear. We
2053 // then return to the caller that the low bit is unknown but that other bits are known zero.
2055 if (KnownZeroOut >= 2) // ADDE
2056 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
2060 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2061 const APInt &RA = Rem->getAPIntValue().abs();
2062 if (RA.isPowerOf2()) {
2063 APInt LowBits = RA - 1;
2064 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
2065 ComputeMaskedBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);
2067 // The low bits of the first operand are unchanged by the srem.
2068 KnownZero = KnownZero2 & LowBits;
2069 KnownOne = KnownOne2 & LowBits;
2071 // If the first operand is non-negative or has all low bits zero, then
2072 // the upper bits are all zero.
2073 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2074 KnownZero |= ~LowBits;
2076 // If the first operand is negative and not all low bits are zero, then
2077 // the upper bits are all one.
2078 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2079 KnownOne |= ~LowBits;
2080 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
2085 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2086 const APInt &RA = Rem->getAPIntValue();
2087 if (RA.isPowerOf2()) {
2088 APInt LowBits = (RA - 1);
2089 KnownZero |= ~LowBits;
2090 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne,Depth+1);
2091 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
2096 // Since the result is less than or equal to either operand, any leading
2097 // zero bits in either operand must also exist in the result.
2098 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2099 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2101 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2102 KnownZero2.countLeadingOnes());
2103 KnownOne.clearAllBits();
2104 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2107 case ISD::FrameIndex:
2108 case ISD::TargetFrameIndex:
2109 if (unsigned Align = InferPtrAlignment(Op)) {
2110 // The low bits are known zero if the pointer is aligned.
2111 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
2117 if (Op.getOpcode() < ISD::BUILTIN_OP_END)
2120 case ISD::INTRINSIC_WO_CHAIN:
2121 case ISD::INTRINSIC_W_CHAIN:
2122 case ISD::INTRINSIC_VOID:
2123 // Allow the target to implement this method for its nodes.
2124 TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2129 /// ComputeNumSignBits - Return the number of times the sign bit of the
2130 /// register is replicated into the other bits. We know that at least 1 bit
2131 /// is always equal to the sign bit (itself), but other cases can give us
2132 /// information. For example, immediately after an "SRA X, 2", we know that
2133 /// the top 3 bits are all equal to each other, so we return 3.
2134 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
2135 const TargetLowering *TLI = TM.getTargetLowering();
2136 EVT VT = Op.getValueType();
2137 assert(VT.isInteger() && "Invalid VT!");
2138 unsigned VTBits = VT.getScalarType().getSizeInBits();
2140 unsigned FirstAnswer = 1;
2143 return 1; // Limit search depth.
2145 switch (Op.getOpcode()) {
2147 case ISD::AssertSext:
2148 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2149 return VTBits-Tmp+1;
2150 case ISD::AssertZext:
2151 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2154 case ISD::Constant: {
2155 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2156 return Val.getNumSignBits();
2159 case ISD::SIGN_EXTEND:
2161 VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
2162 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2164 case ISD::SIGN_EXTEND_INREG:
2165 // Max of the input and what this extends.
2167 cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
2170 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2171 return std::max(Tmp, Tmp2);
2174 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2175 // SRA X, C -> adds C sign bits.
2176 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2177 Tmp += C->getZExtValue();
2178 if (Tmp > VTBits) Tmp = VTBits;
2182 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2183 // shl destroys sign bits.
2184 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2185 if (C->getZExtValue() >= VTBits || // Bad shift.
2186 C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
2187 return Tmp - C->getZExtValue();
2192 case ISD::XOR: // NOT is handled here.
2193 // Logical binary ops preserve the number of sign bits at the worst.
2194 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2196 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2197 FirstAnswer = std::min(Tmp, Tmp2);
2198 // We computed what we know about the sign bits as our first
2199 // answer. Now proceed to the generic code that uses
2200 // ComputeMaskedBits, and pick whichever answer is better.
2205 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2206 if (Tmp == 1) return 1; // Early out.
2207 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2208 return std::min(Tmp, Tmp2);
2216 if (Op.getResNo() != 1)
2218 // The boolean result conforms to getBooleanContents. Fall through.
2220 // If setcc returns 0/-1, all bits are sign bits.
2221 if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
2222 TargetLowering::ZeroOrNegativeOneBooleanContent)
2227 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2228 unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2231 // Handle rotate right by N like a rotate left by VTBits-N.
2231 if (Op.getOpcode() == ISD::ROTR)
2232 RotAmt = (VTBits-RotAmt) & (VTBits-1);
2234 // If we aren't rotating out all of the known-in sign bits, return the
2235 // number that are left. This handles rotl(sext(x), 1) for example.
2236 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2237 if (Tmp > RotAmt+1) return Tmp-RotAmt;
2241 // Add can have at most one carry bit. Thus we know that the output
2242 // is, at worst, one more bit than the inputs.
2243 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2244 if (Tmp == 1) return 1; // Early out.
2246 // Special case decrementing a value (ADD X, -1):
2247 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2248 if (CRHS->isAllOnesValue()) {
2249 APInt KnownZero, KnownOne;
2250 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2252 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits set.
2254 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2257 // If we are subtracting one from a positive number, there is no carry
2258 // out of the result.
2259 if (KnownZero.isNegative())
2263 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2264 if (Tmp2 == 1) return 1;
2265 return std::min(Tmp, Tmp2)-1;
2268 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2269 if (Tmp2 == 1) return 1;
2272 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2273 if (CLHS->isNullValue()) {
2274 APInt KnownZero, KnownOne;
2275 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2276 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits set.
2278 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2281 // If the input is known to be positive (the sign bit is known clear),
2282 // the output of the NEG has the same number of sign bits as the input.
2283 if (KnownZero.isNegative())
2286 // Otherwise, we treat this like a SUB.
2289 // Sub can have at most one carry bit. Thus we know that the output
2290 // is, at worst, one more bit than the inputs.
2291 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2292 if (Tmp == 1) return 1; // Early out.
2293 return std::min(Tmp, Tmp2)-1;
2295 // FIXME: it's tricky to do anything useful for this, but it is an important
2296 // case for targets like X86.
2300 // If we are looking at the loaded value of the SDNode.
2301 if (Op.getResNo() == 0) {
2302 // Handle LOADX separately here. EXTLOAD case will fallthrough.
2303 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2304 unsigned ExtType = LD->getExtensionType();
2307 case ISD::SEXTLOAD: // '17' bits known
2308 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2309 return VTBits-Tmp+1;
2310 case ISD::ZEXTLOAD: // '16' bits known
2311 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2317 // Allow the target to implement this method for its nodes.
2318 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2319 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2320 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2321 Op.getOpcode() == ISD::INTRINSIC_VOID) {
2322 unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, Depth);
2323 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2326 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2327 // use this information.
2328 APInt KnownZero, KnownOne;
2329 ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
2332 if (KnownZero.isNegative()) { // sign bit is 0
2334 } else if (KnownOne.isNegative()) { // sign bit is 1;
2341 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
2342 // the number of identical bits in the top of the input value.
2344 Mask <<= Mask.getBitWidth()-VTBits;
2345 // Return # leading zeros. We use 'min' here in case Val was zero before
2346 // shifting. We don't want to return '64' as for an i32 "0".
2347 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
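// Illustrative usage sketch (an assumption for exposition, not part of the
// original file): a combine that wants to know whether an i32 value 'X'
// would survive a round trip through i16 might check, with 'DAG' in scope:
//
//   if (DAG.ComputeNumSignBits(X) >= 17)
//     ...  // the top 17 bits of X agree, so X is sign-representable in i16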
2350 /// isBaseWithConstantOffset - Return true if the specified operand is an
2351 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
2352 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
2353 /// semantics as an ADD. This handles the equivalence:
2354 /// X|Cst == X+Cst iff X&Cst = 0.
2355 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
2356 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
2357 !isa<ConstantSDNode>(Op.getOperand(1)))
2360 if (Op.getOpcode() == ISD::OR &&
2361 !MaskedValueIsZero(Op.getOperand(0),
2362 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
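// Worked example (illustrative): if X = (shl Y, 4), the low four bits of X
// are known zero, so (or X, 12) computes exactly X + 12 and the OR above
// qualifies as a base-plus-constant-offset address.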
2369 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
2370 // If we're told that NaNs won't happen, assume they won't.
2371 if (getTarget().Options.NoNaNsFPMath)
2374 // If the value is a constant, we can obviously see if it is a NaN or not.
2375 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2376 return !C->getValueAPF().isNaN();
2378 // TODO: Recognize more cases here.
2383 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
2384 // If the value is a constant, we can obviously see if it is a zero or not.
2385 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2386 return !C->isZero();
2388 // TODO: Recognize more cases here.
2389 switch (Op.getOpcode()) {
2392 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2393 return !C->isNullValue();
2400 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
2401 // Check the obvious case.
2402 if (A == B) return true;
2404 // Check for negative and positive zero.
2405 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
2406 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
2407 if (CA->isZero() && CB->isZero()) return true;
2409 // Otherwise they may not be equal.
2413 /// getNode - Gets or creates the specified node.
2415 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
2416 FoldingSetNodeID ID;
2417 AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
2419 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2420 return SDValue(E, 0);
2422 SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
2423 DL.getDebugLoc(), getVTList(VT));
2424 CSEMap.InsertNode(N, IP);
2426 AllNodes.push_back(N);
2430 return SDValue(N, 0);
2433 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
2434 EVT VT, SDValue Operand) {
2435 // Constant fold unary operations with an integer constant operand.
2436 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
2437 const APInt &Val = C->getAPIntValue();
2440 case ISD::SIGN_EXTEND:
2441 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT);
2442 case ISD::ANY_EXTEND:
2443 case ISD::ZERO_EXTEND:
2445 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
2446 case ISD::UINT_TO_FP:
2447 case ISD::SINT_TO_FP: {
2448 APFloat apf(EVTToAPFloatSemantics(VT),
2449 APInt::getNullValue(VT.getSizeInBits()));
2450 (void)apf.convertFromAPInt(Val,
2451 Opcode==ISD::SINT_TO_FP,
2452 APFloat::rmNearestTiesToEven);
2453 return getConstantFP(apf, VT);
2456 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2457 return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
2458 else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2459 return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
2462 return getConstant(Val.byteSwap(), VT);
2464 return getConstant(Val.countPopulation(), VT);
2466 case ISD::CTLZ_ZERO_UNDEF:
2467 return getConstant(Val.countLeadingZeros(), VT);
2469 case ISD::CTTZ_ZERO_UNDEF:
2470 return getConstant(Val.countTrailingZeros(), VT);
2474 // Constant fold unary operations with a floating point constant operand.
2475 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
2476 APFloat V = C->getValueAPF(); // make copy
2480 return getConstantFP(V, VT);
2483 return getConstantFP(V, VT);
2485 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
2486 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2487 return getConstantFP(V, VT);
2491 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
2492 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2493 return getConstantFP(V, VT);
2497 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
2498 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2499 return getConstantFP(V, VT);
2502 case ISD::FP_EXTEND: {
2504 // This can return overflow, underflow, or inexact; we don't care.
2505 // FIXME need to be more flexible about rounding mode.
2506 (void)V.convert(EVTToAPFloatSemantics(VT),
2507 APFloat::rmNearestTiesToEven, &ignored);
2508 return getConstantFP(V, VT);
2510 case ISD::FP_TO_SINT:
2511 case ISD::FP_TO_UINT: {
2514 assert(integerPartWidth >= 64);
2515 // FIXME need to be more flexible about rounding mode.
2516 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2517 Opcode==ISD::FP_TO_SINT,
2518 APFloat::rmTowardZero, &ignored);
2519 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
2521 APInt api(VT.getSizeInBits(), x);
2522 return getConstant(api, VT);
2525 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2526 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
2527 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2528 return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
2533 unsigned OpOpcode = Operand.getNode()->getOpcode();
2535 case ISD::TokenFactor:
2536 case ISD::MERGE_VALUES:
2537 case ISD::CONCAT_VECTORS:
2538 return Operand; // Factor, merge or concat of one node? No need.
2539 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
2540 case ISD::FP_EXTEND:
2541 assert(VT.isFloatingPoint() &&
2542 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
2543 if (Operand.getValueType() == VT) return Operand; // noop conversion.
2544 assert((!VT.isVector() ||
2545 VT.getVectorNumElements() ==
2546 Operand.getValueType().getVectorNumElements()) &&
2547 "Vector element count mismatch!");
2548 if (Operand.getOpcode() == ISD::UNDEF)
2549 return getUNDEF(VT);
2551 case ISD::SIGN_EXTEND:
2552 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2553 "Invalid SIGN_EXTEND!");
2554 if (Operand.getValueType() == VT) return Operand; // noop extension
2555 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2556 "Invalid sext node, dst < src!");
2557 assert((!VT.isVector() ||
2558 VT.getVectorNumElements() ==
2559 Operand.getValueType().getVectorNumElements()) &&
2560 "Vector element count mismatch!");
2561 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
2562 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2563 else if (OpOpcode == ISD::UNDEF)
2564 // sext(undef) = 0, because the top bits will all be the same.
2565 return getConstant(0, VT);
2567 case ISD::ZERO_EXTEND:
2568 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2569 "Invalid ZERO_EXTEND!");
2570 if (Operand.getValueType() == VT) return Operand; // noop extension
2571 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2572 "Invalid zext node, dst < src!");
2573 assert((!VT.isVector() ||
2574 VT.getVectorNumElements() ==
2575 Operand.getValueType().getVectorNumElements()) &&
2576 "Vector element count mismatch!");
2577 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
2578 return getNode(ISD::ZERO_EXTEND, DL, VT,
2579 Operand.getNode()->getOperand(0));
2580 else if (OpOpcode == ISD::UNDEF)
2581 // zext(undef) = 0, because the top bits will be zero.
2582 return getConstant(0, VT);
2584 case ISD::ANY_EXTEND:
2585 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2586 "Invalid ANY_EXTEND!");
2587 if (Operand.getValueType() == VT) return Operand; // noop extension
2588 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2589 "Invalid anyext node, dst < src!");
2590 assert((!VT.isVector() ||
2591 VT.getVectorNumElements() ==
2592 Operand.getValueType().getVectorNumElements()) &&
2593 "Vector element count mismatch!");
2595 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2596 OpOpcode == ISD::ANY_EXTEND)
2597 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
2598 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2599 else if (OpOpcode == ISD::UNDEF)
2600 return getUNDEF(VT);
2602 // (ext (trunc x)) -> x
2603 if (OpOpcode == ISD::TRUNCATE) {
2604 SDValue OpOp = Operand.getNode()->getOperand(0);
2605 if (OpOp.getValueType() == VT)
2610 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2611 "Invalid TRUNCATE!");
2612 if (Operand.getValueType() == VT) return Operand; // noop truncate
2613 assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
2614 "Invalid truncate node, src < dst!");
2615 assert((!VT.isVector() ||
2616 VT.getVectorNumElements() ==
2617 Operand.getValueType().getVectorNumElements()) &&
2618 "Vector element count mismatch!");
2619 if (OpOpcode == ISD::TRUNCATE)
2620 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2621 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2622 OpOpcode == ISD::ANY_EXTEND) {
2623 // If the source is smaller than the dest, we still need an extend.
2624 if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
2625 .bitsLT(VT.getScalarType()))
2626 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2627 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
2628 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2629 return Operand.getNode()->getOperand(0);
2631 if (OpOpcode == ISD::UNDEF)
2632 return getUNDEF(VT);
2635 // Basic sanity checking.
2636 assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
2637 && "Cannot BITCAST between types of different sizes!");
2638 if (VT == Operand.getValueType()) return Operand; // noop conversion.
2639 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
2640 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
2641 if (OpOpcode == ISD::UNDEF)
2642 return getUNDEF(VT);
2644 case ISD::SCALAR_TO_VECTOR:
2645 assert(VT.isVector() && !Operand.getValueType().isVector() &&
2646 (VT.getVectorElementType() == Operand.getValueType() ||
2647 (VT.getVectorElementType().isInteger() &&
2648 Operand.getValueType().isInteger() &&
2649 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
2650 "Illegal SCALAR_TO_VECTOR node!");
2651 if (OpOpcode == ISD::UNDEF)
2652 return getUNDEF(VT);
2653 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
2654 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
2655 isa<ConstantSDNode>(Operand.getOperand(1)) &&
2656 Operand.getConstantOperandVal(1) == 0 &&
2657 Operand.getOperand(0).getValueType() == VT)
2658 return Operand.getOperand(0);
2661 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
2662 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
2663 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
2664 Operand.getNode()->getOperand(0));
2665 if (OpOpcode == ISD::FNEG) // --X -> X
2666 return Operand.getNode()->getOperand(0);
2669 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
2670 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
2675 SDVTList VTs = getVTList(VT);
2676 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
2677 FoldingSetNodeID ID;
2678 SDValue Ops[1] = { Operand };
2679 AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
2681 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2682 return SDValue(E, 0);
2684 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
2685 DL.getDebugLoc(), VTs, Operand);
2686 CSEMap.InsertNode(N, IP);
2688 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
2689 DL.getDebugLoc(), VTs, Operand);
2692 AllNodes.push_back(N);
2696 return SDValue(N, 0);
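// Illustrative usage sketch (an assumption for exposition, not part of the
// original file): callers construct unary nodes through this entry point
// and get the folds above for free, e.g. with 'DAG', an SDLoc 'dl', and an
// i8 SDValue 'X' in scope:
//
//   SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, X);
//
// If X happens to be a ConstantSDNode, this returns a folded constant
// instead of allocating a new SIGN_EXTEND node.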
2699 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
2700 SDNode *Cst1, SDNode *Cst2) {
2701 SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
2702 SmallVector<SDValue, 4> Outputs;
2703 EVT SVT = VT.getScalarType();
2705 ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
2706 ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
2707 if (Scalar1 && Scalar2) {
2708 // Scalar instruction.
2709 Inputs.push_back(std::make_pair(Scalar1, Scalar2));
2711 // For vectors extract each constant element into Inputs so we can constant
2712 // fold them individually.
2713 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
2714 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
2718 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
2720 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
2721 ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
2722 ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
2723 if (!V1 || !V2) // Not a constant, bail.
2726 // Avoid BUILD_VECTOR nodes that perform implicit truncation.
2727 // FIXME: This is valid and could be handled by truncating the APInts.
2728 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
2731 Inputs.push_back(std::make_pair(V1, V2));
2735 // We have a number of constant values, constant fold them element by element.
2736 for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
2737 const APInt &C1 = Inputs[I].first->getAPIntValue();
2738 const APInt &C2 = Inputs[I].second->getAPIntValue();
2742 Outputs.push_back(getConstant(C1 + C2, SVT));
2745 Outputs.push_back(getConstant(C1 - C2, SVT));
2748 Outputs.push_back(getConstant(C1 * C2, SVT));
2751 if (!C2.getBoolValue())
2753 Outputs.push_back(getConstant(C1.udiv(C2), SVT));
2756 if (!C2.getBoolValue())
2758 Outputs.push_back(getConstant(C1.urem(C2), SVT));
2761 if (!C2.getBoolValue())
2763 Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
2766 if (!C2.getBoolValue())
2768 Outputs.push_back(getConstant(C1.srem(C2), SVT));
2771 Outputs.push_back(getConstant(C1 & C2, SVT));
2774 Outputs.push_back(getConstant(C1 | C2, SVT));
2777 Outputs.push_back(getConstant(C1 ^ C2, SVT));
2780 Outputs.push_back(getConstant(C1 << C2, SVT));
2783 Outputs.push_back(getConstant(C1.lshr(C2), SVT));
2786 Outputs.push_back(getConstant(C1.ashr(C2), SVT));
2789 Outputs.push_back(getConstant(C1.rotl(C2), SVT));
2792 Outputs.push_back(getConstant(C1.rotr(C2), SVT));
2799 // Handle the scalar case first.
2800 if (Scalar1 && Scalar2)
2801 return Outputs.back();
2803 // Otherwise build a big vector out of the scalar elements we generated.
2804 return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs.data(),
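// Worked example (illustrative): folding (add <2 x i32> <1, 2>, <10, 20>)
// pairs up the matching BUILD_VECTOR operands, folds each pair to the
// scalar constants 11 and 22, and rebuilds them as the single vector
// constant <11, 22>.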
2808 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
2810 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
2811 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
2814 case ISD::TokenFactor:
2815 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
2816 N2.getValueType() == MVT::Other && "Invalid token factor!");
2817 // Fold trivial token factors.
2818 if (N1.getOpcode() == ISD::EntryToken) return N2;
2819 if (N2.getOpcode() == ISD::EntryToken) return N1;
2820 if (N1 == N2) return N1;
2822 case ISD::CONCAT_VECTORS:
2823 // Concat of UNDEFs is UNDEF.
2824 if (N1.getOpcode() == ISD::UNDEF &&
2825 N2.getOpcode() == ISD::UNDEF)
2826 return getUNDEF(VT);
2828 // A CONCAT_VECTORS with all BUILD_VECTOR operands can be simplified to
2829 // one big BUILD_VECTOR.
2830 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
2831 N2.getOpcode() == ISD::BUILD_VECTOR) {
2832 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
2833 N1.getNode()->op_end());
2834 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
2835 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
2839 assert(VT.isInteger() && "This operator does not apply to FP types!");
2840 assert(N1.getValueType() == N2.getValueType() &&
2841 N1.getValueType() == VT && "Binary operator types must match!");
2842 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
2843 // worth handling here.
2844 if (N2C && N2C->isNullValue())
2846 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
2853 assert(VT.isInteger() && "This operator does not apply to FP types!");
2854 assert(N1.getValueType() == N2.getValueType() &&
2855 N1.getValueType() == VT && "Binary operator types must match!");
2856 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
2857 // it's worth handling here.
2858 if (N2C && N2C->isNullValue())
2868 assert(VT.isInteger() && "This operator does not apply to FP types!");
2869 assert(N1.getValueType() == N2.getValueType() &&
2870 N1.getValueType() == VT && "Binary operator types must match!");
2877 if (getTarget().Options.UnsafeFPMath) {
2878 if (Opcode == ISD::FADD) {
2880 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
2881 if (CFP->getValueAPF().isZero())
2884 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2885 if (CFP->getValueAPF().isZero())
2887 } else if (Opcode == ISD::FSUB) {
2889 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2890 if (CFP->getValueAPF().isZero())
2892 } else if (Opcode == ISD::FMUL) {
2893 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
2896 // If the first operand isn't the constant, try the second
2898 CFP = dyn_cast<ConstantFPSDNode>(N2);
2905 return SDValue(CFP,0);
2907 if (CFP->isExactlyValue(1.0))
2912 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
2913 assert(N1.getValueType() == N2.getValueType() &&
2914 N1.getValueType() == VT && "Binary operator types must match!");
2916 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
2917 assert(N1.getValueType() == VT &&
2918 N1.getValueType().isFloatingPoint() &&
2919 N2.getValueType().isFloatingPoint() &&
2920 "Invalid FCOPYSIGN!");
2927 assert(VT == N1.getValueType() &&
2928 "Shift operators return type must be the same as their first arg");
2929 assert(VT.isInteger() && N2.getValueType().isInteger() &&
2930 "Shifts only work on integers");
2931 assert((!VT.isVector() || VT == N2.getValueType()) &&
2932 "Vector shift amounts must be in the same as their first arg");
2933 // Verify that the shift amount VT is big enough to hold valid shift
2934 // amounts. This catches things like trying to shift an i1024 value by an
2935 // i8, which is easy to fall into in generic code that uses
2936 // TLI.getShiftAmount().
2937 assert(N2.getValueType().getSizeInBits() >=
2938 Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
2939 "Invalid use of small shift amount with oversized value!");
2941 // Always fold shifts of i1 values so the code generator doesn't need to
2942 // handle them. Since we know the size of the shift has to be less than the
2943 // size of the value, the shift/rotate count is guaranteed to be zero.
2946 if (N2C && N2C->isNullValue())
2949 case ISD::FP_ROUND_INREG: {
2950 EVT EVT = cast<VTSDNode>(N2)->getVT();
2951 assert(VT == N1.getValueType() && "Not an inreg round!");
2952 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
2953 "Cannot FP_ROUND_INREG integer types");
2954 assert(EVT.isVector() == VT.isVector() &&
2955 "FP_ROUND_INREG type should be vector iff the operand "
2957 assert((!EVT.isVector() ||
2958 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
2959 "Vector element counts must match in FP_ROUND_INREG");
2960 assert(EVT.bitsLE(VT) && "Not rounding down!");
2962 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
2966 assert(VT.isFloatingPoint() &&
2967 N1.getValueType().isFloatingPoint() &&
2968 VT.bitsLE(N1.getValueType()) &&
2969 isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
2970 if (N1.getValueType() == VT) return N1; // noop conversion.
2972 case ISD::AssertSext:
2973 case ISD::AssertZext: {
2974 EVT EVT = cast<VTSDNode>(N2)->getVT();
2975 assert(VT == N1.getValueType() && "Not an inreg extend!");
2976 assert(VT.isInteger() && EVT.isInteger() &&
2977 "Cannot *_EXTEND_INREG FP types");
2978 assert(!EVT.isVector() &&
2979 "AssertSExt/AssertZExt type should be the vector element type "
2980 "rather than the vector type!");
2981 assert(EVT.bitsLE(VT) && "Not extending!");
2982 if (VT == EVT) return N1; // noop assertion.
2985 case ISD::SIGN_EXTEND_INREG: {
2986 EVT EVT = cast<VTSDNode>(N2)->getVT();
2987 assert(VT == N1.getValueType() && "Not an inreg extend!");
2988 assert(VT.isInteger() && EVT.isInteger() &&
2989 "Cannot *_EXTEND_INREG FP types");
2990 assert(EVT.isVector() == VT.isVector() &&
2991 "SIGN_EXTEND_INREG type should be vector iff the operand "
2993 assert((!EVT.isVector() ||
2994 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
2995 "Vector element counts must match in SIGN_EXTEND_INREG");
2996 assert(EVT.bitsLE(VT) && "Not extending!");
2997 if (EVT == VT) return N1; // Not actually extending
3000 APInt Val = N1C->getAPIntValue();
3001 unsigned FromBits = EVT.getScalarType().getSizeInBits();
3002 Val <<= Val.getBitWidth()-FromBits;
3003 Val = Val.ashr(Val.getBitWidth()-FromBits);
3004 return getConstant(Val, VT);
3008 case ISD::EXTRACT_VECTOR_ELT:
3009 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
3010 if (N1.getOpcode() == ISD::UNDEF)
3011 return getUNDEF(VT);
3013 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
3014 // expanding copies of large vectors from registers.
3016 N1.getOpcode() == ISD::CONCAT_VECTORS &&
3017 N1.getNumOperands() > 0) {
3019 N1.getOperand(0).getValueType().getVectorNumElements();
3020 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3021 N1.getOperand(N2C->getZExtValue() / Factor),
3022 getConstant(N2C->getZExtValue() % Factor,
3023 N2.getValueType()));
3026 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3027 // expanding large vector constants.
3028 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3029 SDValue Elt = N1.getOperand(N2C->getZExtValue());
3031 if (VT != Elt.getValueType())
3032 // If the vector element type is not legal, the BUILD_VECTOR operands
3033 // are promoted and implicitly truncated, and the result implicitly
3034 // extended. Make that explicit here.
3035 Elt = getAnyExtOrTrunc(Elt, DL, VT);
3040 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3041 // operations are lowered to scalars.
3042 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3043 // If the indices are the same, return the inserted element;
3044 // otherwise, if the indices are known to differ, extract the element
3045 // from the original vector.
3046 SDValue N1Op2 = N1.getOperand(2);
3047 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
3049 if (N1Op2C && N2C) {
3050 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3051 if (VT == N1.getOperand(1).getValueType())
3052 return N1.getOperand(1);
3054 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3057 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3061 case ISD::EXTRACT_ELEMENT:
3062 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3063 assert(!N1.getValueType().isVector() && !VT.isVector() &&
3064 (N1.getValueType().isInteger() == VT.isInteger()) &&
3065 N1.getValueType() != VT &&
3066 "Wrong types for EXTRACT_ELEMENT!");
3068 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3069 // 64-bit integers into 32-bit parts. Instead of building the extract of
3070 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3071 if (N1.getOpcode() == ISD::BUILD_PAIR)
3072 return N1.getOperand(N2C->getZExtValue());
3074 // EXTRACT_ELEMENT of a constant int is also very common.
3075 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
3076 unsigned ElementSize = VT.getSizeInBits();
3077 unsigned Shift = ElementSize * N2C->getZExtValue();
3078 APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
3079 return getConstant(ShiftedVal.trunc(ElementSize), VT);
3082 case ISD::EXTRACT_SUBVECTOR: {
3084 if (VT.isSimple() && N1.getValueType().isSimple()) {
3085 assert(VT.isVector() && N1.getValueType().isVector() &&
3086 "Extract subvector VTs must be a vectors!");
3087 assert(VT.getVectorElementType() ==
3088 N1.getValueType().getVectorElementType() &&
3089 "Extract subvector VTs must have the same element type!");
3090 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
3091 "Extract subvector must be from larger vector to smaller vector!");
3093 if (isa<ConstantSDNode>(Index.getNode())) {
3094 assert((VT.getVectorNumElements() +
3095 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3096 <= N1.getValueType().getVectorNumElements())
3097 && "Extract subvector overflow!");
3100 // Trivial extraction.
3101 if (VT.getSimpleVT() == N1.getSimpleValueType())
3108 // Perform trivial constant folding.
3109 SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
3110 if (SV.getNode()) return SV;
3112 // Canonicalize constant to RHS if commutative.
3113 if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
3114 std::swap(N1C, N2C);
3118 // Constant fold FP operations.
3119 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
3120 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
3122 if (!N2CFP && isCommutativeBinOp(Opcode)) {
3123 // Canonicalize constant to RHS if commutative.
3124 std::swap(N1CFP, N2CFP);
3127 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
3128 APFloat::opStatus s;
3131 s = V1.add(V2, APFloat::rmNearestTiesToEven);
3132 if (s != APFloat::opInvalidOp)
3133 return getConstantFP(V1, VT);
3136 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
3137 if (s!=APFloat::opInvalidOp)
3138 return getConstantFP(V1, VT);
3141 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
3142 if (s!=APFloat::opInvalidOp)
3143 return getConstantFP(V1, VT);
3146 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
3147 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3148 return getConstantFP(V1, VT);
3151 s = V1.mod(V2, APFloat::rmNearestTiesToEven);
3152 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3153 return getConstantFP(V1, VT);
3155 case ISD::FCOPYSIGN:
3157 return getConstantFP(V1, VT);
3162 if (Opcode == ISD::FP_ROUND) {
3163 APFloat V = N1CFP->getValueAPF(); // make copy
3165 // This can return overflow, underflow, or inexact; we don't care.
3166 // FIXME need to be more flexible about rounding mode.
3167 (void)V.convert(EVTToAPFloatSemantics(VT),
3168 APFloat::rmNearestTiesToEven, &ignored);
3169 return getConstantFP(V, VT);
3173 // Canonicalize an UNDEF to the RHS, even over a constant.
3174 if (N1.getOpcode() == ISD::UNDEF) {
3175 if (isCommutativeBinOp(Opcode)) {
3179 case ISD::FP_ROUND_INREG:
3180 case ISD::SIGN_EXTEND_INREG:
3186 return N1; // fold op(undef, arg2) -> undef
3194 return getConstant(0, VT); // fold op(undef, arg2) -> 0
3195 // For vectors, we can't easily build an all zero vector, just return the LHS.
3202 // Fold a bunch of operators when the RHS is undef.
3203 if (N2.getOpcode() == ISD::UNDEF) {
3206 if (N1.getOpcode() == ISD::UNDEF)
3207 // Handle undef ^ undef -> 0 special case. This is a common idiom (misuse).
3209 return getConstant(0, VT);
3219 return N2; // fold op(arg1, undef) -> undef
3225 if (getTarget().Options.UnsafeFPMath)
3233 return getConstant(0, VT); // fold op(arg1, undef) -> 0
3234 // For vectors, we can't easily build an all zero vector, just return the LHS.
3239 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
3240 // For vectors, we can't easily build an all one vector, just return the LHS.
3248 // Memoize this node if possible.
3250 SDVTList VTs = getVTList(VT);
3251 if (VT != MVT::Glue) {
3252 SDValue Ops[] = { N1, N2 };
3253 FoldingSetNodeID ID;
3254 AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
3256 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3257 return SDValue(E, 0);
3259 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
3260 DL.getDebugLoc(), VTs, N1, N2);
3261 CSEMap.InsertNode(N, IP);
3263 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
3264 DL.getDebugLoc(), VTs, N1, N2);
3267 AllNodes.push_back(N);
3271 return SDValue(N, 0);
3274 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3275 SDValue N1, SDValue N2, SDValue N3) {
3276 // Perform various simplifications.
3277 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3280 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3281 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3282 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
3283 if (N1CFP && N2CFP && N3CFP) {
3284 APFloat V1 = N1CFP->getValueAPF();
3285 const APFloat &V2 = N2CFP->getValueAPF();
3286 const APFloat &V3 = N3CFP->getValueAPF();
3287 APFloat::opStatus s =
3288 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
3289 if (s != APFloat::opInvalidOp)
3290 return getConstantFP(V1, VT);
3294 case ISD::CONCAT_VECTORS:
3295 // A CONCAT_VECTORS with all BUILD_VECTOR operands can be simplified to
3296 // one big BUILD_VECTOR.
3297 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3298 N2.getOpcode() == ISD::BUILD_VECTOR &&
3299 N3.getOpcode() == ISD::BUILD_VECTOR) {
3300 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3301 N1.getNode()->op_end());
3302 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3303 Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
3304 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
3308 // Use FoldSetCC to simplify SETCC's.
3309 SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
3310 if (Simp.getNode()) return Simp;
3315 if (N1C->getZExtValue())
3316 return N2; // select true, X, Y -> X
3317 return N3; // select false, X, Y -> Y
3320 if (N2 == N3) return N2; // select C, X, X -> X
3322 case ISD::VECTOR_SHUFFLE:
3323 llvm_unreachable("should use getVectorShuffle constructor!");
3324 case ISD::INSERT_SUBVECTOR: {
3326 if (VT.isSimple() && N1.getValueType().isSimple()
3327 && N2.getValueType().isSimple()) {
3328 assert(VT.isVector() && N1.getValueType().isVector() &&
3329 N2.getValueType().isVector() &&
3330 "Insert subvector VTs must be a vectors");
3331 assert(VT == N1.getValueType() &&
3332 "Dest and insert subvector source types must match!");
3333 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
3334 "Insert subvector must be from smaller vector to larger vector!");
3335 if (isa<ConstantSDNode>(Index.getNode())) {
3336 assert((N2.getValueType().getVectorNumElements() +
3337 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3338 <= VT.getVectorNumElements())
3339 && "Insert subvector overflow!");
3342 // Trivial insertion.
3343 if (VT.getSimpleVT() == N2.getSimpleValueType())
3349 // Fold bit_convert nodes from a type to themselves.
3350 if (N1.getValueType() == VT)
3355 // Memoize node if it doesn't produce a flag.
3357 SDVTList VTs = getVTList(VT);
3358 if (VT != MVT::Glue) {
3359 SDValue Ops[] = { N1, N2, N3 };
3360 FoldingSetNodeID ID;
3361 AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
3363 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3364 return SDValue(E, 0);
3366 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3367 DL.getDebugLoc(), VTs, N1, N2, N3);
3368 CSEMap.InsertNode(N, IP);
3370 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3371 DL.getDebugLoc(), VTs, N1, N2, N3);
3374 AllNodes.push_back(N);
3378 return SDValue(N, 0);
3381 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3382 SDValue N1, SDValue N2, SDValue N3,
3384 SDValue Ops[] = { N1, N2, N3, N4 };
3385 return getNode(Opcode, DL, VT, Ops, 4);
3388 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3389 SDValue N1, SDValue N2, SDValue N3,
3390 SDValue N4, SDValue N5) {
3391 SDValue Ops[] = { N1, N2, N3, N4, N5 };
3392 return getNode(Opcode, DL, VT, Ops, 5);
3395 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3396 /// the incoming stack arguments to be loaded from the stack.
3397 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3398 SmallVector<SDValue, 8> ArgChains;
3400 // Include the original chain at the beginning of the list. When this is
3401 // used by target LowerCall hooks, this helps legalize find the
3402 // CALLSEQ_BEGIN node.
3403 ArgChains.push_back(Chain);
3405 // Add a chain value for each stack argument.
3406 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3407 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3408 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3409 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3410 if (FI->getIndex() < 0)
3411 ArgChains.push_back(SDValue(L, 1));
3413 // Build a tokenfactor for all the chains.
3414 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
3415 &ArgChains[0], ArgChains.size());
3418 /// getMemsetValue - Vectorized representation of the memset value operand.
3420 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3422 assert(Value.getOpcode() != ISD::UNDEF);
3424 unsigned NumBits = VT.getScalarType().getSizeInBits();
3425 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3426 assert(C->getAPIntValue().getBitWidth() == 8);
3427 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3429 return DAG.getConstant(Val, VT);
3430 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3433 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3435 // Use a multiplication with 0x010101... to extend the input to the required length.
3437 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3438 Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
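// Worked example (illustrative): for an i32 memset with Value = 0xAB, the
// splat constant Magic is 0x01010101 and 0xAB * 0x01010101 = 0xABABABAB,
// i.e. the byte replicated into every byte of the word.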
3444 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
3445 /// used when a memcpy is turned into a memset when the source is a constant string.
3447 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3448 const TargetLowering &TLI, StringRef Str) {
3449 // Handle vector with all elements zero.
3452 return DAG.getConstant(0, VT);
3453 else if (VT == MVT::f32 || VT == MVT::f64)
3454 return DAG.getConstantFP(0.0, VT);
3455 else if (VT.isVector()) {
3456 unsigned NumElts = VT.getVectorNumElements();
3457 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3458 return DAG.getNode(ISD::BITCAST, dl, VT,
3459 DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3462 llvm_unreachable("Expected type!");
3465 assert(!VT.isVector() && "Can't handle vector type here!");
3466 unsigned NumVTBits = VT.getSizeInBits();
3467 unsigned NumVTBytes = NumVTBits / 8;
3468 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3470 APInt Val(NumVTBits, 0);
3471 if (TLI.isLittleEndian()) {
3472 for (unsigned i = 0; i != NumBytes; ++i)
3473 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3475 for (unsigned i = 0; i != NumBytes; ++i)
3476 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
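// Worked example (illustrative): for Str = "abcd" and an i32 VT, the
// little-endian loop yields 0x64636261 ('a' = 0x61 lands in the low byte)
// while the big-endian loop yields 0x61626364.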
3479 // If the "cost" of materializing the integer immediate is 1 or free, then
3480 // it is cost effective to turn the load into the immediate.
3481 const TargetTransformInfo *TTI = DAG.getTargetTransformInfo();
3482 if (TTI->getIntImmCost(Val, VT.getTypeForEVT(*DAG.getContext())) < 2)
3483 return DAG.getConstant(Val, VT);
3484 return SDValue(0, 0);
3485 }
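// Worked example (illustrative): loading the constant string "abcd" as an
// i32 immediate. On a little-endian target the bytes pack low-to-high,
//
//   Val = 0x61 | (0x62 << 8) | (0x63 << 16) | (0x64 << 24) = 0x64636261
//
// while a big-endian target packs the same bytes as 0x61626364.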
3487 /// getMemBasePlusOffset - Returns the sum of a base pointer and a constant
3488 /// byte offset as a new ADD node.
3489 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3490 SelectionDAG &DAG) {
3491 EVT VT = Base.getValueType();
3492 return DAG.getNode(ISD::ADD, dl,
3493 VT, Base, DAG.getConstant(Offset, VT));
3494 }
3496 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
3498 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3499 unsigned SrcDelta = 0;
3500 GlobalAddressSDNode *G = NULL;
3501 if (Src.getOpcode() == ISD::GlobalAddress)
3502 G = cast<GlobalAddressSDNode>(Src);
3503 else if (Src.getOpcode() == ISD::ADD &&
3504 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3505 Src.getOperand(1).getOpcode() == ISD::Constant) {
3506 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3507 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3508 }
3510 if (!G)
3511 return false;
3512 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
3513 }
3515 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3516 /// to replace the memset / memcpy. Return true if the number of memory ops
3517 /// is below the threshold. It returns the types of the sequence of
3518 /// memory ops to perform memset / memcpy by reference.
3519 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3520 unsigned Limit, uint64_t Size,
3521 unsigned DstAlign, unsigned SrcAlign,
3522 bool IsMemset,
3523 bool ZeroMemset,
3524 bool MemcpyStrSrc,
3525 bool AllowOverlap,
3526 SelectionDAG &DAG,
3527 const TargetLowering &TLI) {
3528 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3529 "Expecting memcpy / memset source to meet alignment requirement!");
3530 // If 'SrcAlign' is zero, that means the memory operation does not need to
3531 // load the value, i.e. memset or memcpy from constant string. Otherwise,
3532 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3533 // is the specified alignment of the memory operation. If it is zero, that
3534 // means it's possible to change the alignment of the destination.
3535 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3536 // not need to be loaded.
3537 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3538 IsMemset, ZeroMemset, MemcpyStrSrc,
3539 DAG.getMachineFunction());
3541 if (VT == MVT::Other) {
3542 if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
3543 TLI.allowsUnalignedMemoryAccesses(VT)) {
3544 VT = TLI.getPointerTy();
3545 } else {
3546 switch (DstAlign & 7) {
3547 case 0: VT = MVT::i64; break;
3548 case 4: VT = MVT::i32; break;
3549 case 2: VT = MVT::i16; break;
3550 default: VT = MVT::i8; break;
3551 }
3552 }
3554 MVT LVT = MVT::i64;
3555 while (!TLI.isTypeLegal(LVT))
3556 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3557 assert(LVT.isInteger());
3559 if (VT.bitsGT(LVT))
3560 VT = LVT;
3561 }
3563 unsigned NumMemOps = 0;
3564 while (Size != 0) {
3565 unsigned VTSize = VT.getSizeInBits() / 8;
3566 while (VTSize > Size) {
3567 // For now, only use non-vector load / stores for the left-over pieces.
3568 EVT NewVT = VT;
3569 unsigned NewVTSize;
3571 bool Found = false;
3572 if (VT.isVector() || VT.isFloatingPoint()) {
3573 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3574 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3575 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3576 Found = true;
3577 else if (NewVT == MVT::i64 &&
3578 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3579 TLI.isSafeMemOpType(MVT::f64)) {
3580 // i64 is usually not legal on 32-bit targets, but f64 may be.
3581 NewVT = MVT::f64;
3582 Found = true;
3583 }
3584 }
3586 if (!Found) {
3587 do {
3588 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3589 if (NewVT == MVT::i8)
3590 break;
3591 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3592 }
3593 NewVTSize = NewVT.getSizeInBits() / 8;
3595 // If the new VT cannot cover all of the remaining bits, then consider
3596 // issuing a (or a pair of) unaligned and overlapping load / store.
3597 // FIXME: Only does this for 64-bit or more since we don't have proper
3598 // cost model for unaligned load / store.
3599 bool Fast;
3600 if (NumMemOps && AllowOverlap &&
3601 VTSize >= 8 && NewVTSize < Size &&
3602 TLI.allowsUnalignedMemoryAccesses(VT, &Fast) && Fast)
3603 VTSize = Size;
3604 else {
3605 VT = NewVT;
3606 VTSize = NewVTSize;
3607 }
3608 }
3610 if (++NumMemOps > Limit)
3611 return false;
3613 MemOps.push_back(VT);
3614 Size -= VTSize;
3615 }
3617 return true;
3618 }
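// Example decomposition (illustrative, assuming i64/i32/i16/i8 are all legal
// store types): a 15-byte inline memcpy yields MemOps = {i64, i32, i16, i8}.
// If the target reports fast unaligned accesses, the AllowOverlap path
// instead covers the 7-byte tail with a second, overlapping i64 operation,
// giving MemOps = {i64, i64}.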
3620 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3621 SDValue Chain, SDValue Dst,
3622 SDValue Src, uint64_t Size,
3623 unsigned Align, bool isVol,
3624 bool AlwaysInline,
3625 MachinePointerInfo DstPtrInfo,
3626 MachinePointerInfo SrcPtrInfo) {
3627 // Turn a memcpy of undef to nop.
3628 if (Src.getOpcode() == ISD::UNDEF)
3629 return Chain;
3631 // Expand memcpy to a series of load and store ops if the size operand falls
3632 // below a certain threshold.
3633 // TODO: In the AlwaysInline case, if the size is big then generate a loop
3634 // rather than maybe a humongous number of loads and stores.
3635 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3636 std::vector<EVT> MemOps;
3637 bool DstAlignCanChange = false;
3638 MachineFunction &MF = DAG.getMachineFunction();
3639 MachineFrameInfo *MFI = MF.getFrameInfo();
3640 bool OptSize =
3641 MF.getFunction()->getAttributes().
3642 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3643 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3644 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3645 DstAlignCanChange = true;
3646 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3647 if (Align > SrcAlign)
3648 SrcAlign = Align;
3649 StringRef Str;
3650 bool CopyFromStr = isMemSrcFromString(Src, Str);
3651 bool isZeroStr = CopyFromStr && Str.empty();
3652 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
3654 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3655 (DstAlignCanChange ? 0 : Align),
3656 (isZeroStr ? 0 : SrcAlign),
3657 false, false, CopyFromStr, true, DAG, TLI))
3658 return SDValue();
3660 if (DstAlignCanChange) {
3661 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3662 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3664 // Don't promote to an alignment that would require dynamic stack
3665 // realignment.
3666 const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
3667 if (!TRI->needsStackRealignment(MF))
3668 while (NewAlign > Align &&
3669 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
3670 NewAlign /= 2;
3672 if (NewAlign > Align) {
3673 // Give the stack frame object a larger alignment if needed.
3674 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3675 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3676 Align = NewAlign;
3677 }
3678 }
3680 SmallVector<SDValue, 8> OutChains;
3681 unsigned NumMemOps = MemOps.size();
3682 uint64_t SrcOff = 0, DstOff = 0;
3683 for (unsigned i = 0; i != NumMemOps; ++i) {
3684 EVT VT = MemOps[i];
3685 unsigned VTSize = VT.getSizeInBits() / 8;
3686 SDValue Value, Store;
3688 if (VTSize > Size) {
3689 // Issuing an unaligned load / store pair that overlaps with the previous
3690 // pair. Adjust the offset accordingly.
3691 assert(i == NumMemOps-1 && i != 0);
3692 SrcOff -= VTSize - Size;
3693 DstOff -= VTSize - Size;
3694 }
3696 if (CopyFromStr &&
3697 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
3698 // It's unlikely a store of a vector immediate can be done in a single
3699 // instruction. It would require a load from a constantpool first.
3700 // We only handle zero vectors here.
3701 // FIXME: Handle other cases where store of vector immediate is done in
3702 // a single instruction.
3703 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
3704 if (Value.getNode())
3705 Store = DAG.getStore(Chain, dl, Value,
3706 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3707 DstPtrInfo.getWithOffset(DstOff), isVol,
3708 false, Align);
3709 }
3711 if (!Store.getNode()) {
3712 // The type might not be legal for the target. This should only happen
3713 // if the type is smaller than a legal type, as on PPC, so the right
3714 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
3715 // to Load/Store if NVT==VT.
3716 // FIXME does the case above also need this?
3717 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3718 assert(NVT.bitsGE(VT));
3719 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3720 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3721 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
3722 MinAlign(SrcAlign, SrcOff));
3723 Store = DAG.getTruncStore(Chain, dl, Value,
3724 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3725 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
3726 false, Align);
3727 }
3728 OutChains.push_back(Store);
3729 SrcOff += VTSize;
3730 DstOff += VTSize;
3731 Size -= VTSize;
3732 }
3734 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3735 &OutChains[0], OutChains.size());
3736 }
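// Resulting DAG shape (illustrative): each load and each store is chained on
// the incoming Chain, each store takes the matching loaded value, and the
// returned TokenFactor joins all the store chains, e.g. for two memory ops:
//
//   Ld0 = load Src+0         Ld1 = load Src+8
//   St0 = store Ld0, Dst+0   St1 = store Ld1, Dst+8
//   TokenFactor(St0, St1)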
3738 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3739 SDValue Chain, SDValue Dst,
3740 SDValue Src, uint64_t Size,
3741 unsigned Align, bool isVol,
3742 bool AlwaysInline,
3743 MachinePointerInfo DstPtrInfo,
3744 MachinePointerInfo SrcPtrInfo) {
3745 // Turn a memmove of undef to nop.
3746 if (Src.getOpcode() == ISD::UNDEF)
3747 return Chain;
3749 // Expand memmove to a series of load and store ops if the size operand falls
3750 // below a certain threshold.
3751 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3752 std::vector<EVT> MemOps;
3753 bool DstAlignCanChange = false;
3754 MachineFunction &MF = DAG.getMachineFunction();
3755 MachineFrameInfo *MFI = MF.getFrameInfo();
3756 bool OptSize = MF.getFunction()->getAttributes().
3757 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3758 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3759 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3760 DstAlignCanChange = true;
3761 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3762 if (Align > SrcAlign)
3763 SrcAlign = Align;
3764 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
3766 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3767 (DstAlignCanChange ? 0 : Align), SrcAlign,
3768 false, false, false, false, DAG, TLI))
3769 return SDValue();
3771 if (DstAlignCanChange) {
3772 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3773 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3774 if (NewAlign > Align) {
3775 // Give the stack frame object a larger alignment if needed.
3776 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3777 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3778 Align = NewAlign;
3779 }
3780 }
3782 uint64_t SrcOff = 0, DstOff = 0;
3783 SmallVector<SDValue, 8> LoadValues;
3784 SmallVector<SDValue, 8> LoadChains;
3785 SmallVector<SDValue, 8> OutChains;
3786 unsigned NumMemOps = MemOps.size();
3787 for (unsigned i = 0; i < NumMemOps; i++) {
3788 EVT VT = MemOps[i];
3789 unsigned VTSize = VT.getSizeInBits() / 8;
3790 SDValue Value, Store;
3792 Value = DAG.getLoad(VT, dl, Chain,
3793 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3794 SrcPtrInfo.getWithOffset(SrcOff), isVol,
3795 false, false, SrcAlign);
3796 LoadValues.push_back(Value);
3797 LoadChains.push_back(Value.getValue(1));
3798 SrcOff += VTSize;
3799 }
3800 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3801 &LoadChains[0], LoadChains.size());
3803 for (unsigned i = 0; i < NumMemOps; i++) {
3804 EVT VT = MemOps[i];
3805 unsigned VTSize = VT.getSizeInBits() / 8;
3806 SDValue Value, Store;
3808 Store = DAG.getStore(Chain, dl, LoadValues[i],
3809 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3810 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
3811 OutChains.push_back(Store);
3812 DstOff += VTSize;
3813 }
3815 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3816 &OutChains[0], OutChains.size());
3817 }
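// Design note: unlike the memcpy expansion above, all loads are emitted and
// token-factored into a fresh chain *before* any store is issued. Keeping the
// two phases separate is what makes this expansion safe when the source and
// destination buffers overlap, which memmove must tolerate.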
3819 /// \brief Lower the call to 'memset' intrinsic function into a series of store
3820 /// operations.
3821 ///
3822 /// \param DAG Selection DAG where lowered code is placed.
3823 /// \param dl Link to corresponding IR location.
3824 /// \param Chain Control flow dependency.
3825 /// \param Dst Pointer to destination memory location.
3826 /// \param Src Value of byte to write into the memory.
3827 /// \param Size Number of bytes to write.
3828 /// \param Align Alignment of the destination in bytes.
3829 /// \param isVol True if destination is volatile.
3830 /// \param DstPtrInfo IR information on the memory pointer.
3831 /// \returns New head in the control flow, if lowering was successful, empty
3832 /// SDValue otherwise.
3834 /// The function tries to replace 'llvm.memset' intrinsic with several store
3835 /// operations and value calculation code. This is usually profitable for small
3836 /// memory size.
3837 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
3838 SDValue Chain, SDValue Dst,
3839 SDValue Src, uint64_t Size,
3840 unsigned Align, bool isVol,
3841 MachinePointerInfo DstPtrInfo) {
3842 // Turn a memset of undef to nop.
3843 if (Src.getOpcode() == ISD::UNDEF)
3844 return Chain;
3846 // Expand memset to a series of load/store ops if the size operand
3847 // falls below a certain threshold.
3848 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3849 std::vector<EVT> MemOps;
3850 bool DstAlignCanChange = false;
3851 MachineFunction &MF = DAG.getMachineFunction();
3852 MachineFrameInfo *MFI = MF.getFrameInfo();
3853 bool OptSize = MF.getFunction()->getAttributes().
3854 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3855 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3856 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3857 DstAlignCanChange = true;
3858 bool IsZeroVal =
3859 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
3860 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
3861 Size, (DstAlignCanChange ? 0 : Align), 0,
3862 true, IsZeroVal, false, true, DAG, TLI))
3863 return SDValue();
3865 if (DstAlignCanChange) {
3866 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3867 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3868 if (NewAlign > Align) {
3869 // Give the stack frame object a larger alignment if needed.
3870 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3871 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3872 Align = NewAlign;
3873 }
3874 }
3876 SmallVector<SDValue, 8> OutChains;
3877 uint64_t DstOff = 0;
3878 unsigned NumMemOps = MemOps.size();
3880 // Find the largest store and generate the bit pattern for it.
3881 EVT LargestVT = MemOps[0];
3882 for (unsigned i = 1; i < NumMemOps; i++)
3883 if (MemOps[i].bitsGT(LargestVT))
3884 LargestVT = MemOps[i];
3885 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
3887 for (unsigned i = 0; i < NumMemOps; i++) {
3888 EVT VT = MemOps[i];
3889 unsigned VTSize = VT.getSizeInBits() / 8;
3890 if (VTSize > Size) {
3891 // Issuing an unaligned load / store pair that overlaps with the previous
3892 // pair. Adjust the offset accordingly.
3893 assert(i == NumMemOps-1 && i != 0);
3894 DstOff -= VTSize - Size;
3895 }
3897 // If this store is smaller than the largest store see whether we can get
3898 // the smaller value for free with a truncate.
3899 SDValue Value = MemSetValue;
3900 if (VT.bitsLT(LargestVT)) {
3901 if (!LargestVT.isVector() && !VT.isVector() &&
3902 TLI.isTruncateFree(LargestVT, VT))
3903 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
3904 else
3905 Value = getMemsetValue(Src, VT, DAG, dl);
3906 }
3907 assert(Value.getValueType() == VT && "Value with wrong type.");
3908 SDValue Store = DAG.getStore(Chain, dl, Value,
3909 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3910 DstPtrInfo.getWithOffset(DstOff),
3911 isVol, false, Align);
3912 OutChains.push_back(Store);
3913 DstOff += VT.getSizeInBits() / 8;
3914 Size -= VTSize;
3915 }
3917 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3918 &OutChains[0], OutChains.size());
3919 }
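// Example (illustrative): an inline memset(dst, 0xAB, 12) with legal i64 and
// i32 emits one i64 store of 0xABABABABABABABAB and one i32 store of
// 0xABABABAB; when isTruncateFree(i64, i32) holds, the smaller splat is
// produced by truncating the largest one instead of being recomputed.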
3921 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
3922 SDValue Src, SDValue Size,
3923 unsigned Align, bool isVol, bool AlwaysInline,
3924 MachinePointerInfo DstPtrInfo,
3925 MachinePointerInfo SrcPtrInfo) {
3926 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
3928 // Check to see if we should lower the memcpy to loads and stores first.
3929 // For cases within the target-specified limits, this is the best choice.
3930 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3931 if (ConstantSize) {
3932 // Memcpy with size zero? Just return the original chain.
3933 if (ConstantSize->isNullValue())
3934 return Chain;
3936 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3937 ConstantSize->getZExtValue(),Align,
3938 isVol, false, DstPtrInfo, SrcPtrInfo);
3939 if (Result.getNode())
3940 return Result;
3941 }
3943 // Then check to see if we should lower the memcpy with target-specific
3944 // code. If the target chooses to do this, this is the next best.
3945 SDValue Result =
3946 TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
3947 isVol, AlwaysInline,
3948 DstPtrInfo, SrcPtrInfo);
3949 if (Result.getNode())
3950 return Result;
3952 // If we really need inline code and the target declined to provide it,
3953 // use a (potentially long) sequence of loads and stores.
3954 if (AlwaysInline) {
3955 assert(ConstantSize && "AlwaysInline requires a constant size!");
3956 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3957 ConstantSize->getZExtValue(), Align, isVol,
3958 true, DstPtrInfo, SrcPtrInfo);
3959 }
3961 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
3962 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
3963 // respect volatile, so they may do things like read or write memory
3964 // beyond the given memory regions. But fixing this isn't easy, and most
3965 // people don't care.
3967 const TargetLowering *TLI = TM.getTargetLowering();
3969 // Emit a library call.
3970 TargetLowering::ArgListTy Args;
3971 TargetLowering::ArgListEntry Entry;
3972 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
3973 Entry.Node = Dst; Args.push_back(Entry);
3974 Entry.Node = Src; Args.push_back(Entry);
3975 Entry.Node = Size; Args.push_back(Entry);
3976 // FIXME: pass in SDLoc
3977 TargetLowering::
3978 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
3979 false, false, false, false, 0,
3980 TLI->getLibcallCallingConv(RTLIB::MEMCPY),
3981 /*isTailCall=*/false,
3982 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
3983 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
3984 TLI->getPointerTy()),
3985 Args, *this, dl);
3986 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
3988 return CallResult.second;
3989 }
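// Usage sketch (illustrative; Dst, Src, DstV, SrcV and NumBytes are assumed
// to be in scope): expanding a fixed-size copy from a custom lowering hook.
//
//   SDValue NewChain =
//     DAG.getMemcpy(Chain, dl, Dst, Src,
//                   DAG.getConstant(NumBytes, TLI->getPointerTy()),
//                   /*Align=*/4, /*isVol=*/false, /*AlwaysInline=*/false,
//                   MachinePointerInfo(DstV), MachinePointerInfo(SrcV));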
3991 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
3992 SDValue Src, SDValue Size,
3993 unsigned Align, bool isVol,
3994 MachinePointerInfo DstPtrInfo,
3995 MachinePointerInfo SrcPtrInfo) {
3996 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
3998 // Check to see if we should lower the memmove to loads and stores first.
3999 // For cases within the target-specified limits, this is the best choice.
4000 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4001 if (ConstantSize) {
4002 // Memmove with size zero? Just return the original chain.
4003 if (ConstantSize->isNullValue())
4004 return Chain;
4006 SDValue Result =
4007 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
4008 ConstantSize->getZExtValue(), Align, isVol,
4009 false, DstPtrInfo, SrcPtrInfo);
4010 if (Result.getNode())
4011 return Result;
4012 }
4014 // Then check to see if we should lower the memmove with target-specific
4015 // code. If the target chooses to do this, this is the next best.
4016 SDValue Result =
4017 TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4018 DstPtrInfo, SrcPtrInfo);
4019 if (Result.getNode())
4020 return Result;
4022 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
4023 // not be safe. See memcpy above for more details.
4025 const TargetLowering *TLI = TM.getTargetLowering();
4027 // Emit a library call.
4028 TargetLowering::ArgListTy Args;
4029 TargetLowering::ArgListEntry Entry;
4030 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4031 Entry.Node = Dst; Args.push_back(Entry);
4032 Entry.Node = Src; Args.push_back(Entry);
4033 Entry.Node = Size; Args.push_back(Entry);
4034 // FIXME: pass in SDLoc
4035 TargetLowering::
4036 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4037 false, false, false, false, 0,
4038 TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
4039 /*isTailCall=*/false,
4040 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4041 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
4042 TLI->getPointerTy()),
4043 Args, *this, dl);
4044 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4046 return CallResult.second;
4047 }
4049 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
4050 SDValue Src, SDValue Size,
4051 unsigned Align, bool isVol,
4052 MachinePointerInfo DstPtrInfo) {
4053 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4055 // Check to see if we should lower the memset to stores first.
4056 // For cases within the target-specified limits, this is the best choice.
4057 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4058 if (ConstantSize) {
4059 // Memset with size zero? Just return the original chain.
4060 if (ConstantSize->isNullValue())
4061 return Chain;
4063 SDValue Result =
4064 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4065 Align, isVol, DstPtrInfo);
4067 if (Result.getNode())
4068 return Result;
4069 }
4071 // Then check to see if we should lower the memset with target-specific
4072 // code. If the target chooses to do this, this is the next best.
4073 SDValue Result =
4074 TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4075 DstPtrInfo);
4076 if (Result.getNode())
4077 return Result;
4079 // Emit a library call.
4080 const TargetLowering *TLI = TM.getTargetLowering();
4081 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
4082 TargetLowering::ArgListTy Args;
4083 TargetLowering::ArgListEntry Entry;
4084 Entry.Node = Dst; Entry.Ty = IntPtrTy;
4085 Args.push_back(Entry);
4086 // Extend or truncate the argument to be an i32 value for the call.
4087 if (Src.getValueType().bitsGT(MVT::i32))
4088 Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4089 else
4090 Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4091 Entry.Node = Src;
4092 Entry.Ty = Type::getInt32Ty(*getContext());
4093 Entry.isSExt = true;
4094 Args.push_back(Entry);
4095 Entry.Node = Size;
4096 Entry.Ty = IntPtrTy;
4097 Entry.isSExt = false;
4098 Args.push_back(Entry);
4099 // FIXME: pass in SDLoc
4100 TargetLowering::
4101 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4102 false, false, false, false, 0,
4103 TLI->getLibcallCallingConv(RTLIB::MEMSET),
4104 /*isTailCall=*/false,
4105 /*doesNotReturn*/false, /*isReturnValueUsed=*/false,
4106 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4107 TLI->getPointerTy()),
4108 Args, *this, dl);
4109 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4111 return CallResult.second;
4112 }
4114 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4115 SDVTList VTList, SDValue* Ops, unsigned NumOps,
4116 MachineMemOperand *MMO,
4117 AtomicOrdering Ordering,
4118 SynchronizationScope SynchScope) {
4119 FoldingSetNodeID ID;
4120 ID.AddInteger(MemVT.getRawBits());
4121 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4122 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4123 void* IP = 0;
4124 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4125 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4126 return SDValue(E, 0);
4127 }
4129 // Allocate the operands array for the node out of the BumpPtrAllocator, since
4130 // SDNode doesn't have access to it. This memory will be "leaked" when
4131 // the node is deallocated, but recovered when the allocator is released.
4132 // If the number of operands is less than 5 we use AtomicSDNode's internal
4133 // storage.
4134 SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps) : 0;
4136 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
4137 dl.getDebugLoc(), VTList, MemVT,
4138 Ops, DynOps, NumOps, MMO,
4139 Ordering, SynchScope);
4140 CSEMap.InsertNode(N, IP);
4141 AllNodes.push_back(N);
4142 return SDValue(N, 0);
4143 }
4145 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4146 SDValue Chain, SDValue Ptr, SDValue Cmp,
4147 SDValue Swp, MachinePointerInfo PtrInfo,
4148 unsigned Alignment,
4149 AtomicOrdering Ordering,
4150 SynchronizationScope SynchScope) {
4151 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4152 Alignment = getEVTAlignment(MemVT);
4154 MachineFunction &MF = getMachineFunction();
4156 // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
4157 // For now, atomics are considered to be volatile always.
4158 // FIXME: Volatile isn't really correct; we should keep track of atomic
4159 // orderings in the memoperand.
4160 unsigned Flags = MachineMemOperand::MOVolatile;
4161 if (Opcode != ISD::ATOMIC_STORE)
4162 Flags |= MachineMemOperand::MOLoad;
4163 if (Opcode != ISD::ATOMIC_LOAD)
4164 Flags |= MachineMemOperand::MOStore;
4166 MachineMemOperand *MMO =
4167 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4169 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
4170 Ordering, SynchScope);
4171 }
4173 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4174 SDValue Chain,
4175 SDValue Ptr, SDValue Cmp,
4176 SDValue Swp, MachineMemOperand *MMO,
4177 AtomicOrdering Ordering,
4178 SynchronizationScope SynchScope) {
4179 assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
4180 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4182 EVT VT = Cmp.getValueType();
4184 SDVTList VTs = getVTList(VT, MVT::Other);
4185 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4186 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 4, MMO, Ordering, SynchScope);
4187 }
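// Usage sketch (illustrative; Ptr, CmpVal, NewVal and PtrV are assumed to be
// in scope): building a monotonic compare-and-swap through the
// MachinePointerInfo-based overload above.
//
//   SDValue CAS =
//     DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, MVT::i32, Chain, Ptr, CmpVal,
//                   NewVal, MachinePointerInfo(PtrV), /*Alignment=*/0,
//                   Monotonic, CrossThread);
//   // CAS.getValue(0) is the loaded value, CAS.getValue(1) the out-chain.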
4189 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4190 SDValue Chain,
4191 SDValue Ptr, SDValue Val,
4192 const Value* PtrVal,
4193 unsigned Alignment,
4194 AtomicOrdering Ordering,
4195 SynchronizationScope SynchScope) {
4196 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4197 Alignment = getEVTAlignment(MemVT);
4199 MachineFunction &MF = getMachineFunction();
4200 // An atomic store does not load. An atomic load does not store.
4201 // (An atomicrmw obviously both loads and stores.)
4202 // For now, atomics are considered to be volatile always, and they are
4203 // chained as such.
4204 // FIXME: Volatile isn't really correct; we should keep track of atomic
4205 // orderings in the memoperand.
4206 unsigned Flags = MachineMemOperand::MOVolatile;
4207 if (Opcode != ISD::ATOMIC_STORE)
4208 Flags |= MachineMemOperand::MOLoad;
4209 if (Opcode != ISD::ATOMIC_LOAD)
4210 Flags |= MachineMemOperand::MOStore;
4212 MachineMemOperand *MMO =
4213 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4214 MemVT.getStoreSize(), Alignment);
4216 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4217 Ordering, SynchScope);
4218 }
4220 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4221 SDValue Chain,
4222 SDValue Ptr, SDValue Val,
4223 MachineMemOperand *MMO,
4224 AtomicOrdering Ordering,
4225 SynchronizationScope SynchScope) {
4226 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4227 Opcode == ISD::ATOMIC_LOAD_SUB ||
4228 Opcode == ISD::ATOMIC_LOAD_AND ||
4229 Opcode == ISD::ATOMIC_LOAD_OR ||
4230 Opcode == ISD::ATOMIC_LOAD_XOR ||
4231 Opcode == ISD::ATOMIC_LOAD_NAND ||
4232 Opcode == ISD::ATOMIC_LOAD_MIN ||
4233 Opcode == ISD::ATOMIC_LOAD_MAX ||
4234 Opcode == ISD::ATOMIC_LOAD_UMIN ||
4235 Opcode == ISD::ATOMIC_LOAD_UMAX ||
4236 Opcode == ISD::ATOMIC_SWAP ||
4237 Opcode == ISD::ATOMIC_STORE) &&
4238 "Invalid Atomic Op");
4240 EVT VT = Val.getValueType();
4242 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4243 getVTList(VT, MVT::Other);
4244 SDValue Ops[] = {Chain, Ptr, Val};
4245 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 3, MMO, Ordering, SynchScope);
4246 }
4248 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4249 EVT VT, SDValue Chain,
4250 SDValue Ptr,
4251 const Value* PtrVal,
4252 unsigned Alignment,
4253 AtomicOrdering Ordering,
4254 SynchronizationScope SynchScope) {
4255 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4256 Alignment = getEVTAlignment(MemVT);
4258 MachineFunction &MF = getMachineFunction();
4259 // An atomic store does not load. An atomic load does not store.
4260 // (An atomicrmw obviously both loads and stores.)
4261 // For now, atomics are considered to be volatile always, and they are
4262 // chained as such.
4263 // FIXME: Volatile isn't really correct; we should keep track of atomic
4264 // orderings in the memoperand.
4265 unsigned Flags = MachineMemOperand::MOVolatile;
4266 if (Opcode != ISD::ATOMIC_STORE)
4267 Flags |= MachineMemOperand::MOLoad;
4268 if (Opcode != ISD::ATOMIC_LOAD)
4269 Flags |= MachineMemOperand::MOStore;
4271 MachineMemOperand *MMO =
4272 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4273 MemVT.getStoreSize(), Alignment);
4275 return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO,
4276 Ordering, SynchScope);
4277 }
4279 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4280 EVT VT, SDValue Chain,
4281 SDValue Ptr,
4282 MachineMemOperand *MMO,
4283 AtomicOrdering Ordering,
4284 SynchronizationScope SynchScope) {
4285 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4287 SDVTList VTs = getVTList(VT, MVT::Other);
4288 SDValue Ops[] = {Chain, Ptr};
4289 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 2, MMO, Ordering, SynchScope);
4290 }
4292 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
4293 SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
4294 SDLoc dl) {
4295 if (NumOps == 1)
4296 return Ops[0];
4298 SmallVector<EVT, 4> VTs;
4299 VTs.reserve(NumOps);
4300 for (unsigned i = 0; i < NumOps; ++i)
4301 VTs.push_back(Ops[i].getValueType());
4302 return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps),
4303 Ops, NumOps);
4304 }
4306 SDValue
4307 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl,
4308 const EVT *VTs, unsigned NumVTs,
4309 const SDValue *Ops, unsigned NumOps,
4310 EVT MemVT, MachinePointerInfo PtrInfo,
4311 unsigned Align, bool Vol,
4312 bool ReadMem, bool WriteMem) {
4313 return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
4314 MemVT, PtrInfo, Align, Vol,
4315 ReadMem, WriteMem);
4316 }
4318 SDValue
4319 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4320 const SDValue *Ops, unsigned NumOps,
4321 EVT MemVT, MachinePointerInfo PtrInfo,
4322 unsigned Align, bool Vol,
4323 bool ReadMem, bool WriteMem) {
4324 if (Align == 0) // Ensure that codegen never sees alignment 0
4325 Align = getEVTAlignment(MemVT);
4327 MachineFunction &MF = getMachineFunction();
4328 unsigned Flags = 0;
4329 if (WriteMem)
4330 Flags |= MachineMemOperand::MOStore;
4331 if (ReadMem)
4332 Flags |= MachineMemOperand::MOLoad;
4333 if (Vol)
4334 Flags |= MachineMemOperand::MOVolatile;
4335 MachineMemOperand *MMO =
4336 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
4338 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
4339 }
4341 SDValue
4342 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4343 const SDValue *Ops, unsigned NumOps,
4344 EVT MemVT, MachineMemOperand *MMO) {
4345 assert((Opcode == ISD::INTRINSIC_VOID ||
4346 Opcode == ISD::INTRINSIC_W_CHAIN ||
4347 Opcode == ISD::PREFETCH ||
4348 Opcode == ISD::LIFETIME_START ||
4349 Opcode == ISD::LIFETIME_END ||
4350 (Opcode <= INT_MAX &&
4351 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4352 "Opcode is not a memory-accessing opcode!");
4354 // Memoize the node unless it returns a flag.
4355 MemIntrinsicSDNode *N;
4356 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4357 FoldingSetNodeID ID;
4358 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4359 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4360 void *IP = 0;
4361 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4362 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4363 return SDValue(E, 0);
4364 }
4366 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4367 dl.getDebugLoc(), VTList, Ops,
4368 NumOps, MemVT, MMO);
4369 CSEMap.InsertNode(N, IP);
4370 } else {
4371 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4372 dl.getDebugLoc(), VTList, Ops,
4373 NumOps, MemVT, MMO);
4374 }
4375 AllNodes.push_back(N);
4376 return SDValue(N, 0);
4377 }
4379 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4380 /// MachinePointerInfo record from it. This is particularly useful because the
4381 /// code generator has many cases where it doesn't bother passing in a
4382 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4383 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4384 // If this is FI+Offset, we can model it.
4385 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4386 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4388 // If this is (FI+Offset1)+Offset2, we can model it.
4389 if (Ptr.getOpcode() != ISD::ADD ||
4390 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4391 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4392 return MachinePointerInfo();
4394 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4395 return MachinePointerInfo::getFixedStack(FI, Offset+
4396 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
4397 }
4399 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4400 /// MachinePointerInfo record from it. This is particularly useful because the
4401 /// code generator has many cases where it doesn't bother passing in a
4402 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4403 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4404 // If the 'Offset' value isn't a constant, we can't handle this.
4405 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4406 return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4407 if (OffsetOp.getOpcode() == ISD::UNDEF)
4408 return InferPointerInfo(Ptr);
4409 return MachinePointerInfo();
4410 }
4413 SDValue
4414 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4415 EVT VT, SDLoc dl, SDValue Chain,
4416 SDValue Ptr, SDValue Offset,
4417 MachinePointerInfo PtrInfo, EVT MemVT,
4418 bool isVolatile, bool isNonTemporal, bool isInvariant,
4419 unsigned Alignment, const MDNode *TBAAInfo,
4420 const MDNode *Ranges) {
4421 assert(Chain.getValueType() == MVT::Other &&
4422 "Invalid chain type");
4423 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4424 Alignment = getEVTAlignment(VT);
4426 unsigned Flags = MachineMemOperand::MOLoad;
4427 if (isVolatile)
4428 Flags |= MachineMemOperand::MOVolatile;
4429 if (isNonTemporal)
4430 Flags |= MachineMemOperand::MONonTemporal;
4431 if (isInvariant)
4432 Flags |= MachineMemOperand::MOInvariant;
4434 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
4435 // the code below.
4436 if (PtrInfo.V == 0)
4437 PtrInfo = InferPointerInfo(Ptr, Offset);
4439 MachineFunction &MF = getMachineFunction();
4440 MachineMemOperand *MMO =
4441 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
4442 TBAAInfo, Ranges);
4443 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
4444 }
4446 SDValue
4447 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4448 EVT VT, SDLoc dl, SDValue Chain,
4449 SDValue Ptr, SDValue Offset, EVT MemVT,
4450 MachineMemOperand *MMO) {
4451 if (VT == MemVT) {
4452 ExtType = ISD::NON_EXTLOAD;
4453 } else if (ExtType == ISD::NON_EXTLOAD) {
4454 assert(VT == MemVT && "Non-extending load from different memory type!");
4455 } else {
4456 // Extending load.
4457 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
4458 "Should only be an extending load, not truncating!");
4459 assert(VT.isInteger() == MemVT.isInteger() &&
4460 "Cannot convert from FP to Int or Int -> FP!");
4461 assert(VT.isVector() == MemVT.isVector() &&
4462 "Cannot use trunc store to convert to or from a vector!");
4463 assert((!VT.isVector() ||
4464 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
4465 "Cannot use trunc store to change the number of vector elements!");
4466 }
4468 bool Indexed = AM != ISD::UNINDEXED;
4469 assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
4470 "Unindexed load with an offset!");
4472 SDVTList VTs = Indexed ?
4473 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4474 SDValue Ops[] = { Chain, Ptr, Offset };
4475 FoldingSetNodeID ID;
4476 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
4477 ID.AddInteger(MemVT.getRawBits());
4478 ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4479 MMO->isNonTemporal(),
4480 MMO->isInvariant()));
4481 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4482 void *IP = 0;
4483 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4484 cast<LoadSDNode>(E)->refineAlignment(MMO);
4485 return SDValue(E, 0);
4486 }
4487 SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
4488 dl.getDebugLoc(), VTs, AM, ExtType,
4489 MemVT, MMO);
4490 CSEMap.InsertNode(N, IP);
4491 AllNodes.push_back(N);
4492 return SDValue(N, 0);
4493 }
4495 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4496 SDValue Chain, SDValue Ptr,
4497 MachinePointerInfo PtrInfo,
4498 bool isVolatile, bool isNonTemporal,
4499 bool isInvariant, unsigned Alignment,
4500 const MDNode *TBAAInfo,
4501 const MDNode *Ranges) {
4502 SDValue Undef = getUNDEF(Ptr.getValueType());
4503 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4504 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4505 TBAAInfo, Ranges);
4506 }
4508 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4509 SDValue Chain, SDValue Ptr,
4510 MachinePointerInfo PtrInfo, EVT MemVT,
4511 bool isVolatile, bool isNonTemporal,
4512 unsigned Alignment, const MDNode *TBAAInfo) {
4513 SDValue Undef = getUNDEF(Ptr.getValueType());
4514 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4515 PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
4516 TBAAInfo);
4517 }
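// Usage sketch (illustrative; Ptr and PtrV are assumed to be in scope):
// widening an i8 value in memory to an i32 register with a zero-extending
// load. MemVT describes the memory type, VT the result type.
//
//   SDValue Wide =
//     DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain, Ptr,
//                    MachinePointerInfo(PtrV), MVT::i8,
//                    /*isVolatile=*/false, /*isNonTemporal=*/false,
//                    /*Alignment=*/1, /*TBAAInfo=*/0);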
4520 SDValue
4521 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
4522 SDValue Offset, ISD::MemIndexedMode AM) {
4523 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4524 assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4525 "Load is already an indexed load!");
4526 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4527 LD->getChain(), Base, Offset, LD->getPointerInfo(),
4528 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4529 false, LD->getAlignment());
4530 }
4532 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4533 SDValue Ptr, MachinePointerInfo PtrInfo,
4534 bool isVolatile, bool isNonTemporal,
4535 unsigned Alignment, const MDNode *TBAAInfo) {
4536 assert(Chain.getValueType() == MVT::Other &&
4537 "Invalid chain type");
4538 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4539 Alignment = getEVTAlignment(Val.getValueType());
4541 unsigned Flags = MachineMemOperand::MOStore;
4542 if (isVolatile)
4543 Flags |= MachineMemOperand::MOVolatile;
4544 if (isNonTemporal)
4545 Flags |= MachineMemOperand::MONonTemporal;
4547 if (PtrInfo.V == 0)
4548 PtrInfo = InferPointerInfo(Ptr);
4550 MachineFunction &MF = getMachineFunction();
4551 MachineMemOperand *MMO =
4552 MF.getMachineMemOperand(PtrInfo, Flags,
4553 Val.getValueType().getStoreSize(), Alignment,
4554 TBAAInfo);
4556 return getStore(Chain, dl, Val, Ptr, MMO);
4557 }
4559 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4560 SDValue Ptr, MachineMemOperand *MMO) {
4561 assert(Chain.getValueType() == MVT::Other &&
4562 "Invalid chain type");
4563 EVT VT = Val.getValueType();
4564 SDVTList VTs = getVTList(MVT::Other);
4565 SDValue Undef = getUNDEF(Ptr.getValueType());
4566 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4567 FoldingSetNodeID ID;
4568 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4569 ID.AddInteger(VT.getRawBits());
4570 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4571 MMO->isNonTemporal(), MMO->isInvariant()));
4572 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4573 void *IP = 0;
4574 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4575 cast<StoreSDNode>(E)->refineAlignment(MMO);
4576 return SDValue(E, 0);
4577 }
4578 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4579 dl.getDebugLoc(), VTs,
4580 ISD::UNINDEXED, false, VT, MMO);
4581 CSEMap.InsertNode(N, IP);
4582 AllNodes.push_back(N);
4583 return SDValue(N, 0);
4584 }
4586 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4587 SDValue Ptr, MachinePointerInfo PtrInfo,
4588 EVT SVT, bool isVolatile, bool isNonTemporal,
4589 unsigned Alignment,
4590 const MDNode *TBAAInfo) {
4591 assert(Chain.getValueType() == MVT::Other &&
4592 "Invalid chain type");
4593 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4594 Alignment = getEVTAlignment(SVT);
4596 unsigned Flags = MachineMemOperand::MOStore;
4597 if (isVolatile)
4598 Flags |= MachineMemOperand::MOVolatile;
4599 if (isNonTemporal)
4600 Flags |= MachineMemOperand::MONonTemporal;
4602 if (PtrInfo.V == 0)
4603 PtrInfo = InferPointerInfo(Ptr);
4605 MachineFunction &MF = getMachineFunction();
4606 MachineMemOperand *MMO =
4607 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
4608 TBAAInfo);
4610 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
4611 }
4613 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4614 SDValue Ptr, EVT SVT,
4615 MachineMemOperand *MMO) {
4616 EVT VT = Val.getValueType();
4618 assert(Chain.getValueType() == MVT::Other &&
4619 "Invalid chain type");
4620 if (VT == SVT)
4621 return getStore(Chain, dl, Val, Ptr, MMO);
4623 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
4624 "Should only be a truncating store, not extending!");
4625 assert(VT.isInteger() == SVT.isInteger() &&
4626 "Can't do FP-INT conversion!");
4627 assert(VT.isVector() == SVT.isVector() &&
4628 "Cannot use trunc store to convert to or from a vector!");
4629 assert((!VT.isVector() ||
4630 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
4631 "Cannot use trunc store to change the number of vector elements!");
4633 SDVTList VTs = getVTList(MVT::Other);
4634 SDValue Undef = getUNDEF(Ptr.getValueType());
4635 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4636 FoldingSetNodeID ID;
4637 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4638 ID.AddInteger(SVT.getRawBits());
4639 ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
4640 MMO->isNonTemporal(), MMO->isInvariant()));
4641 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4642 void *IP = 0;
4643 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4644 cast<StoreSDNode>(E)->refineAlignment(MMO);
4645 return SDValue(E, 0);
4646 }
4647 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4648 dl.getDebugLoc(), VTs,
4649 ISD::UNINDEXED, true, SVT, MMO);
4650 CSEMap.InsertNode(N, IP);
4651 AllNodes.push_back(N);
4652 return SDValue(N, 0);
4653 }
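// Usage sketch (illustrative; Val, Ptr and PtrV are assumed to be in scope):
// the truncating-store dual of the extending load, narrowing an i32 register
// value to an i8 in memory.
//
//   SDValue St =
//     DAG.getTruncStore(Chain, dl, Val, Ptr, MachinePointerInfo(PtrV),
//                       MVT::i8, /*isVolatile=*/false,
//                       /*isNonTemporal=*/false, /*Alignment=*/1,
//                       /*TBAAInfo=*/0);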
4655 SDValue
4656 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
4657 SDValue Offset, ISD::MemIndexedMode AM) {
4658 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
4659 assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4660 "Store is already a indexed store!");
4661 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4662 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4663 FoldingSetNodeID ID;
4664 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4665 ID.AddInteger(ST->getMemoryVT().getRawBits());
4666 ID.AddInteger(ST->getRawSubclassData());
4667 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4668 void *IP = 0;
4669 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4670 return SDValue(E, 0);
4672 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4673 dl.getDebugLoc(), VTs, AM,
4674 ST->isTruncatingStore(),
4675 ST->getMemoryVT(),
4676 ST->getMemOperand());
4677 CSEMap.InsertNode(N, IP);
4678 AllNodes.push_back(N);
4679 return SDValue(N, 0);
4680 }
4682 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
4683 SDValue Chain, SDValue Ptr,
4684 SDValue SV,
4685 unsigned Align) {
4686 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4687 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
4688 }
4690 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4691 const SDUse *Ops, unsigned NumOps) {
4692 switch (NumOps) {
4693 case 0: return getNode(Opcode, DL, VT);
4694 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4695 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4696 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4697 default: break;
4698 }
4700 // Copy from an SDUse array into an SDValue array for use with
4701 // the regular getNode logic.
4702 SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
4703 return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
4704 }
4706 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4707 const SDValue *Ops, unsigned NumOps) {
4708 switch (NumOps) {
4709 case 0: return getNode(Opcode, DL, VT);
4710 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4711 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4712 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4713 default: break;
4714 }
4716 switch (Opcode) {
4717 default: break;
4718 case ISD::SELECT_CC: {
4719 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4720 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4721 "LHS and RHS of condition must have same type!");
4722 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4723 "True and False arms of SelectCC must have same type!");
4724 assert(Ops[2].getValueType() == VT &&
4725 "select_cc node must be of same type as true and false value!");
4729 assert(NumOps == 5 && "BR_CC takes 5 operands!");
4730 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4731 "LHS/RHS of comparison should match types!");
4738 SDVTList VTs = getVTList(VT);
4740 if (VT != MVT::Glue) {
4741 FoldingSetNodeID ID;
4742 AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
4744 void *IP = 0;
4745 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4746 return SDValue(E, 0);
4748 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4749 VTs, Ops, NumOps);
4750 CSEMap.InsertNode(N, IP);
4751 } else {
4752 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4753 VTs, Ops, NumOps);
4754 }
4756 AllNodes.push_back(N);
4757 #ifndef NDEBUG
4758 VerifySDNode(N);
4759 #endif
4760 return SDValue(N, 0);
4761 }
4763 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4764 ArrayRef<EVT> ResultTys,
4765 const SDValue *Ops, unsigned NumOps) {
4766 return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
4767 Ops, NumOps);
4768 }
4770 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4771 const EVT *VTs, unsigned NumVTs,
4772 const SDValue *Ops, unsigned NumOps) {
4773 if (NumVTs == 1)
4774 return getNode(Opcode, DL, VTs[0], Ops, NumOps);
4775 return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
4776 }
4778 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4779 const SDValue *Ops, unsigned NumOps) {
4780 if (VTList.NumVTs == 1)
4781 return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
4783 #if 0
4784 switch (Opcode) {
4785 // FIXME: figure out how to safely handle things like
4786 // int foo(int x) { return 1 << (x & 255); }
4787 // int bar() { return foo(256); }
4788 case ISD::SRA_PARTS:
4789 case ISD::SRL_PARTS:
4790 case ISD::SHL_PARTS:
4791 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4792 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
4793 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4794 else if (N3.getOpcode() == ISD::AND)
4795 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
4796 // If the and is only masking out bits that cannot affect the shift,
4797 // eliminate the and.
4798 unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
4799 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
4800 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4801 }
4802 break;
4803 }
4804 #endif
4806 // Memoize the node unless it returns a flag.
4807 SDNode *N;
4808 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4809 FoldingSetNodeID ID;
4810 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4811 void *IP = 0;
4812 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4813 return SDValue(E, 0);
4815 if (NumOps == 1) {
4816 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
4817 DL.getDebugLoc(), VTList, Ops[0]);
4818 } else if (NumOps == 2) {
4819 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
4820 DL.getDebugLoc(), VTList, Ops[0],
4821 Ops[1]);
4822 } else if (NumOps == 3) {
4823 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
4824 DL.getDebugLoc(), VTList, Ops[0],
4825 Ops[1], Ops[2]);
4826 } else {
4827 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4828 VTList, Ops, NumOps);
4829 }
4830 CSEMap.InsertNode(N, IP);
4831 } else {
4832 if (NumOps == 1) {
4833 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
4834 DL.getDebugLoc(), VTList, Ops[0]);
4835 } else if (NumOps == 2) {
4836 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
4837 DL.getDebugLoc(), VTList, Ops[0],
4838 Ops[1]);
4839 } else if (NumOps == 3) {
4840 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
4841 DL.getDebugLoc(), VTList, Ops[0],
4842 Ops[1], Ops[2]);
4843 } else {
4844 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4845 VTList, Ops, NumOps);
4846 }
4847 }
4848 AllNodes.push_back(N);
4849 #ifndef NDEBUG
4850 VerifySDNode(N);
4851 #endif
4852 return SDValue(N, 0);
4853 }
4855 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
4856 return getNode(Opcode, DL, VTList, 0, 0);
4857 }
4859 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4860 SDValue N1) {
4861 SDValue Ops[] = { N1 };
4862 return getNode(Opcode, DL, VTList, Ops, 1);
4863 }
4865 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4866 SDValue N1, SDValue N2) {
4867 SDValue Ops[] = { N1, N2 };
4868 return getNode(Opcode, DL, VTList, Ops, 2);
4869 }
4871 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4872 SDValue N1, SDValue N2, SDValue N3) {
4873 SDValue Ops[] = { N1, N2, N3 };
4874 return getNode(Opcode, DL, VTList, Ops, 3);
4875 }
4877 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4878 SDValue N1, SDValue N2, SDValue N3,
4879 SDValue N4) {
4880 SDValue Ops[] = { N1, N2, N3, N4 };
4881 return getNode(Opcode, DL, VTList, Ops, 4);
4882 }
4884 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4885 SDValue N1, SDValue N2, SDValue N3,
4886 SDValue N4, SDValue N5) {
4887 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4888 return getNode(Opcode, DL, VTList, Ops, 5);
4889 }
4891 SDVTList SelectionDAG::getVTList(EVT VT) {
4892 return makeVTList(SDNode::getValueTypeList(VT), 1);
4893 }
4895 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
4896 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4897 E = VTList.rend(); I != E; ++I)
4898 if (I->NumVTs == 2 && I->VTs[0] == VT1 && I->VTs[1] == VT2)
4899 return *I;
4901 EVT *Array = Allocator.Allocate<EVT>(2);
4902 Array[0] = VT1;
4903 Array[1] = VT2;
4904 SDVTList Result = makeVTList(Array, 2);
4905 VTList.push_back(Result);
4906 return Result;
4907 }
4909 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
4910 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4911 E = VTList.rend(); I != E; ++I)
4912 if (I->NumVTs == 3 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4913 I->VTs[2] == VT3)
4914 return *I;
4916 EVT *Array = Allocator.Allocate<EVT>(3);
4917 Array[0] = VT1;
4918 Array[1] = VT2;
4919 Array[2] = VT3;
4920 SDVTList Result = makeVTList(Array, 3);
4921 VTList.push_back(Result);
4922 return Result;
4923 }
4925 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
4926 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4927 E = VTList.rend(); I != E; ++I)
4928 if (I->NumVTs == 4 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4929 I->VTs[2] == VT3 && I->VTs[3] == VT4)
4930 return *I;
4932 EVT *Array = Allocator.Allocate<EVT>(4);
4933 Array[0] = VT1;
4934 Array[1] = VT2;
4935 Array[2] = VT3;
4936 Array[3] = VT4;
4937 SDVTList Result = makeVTList(Array, 4);
4938 VTList.push_back(Result);
4939 return Result;
4940 }
4942 SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
4943 switch (NumVTs) {
4944 case 0: llvm_unreachable("Cannot have nodes without results!");
4945 case 1: return getVTList(VTs[0]);
4946 case 2: return getVTList(VTs[0], VTs[1]);
4947 case 3: return getVTList(VTs[0], VTs[1], VTs[2]);
4948 case 4: return getVTList(VTs[0], VTs[1], VTs[2], VTs[3]);
4949 default: break;
4950 }
4952 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4953 E = VTList.rend(); I != E; ++I) {
4954 if (I->NumVTs != NumVTs || VTs[0] != I->VTs[0] || VTs[1] != I->VTs[1])
4955 continue;
4957 if (std::equal(&VTs[2], &VTs[NumVTs], &I->VTs[2]))
4958 return *I;
4959 }
4961 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
4962 std::copy(VTs, VTs+NumVTs, Array);
4963 SDVTList Result = makeVTList(Array, NumVTs);
4964 VTList.push_back(Result);
4965 return Result;
4966 }
4969 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
4970 /// specified operands. If the resultant node already exists in the DAG,
4971 /// this does not modify the specified node, instead it returns the node that
4972 /// already exists. If the resultant node does not exist in the DAG, the
4973 /// input node is returned. As a degenerate case, if you specify the same
4974 /// input operands as the node already has, the input node is returned.
4975 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
4976 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
4978 // Check to see if there is no change.
4979 if (Op == N->getOperand(0)) return N;
4981 // See if the modified node already exists.
4982 void *InsertPos = 0;
4983 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
4984 return Existing;
4986 // Nope it doesn't. Remove the node from its current place in the maps.
4987 if (InsertPos)
4988 if (!RemoveNodeFromCSEMaps(N))
4989 InsertPos = 0;
4991 // Now we update the operands.
4992 N->OperandList[0].set(Op);
4994 // If this gets put into a CSE map, add it.
4995 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4996 return N;
4997 }
4999 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
5000 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5002 // Check to see if there is no change.
5003 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5004 return N; // No operands changed, just return the input node.
5006 // See if the modified node already exists.
5007 void *InsertPos = 0;
5008 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
5009 return Existing;
5011 // Nope it doesn't. Remove the node from its current place in the maps.
5012 if (InsertPos)
5013 if (!RemoveNodeFromCSEMaps(N))
5014 InsertPos = 0;
5016 // Now we update the operands.
5017 if (N->OperandList[0] != Op1)
5018 N->OperandList[0].set(Op1);
5019 if (N->OperandList[1] != Op2)
5020 N->OperandList[1].set(Op2);
5022 // If this gets put into a CSE map, add it.
5023 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5024 return N;
5025 }
5027 SDNode *SelectionDAG::
5028 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
5029 SDValue Ops[] = { Op1, Op2, Op3 };
5030 return UpdateNodeOperands(N, Ops, 3);
5031 }
5033 SDNode *SelectionDAG::
5034 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5035 SDValue Op3, SDValue Op4) {
5036 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
5037 return UpdateNodeOperands(N, Ops, 4);
5038 }
5040 SDNode *SelectionDAG::
5041 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5042 SDValue Op3, SDValue Op4, SDValue Op5) {
5043 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
5044 return UpdateNodeOperands(N, Ops, 5);
5045 }
5047 SDNode *SelectionDAG::
5048 UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
5049 assert(N->getNumOperands() == NumOps &&
5050 "Update with wrong number of operands");
5052 // Check to see if there is no change.
5053 bool AnyChange = false;
5054 for (unsigned i = 0; i != NumOps; ++i) {
5055 if (Ops[i] != N->getOperand(i)) {
5056 AnyChange = true;
5057 break;
5058 }
5059 }
5061 // No operands changed, just return the input node.
5062 if (!AnyChange) return N;
5064 // See if the modified node already exists.
5065 void *InsertPos = 0;
5066 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
5067 return Existing;
5069 // Nope it doesn't. Remove the node from its current place in the maps.
5070 if (InsertPos)
5071 if (!RemoveNodeFromCSEMaps(N))
5072 InsertPos = 0;
5074 // Now we update the operands.
5075 for (unsigned i = 0; i != NumOps; ++i)
5076 if (N->OperandList[i] != Ops[i])
5077 N->OperandList[i].set(Ops[i]);
5079 // If this gets put into a CSE map, add it.
5080 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5081 return N;
5082 }
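// Usage sketch (illustrative; NewLHS and NewRHS are assumed to be in scope):
// DAG combines use this to rewrite a node's operands in place. If CSE finds
// an identical node, that node is returned instead, and the caller must
// continue with the result rather than with N:
//
//   SDNode *Res = DAG.UpdateNodeOperands(N, NewLHS, NewRHS);
//   if (Res != N) {
//     // N was CSE'd away; use Res from here on.
//   }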
5084 /// DropOperands - Release the operands and set this node to have
5085 /// zero operands.
5086 void SDNode::DropOperands() {
5087 // Unlike the code in MorphNodeTo that does this, we don't need to
5088 // watch for dead nodes here.
5089 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5090 SDUse &Use = *I++;
5091 Use.set(SDValue());
5092 }
5093 }
5095 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
5096 /// machine opcode.
5097 ///
5098 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5099 EVT VT) {
5100 SDVTList VTs = getVTList(VT);
5101 return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
5102 }
5104 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5105 EVT VT, SDValue Op1) {
5106 SDVTList VTs = getVTList(VT);
5107 SDValue Ops[] = { Op1 };
5108 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5109 }
5111 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5112 EVT VT, SDValue Op1,
5113 SDValue Op2) {
5114 SDVTList VTs = getVTList(VT);
5115 SDValue Ops[] = { Op1, Op2 };
5116 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5117 }
5119 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5120 EVT VT, SDValue Op1,
5121 SDValue Op2, SDValue Op3) {
5122 SDVTList VTs = getVTList(VT);
5123 SDValue Ops[] = { Op1, Op2, Op3 };
5124 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5125 }
5127 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5128 EVT VT, const SDValue *Ops,
5129 unsigned NumOps) {
5130 SDVTList VTs = getVTList(VT);
5131 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5132 }
5134 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5135 EVT VT1, EVT VT2, const SDValue *Ops,
5136 unsigned NumOps) {
5137 SDVTList VTs = getVTList(VT1, VT2);
5138 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5139 }
5141 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5142 EVT VT1, EVT VT2) {
5143 SDVTList VTs = getVTList(VT1, VT2);
5144 return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
5145 }
5147 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5148 EVT VT1, EVT VT2, EVT VT3,
5149 const SDValue *Ops, unsigned NumOps) {
5150 SDVTList VTs = getVTList(VT1, VT2, VT3);
5151 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5152 }
5154 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5155 EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5156 const SDValue *Ops, unsigned NumOps) {
5157 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5158 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5159 }
5161 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5162 EVT VT1, EVT VT2,
5163 SDValue Op1) {
5164 SDVTList VTs = getVTList(VT1, VT2);
5165 SDValue Ops[] = { Op1 };
5166 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5167 }
5169 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5170 EVT VT1, EVT VT2,
5171 SDValue Op1, SDValue Op2) {
5172 SDVTList VTs = getVTList(VT1, VT2);
5173 SDValue Ops[] = { Op1, Op2 };
5174 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5175 }
5177 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5178 EVT VT1, EVT VT2,
5179 SDValue Op1, SDValue Op2,
5180 SDValue Op3) {
5181 SDVTList VTs = getVTList(VT1, VT2);
5182 SDValue Ops[] = { Op1, Op2, Op3 };
5183 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5184 }
5186 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5187 EVT VT1, EVT VT2, EVT VT3,
5188 SDValue Op1, SDValue Op2,
5190 SDVTList VTs = getVTList(VT1, VT2, VT3);
5191 SDValue Ops[] = { Op1, Op2, Op3 };
5192 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5195 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5196 SDVTList VTs, const SDValue *Ops,
5198 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
5199 // Reset the NodeID to -1.
/// UpdadeSDLocOnMergedSDNode - If the opt level is -O0, throw away the line
/// number information on the merged node, since it is not possible to
/// preserve the fact that an operation is associated with multiple lines.
/// This makes the debugger work better at -O0, where there is a higher
/// probability of other instructions being associated with that line.
///
/// For IROrder, we keep the smaller of the two.
5211 SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
5212 DebugLoc NLoc = N->getDebugLoc();
5213 if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
5214 (OLoc.getDebugLoc() != NLoc)) {
5215 N->setDebugLoc(DebugLoc());
5217 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
5218 N->setIROrder(Order);
5222 /// MorphNodeTo - This *mutates* the specified node to have the specified
5223 /// return type, opcode, and operands.
5225 /// Note that MorphNodeTo returns the resultant node. If there is already a
5226 /// node of the specified opcode and operands, it returns that node instead of
5227 /// the current one. Note that the SDLoc need not be the same.
5229 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5230 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5231 /// node, and because it doesn't require CSE recalculation for any of
5232 /// the node's users.
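///
/// A minimal sketch of the caller's obligations (NewOpc and the value types
/// here are hypothetical):
/// \code
///   SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
///   SDNode *Res = DAG.MorphNodeTo(N, NewOpc, DAG.getVTList(MVT::i32),
///                                 Ops, 2);
///   if (Res != N) {
///     // An equivalent node already existed; the caller must redirect N's
///     // uses to Res (e.g. with ReplaceAllUsesWith) and delete N.
///   }
/// \endcode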
5234 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5235 SDVTList VTs, const SDValue *Ops,
5237 // If an identical node already exists, use it.
5239 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5240 FoldingSetNodeID ID;
5241 AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
5242 if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5243 return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
5246 if (!RemoveNodeFromCSEMaps(N))
5249 // Start the morphing.
5251 N->ValueList = VTs.VTs;
5252 N->NumValues = VTs.NumVTs;
5254 // Clear the operands list, updating used nodes to remove this from their
5255 // use list. Keep track of any operands that become dead as a result.
5256 SmallPtrSet<SDNode*, 16> DeadNodeSet;
5257 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5259 SDNode *Used = Use.getNode();
5261 if (Used->use_empty())
5262 DeadNodeSet.insert(Used);
5265 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5266 // Initialize the memory references information.
5267 MN->setMemRefs(0, 0);
5268 // If NumOps is larger than the # of operands we can have in a
5269 // MachineSDNode, reallocate the operand list.
5270 if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5271 if (MN->OperandsNeedDelete)
5272 delete[] MN->OperandList;
5273 if (NumOps > array_lengthof(MN->LocalOperands))
5274 // We're creating a final node that will live unmorphed for the
5275 // remainder of the current SelectionDAG iteration, so we can allocate
5276 // the operands directly out of a pool with no recycling metadata.
5277 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5280 MN->InitOperands(MN->LocalOperands, Ops, NumOps);
5281 MN->OperandsNeedDelete = false;
5283 MN->InitOperands(MN->OperandList, Ops, NumOps);
5285 // If NumOps is larger than the # of operands we currently have, reallocate
5286 // the operand list.
5287 if (NumOps > N->NumOperands) {
5288 if (N->OperandsNeedDelete)
5289 delete[] N->OperandList;
5290 N->InitOperands(new SDUse[NumOps], Ops, NumOps);
5291 N->OperandsNeedDelete = true;
5293 N->InitOperands(N->OperandList, Ops, NumOps);
  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
5298 if (!DeadNodeSet.empty()) {
5299 SmallVector<SDNode *, 16> DeadNodes;
5300 for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
5301 E = DeadNodeSet.end(); I != E; ++I)
5302 if ((*I)->use_empty())
5303 DeadNodes.push_back(*I);
5304 RemoveDeadNodes(DeadNodes);
5308 CSEMap.InsertNode(N, IP); // Memoize the new node.
/// getMachineNode - These are used by target selectors to create a new node
5314 /// with specified return type(s), MachineInstr opcode, and operands.
5316 /// Note that getMachineNode returns the resultant node. If there is already a
5317 /// node of the specified opcode and operands, it returns that node instead of
5318 /// the current one.
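///
/// For example (sketch; TargetOpc is a hypothetical machine opcode), a
/// selector can create a machine node that produces an i32 result and a
/// chain:
/// \code
///   MachineSDNode *MN = CurDAG->getMachineNode(TargetOpc, dl,
///                                              MVT::i32, MVT::Other,
///                                              LHS, RHS);
/// \endcode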
5320 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
5321 SDVTList VTs = getVTList(VT);
5322 return getMachineNode(Opcode, dl, VTs, None);
5326 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
5327 SDVTList VTs = getVTList(VT);
5328 SDValue Ops[] = { Op1 };
5329 return getMachineNode(Opcode, dl, VTs, Ops);
5333 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5334 SDValue Op1, SDValue Op2) {
5335 SDVTList VTs = getVTList(VT);
5336 SDValue Ops[] = { Op1, Op2 };
5337 return getMachineNode(Opcode, dl, VTs, Ops);
5341 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5342 SDValue Op1, SDValue Op2, SDValue Op3) {
5343 SDVTList VTs = getVTList(VT);
5344 SDValue Ops[] = { Op1, Op2, Op3 };
5345 return getMachineNode(Opcode, dl, VTs, Ops);
5349 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5350 ArrayRef<SDValue> Ops) {
5351 SDVTList VTs = getVTList(VT);
5352 return getMachineNode(Opcode, dl, VTs, Ops);
5356 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
5357 SDVTList VTs = getVTList(VT1, VT2);
5358 return getMachineNode(Opcode, dl, VTs, None);
5362 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5363 EVT VT1, EVT VT2, SDValue Op1) {
5364 SDVTList VTs = getVTList(VT1, VT2);
5365 SDValue Ops[] = { Op1 };
5366 return getMachineNode(Opcode, dl, VTs, Ops);
5370 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5371 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
5372 SDVTList VTs = getVTList(VT1, VT2);
5373 SDValue Ops[] = { Op1, Op2 };
5374 return getMachineNode(Opcode, dl, VTs, Ops);
5378 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5379 EVT VT1, EVT VT2, SDValue Op1,
5380 SDValue Op2, SDValue Op3) {
5381 SDVTList VTs = getVTList(VT1, VT2);
5382 SDValue Ops[] = { Op1, Op2, Op3 };
5383 return getMachineNode(Opcode, dl, VTs, Ops);
5387 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5389 ArrayRef<SDValue> Ops) {
5390 SDVTList VTs = getVTList(VT1, VT2);
5391 return getMachineNode(Opcode, dl, VTs, Ops);
5395 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5396 EVT VT1, EVT VT2, EVT VT3,
5397 SDValue Op1, SDValue Op2) {
5398 SDVTList VTs = getVTList(VT1, VT2, VT3);
5399 SDValue Ops[] = { Op1, Op2 };
5400 return getMachineNode(Opcode, dl, VTs, Ops);
5404 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5405 EVT VT1, EVT VT2, EVT VT3,
5406 SDValue Op1, SDValue Op2, SDValue Op3) {
5407 SDVTList VTs = getVTList(VT1, VT2, VT3);
5408 SDValue Ops[] = { Op1, Op2, Op3 };
5409 return getMachineNode(Opcode, dl, VTs, Ops);
5413 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5414 EVT VT1, EVT VT2, EVT VT3,
5415 ArrayRef<SDValue> Ops) {
5416 SDVTList VTs = getVTList(VT1, VT2, VT3);
5417 return getMachineNode(Opcode, dl, VTs, Ops);
5421 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
5422 EVT VT2, EVT VT3, EVT VT4,
5423 ArrayRef<SDValue> Ops) {
5424 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5425 return getMachineNode(Opcode, dl, VTs, Ops);
5429 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5430 ArrayRef<EVT> ResultTys,
5431 ArrayRef<SDValue> Ops) {
5432 SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
5433 return getMachineNode(Opcode, dl, VTs, Ops);
5437 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
5438 ArrayRef<SDValue> OpsArray) {
5439 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
5442 const SDValue *Ops = OpsArray.data();
5443 unsigned NumOps = OpsArray.size();
5446 FoldingSetNodeID ID;
5447 AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
5449 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5450 return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
5454 // Allocate a new MachineSDNode.
5455 N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
5456 DL.getDebugLoc(), VTs);
5458 // Initialize the operands list.
5459 if (NumOps > array_lengthof(N->LocalOperands))
5460 // We're creating a final node that will live unmorphed for the
5461 // remainder of the current SelectionDAG iteration, so we can allocate
5462 // the operands directly out of a pool with no recycling metadata.
5463 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5466 N->InitOperands(N->LocalOperands, Ops, NumOps);
5467 N->OperandsNeedDelete = false;
5470 CSEMap.InsertNode(N, IP);
5472 AllNodes.push_back(N);
5474 VerifyMachineNode(N);
5479 /// getTargetExtractSubreg - A convenience function for creating
5480 /// TargetOpcode::EXTRACT_SUBREG nodes.
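///
/// For example (sketch; SubRegIdx would be a target-defined subregister
/// index, e.g. the low 32 bits of a 64-bit register):
/// \code
///   SDValue Lo = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, MVT::i32,
///                                               Val64);
/// \endcode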
5482 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
5484 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5485 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5486 VT, Operand, SRIdxVal);
5487 return SDValue(Subreg, 0);
5490 /// getTargetInsertSubreg - A convenience function for creating
5491 /// TargetOpcode::INSERT_SUBREG nodes.
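///
/// For example (sketch, mirroring the extract case above):
/// \code
///   SDValue New64 = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, MVT::i64,
///                                                 Val64, Lo32);
/// \endcode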
5493 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
5494 SDValue Operand, SDValue Subreg) {
5495 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5496 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5497 VT, Operand, Subreg, SRIdxVal);
5498 return SDValue(Result, 0);
5501 /// getNodeIfExists - Get the specified node if it's already available, or
5502 /// else return NULL.
5503 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5504 const SDValue *Ops, unsigned NumOps) {
5505 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5506 FoldingSetNodeID ID;
5507 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
5509 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
/// getDbgValue - Creates an SDDbgValue node.
5518 SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
5519 DebugLoc DL, unsigned O) {
5520 return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
5524 SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
5525 DebugLoc DL, unsigned O) {
5526 return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
5530 SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
5531 DebugLoc DL, unsigned O) {
5532 return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
5537 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5538 /// pointed to by a use iterator is deleted, increment the use iterator
5539 /// so that it doesn't dangle.
5541 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5542 SDNode::use_iterator &UI;
5543 SDNode::use_iterator &UE;
5545 virtual void NodeDeleted(SDNode *N, SDNode *E) {
5546 // Increment the iterator as needed.
5547 while (UI != UE && N == *UI)
5552 RAUWUpdateListener(SelectionDAG &d,
5553 SDNode::use_iterator &ui,
5554 SDNode::use_iterator &ue)
5555 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5560 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5561 /// This can cause recursive merging of nodes in the DAG.
5563 /// This version assumes From has a single result value.
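///
/// A typical combine-style use (sketch; the replacement value here is
/// hypothetical):
/// \code
///   SDValue Folded = DAG.getNode(ISD::ADD, DL, VT, A, B);
///   DAG.ReplaceAllUsesWith(SDValue(OldN, 0), Folded);
/// \endcode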
5565 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5566 SDNode *From = FromN.getNode();
5567 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5568 "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace a value with itself");
5571 // Iterate over all the existing uses of From. New uses will be added
5572 // to the beginning of the use list, which we avoid visiting.
5573 // This specifically avoids visiting uses of From that arise while the
5574 // replacement is happening, because any such uses would be the result
5575 // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
5577 // too. See PR3018 for more info.
5578 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5579 RAUWUpdateListener Listener(*this, UI, UE);
5583 // This node is about to morph, remove its old self from the CSE maps.
5584 RemoveNodeFromCSEMaps(User);
5586 // A user can appear in a use list multiple times, and when this
5587 // happens the uses are usually next to each other in the list.
5588 // To help reduce the number of CSE recomputations, process all
5589 // the uses of this user that we can find this way.
5591 SDUse &Use = UI.getUse();
5594 } while (UI != UE && *UI == User);
5596 // Now that we have modified User, add it back to the CSE maps. If it
5597 // already exists there, recursively merge the results together.
5598 AddModifiedNodeToCSEMaps(User);
5601 // If we just RAUW'd the root, take note.
5602 if (FromN == getRoot())
5606 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5607 /// This can cause recursive merging of nodes in the DAG.
5609 /// This version assumes that for each value of From, there is a
5610 /// corresponding value in To in the same position with the same type.
5612 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
5614 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
5615 assert((!From->hasAnyUseOfValue(i) ||
5616 From->getValueType(i) == To->getValueType(i)) &&
5617 "Cannot use this version of ReplaceAllUsesWith!");
5620 // Handle the trivial case.
5624 // Iterate over just the existing users of From. See the comments in
5625 // the ReplaceAllUsesWith above.
5626 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5627 RAUWUpdateListener Listener(*this, UI, UE);
5631 // This node is about to morph, remove its old self from the CSE maps.
5632 RemoveNodeFromCSEMaps(User);
5634 // A user can appear in a use list multiple times, and when this
5635 // happens the uses are usually next to each other in the list.
5636 // To help reduce the number of CSE recomputations, process all
5637 // the uses of this user that we can find this way.
5639 SDUse &Use = UI.getUse();
5642 } while (UI != UE && *UI == User);
5644 // Now that we have modified User, add it back to the CSE maps. If it
5645 // already exists there, recursively merge the results together.
5646 AddModifiedNodeToCSEMaps(User);
5649 // If we just RAUW'd the root, take note.
5650 if (From == getRoot().getNode())
5651 setRoot(SDValue(To, getRoot().getResNo()));
5654 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5655 /// This can cause recursive merging of nodes in the DAG.
5657 /// This version can replace From with any result values. To must match the
5658 /// number and types of values returned by From.
5659 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
5660 if (From->getNumValues() == 1) // Handle the simple case efficiently.
5661 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
5663 // Iterate over just the existing users of From. See the comments in
5664 // the ReplaceAllUsesWith above.
5665 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5666 RAUWUpdateListener Listener(*this, UI, UE);
5670 // This node is about to morph, remove its old self from the CSE maps.
5671 RemoveNodeFromCSEMaps(User);
5673 // A user can appear in a use list multiple times, and when this
5674 // happens the uses are usually next to each other in the list.
5675 // To help reduce the number of CSE recomputations, process all
5676 // the uses of this user that we can find this way.
5678 SDUse &Use = UI.getUse();
5679 const SDValue &ToOp = To[Use.getResNo()];
5682 } while (UI != UE && *UI == User);
5684 // Now that we have modified User, add it back to the CSE maps. If it
5685 // already exists there, recursively merge the results together.
5686 AddModifiedNodeToCSEMaps(User);
5689 // If we just RAUW'd the root, take note.
5690 if (From == getRoot().getNode())
5691 setRoot(SDValue(To[getRoot().getResNo()]));
/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
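///
/// For example (sketch), a transformation can redirect just the value
/// result of a load while leaving the uses of its chain result intact:
/// \code
///   DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), NewValue);
/// \endcode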
5697 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
5698 // Handle the really simple, really trivial case efficiently.
5699 if (From == To) return;
  // Handle the simple, trivial case efficiently.
5702 if (From.getNode()->getNumValues() == 1) {
5703 ReplaceAllUsesWith(From, To);
5707 // Iterate over just the existing users of From. See the comments in
5708 // the ReplaceAllUsesWith above.
5709 SDNode::use_iterator UI = From.getNode()->use_begin(),
5710 UE = From.getNode()->use_end();
5711 RAUWUpdateListener Listener(*this, UI, UE);
5714 bool UserRemovedFromCSEMaps = false;
5716 // A user can appear in a use list multiple times, and when this
5717 // happens the uses are usually next to each other in the list.
5718 // To help reduce the number of CSE recomputations, process all
5719 // the uses of this user that we can find this way.
5721 SDUse &Use = UI.getUse();
5723 // Skip uses of different values from the same node.
5724 if (Use.getResNo() != From.getResNo()) {
5729 // If this node hasn't been modified yet, it's still in the CSE maps,
5730 // so remove its old self from the CSE maps.
5731 if (!UserRemovedFromCSEMaps) {
5732 RemoveNodeFromCSEMaps(User);
5733 UserRemovedFromCSEMaps = true;
5738 } while (UI != UE && *UI == User);
5740 // We are iterating over all uses of the From node, so if a use
5741 // doesn't use the specific value, no changes are made.
5742 if (!UserRemovedFromCSEMaps)
5745 // Now that we have modified User, add it back to the CSE maps. If it
5746 // already exists there, recursively merge the results together.
5747 AddModifiedNodeToCSEMaps(User);
5750 // If we just RAUW'd the root, take note.
5751 if (From == getRoot())
5756 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
5757 /// to record information about a use.
5764 /// operator< - Sort Memos by User.
5765 bool operator<(const UseMemo &L, const UseMemo &R) {
5766 return (intptr_t)L.User < (intptr_t)R.User;
/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
/// may appear in both the From and To lists.
5774 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
5777 // Handle the simple, trivial case efficiently.
5779 return ReplaceAllUsesOfValueWith(*From, *To);
5781 // Read up all the uses and make records of them. This helps
  // handle new uses that are introduced during the
5783 // replacement process.
5784 SmallVector<UseMemo, 4> Uses;
5785 for (unsigned i = 0; i != Num; ++i) {
5786 unsigned FromResNo = From[i].getResNo();
5787 SDNode *FromNode = From[i].getNode();
5788 for (SDNode::use_iterator UI = FromNode->use_begin(),
5789 E = FromNode->use_end(); UI != E; ++UI) {
5790 SDUse &Use = UI.getUse();
5791 if (Use.getResNo() == FromResNo) {
5792 UseMemo Memo = { *UI, i, &Use };
5793 Uses.push_back(Memo);
5798 // Sort the uses, so that all the uses from a given User are together.
5799 std::sort(Uses.begin(), Uses.end());
5801 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
5802 UseIndex != UseIndexEnd; ) {
5803 // We know that this user uses some value of From. If it is the right
5804 // value, update it.
5805 SDNode *User = Uses[UseIndex].User;
5807 // This node is about to morph, remove its old self from the CSE maps.
5808 RemoveNodeFromCSEMaps(User);
5810 // The Uses array is sorted, so all the uses for a given User
5811 // are next to each other in the list.
5812 // To help reduce the number of CSE recomputations, process all
5813 // the uses of this user that we can find this way.
5815 unsigned i = Uses[UseIndex].Index;
5816 SDUse &Use = *Uses[UseIndex].Use;
5820 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
5822 // Now that we have modified User, add it back to the CSE maps. If it
5823 // already exists there, recursively merge the results together.
5824 AddModifiedNodeToCSEMaps(User);
/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order, and return the maximum id.
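///
/// After this runs, iterating the node list visits every operand before any
/// of its users (sketch; visit() is a hypothetical callback):
/// \code
///   DAG.AssignTopologicalOrder();
///   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
///        E = DAG.allnodes_end(); I != E; ++I)
///     visit(&*I); // all operands of *I have already been visited
/// \endcode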
5831 unsigned SelectionDAG::AssignTopologicalOrder() {
5833 unsigned DAGSize = 0;
5835 // SortedPos tracks the progress of the algorithm. Nodes before it are
5836 // sorted, nodes after it are unsorted. When the algorithm completes
5837 // it is at the end of the list.
5838 allnodes_iterator SortedPos = allnodes_begin();
5840 // Visit all the nodes. Move nodes with no operands to the front of
5841 // the list immediately. Annotate nodes that do have operands with their
5842 // operand count. Before we do this, the Node Id fields of the nodes
5843 // may contain arbitrary values. After, the Node Id fields for nodes
5844 // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
5846 // count of outstanding operands.
5847 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
5850 unsigned Degree = N->getNumOperands();
      // A node with no operands: add it to the result array immediately.
5853 N->setNodeId(DAGSize++);
5854 allnodes_iterator Q = N;
5856 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
5857 assert(SortedPos != AllNodes.end() && "Overran node list");
5860 // Temporarily use the Node Id as scratch space for the degree count.
5861 N->setNodeId(Degree);
5865 // Visit all the nodes. As we iterate, move nodes into sorted order,
5866 // such that by the time the end is reached all nodes will be sorted.
5867 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
    // N is in sorted position, so all its users have one less operand
5871 // that needs to be sorted.
5872 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5875 unsigned Degree = P->getNodeId();
5876 assert(Degree != 0 && "Invalid node degree");
        // All of P's operands are sorted, so P may be sorted now.
5880 P->setNodeId(DAGSize++);
5882 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
5883 assert(SortedPos != AllNodes.end() && "Overran node list");
5886 // Update P's outstanding operand count.
5887 P->setNodeId(Degree);
5890 if (I == SortedPos) {
5893 dbgs() << "Overran sorted position:\n";
5896 llvm_unreachable(0);
5900 assert(SortedPos == AllNodes.end() &&
5901 "Topological sort incomplete!");
5902 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
5903 "First node in topological sort is not the entry token!");
5904 assert(AllNodes.front().getNodeId() == 0 &&
5905 "First node in topological sort has non-zero id!");
5906 assert(AllNodes.front().getNumOperands() == 0 &&
5907 "First node in topological sort has operands!");
5908 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
5910 assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
5912 assert(DAGSize == allnodes_size() && "Node count mismatch!");
/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null, that means the
5917 /// value is produced by SD.
5918 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
5919 DbgInfo->add(DB, SD, isParameter);
5921 SD->setHasDebugValue(true);
/// TransferDbgValues - Transfer SDDbgValues from one SDValue to another.
5925 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
5926 if (From == To || !From.getNode()->getHasDebugValue())
5928 SDNode *FromNode = From.getNode();
5929 SDNode *ToNode = To.getNode();
5930 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
5931 SmallVector<SDDbgValue *, 2> ClonedDVs;
5932 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
5934 SDDbgValue *Dbg = *I;
5935 if (Dbg->getKind() == SDDbgValue::SDNODE) {
5936 SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
5937 Dbg->getOffset(), Dbg->getDebugLoc(),
5939 ClonedDVs.push_back(Clone);
5942 for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
5943 E = ClonedDVs.end(); I != E; ++I)
5944 AddDbgValue(*I, ToNode, false);
5947 //===----------------------------------------------------------------------===//
5949 //===----------------------------------------------------------------------===//
5951 HandleSDNode::~HandleSDNode() {
5955 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
5956 DebugLoc DL, const GlobalValue *GA,
5957 EVT VT, int64_t o, unsigned char TF)
5958 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
5962 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
5963 EVT memvt, MachineMemOperand *mmo)
5964 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
5965 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
5966 MMO->isNonTemporal(), MMO->isInvariant());
5967 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
5968 assert(isNonTemporal() == MMO->isNonTemporal() &&
5969 "Non-temporal encoding error!");
5970 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
5973 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
5974 const SDValue *Ops, unsigned NumOps, EVT memvt,
5975 MachineMemOperand *mmo)
5976 : SDNode(Opc, Order, dl, VTs, Ops, NumOps),
5977 MemoryVT(memvt), MMO(mmo) {
5978 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
5979 MMO->isNonTemporal(), MMO->isInvariant());
5980 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
5981 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
5984 /// Profile - Gather unique data for the node.
5986 void SDNode::Profile(FoldingSetNodeID &ID) const {
5987 AddNodeIDNode(ID, this);
5992 std::vector<EVT> VTs;
5995 VTs.reserve(MVT::LAST_VALUETYPE);
5996 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
5997 VTs.push_back(MVT((MVT::SimpleValueType)i));
6002 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
6003 static ManagedStatic<EVTArray> SimpleVTArray;
6004 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
6006 /// getValueTypeList - Return a pointer to the specified value type.
6008 const EVT *SDNode::getValueTypeList(EVT VT) {
6009 if (VT.isExtended()) {
6010 sys::SmartScopedLock<true> Lock(*VTMutex);
6011 return &(*EVTs->insert(VT).first);
6013 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
6014 "Value type out of range!");
6015 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
6019 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
6022 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
6023 assert(Value < getNumValues() && "Bad value!");
6025 // TODO: Only iterate over uses of a given value of the node
6026 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
6027 if (UI.getUse().getResNo() == Value) {
6034 // Found exactly the right number of uses?
/// hasAnyUseOfValue - Return true if there are any uses of the indicated
6040 /// value. This method ignores uses of other values defined by this operation.
6041 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
6042 assert(Value < getNumValues() && "Bad value!");
6044 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
6045 if (UI.getUse().getResNo() == Value)
/// isOnlyUserOf - Return true if this node is the only user of N.
6054 bool SDNode::isOnlyUserOf(SDNode *N) const {
6056 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
/// isOperandOf - Return true if this value is an operand of N.
6069 bool SDValue::isOperandOf(SDNode *N) const {
6070 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6071 if (*this == N->getOperand(i))
6076 bool SDNode::isOperandOf(SDNode *N) const {
6077 for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6078 if (this == N->OperandList[i].getNode())
6083 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6084 /// be a chain) reaches the specified operand without crossing any
6085 /// side-effecting instructions on any chain path. In practice, this looks
6086 /// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in; it does not do an exhaustive search.
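///
/// For example (sketch), before folding a load into another operation, one
/// can check that a candidate chain reaches the load's chain result without
/// intervening side effects:
/// \code
///   if (Chain.reachesChainWithoutSideEffects(SDValue(Load, 1)))
///     ; // safe along the inspected chain paths
/// \endcode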
6088 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6089 unsigned Depth) const {
6090 if (*this == Dest) return true;
6092 // Don't search too deeply, we just want to be able to see through
6093 // TokenFactor's etc.
6094 if (Depth == 0) return false;
6096 // If this is a token factor, all inputs to the TF happen in parallel. If any
  // of the operands of the TF does not reach Dest, then we cannot do the xform.
6098 if (getOpcode() == ISD::TokenFactor) {
6099 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6100 if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6105 // Loads don't have side effects, look through them.
6106 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6107 if (!Ld->isVolatile())
6108 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
6113 /// hasPredecessor - Return true if N is a predecessor of this node.
6114 /// N is either an operand of this node, or can be reached by recursively
6115 /// traversing up the operands.
6116 /// NOTE: This is an expensive method. Use it carefully.
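///
/// Typical use (sketch): before making Candidate an operand of N, verify
/// that N is not reachable from Candidate, which would create a cycle:
/// \code
///   if (!Candidate->hasPredecessor(N))
///     ; // adding the edge N -> Candidate cannot form a cycle
/// \endcode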
6117 bool SDNode::hasPredecessor(const SDNode *N) const {
6118 SmallPtrSet<const SDNode *, 32> Visited;
6119 SmallVector<const SDNode *, 16> Worklist;
6120 return hasPredecessorHelper(N, Visited, Worklist);
6124 SDNode::hasPredecessorHelper(const SDNode *N,
6125 SmallPtrSet<const SDNode *, 32> &Visited,
6126 SmallVectorImpl<const SDNode *> &Worklist) const {
6127 if (Visited.empty()) {
6128 Worklist.push_back(this);
6130 // Take a look in the visited set. If we've already encountered this node
6131 // we needn't search further.
6132 if (Visited.count(N))
6136 // Haven't visited N yet. Continue the search.
6137 while (!Worklist.empty()) {
6138 const SDNode *M = Worklist.pop_back_val();
6139 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6140 SDNode *Op = M->getOperand(i).getNode();
6141 if (Visited.insert(Op))
6142 Worklist.push_back(Op);
6151 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6152 assert(Num < NumOperands && "Invalid child # of SDNode!");
6153 return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
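/// UnrollVectorOp - Unroll a vector operation into ResNE scalar operations,
/// extracting each element of each vector operand, applying the opcode per
/// element, and rebuilding the result with a BUILD_VECTOR. If ResNE is 0,
/// the result type's full element count is used; lanes beyond the source
/// element count are filled with undef.
///
/// Usage sketch:
/// \code
///   SDValue Expanded = DAG.UnrollVectorOp(Node, 0); // fully unroll
/// \endcode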
6156 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6157 assert(N->getNumValues() == 1 &&
6158 "Can't unroll a vector with multiple results!");
6160 EVT VT = N->getValueType(0);
6161 unsigned NE = VT.getVectorNumElements();
6162 EVT EltVT = VT.getVectorElementType();
6165 SmallVector<SDValue, 8> Scalars;
6166 SmallVector<SDValue, 4> Operands(N->getNumOperands());
6168 // If ResNE is 0, fully unroll the vector op.
6171 else if (NE > ResNE)
  for (i = 0; i != NE; ++i) {
6176 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6177 SDValue Operand = N->getOperand(j);
6178 EVT OperandVT = Operand.getValueType();
6179 if (OperandVT.isVector()) {
6180 // A vector operand; extract a single element.
6181 const TargetLowering *TLI = TM.getTargetLowering();
6182 EVT OperandEltVT = OperandVT.getVectorElementType();
6183 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6186 getConstant(i, TLI->getVectorIdxTy()));
6188 // A scalar operand; just use it as is.
6189 Operands[j] = Operand;
6193 switch (N->getOpcode()) {
6195 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6196 &Operands[0], Operands.size()));
6199 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT,
6200 &Operands[0], Operands.size()));
6207 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6208 getShiftAmountOperand(Operands[0].getValueType(),
6211 case ISD::SIGN_EXTEND_INREG:
6212 case ISD::FP_ROUND_INREG: {
6213 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6214 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6216 getValueType(ExtVT)));
6221 for (; i < ResNE; ++i)
6222 Scalars.push_back(getUNDEF(EltVT));
6224 return getNode(ISD::BUILD_VECTOR, dl,
6225 EVT::getVectorVT(*getContext(), EltVT, ResNE),
6226 &Scalars[0], Scalars.size());
6230 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
6231 /// location that is 'Dist' units away from the location that the 'Base' load
6232 /// is loading from.
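///
/// For example (sketch): check whether LD reads the four bytes immediately
/// after Base (Dist is measured in units of Bytes):
/// \code
///   if (DAG.isConsecutiveLoad(LD, Base, /*Bytes=*/4, /*Dist=*/1))
///     ; // LD loads from Base's address plus 4
/// \endcode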
6233 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
6234 unsigned Bytes, int Dist) const {
6235 if (LD->getChain() != Base->getChain())
6237 EVT VT = LD->getValueType(0);
6238 if (VT.getSizeInBits() / 8 != Bytes)
6241 SDValue Loc = LD->getOperand(1);
6242 SDValue BaseLoc = Base->getOperand(1);
6243 if (Loc.getOpcode() == ISD::FrameIndex) {
6244 if (BaseLoc.getOpcode() != ISD::FrameIndex)
6246 const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
6247 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
6248 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6249 int FS = MFI->getObjectSize(FI);
6250 int BFS = MFI->getObjectSize(BFI);
6251 if (FS != BFS || FS != (int)Bytes) return false;
6252 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6256 if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
6257 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
6260 const GlobalValue *GV1 = NULL;
6261 const GlobalValue *GV2 = NULL;
6262 int64_t Offset1 = 0;
6263 int64_t Offset2 = 0;
6264 const TargetLowering *TLI = TM.getTargetLowering();
6265 bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6266 bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6267 if (isGA1 && isGA2 && GV1 == GV2)
6268 return Offset1 == (Offset2 + Dist*Bytes);
6273 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
6274 /// it cannot be inferred.
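///
/// Usage sketch:
/// \code
///   if (unsigned Align = DAG.InferPtrAlignment(Ptr))
///     ; // Ptr is known to be at least Align-byte aligned
/// \endcode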
6275 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
6276 // If this is a GlobalAddress + cst, return the alignment.
6277 const GlobalValue *GV;
6278 int64_t GVOffset = 0;
6279 const TargetLowering *TLI = TM.getTargetLowering();
6280 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
6281 unsigned PtrWidth = TLI->getPointerTy().getSizeInBits();
6282 APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
6283 llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
6284 TLI->getDataLayout());
6285 unsigned AlignBits = KnownZero.countTrailingOnes();
6286 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
6288 return MinAlign(Align, GVOffset);
6291 // If this is a direct reference to a stack slot, use information about the
6292 // stack slot's alignment.
6293 int FrameIdx = 1 << 31;
6294 int64_t FrameOffset = 0;
6295 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6296 FrameIdx = FI->getIndex();
6297 } else if (isBaseWithConstantOffset(Ptr) &&
6298 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6300 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6301 FrameOffset = Ptr.getConstantOperandVal(1);
6304 if (FrameIdx != (1 << 31)) {
6305 const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6306 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6314 // getAddressSpace - Return the address space this GlobalAddress belongs to.
6315 unsigned GlobalAddressSDNode::getAddressSpace() const {
6316 return getGlobal()->getType()->getAddressSpace();
6320 Type *ConstantPoolSDNode::getType() const {
6321 if (isMachineConstantPoolEntry())
6322 return Val.MachineCPVal->getType();
6323 return Val.ConstVal->getType();
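/// isConstantSplat - Determine whether this BUILD_VECTOR is a constant
/// splat, reporting the smallest element size that repeats across the whole
/// vector. For example (sketch), a v4i32 vector whose elements are all
/// 0x01010101 is a splat with SplatBitSize == 8 and SplatValue == 0x01,
/// provided MinSplatBits <= 8.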
6326 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6328 unsigned &SplatBitSize,
6330 unsigned MinSplatBits,
6332 EVT VT = getValueType(0);
6333 assert(VT.isVector() && "Expected a vector type");
6334 unsigned sz = VT.getSizeInBits();
6335 if (MinSplatBits > sz)
6338 SplatValue = APInt(sz, 0);
6339 SplatUndef = APInt(sz, 0);
6341 // Get the bits. Bits with undefined values (when the corresponding element
6342 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
6345 unsigned int nOps = getNumOperands();
6346 assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6347 unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6349 for (unsigned j = 0; j < nOps; ++j) {
6350 unsigned i = isBigEndian ? nOps-1-j : j;
6351 SDValue OpVal = getOperand(i);
6352 unsigned BitPos = j * EltBitSize;
6354 if (OpVal.getOpcode() == ISD::UNDEF)
6355 SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6356 else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6357 SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6358 zextOrTrunc(sz) << BitPos;
6359 else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6360 SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
6365 // The build_vector is all constants or undefs. Find the smallest element
6366 // size that splats the vector.
6368 HasAnyUndefs = (SplatUndef != 0);
6371 unsigned HalfSize = sz / 2;
6372 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6373 APInt LowValue = SplatValue.trunc(HalfSize);
6374 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6375 APInt LowUndef = SplatUndef.trunc(HalfSize);
6377 // If the two halves do not match (ignoring undef bits), stop here.
6378 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6379 MinSplatBits > HalfSize)
6382 SplatValue = HighValue | LowValue;
6383 SplatUndef = HighUndef & LowUndef;
6392 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6393 // Find the first non-undef value in the shuffle mask.
6395 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6398 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
6402 for (int Idx = Mask[i]; i != e; ++i)
6403 if (Mask[i] >= 0 && Mask[i] != Idx)
6409 static void checkForCyclesHelper(const SDNode *N,
6410 SmallPtrSet<const SDNode*, 32> &Visited,
6411 SmallPtrSet<const SDNode*, 32> &Checked) {
6412 // If this node has already been checked, don't check it again.
6413 if (Checked.count(N))
  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
6418 if (!Visited.insert(N)) {
6419 dbgs() << "Offending node:\n";
6421 errs() << "Detected cycle in SelectionDAG\n";
6425 for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6426 checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
6433 void llvm::checkForCycles(const llvm::SDNode *N) {
  assert(N && "Checking nonexistent SDNode");
6436 SmallPtrSet<const SDNode*, 32> visited;
6437 SmallPtrSet<const SDNode*, 32> checked;
6438 checkForCyclesHelper(N, visited, checked);
6442 void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
6443 checkForCycles(DAG->getRoot().getNode());