//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

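// For example, after type legalization a <4 x i8> all-ones vector may be a
// BUILD_VECTOR of promoted i16 constants 0x00FF.  EltSize is still 8 (the i8
// element width) and countTrailingOnes(0x00FF) == 8 >= 8, so the node is
// still recognized as all-ones even though the constant type is wider than
// the element type.
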
/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
    if (!CN->isNullValue())
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
    if (!CFPN->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantSDNode or undef.
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

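// For example, with the <U,L,G,E> bit encoding SETUGT is 1010; swapping the
// L and G bits yields 1100, i.e. SETULT, so (setugt X, Y) maps to
// (setult Y, X).
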
/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

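// For example, the signed integer SETLT (10100) inverts to SETGE (10011),
// i.e. !(X < Y) == (X >= Y), while the FP SETOLT flips all four bits to
// SETUGE, moving the unordered case to the true side as negation requires.
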
/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if the result is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE:  return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE:  return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

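// For example, SETGT | SETLT ORs the G and L bits into SETNE, and
// SETUGT | SETULT yields SETUNE, which the canonicalization above rewrites
// to the legal integer form SETNE.
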
/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns SETCC_INVALID if it is not possible to represent the
/// resultant comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

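// For example, SETUGE & SETULE ANDs down to SETUEQ (only the E bit survives
// in both), which the canonicalization above rewrites to the legal integer
// form SETEQ.
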
//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leafs with special info.
  AddNodeIDCustom(ID, N);
}

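// This ID is what makes CSE work: for instance, two separate requests for
// getConstant(42, MVT::i32) hash to the same FoldingSetNodeID (opcode,
// interned VT list, operands, plus the AddNodeIDCustom payload), so the
// second CSEMap lookup returns the node created by the first instead of
// allocating a duplicate.
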
/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}

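// The packed layout is: bits 1-0 = ConvType, bits 4-2 = addressing mode,
// bit 5 = volatile, bit 6 = non-temporal, bit 7 = invariant.  For example an
// unindexed (AM == ISD::UNINDEXED == 0), volatile, non-extending load
// encodes as 0x20.
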
//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, 0);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops, unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

#ifndef NDEBUG
/// VerifyNodeCommon - Sanity check the given node. Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
             (EltVT.isInteger() && I->getValueType().isInteger() &&
              EltVT.bitsLE(I->getValueType()))) &&
            "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}

/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that are
  // not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}

/// VerifyMachineNode - Sanity check the given MachineNode. Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}
#endif // NDEBUG

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TSI(*tm.getSelectionDAGInfo()), TLI(0), OptLevel(OL),
    EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
    Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
    UpdateListeners(0) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf, const TargetLowering *tli) {
  MF = &mf;
  TLI = tli;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(0));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(0));

  EntryNode.UseList = 0;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}

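// For example, getZeroExtendInReg(X, DL, MVT::i8) on an i32 value X emits
// (and X, 0xFF): APInt::getLowBitsSet(32, 8) builds the 0xFF mask that
// clears the high 24 bits.
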
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
         (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT, bool isO)
{
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT,
                                  bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  const TargetLowering *TLI = TM.getTargetLowering();

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
           TargetLowering::TypeExpandInteger) {
    APInt NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                     .trunc(ViaEltSizeInBits),
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (TLI->isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
                             getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
                                     &Ops[0], Ops.size()));
    return Result;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

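// Sketch of the expansion path above, assuming a 32-bit target where i64 is
// expanded (e.g. MIPS32): a splat such as
// getConstant(0x0000000100000002ULL, MVT::v2i64) becomes a v4i32
// BUILD_VECTOR of the 32-bit halves <2, 1, 2, 1> (little-endian part order)
// followed by a BITCAST back to v2i64.
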
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&V);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME SDLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
           EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");
  const TargetLowering *TLI = TM.getTargetLowering();

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(FI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
    TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
    TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), 0, 0);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MBB);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (CondCodeNodes[Cond] == 0) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If Identity shuffle return that node.
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
  }
  if (Identity && NElts)
    return N1;

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
                                            dl.getDebugLoc(), N1, N2,
                                            MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

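// For example, shuffle(V, V, <0,3>) on a v2 type is canonicalized by the
// "v, v -> v, undef" step to shuffle(V, undef, <0,1>), which the identity
// check above then folds away, returning V itself.
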
SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
                                                           dl.getDebugLoc(),
                                                           Ops, 5, Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
  ID.AddInteger(RegNo);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
  ID.AddPointer(RegMask);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
                                                dl.getDebugLoc(), Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(V);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MD);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), &Ops[0], 1);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
                                                      dl.getDebugLoc(),
                                                      VT, Ptr, SrcAS, DestAS);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, SDLoc(Op), ShTy, Op);
}

/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned StackAlign =
    std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  const DataLayout *TD = TLI->getDataLayout();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, SDLoc dl) {
  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: {
    const TargetLowering *TLI = TM.getTargetLowering();
    TargetLowering::BooleanContent Cnt = TLI->getBooleanContents(VT.isVector());
    return getConstant(
        Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
  }

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
      }
    }
  }
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
      MVT CompVT = N1.getValueType().getSimpleVT();
      if (!TM.getTargetLowering()->isCondCodeLegal(SwappedCond, CompVT))
        return SDValue();

      return getSetCC(dl, VT, N2, N1, SwappedCond);
    }
  }

  // Could not fold it.
  return SDValue();
}

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  // This predicate is not safe for vector operations.
  if (Op.getValueType().isVector())
    return false;

  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
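/// For example, if Mask is 0xFF00 and the top byte of V is known to be zero,
/// then 'V & Mask' is known to be zero and this returns true.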
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
                                     unsigned Depth) const {
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
/// ComputeMaskedBits - Determine which bits of Op are known to be either zero
/// or one and return them in the KnownZero/KnownOne bitsets. The analysis
/// recurses into the operands of Op up to a fixed depth, in order to
/// short-circuit processing.
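/// For example, for (and X, 0xFF00) every bit of the low byte is known zero
/// in the result, regardless of what is known about X.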
void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, unsigned Depth) const {
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();

  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
  if (Depth == 6)
    return;  // Limit search depth.

  APInt KnownZero2, KnownOne2;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
    KnownZero = ~KnownOne;
    return;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  case ISD::OR:
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  case ISD::XOR: {
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case ISD::MUL: {
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
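    // For example, if the LHS is a multiple of 4 (two trailing zero bits) and
    // the RHS is a multiple of 8 (three trailing zero bits), the product is a
    // multiple of 32, so TrailZ below is 2 + 3 = 5.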
    KnownOne.clearAllBits();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
                               KnownZero2.countLeadingOnes(),
                               BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    return;
  }
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
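    // For example, an i32 numerator with 8 known leading zeros divided by a
    // denominator known to have a set bit at position 4 (so >= 16) yields at
    // least 8 + 4 = 12 leading zeros in the quotient.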
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    return;
  }
  case ISD::SELECT:
    ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SELECT_CC:
    ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      return;
    // The boolean result conforms to getBooleanContents. Fall through.
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
        TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    return;
  case ISD::SHL:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
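    // For example, after (shl X, 3) everything known about the bits of X
    // shifts up by 3 positions and the low 3 bits become known zero.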
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        return;

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShAmt;
      KnownOne  <<= ShAmt;
      // low bits known zero.
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
    }
    return;
  case ISD::SRL:
    // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        return;

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      KnownZero |= HighBits;  // High bits known zero.
    }
    return;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        return;

      // If any of the demanded bits are produced by the sign extension, we
      // also demand the input sign bit.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      // Handle the sign bits.
      APInt SignBit = APInt::getSignBit(BitWidth);
      SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.

      if (KnownZero.intersects(SignBit)) {
        KnownZero |= HighBits;  // New bits are known zero.
      } else if (KnownOne.intersects(SignBit)) {
        KnownOne  |= HighBits;  // New bits are known one.
      }
    }
    return;
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = EVT.getScalarType().getSizeInBits();

    // Sign extension. Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignBit = APInt::getSignBit(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignBit = InSignBit.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownOne &= InputDemandedBits;
    KnownZero &= InputDemandedBits;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero.intersects(InSignBit)) {        // Input sign bit known clear
      KnownZero |= NewBits;
      KnownOne  &= ~NewBits;
    } else if (KnownOne.intersects(InSignBit)) {  // Input sign bit known set
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {                                      // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    return;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTPOP: {
    unsigned LowBits = Log2_32(BitWidth)+1;
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
    KnownOne.clearAllBits();
    return;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If this is a ZEXTLoad and we are looking at the loaded value.
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarType().getSizeInBits();
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      computeMaskedBitsLoad(*Ranges, KnownZero);
    }
    return;
  }
  case ISD::ZERO_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    return;
  }
  case ISD::SIGN_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);

    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

    // Note if the sign bit is known to be zero or one.
    bool SignBitKnownZero = KnownZero.isNegative();
    bool SignBitKnownOne  = KnownOne.isNegative();
    assert(!(SignBitKnownZero && SignBitKnownOne) &&
           "Sign bit can't be known to be both zero and one!");

    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit is known zero or one, the top bits match.
    if (SignBitKnownZero)
      KnownZero |= NewBits;
    else if (SignBitKnownOne)
      KnownOne  |= NewBits;
    return;
  }
  case ISD::ANY_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    return;
  }
  case ISD::TRUNCATE: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.zext(InBits);
    KnownOne = KnownOne.zext(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero |= (~InMask);
    KnownOne  &= (~KnownZero);
    return;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    return;
  case ISD::SUB: {
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains less bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (CLHS->getAPIntValue().isNonNegative()) {
        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // non-negative.
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }
  // fall through
  case ISD::ADD:
  case ISD::ADDE: {
    // Output known-0 bits are known if clear or set in both the low clear bits
    // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
    // low 3 bits clear.
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    unsigned KnownZeroOut = KnownZero2.countTrailingOnes();

    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    KnownZeroOut = std::min(KnownZeroOut,
                            KnownZero2.countTrailingOnes());

    if (Op.getOpcode() == ISD::ADD) {
      KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
      return;
    }

    // With ADDE, a carry bit may be added in, so we can only use this
    // information if we know (at least) that the low two bits are clear. We
    // then return to the caller that the low bit is unknown but that other
    // bits are known zero.
    if (KnownZeroOut >= 2) // ADDE
      KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
    return;
  }
  case ISD::SREM:
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }
    return;
  case ISD::UREM: {
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        KnownZero |= ~LowBits;
        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }
    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    if (unsigned Align = InferPtrAlignment(Op)) {
      // The low bits are known zero if the pointer is aligned.
      KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
      return;
    }
    break;

  default:
    if (Op.getOpcode() < ISD::BUILTIN_OP_END)
      break;
    // Fallthrough
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Allow the target to implement this method for its nodes.
    TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
    return;
  }
}
/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
  const TargetLowering *TLI = TM.getTargetLowering();
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && "Invalid VT!");
  unsigned VTBits = VT.getScalarType().getSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  if (Depth == 6)
    return 1;  // Limit search depth.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::AssertSext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp+1;
  case ISD::AssertZext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp;

  case ISD::Constant: {
    const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
    return Val.getNumSignBits();
  }
  case ISD::SIGN_EXTEND:
    Tmp =
        VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
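    // For example, for sext i8 -> i32, Tmp is 24, so an input with 3 known
    // sign bits produces 3 + 24 = 27 sign bits in the result.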
    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp =
      cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
    Tmp = VTBits-Tmp+1;

    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    return std::max(Tmp, Tmp2);
  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    // SRA X, C  -> adds C sign bits.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Tmp += C->getZExtValue();
      if (Tmp > VTBits) Tmp = VTBits;
    }
    return Tmp;
  case ISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (C->getZExtValue() >= VTBits ||      // Bad shift.
          C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
      return Tmp - C->getZExtValue();
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;
  case ISD::SELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
    return std::min(Tmp, Tmp2);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents. Fall through.
  case ISD::SETCC:
    // If setcc returns 0/-1, all bits are sign bits.
    if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned RotAmt = C->getZExtValue() & (VTBits-1);

      // Handle rotate right by N like a rotate left by 32-N.
      if (Op.getOpcode() == ISD::ROTR)
        RotAmt = (VTBits-RotAmt) & (VTBits-1);

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left. This handles rotl(sext(x), 1) for example.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (Tmp > RotAmt+1) return Tmp-RotAmt;
    }
    break;
  case ISD::ADD:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero, KnownOne;
        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is
        // all sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;
  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero, KnownOne;
        ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is
        // all sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
  case ISD::TRUNCATE:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }
  // If we are looking at the loaded value of the SDNode.
  if (Op.getResNo() == 0) {
    // Handle LOADX separately here. EXTLOAD case will fallthrough.
    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
      unsigned ExtType = LD->getExtensionType();
      switch (ExtType) {
        default: break;
        case ISD::SEXTLOAD:    // e.g. i16->i32 SEXTLOAD: '17' bits known
          Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
          return VTBits-Tmp+1;
        case ISD::ZEXTLOAD:    // e.g. i16->i32 ZEXTLOAD: '16' bits known
          Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
          return VTBits-Tmp;
      }
    }
  }
  // Allow the target to implement this method for its nodes.
  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_VOID) {
    unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, Depth);
    if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
  }
  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);

  APInt Mask;
  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1;
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-VTBits;
  // Return # leading zeros. We use 'min' here in case Val was zero before
  // shifting. We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
}
/// isBaseWithConstantOffset - Return true if the specified operand is an
/// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
/// ISD::OR with a ConstantSDNode that is guaranteed to have the same
/// semantics as an ADD. This handles the equivalence:
///     X|Cst == X+Cst iff X&Cst = 0.
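/// For example, (or (shl X, 4), 3) has the same value as (add (shl X, 4), 3)
/// because the shift leaves the low 4 bits zero.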
bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
  if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
      !isa<ConstantSDNode>(Op.getOperand(1)))
    return false;

  if (Op.getOpcode() == ISD::OR &&
      !MaskedValueIsZero(Op.getOperand(0),
                     cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
    return false;

  return true;
}
bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
  // If we're told that NaNs won't happen, assume they won't.
  if (getTarget().Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->getValueAPF().isNaN();

  // TODO: Recognize more cases here.

  return false;
}
bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
  // If the value is a constant, we can obviously see if it is a zero or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->isZero();

  // TODO: Recognize more cases here.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::OR:
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      return !C->isNullValue();
    break;
  }

  return false;
}
bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
  // Check the obvious case.
  if (A == B) return true;

  // Allow for negative and positive zero.
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}
/// getNode - Gets or creates the specified node.
///
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), getVTList(VT));
  CSEMap.InsertNode(N, IP);

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
                              EVT VT, SDValue Operand) {
  // Constant fold unary operations with an integer constant operand.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
    const APInt &Val = C->getAPIntValue();
    switch (Opcode) {
    default: break;
    case ISD::SIGN_EXTEND:
      return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT);
    case ISD::ANY_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::TRUNCATE:
      return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
    case ISD::UINT_TO_FP:
    case ISD::SINT_TO_FP: {
      APFloat apf(EVTToAPFloatSemantics(VT),
                  APInt::getNullValue(VT.getSizeInBits()));
      (void)apf.convertFromAPInt(Val,
                                 Opcode==ISD::SINT_TO_FP,
                                 APFloat::rmNearestTiesToEven);
      return getConstantFP(apf, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
        return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
      else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
        return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
      break;
    case ISD::BSWAP:
      return getConstant(Val.byteSwap(), VT);
    case ISD::CTPOP:
      return getConstant(Val.countPopulation(), VT);
    case ISD::CTLZ:
    case ISD::CTLZ_ZERO_UNDEF:
      return getConstant(Val.countLeadingZeros(), VT);
    case ISD::CTTZ:
    case ISD::CTTZ_ZERO_UNDEF:
      return getConstant(Val.countTrailingZeros(), VT);
    }
  }
  // Constant fold unary operations with a floating point constant operand.
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
    APFloat V = C->getValueAPF();    // make copy
    switch (Opcode) {
    case ISD::FNEG:
      V.changeSign();
      return getConstantFP(V, VT);
    case ISD::FABS:
      V.clearSign();
      return getConstantFP(V, VT);
    case ISD::FCEIL: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FTRUNC: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FFLOOR: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FP_EXTEND: {
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, VT);
    }
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT: {
      integerPart x[2];
      bool ignored;
      assert(integerPartWidth >= 64);
      // FIXME need to be more flexible about rounding mode.
      APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
                            Opcode==ISD::FP_TO_SINT,
                            APFloat::rmTowardZero, &ignored);
      if (s==APFloat::opInvalidOp)     // inexact is OK, in fact usual
        break;
      APInt api(VT.getSizeInBits(), x);
      return getConstant(api, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
        return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
      else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
        return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
      break;
    }
  }
  unsigned OpOpcode = Operand.getNode()->getOpcode();
  switch (Opcode) {
  case ISD::TokenFactor:
  case ISD::MERGE_VALUES:
  case ISD::CONCAT_VECTORS:
    return Operand;         // Factor, merge or concat of one node? No need.
  case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
  case ISD::FP_EXTEND:
    assert(VT.isFloatingPoint() &&
           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
    if (Operand.getValueType() == VT) return Operand;  // noop conversion.
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (Operand.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SIGN_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid sext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // sext(undef) = 0, because the top bits will all be the same.
      return getConstant(0, VT);
    break;
  case ISD::ZERO_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ZERO_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid zext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
      return getNode(ISD::ZERO_EXTEND, DL, VT,
                     Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // zext(undef) = 0, because the top bits will be zero.
      return getConstant(0, VT);
    break;
  case ISD::ANY_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ANY_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid anyext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");

    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND)
      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    // (ext (trunc x)) -> x
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = Operand.getNode()->getOperand(0);
      if (OpOp.getValueType() == VT)
        return OpOp;
    }
    break;
  case ISD::TRUNCATE:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid TRUNCATE!");
    if (Operand.getValueType() == VT) return Operand;   // noop truncate
    assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
           "Invalid truncate node, src < dst!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::TRUNCATE)
      return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
      // If the source is smaller than the dest, we still need an extend.
      if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
            .bitsLT(VT.getScalarType()))
        return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
      if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
        return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
      return Operand.getNode()->getOperand(0);
    }
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BITCAST:
    // Basic sanity checking.
    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits() &&
           "Cannot BITCAST between types of different sizes!");
    if (VT == Operand.getValueType()) return Operand;   // noop conversion.
    if (OpOpcode == ISD::BITCAST)   // bitconv(bitconv(x)) -> bitconv(x)
      return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SCALAR_TO_VECTOR:
    assert(VT.isVector() && !Operand.getValueType().isVector() &&
           (VT.getVectorElementType() == Operand.getValueType() ||
            (VT.getVectorElementType().isInteger() &&
             Operand.getValueType().isInteger() &&
             VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
           "Illegal SCALAR_TO_VECTOR node!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
        isa<ConstantSDNode>(Operand.getOperand(1)) &&
        Operand.getConstantOperandVal(1) == 0 &&
        Operand.getOperand(0).getValueType() == VT)
      return Operand.getOperand(0);
    break;
  case ISD::FNEG:
    // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
    if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
      return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
                     Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::FNEG)  // --X -> X
      return Operand.getNode()->getOperand(0);
    break;
  case ISD::FABS:
    if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
      return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
    break;
  }
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) { // Don't CSE flag producing nodes
    FoldingSetNodeID ID;
    SDValue Ops[1] = { Operand };
    AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
    void *IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
                                             SDNode *Cst1, SDNode *Cst2) {
  SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
  SmallVector<SDValue, 4> Outputs;
  EVT SVT = VT.getScalarType();

  ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
  ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
  if (Scalar1 && Scalar2 && (Scalar1->isOpaque() || Scalar2->isOpaque()))
    return SDValue();

  if (Scalar1 && Scalar2)
    // Scalar instruction.
    Inputs.push_back(std::make_pair(Scalar1, Scalar2));
  else {
    // For vectors extract each constant element into Inputs so we can constant
    // fold them individually.
    BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
    BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
    if (!BV1 || !BV2)
      return SDValue();

    assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");

    for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
      ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
      ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
      if (!V1 || !V2) // Not a constant, bail.
        return SDValue();

      if (V1->isOpaque() || V2->isOpaque())
        return SDValue();

      // Avoid BUILD_VECTOR nodes that perform implicit truncation.
      // FIXME: This is valid and could be handled by truncating the APInts.
      if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
        return SDValue();

      Inputs.push_back(std::make_pair(V1, V2));
    }
  }
  // We have a number of constant values, constant fold them element by
  // element.
  for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
    const APInt &C1 = Inputs[I].first->getAPIntValue();
    const APInt &C2 = Inputs[I].second->getAPIntValue();

    switch (Opcode) {
    case ISD::ADD:
      Outputs.push_back(getConstant(C1 + C2, SVT));
      break;
    case ISD::SUB:
      Outputs.push_back(getConstant(C1 - C2, SVT));
      break;
    case ISD::MUL:
      Outputs.push_back(getConstant(C1 * C2, SVT));
      break;
    case ISD::UDIV:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.udiv(C2), SVT));
      break;
    case ISD::UREM:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.urem(C2), SVT));
      break;
    case ISD::SDIV:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
      break;
    case ISD::SREM:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.srem(C2), SVT));
      break;
    case ISD::AND:
      Outputs.push_back(getConstant(C1 & C2, SVT));
      break;
    case ISD::OR:
      Outputs.push_back(getConstant(C1 | C2, SVT));
      break;
    case ISD::XOR:
      Outputs.push_back(getConstant(C1 ^ C2, SVT));
      break;
    case ISD::SHL:
      Outputs.push_back(getConstant(C1 << C2, SVT));
      break;
    case ISD::SRL:
      Outputs.push_back(getConstant(C1.lshr(C2), SVT));
      break;
    case ISD::SRA:
      Outputs.push_back(getConstant(C1.ashr(C2), SVT));
      break;
    case ISD::ROTL:
      Outputs.push_back(getConstant(C1.rotl(C2), SVT));
      break;
    case ISD::ROTR:
      Outputs.push_back(getConstant(C1.rotr(C2), SVT));
      break;
    default:
      return SDValue();
    }
  }

  // Handle the scalar case first.
  if (Scalar1 && Scalar2)
    return Outputs.back();

  // Otherwise build a big vector out of the scalar elements we generated.
  return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs.data(),
                 Outputs.size());
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
                              SDValue N2) {
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  switch (Opcode) {
  default: break;
  case ISD::TokenFactor:
    assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
           N2.getValueType() == MVT::Other && "Invalid token factor!");
    // Fold trivial token factors.
    if (N1.getOpcode() == ISD::EntryToken) return N2;
    if (N2.getOpcode() == ISD::EntryToken) return N1;
    if (N1 == N2) return N1;
    break;
  case ISD::CONCAT_VECTORS:
    // Concat of UNDEFs is UNDEF.
    if (N1.getOpcode() == ISD::UNDEF &&
        N2.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);
    // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
    }
    break;
  case ISD::AND:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
    // worth handling here.
    if (N2C && N2C->isNullValue())
      return N2;
    if (N2C && N2C->isAllOnesValue())   // X & -1 -> X
      return N1;
    break;
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
    // it's worth handling here.
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::SREM:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    if (getTarget().Options.UnsafeFPMath) {
      if (Opcode == ISD::FADD) {
        // 0+x --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
          if (CFP->getValueAPF().isZero())
            return N2;
        // x+0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FSUB) {
        // x-0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FMUL) {
        ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
        SDValue V = N2;

        // If the first operand isn't the constant, try the second
        if (!CFP) {
          CFP = dyn_cast<ConstantFPSDNode>(N2);
          V = N1;
        }

        if (CFP) {
          // 0*x --> 0
          if (CFP->isZero())
            return SDValue(CFP,0);
          // 1*x --> x
          if (CFP->isExactlyValue(1.0))
            return V;
        }
      }
    }
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FCOPYSIGN:   // N1 and result must match. N1/N2 need not match.
    assert(N1.getValueType() == VT &&
           N1.getValueType().isFloatingPoint() &&
           N2.getValueType().isFloatingPoint() &&
           "Invalid FCOPYSIGN!");
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    assert(VT == N1.getValueType() &&
           "Shift operators return type must be the same as their first arg");
    assert(VT.isInteger() && N2.getValueType().isInteger() &&
           "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must be the same type as their first arg");
    // Verify that the shift amount VT is big enough to hold valid shift
    // amounts. This catches things like trying to shift an i1024 value by an
    // i8, which is easy to fall into in generic code that uses
    // TLI.getShiftAmount().
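    // For example, an i1024 value needs a shift amount type of at least
    // Log2_32_Ceil(1024) == 10 bits, so an i8 shift amount fails the check.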
    assert(N2.getValueType().getSizeInBits() >=
                   Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
           "Invalid use of small shift amount with oversized value!");

    // Always fold shifts of i1 values so the code generator doesn't need to
    // handle them. Since we know the size of the shift has to be less than the
    // size of the value, the shift/rotate count is guaranteed to be zero.
    if (VT == MVT::i1)
      return N1;
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::FP_ROUND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg round!");
    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
           "Cannot FP_ROUND_INREG integer types");
    assert(EVT.isVector() == VT.isVector() &&
           "FP_ROUND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in FP_ROUND_INREG");
    assert(EVT.bitsLE(VT) && "Not rounding down!");
    (void)EVT;
    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
    break;
  }
  case ISD::FP_ROUND:
    assert(VT.isFloatingPoint() &&
           N1.getValueType().isFloatingPoint() &&
           VT.bitsLE(N1.getValueType()) &&
           isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
    if (N1.getValueType() == VT) return N1;  // noop conversion.
    break;
  case ISD::AssertSext:
  case ISD::AssertZext: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(!EVT.isVector() &&
           "AssertSExt/AssertZExt type should be the vector element type "
           "rather than the vector type!");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (VT == EVT) return N1; // noop assertion.
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(EVT.isVector() == VT.isVector() &&
           "SIGN_EXTEND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in SIGN_EXTEND_INREG");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (EVT == VT) return N1;  // Not actually extending

    if (N1C) {
      APInt Val = N1C->getAPIntValue();
      unsigned FromBits = EVT.getScalarType().getSizeInBits();
      Val <<= Val.getBitWidth()-FromBits;
      Val = Val.ashr(Val.getBitWidth()-FromBits);
      return getConstant(Val, VT);
    }
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
    if (N1.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
    // expanding copies of large vectors from registers.
    if (N2C &&
        N1.getOpcode() == ISD::CONCAT_VECTORS &&
        N1.getNumOperands() > 0) {
      unsigned Factor =
        N1.getOperand(0).getValueType().getVectorNumElements();
      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                     N1.getOperand(N2C->getZExtValue() / Factor),
                     getConstant(N2C->getZExtValue() % Factor,
                                 N2.getValueType()));
    }
    // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
    // expanding large vector constants.
    if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue Elt = N1.getOperand(N2C->getZExtValue());

      if (VT != Elt.getValueType())
        // If the vector element type is not legal, the BUILD_VECTOR operands
        // are promoted and implicitly truncated, and the result implicitly
        // extended. Make that explicit here.
        Elt = getAnyExtOrTrunc(Elt, DL, VT);

      return Elt;
    }
    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
    // operations are lowered to scalars.
    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      // If the indices are the same, return the inserted element else
      // if the indices are known different, extract the element from
      // the original vector.
      SDValue N1Op2 = N1.getOperand(2);
      ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());

      if (N1Op2C && N2C) {
        if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
          if (VT == N1.getOperand(1).getValueType())
            return N1.getOperand(1);
          else
            return getSExtOrTrunc(N1.getOperand(1), DL, VT);
        }

        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
      }
    }
    break;
3170 case ISD::EXTRACT_ELEMENT:
3171 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3172 assert(!N1.getValueType().isVector() && !VT.isVector() &&
3173 (N1.getValueType().isInteger() == VT.isInteger()) &&
3174 N1.getValueType() != VT &&
3175 "Wrong types for EXTRACT_ELEMENT!");
3177 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3178 // 64-bit integers into 32-bit parts. Instead of building the extract of
3179 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3180 if (N1.getOpcode() == ISD::BUILD_PAIR)
3181 return N1.getOperand(N2C->getZExtValue());
3183 // EXTRACT_ELEMENT of a constant int is also very common.
3184 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
3185 unsigned ElementSize = VT.getSizeInBits();
3186 unsigned Shift = ElementSize * N2C->getZExtValue();
3187 APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
3188 return getConstant(ShiftedVal.trunc(ElementSize), VT);
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue Index = N2;
    if (VT.isSimple() && N1.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             "Extract subvector VTs must be vectors!");
      assert(VT.getVectorElementType() ==
             N1.getValueType().getVectorElementType() &&
             "Extract subvector VTs must have the same element type!");
      assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
             "Extract subvector must be from larger vector to smaller vector!");

      if (isa<ConstantSDNode>(Index.getNode())) {
        assert((VT.getVectorNumElements() +
                cast<ConstantSDNode>(Index.getNode())->getZExtValue()
                <= N1.getValueType().getVectorNumElements())
               && "Extract subvector overflow!");
      }

      // Trivial extraction.
      if (VT.getSimpleVT() == N1.getSimpleValueType())
        return N1;
    }
    break;
  }
  }
  // Perform trivial constant folding.
  SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
  if (SV.getNode()) return SV;

  // Canonicalize constant to RHS if commutative.
  if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
    std::swap(N1C, N2C);
    std::swap(N1, N2);
  }
  // Constant fold FP operations.
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
  ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
  if (N1CFP) {
    if (!N2CFP && isCommutativeBinOp(Opcode)) {
      // Canonicalize constant to RHS if commutative.
      std::swap(N1CFP, N2CFP);
      std::swap(N1, N2);
    } else if (N2CFP) {
      APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
      APFloat::opStatus s;
      switch (Opcode) {
      case ISD::FADD:
        s = V1.add(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FSUB:
        s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FMUL:
        s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FDIV:
        s = V1.divide(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp && s != APFloat::opDivByZero)
          return getConstantFP(V1, VT);
        break;
      case ISD::FREM:
        s = V1.mod(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp && s != APFloat::opDivByZero)
          return getConstantFP(V1, VT);
        break;
      case ISD::FCOPYSIGN:
        V1.copySign(V2);
        return getConstantFP(V1, VT);
      default: break;
      }
    }
    if (Opcode == ISD::FP_ROUND) {
      APFloat V = N1CFP->getValueAPF();    // make copy
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, VT);
    }
  }
  // Canonicalize an UNDEF to the RHS, even over a constant.
  if (N1.getOpcode() == ISD::UNDEF) {
    if (isCommutativeBinOp(Opcode)) {
      std::swap(N1, N2);
    } else {
      switch (Opcode) {
      case ISD::FP_ROUND_INREG:
      case ISD::SIGN_EXTEND_INREG:
      case ISD::SUB:
      case ISD::FSUB:
      case ISD::FDIV:
      case ISD::FREM:
      case ISD::SRA:
        return N1;     // fold op(undef, arg2) -> undef
      case ISD::UDIV:
      case ISD::SDIV:
      case ISD::UREM:
      case ISD::SREM:
      case ISD::SRL:
      case ISD::SHL:
        if (!VT.isVector())
          return getConstant(0, VT);    // fold op(undef, arg2) -> 0
        // For vectors, we can't easily build an all zero vector, just return
        // the LHS.
        return N2;
      }
    }
  }
  // Fold a bunch of operators when the RHS is undef.
  if (N2.getOpcode() == ISD::UNDEF) {
    switch (Opcode) {
    case ISD::XOR:
      if (N1.getOpcode() == ISD::UNDEF)
        // Handle undef ^ undef -> 0 special case. This is a common
        // idiom (misuse).
        return getConstant(0, VT);
      // fallthrough
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::SUB:
    case ISD::UDIV:
    case ISD::SDIV:
    case ISD::UREM:
    case ISD::SREM:
      return N2;       // fold op(arg1, undef) -> undef
    case ISD::FADD:
    case ISD::FSUB:
    case ISD::FMUL:
    case ISD::FDIV:
    case ISD::FREM:
      if (getTarget().Options.UnsafeFPMath)
        return N2;
      break;
    case ISD::MUL:
    case ISD::AND:
    case ISD::SRL:
    case ISD::SHL:
      if (!VT.isVector())
        return getConstant(0, VT);  // fold op(arg1, undef) -> 0
      // For vectors, we can't easily build an all zero vector, just return
      // the LHS.
      return N1;
    case ISD::OR:
      if (!VT.isVector())
        return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
      // For vectors, we can't easily build an all one vector, just return
      // the LHS.
      return N1;
    case ISD::SRA:
      return N1;
    }
  }
  // Memoize this node if possible.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) {
    SDValue Ops[] = { N1, N2 };
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
    void *IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), VTs, N1, N2);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), VTs, N1, N2);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3) {
  // Perform various simplifications.
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  switch (Opcode) {
  case ISD::FMA: {
    ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
    ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
    ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
    if (N1CFP && N2CFP && N3CFP) {
      APFloat V1 = N1CFP->getValueAPF();
      const APFloat &V2 = N2CFP->getValueAPF();
      const APFloat &V3 = N3CFP->getValueAPF();
      APFloat::opStatus s =
        V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
      if (s != APFloat::opInvalidOp)
        return getConstantFP(V1, VT);
    }
    break;
  }
  case ISD::CONCAT_VECTORS:
    // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR &&
        N3.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
    }
    break;
  case ISD::SETCC: {
    // Use FoldSetCC to simplify SETCC's.
    SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
    if (Simp.getNode()) return Simp;
    break;
  }
  case ISD::SELECT:
    if (N1C) {
      if (N1C->getZExtValue())
        return N2;             // select true, X, Y -> X
      return N3;               // select false, X, Y -> Y
    }

    if (N2 == N3) return N2;   // select C, X, X -> X
    break;
  case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
  case ISD::INSERT_SUBVECTOR: {
    SDValue Index = N3;
    if (VT.isSimple() && N1.getValueType().isSimple()
        && N2.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             N2.getValueType().isVector() &&
             "Insert subvector VTs must be vectors");
      assert(VT == N1.getValueType() &&
             "Dest and insert subvector source types must match!");
      assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
             "Insert subvector must be from smaller vector to larger vector!");
      if (isa<ConstantSDNode>(Index.getNode())) {
        assert((N2.getValueType().getVectorNumElements() +
                cast<ConstantSDNode>(Index.getNode())->getZExtValue()
                <= VT.getVectorNumElements())
               && "Insert subvector overflow!");
      }

      // Trivial insertion.
      if (VT.getSimpleVT() == N2.getSimpleValueType())
        return N2;
    }
    break;
  }
  case ISD::BITCAST:
    // Fold bit_convert nodes from a type to themselves.
    if (N1.getValueType() == VT)
      return N1;
    break;
  }
  // Memoize node if it doesn't produce a flag.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) {
    SDValue Ops[] = { N1, N2, N3 };
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
    void *IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
                                          DL.getDebugLoc(), VTs, N1, N2, N3);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
                                          DL.getDebugLoc(), VTs, N1, N2, N3);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VT, Ops, 4);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4, SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VT, Ops, 5);
}
3504 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3505 /// the incoming stack arguments to be loaded from the stack.
3506 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3507 SmallVector<SDValue, 8> ArgChains;
3509 // Include the original chain at the beginning of the list. When this is
3510 // used by target LowerCall hooks, this helps legalize find the
3511 // CALLSEQ_BEGIN node.
3512 ArgChains.push_back(Chain);
3514 // Add a chain value for each stack argument.
3515 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3516 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3517 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3518 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3519 if (FI->getIndex() < 0)
3520 ArgChains.push_back(SDValue(L, 1));
3522 // Build a tokenfactor for all the chains.
3523 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
3524 &ArgChains[0], ArgChains.size());
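// Usage sketch (hedged; exact call sites are target-specific): a target's
// LowerCall can, when lowering a tail call, do
//   Chain = DAG.getStackArgumentTokenFactor(Chain);
// so that the loads of incoming stack arguments are chained before the
// stores that build the outgoing frame, preventing the arguments from being
// clobbered before they are read.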
3527 /// getMemsetValue - Vectorized representation of the memset value operand.
3529 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3531 assert(Value.getOpcode() != ISD::UNDEF);
3533 unsigned NumBits = VT.getScalarType().getSizeInBits();
3534 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3535 assert(C->getAPIntValue().getBitWidth() == 8);
3536 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3538 return DAG.getConstant(Val, VT);
3539 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3542 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3544 // Use a multiplication with 0x010101... to extend the input to the
3545 // required length.
3546 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3547 Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
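// Worked example of the multiply above: splatting the byte 0xAB to i32 is
//   0x000000AB * 0x01010101 = 0xABABABAB
// i.e. a single multiply by the 0x01... magic constant replaces a chain of
// shift-and-or steps, one per replicated byte.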
3553 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
3554 /// used when a memcpy is turned into a memset because the source is a
3555 /// constant string.
3556 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3557 const TargetLowering &TLI, StringRef Str) {
3558 // Handle vector with all elements zero.
3561 return DAG.getConstant(0, VT);
3562 else if (VT == MVT::f32 || VT == MVT::f64)
3563 return DAG.getConstantFP(0.0, VT);
3564 else if (VT.isVector()) {
3565 unsigned NumElts = VT.getVectorNumElements();
3566 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3567 return DAG.getNode(ISD::BITCAST, dl, VT,
3568 DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3571 llvm_unreachable("Expected type!");
3574 assert(!VT.isVector() && "Can't handle vector type here!");
3575 unsigned NumVTBits = VT.getSizeInBits();
3576 unsigned NumVTBytes = NumVTBits / 8;
3577 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3579 APInt Val(NumVTBits, 0);
3580 if (TLI.isLittleEndian()) {
3581 for (unsigned i = 0; i != NumBytes; ++i)
3582 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3584 for (unsigned i = 0; i != NumBytes; ++i)
3585 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
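// Worked example: with NumVTBytes == 4 and Str == "abcd" ('a' == 0x61), the
// little-endian loop produces Val == 0x64636261 ('a' in the low byte), while
// the big-endian loop produces Val == 0x61626364.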
3588 // If the "cost" of materializing the integer immediate is less than the cost
3589 // of a load, then it is cost effective to turn the load into the immediate.
3590 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
3591 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
3592 return DAG.getConstant(Val, VT);
3593 return SDValue(0, 0);
3596 /// getMemBasePlusOffset - Returns an ADD node computing the given base
3597 /// pointer plus a constant offset.
3598 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3599 SelectionDAG &DAG) {
3600 EVT VT = Base.getValueType();
3601 return DAG.getNode(ISD::ADD, dl,
3602 VT, Base, DAG.getConstant(Offset, VT));
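// Usage sketch (names assumed in scope): addressing the next chunk of a
// lowered memcpy destination is just
//   SDValue Addr = getMemBasePlusOffset(Dst, DstOff, dl, DAG);
// i.e. an (ISD::ADD Dst, DstOff) in Dst's pointer-sized type, which later
// address-mode matching can usually fold into the load / store itself.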
3605 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
3607 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3608 unsigned SrcDelta = 0;
3609 GlobalAddressSDNode *G = NULL;
3610 if (Src.getOpcode() == ISD::GlobalAddress)
3611 G = cast<GlobalAddressSDNode>(Src);
3612 else if (Src.getOpcode() == ISD::ADD &&
3613 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3614 Src.getOperand(1).getOpcode() == ISD::Constant) {
3615 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3616 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3621 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
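// Example: for Src == (ISD::ADD (GlobalAddress @str), (Constant 3)) this
// returns the bytes of @str's initializer starting at offset 3, provided
// @str is a constant string; the memcpy can then be rewritten as stores of
// immediates instead of loads from @str.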
3624 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3625 /// to replace the memset / memcpy. Returns true if the number of memory ops
3626 /// is below the threshold. The sequence of memory op types to perform the
3627 /// memset / memcpy is returned by reference in MemOps.
3628 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3629 unsigned Limit, uint64_t Size,
3630 unsigned DstAlign, unsigned SrcAlign,
3636 const TargetLowering &TLI) {
3637 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3638 "Expecting memcpy / memset source to meet alignment requirement!");
3639 // If 'SrcAlign' is zero, that means the memory operation does not need to
3640 // load the value, i.e. memset or memcpy from constant string. Otherwise,
3641 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3642 // is the specified alignment of the memory operation. If it is zero, that
3643 // means it's possible to change the alignment of the destination.
3644 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3645 // not need to be loaded.
3646 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3647 IsMemset, ZeroMemset, MemcpyStrSrc,
3648 DAG.getMachineFunction());
3650 if (VT == MVT::Other) {
3651 unsigned AS = 0;
3652 if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
3653 TLI.allowsUnalignedMemoryAccesses(VT, AS)) {
3654 VT = TLI.getPointerTy();
3656 switch (DstAlign & 7) {
3657 case 0: VT = MVT::i64; break;
3658 case 4: VT = MVT::i32; break;
3659 case 2: VT = MVT::i16; break;
3660 default: VT = MVT::i8; break;
3664 MVT LVT = MVT::i64;
3665 while (!TLI.isTypeLegal(LVT))
3666 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3667 assert(LVT.isInteger());
3673 unsigned NumMemOps = 0;
3675 unsigned VTSize = VT.getSizeInBits() / 8;
3676 while (VTSize > Size) {
3677 // For now, only use non-vector loads / stores for the left-over pieces.
3682 if (VT.isVector() || VT.isFloatingPoint()) {
3683 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3684 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3685 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3687 else if (NewVT == MVT::i64 &&
3688 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3689 TLI.isSafeMemOpType(MVT::f64)) {
3690 // i64 is usually not legal on 32-bit targets, but f64 may be.
3698 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3699 if (NewVT == MVT::i8)
3701 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3703 NewVTSize = NewVT.getSizeInBits() / 8;
3705 // If the new VT cannot cover all of the remaining bits, then consider
3706 // issuing one (or a pair of) unaligned and overlapping loads / stores.
3707 // FIXME: Only do this for 64 bits or more since we don't have a proper
3708 // cost model for unaligned loads / stores.
3709 unsigned AS = 0;
3710 bool Fast;
3711 if (NumMemOps && AllowOverlap &&
3712 VTSize >= 8 && NewVTSize < Size &&
3713 TLI.allowsUnalignedMemoryAccesses(VT, AS, &Fast) && Fast)
3721 if (++NumMemOps > Limit)
3724 MemOps.push_back(VT);
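// Worked example (illustrative): on a 64-bit target where unaligned i64
// accesses are legal and fast, Size == 15 typically yields
// MemOps == { i64, i64 }, the second op overlapping the first by one byte;
// without overlap the greedy walk instead produces { i64, i32, i16, i8 }
// (8 + 4 + 2 + 1 bytes).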
3731 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3732 SDValue Chain, SDValue Dst,
3733 SDValue Src, uint64_t Size,
3734 unsigned Align, bool isVol,
3736 MachinePointerInfo DstPtrInfo,
3737 MachinePointerInfo SrcPtrInfo) {
3738 // Turn a memcpy of undef to nop.
3739 if (Src.getOpcode() == ISD::UNDEF)
3742 // Expand memcpy to a series of load and store ops if the size operand falls
3743 // below a certain threshold.
3744 // TODO: In the AlwaysInline case, if the size is big then generate a loop
3745 // rather than a potentially huge number of loads and stores.
3746 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3747 std::vector<EVT> MemOps;
3748 bool DstAlignCanChange = false;
3749 MachineFunction &MF = DAG.getMachineFunction();
3750 MachineFrameInfo *MFI = MF.getFrameInfo();
3751 bool OptSize =
3752 MF.getFunction()->getAttributes().
3753 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3754 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3755 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3756 DstAlignCanChange = true;
3757 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3758 if (Align > SrcAlign)
3760 StringRef Str;
3761 bool CopyFromStr = isMemSrcFromString(Src, Str);
3762 bool isZeroStr = CopyFromStr && Str.empty();
3763 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
3765 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3766 (DstAlignCanChange ? 0 : Align),
3767 (isZeroStr ? 0 : SrcAlign),
3768 false, false, CopyFromStr, true, DAG, TLI))
3771 if (DstAlignCanChange) {
3772 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3773 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3775 // Don't promote to an alignment that would require dynamic stack
3776 // realignment.
3777 const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
3778 if (!TRI->needsStackRealignment(MF))
3779 while (NewAlign > Align &&
3780 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
3783 if (NewAlign > Align) {
3784 // Give the stack frame object a larger alignment if needed.
3785 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3786 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3791 SmallVector<SDValue, 8> OutChains;
3792 unsigned NumMemOps = MemOps.size();
3793 uint64_t SrcOff = 0, DstOff = 0;
3794 for (unsigned i = 0; i != NumMemOps; ++i) {
3796 unsigned VTSize = VT.getSizeInBits() / 8;
3797 SDValue Value, Store;
3799 if (VTSize > Size) {
3800 // Issue an unaligned load / store pair that overlaps with the previous
3801 // pair. Adjust the offsets accordingly.
3802 assert(i == NumMemOps-1 && i != 0);
3803 SrcOff -= VTSize - Size;
3804 DstOff -= VTSize - Size;
3808 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
3809 // It's unlikely that a store of a vector immediate can be done in a
3810 // single instruction. It would require a load from a constant pool first.
3811 // We only handle zero vectors here.
3812 // FIXME: Handle other cases where a store of a vector immediate is done
3813 // in a single instruction.
3814 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
3815 if (Value.getNode())
3816 Store = DAG.getStore(Chain, dl, Value,
3817 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3818 DstPtrInfo.getWithOffset(DstOff), isVol,
3822 if (!Store.getNode()) {
3823 // The type might not be legal for the target. This should only happen
3824 // if the type is smaller than a legal type, as on PPC, so the right
3825 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
3826 // to Load/Store if NVT==VT.
3827 // FIXME: does the case above also need this?
3828 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3829 assert(NVT.bitsGE(VT));
3830 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3831 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3832 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
3833 MinAlign(SrcAlign, SrcOff));
3834 Store = DAG.getTruncStore(Chain, dl, Value,
3835 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3836 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
3839 OutChains.push_back(Store);
3845 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3846 &OutChains[0], OutChains.size());
3849 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3850 SDValue Chain, SDValue Dst,
3851 SDValue Src, uint64_t Size,
3852 unsigned Align, bool isVol,
3854 MachinePointerInfo DstPtrInfo,
3855 MachinePointerInfo SrcPtrInfo) {
3856 // Turn a memmove of undef to nop.
3857 if (Src.getOpcode() == ISD::UNDEF)
3860 // Expand memmove to a series of load and store ops if the size operand falls
3861 // below a certain threshold.
3862 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3863 std::vector<EVT> MemOps;
3864 bool DstAlignCanChange = false;
3865 MachineFunction &MF = DAG.getMachineFunction();
3866 MachineFrameInfo *MFI = MF.getFrameInfo();
3867 bool OptSize = MF.getFunction()->getAttributes().
3868 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3869 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3870 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3871 DstAlignCanChange = true;
3872 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3873 if (Align > SrcAlign)
3875 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
3877 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3878 (DstAlignCanChange ? 0 : Align), SrcAlign,
3879 false, false, false, false, DAG, TLI))
3882 if (DstAlignCanChange) {
3883 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3884 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3885 if (NewAlign > Align) {
3886 // Give the stack frame object a larger alignment if needed.
3887 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3888 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3893 uint64_t SrcOff = 0, DstOff = 0;
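// Unlike memcpy lowering, all loads are issued before any store (two
// separate loops below): the source and destination may overlap, so every
// source byte must be read before the first destination byte is written.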
3894 SmallVector<SDValue, 8> LoadValues;
3895 SmallVector<SDValue, 8> LoadChains;
3896 SmallVector<SDValue, 8> OutChains;
3897 unsigned NumMemOps = MemOps.size();
3898 for (unsigned i = 0; i < NumMemOps; i++) {
3900 unsigned VTSize = VT.getSizeInBits() / 8;
3903 Value = DAG.getLoad(VT, dl, Chain,
3904 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3905 SrcPtrInfo.getWithOffset(SrcOff), isVol,
3906 false, false, SrcAlign);
3907 LoadValues.push_back(Value);
3908 LoadChains.push_back(Value.getValue(1));
3911 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3912 &LoadChains[0], LoadChains.size());
3914 for (unsigned i = 0; i < NumMemOps; i++) {
3916 unsigned VTSize = VT.getSizeInBits() / 8;
3919 Store = DAG.getStore(Chain, dl, LoadValues[i],
3920 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3921 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
3922 OutChains.push_back(Store);
3926 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3927 &OutChains[0], OutChains.size());
3930 /// \brief Lower the call to the 'memset' intrinsic function into a series
3931 /// of store operations.
3933 /// \param DAG Selection DAG where lowered code is placed.
3934 /// \param dl Link to corresponding IR location.
3935 /// \param Chain Control flow dependency.
3936 /// \param Dst Pointer to destination memory location.
3937 /// \param Src Value of byte to write into the memory.
3938 /// \param Size Number of bytes to write.
3939 /// \param Align Alignment of the destination in bytes.
3940 /// \param isVol True if destination is volatile.
3941 /// \param DstPtrInfo IR information on the memory pointer.
3942 /// \returns The new head of the control flow if lowering was successful,
3943 /// or an empty SDValue otherwise.
3945 /// The function tries to replace 'llvm.memset' intrinsic with several store
3946 /// operations and value calculation code. This is usually profitable for
3947 /// small memory sizes.
3948 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
3949 SDValue Chain, SDValue Dst,
3950 SDValue Src, uint64_t Size,
3951 unsigned Align, bool isVol,
3952 MachinePointerInfo DstPtrInfo) {
3953 // Turn a memset of undef to nop.
3954 if (Src.getOpcode() == ISD::UNDEF)
3957 // Expand memset to a series of store ops if the size operand
3958 // falls below a certain threshold.
3959 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3960 std::vector<EVT> MemOps;
3961 bool DstAlignCanChange = false;
3962 MachineFunction &MF = DAG.getMachineFunction();
3963 MachineFrameInfo *MFI = MF.getFrameInfo();
3964 bool OptSize = MF.getFunction()->getAttributes().
3965 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3966 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3967 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3968 DstAlignCanChange = true;
3969 bool IsZeroVal =
3970 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
3971 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
3972 Size, (DstAlignCanChange ? 0 : Align), 0,
3973 true, IsZeroVal, false, true, DAG, TLI))
3976 if (DstAlignCanChange) {
3977 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3978 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3979 if (NewAlign > Align) {
3980 // Give the stack frame object a larger alignment if needed.
3981 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3982 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3987 SmallVector<SDValue, 8> OutChains;
3988 uint64_t DstOff = 0;
3989 unsigned NumMemOps = MemOps.size();
3991 // Find the largest store and generate the bit pattern for it.
3992 EVT LargestVT = MemOps[0];
3993 for (unsigned i = 1; i < NumMemOps; i++)
3994 if (MemOps[i].bitsGT(LargestVT))
3995 LargestVT = MemOps[i];
3996 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
3998 for (unsigned i = 0; i < NumMemOps; i++) {
4000 unsigned VTSize = VT.getSizeInBits() / 8;
4001 if (VTSize > Size) {
4002 // Issue an unaligned store that overlaps with the previous store.
4003 // Adjust the offset accordingly.
4004 assert(i == NumMemOps-1 && i != 0);
4005 DstOff -= VTSize - Size;
4008 // If this store is smaller than the largest store, see whether we can
4009 // get the smaller value for free with a truncate.
4010 SDValue Value = MemSetValue;
4011 if (VT.bitsLT(LargestVT)) {
4012 if (!LargestVT.isVector() && !VT.isVector() &&
4013 TLI.isTruncateFree(LargestVT, VT))
4014 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
4016 Value = getMemsetValue(Src, VT, DAG, dl);
4018 assert(Value.getValueType() == VT && "Value with wrong type.");
4019 SDValue Store = DAG.getStore(Chain, dl, Value,
4020 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4021 DstPtrInfo.getWithOffset(DstOff),
4022 isVol, false, Align);
4023 OutChains.push_back(Store);
4024 DstOff += VT.getSizeInBits() / 8;
4028 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4029 &OutChains[0], OutChains.size());
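// Example (illustrative): on a 64-bit target, a 16-byte memset with the byte
// value 0xAB becomes two i64 stores of 0xABABABABABABABAB; the pattern is
// materialized once for the largest type, and any smaller tail store reuses
// it through a truncate where the target makes that free.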
4032 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
4033 SDValue Src, SDValue Size,
4034 unsigned Align, bool isVol, bool AlwaysInline,
4035 MachinePointerInfo DstPtrInfo,
4036 MachinePointerInfo SrcPtrInfo) {
4037 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4039 // Check to see if we should lower the memcpy to loads and stores first.
4040 // For cases within the target-specified limits, this is the best choice.
4041 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4043 // Memcpy with size zero? Just return the original chain.
4044 if (ConstantSize->isNullValue())
4047 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4048 ConstantSize->getZExtValue(),Align,
4049 isVol, false, DstPtrInfo, SrcPtrInfo);
4050 if (Result.getNode())
4054 // Then check to see if we should lower the memcpy with target-specific
4055 // code. If the target chooses to do this, this is the next best.
4056 SDValue Result =
4057 TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
4058 isVol, AlwaysInline,
4059 DstPtrInfo, SrcPtrInfo);
4060 if (Result.getNode())
4063 // If we really need inline code and the target declined to provide it,
4064 // use a (potentially long) sequence of loads and stores.
4066 assert(ConstantSize && "AlwaysInline requires a constant size!");
4067 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4068 ConstantSize->getZExtValue(), Align, isVol,
4069 true, DstPtrInfo, SrcPtrInfo);
4072 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
4073 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
4074 // respect volatile, so they may do things like read or write memory
4075 // beyond the given memory regions. But fixing this isn't easy, and most
4076 // people don't care.
4078 const TargetLowering *TLI = TM.getTargetLowering();
4080 // Emit a library call.
4081 TargetLowering::ArgListTy Args;
4082 TargetLowering::ArgListEntry Entry;
4083 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4084 Entry.Node = Dst; Args.push_back(Entry);
4085 Entry.Node = Src; Args.push_back(Entry);
4086 Entry.Node = Size; Args.push_back(Entry);
4087 // FIXME: pass in SDLoc
4089 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4090 false, false, false, false, 0,
4091 TLI->getLibcallCallingConv(RTLIB::MEMCPY),
4092 /*isTailCall=*/false,
4093 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4094 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
4095 TLI->getPointerTy()),
4097 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4099 return CallResult.second;
4102 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
4103 SDValue Src, SDValue Size,
4104 unsigned Align, bool isVol,
4105 MachinePointerInfo DstPtrInfo,
4106 MachinePointerInfo SrcPtrInfo) {
4107 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4109 // Check to see if we should lower the memmove to loads and stores first.
4110 // For cases within the target-specified limits, this is the best choice.
4111 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4113 // Memmove with size zero? Just return the original chain.
4114 if (ConstantSize->isNullValue())
4117 SDValue Result =
4118 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
4119 ConstantSize->getZExtValue(), Align, isVol,
4120 false, DstPtrInfo, SrcPtrInfo);
4121 if (Result.getNode())
4125 // Then check to see if we should lower the memmove with target-specific
4126 // code. If the target chooses to do this, this is the next best.
4127 SDValue Result =
4128 TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4129 DstPtrInfo, SrcPtrInfo);
4130 if (Result.getNode())
4133 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
4134 // not be safe. See memcpy above for more details.
4136 const TargetLowering *TLI = TM.getTargetLowering();
4138 // Emit a library call.
4139 TargetLowering::ArgListTy Args;
4140 TargetLowering::ArgListEntry Entry;
4141 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4142 Entry.Node = Dst; Args.push_back(Entry);
4143 Entry.Node = Src; Args.push_back(Entry);
4144 Entry.Node = Size; Args.push_back(Entry);
4145 // FIXME: pass in SDLoc
4147 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4148 false, false, false, false, 0,
4149 TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
4150 /*isTailCall=*/false,
4151 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4152 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
4153 TLI->getPointerTy()),
4155 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4157 return CallResult.second;
4160 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
4161 SDValue Src, SDValue Size,
4162 unsigned Align, bool isVol,
4163 MachinePointerInfo DstPtrInfo) {
4164 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4166 // Check to see if we should lower the memset to stores first.
4167 // For cases within the target-specified limits, this is the best choice.
4168 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4170 // Memset with size zero? Just return the original chain.
4171 if (ConstantSize->isNullValue())
4174 SDValue Result =
4175 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4176 Align, isVol, DstPtrInfo);
4178 if (Result.getNode())
4182 // Then check to see if we should lower the memset with target-specific
4183 // code. If the target chooses to do this, this is the next best.
4184 SDValue Result =
4185 TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4187 if (Result.getNode())
4190 // Emit a library call.
4191 const TargetLowering *TLI = TM.getTargetLowering();
4192 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
4193 TargetLowering::ArgListTy Args;
4194 TargetLowering::ArgListEntry Entry;
4195 Entry.Node = Dst; Entry.Ty = IntPtrTy;
4196 Args.push_back(Entry);
4197 // Extend or truncate the argument to be an i32 value for the call.
4198 if (Src.getValueType().bitsGT(MVT::i32))
4199 Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4201 Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4203 Entry.Ty = Type::getInt32Ty(*getContext());
4204 Entry.isSExt = true;
4205 Args.push_back(Entry);
4207 Entry.Ty = IntPtrTy;
4208 Entry.isSExt = false;
4209 Args.push_back(Entry);
4210 // FIXME: pass in SDLoc
4212 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4213 false, false, false, false, 0,
4214 TLI->getLibcallCallingConv(RTLIB::MEMSET),
4215 /*isTailCall=*/false,
4216 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4217 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4218 TLI->getPointerTy()),
4220 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4222 return CallResult.second;
4225 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4226 SDVTList VTList, SDValue* Ops, unsigned NumOps,
4227 MachineMemOperand *MMO,
4228 AtomicOrdering Ordering,
4229 SynchronizationScope SynchScope) {
4230 FoldingSetNodeID ID;
4231 ID.AddInteger(MemVT.getRawBits());
4232 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4233 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4235 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4236 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4237 return SDValue(E, 0);
4240 // Allocate the operands array for the node out of the BumpPtrAllocator, since
4241 // SDNode doesn't have access to it. This memory will be "leaked" when
4242 // the node is deallocated, but recovered when the allocator is released.
4243 // If the number of operands is less than 5 we use AtomicSDNode's internal
4244 // storage.
4245 SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps) : 0;
4247 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
4248 dl.getDebugLoc(), VTList, MemVT,
4249 Ops, DynOps, NumOps, MMO,
4250 Ordering, SynchScope);
4251 CSEMap.InsertNode(N, IP);
4252 AllNodes.push_back(N);
4253 return SDValue(N, 0);
4256 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4257 SDValue Chain, SDValue Ptr, SDValue Cmp,
4258 SDValue Swp, MachinePointerInfo PtrInfo,
4260 AtomicOrdering Ordering,
4261 SynchronizationScope SynchScope) {
4262 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4263 Alignment = getEVTAlignment(MemVT);
4265 MachineFunction &MF = getMachineFunction();
4267 // All atomics both load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
4268 // For now, atomics are considered to be volatile always.
4269 // FIXME: Volatile isn't really correct; we should keep track of atomic
4270 // orderings in the memoperand.
4271 unsigned Flags = MachineMemOperand::MOVolatile;
4272 if (Opcode != ISD::ATOMIC_STORE)
4273 Flags |= MachineMemOperand::MOLoad;
4274 if (Opcode != ISD::ATOMIC_LOAD)
4275 Flags |= MachineMemOperand::MOStore;
4277 MachineMemOperand *MMO =
4278 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4280 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
4281 Ordering, SynchScope);
4284 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4286 SDValue Ptr, SDValue Cmp,
4287 SDValue Swp, MachineMemOperand *MMO,
4288 AtomicOrdering Ordering,
4289 SynchronizationScope SynchScope) {
4290 assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
4291 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4293 EVT VT = Cmp.getValueType();
4295 SDVTList VTs = getVTList(VT, MVT::Other);
4296 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4297 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 4, MMO, Ordering, SynchScope);
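// Usage sketch (operands and MMO assumed in scope): lowering an IR cmpxchg
// to a DAG node goes through this entry point, e.g.
//   SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, MVT::i32,
//                                Chain, Ptr, Cmp, New, MMO,
//                                Monotonic, CrossThread);
// Swap.getValue(0) is the value loaded from memory and Swap.getValue(1) is
// the output chain.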
4300 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4302 SDValue Ptr, SDValue Val,
4303 const Value* PtrVal,
4305 AtomicOrdering Ordering,
4306 SynchronizationScope SynchScope) {
4307 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4308 Alignment = getEVTAlignment(MemVT);
4310 MachineFunction &MF = getMachineFunction();
4311 // An atomic store does not load. An atomic load does not store.
4312 // (An atomicrmw obviously both loads and stores.)
4313 // For now, atomics are considered to be volatile always, and they are
4314 // chained as such.
4315 // FIXME: Volatile isn't really correct; we should keep track of atomic
4316 // orderings in the memoperand.
4317 unsigned Flags = MachineMemOperand::MOVolatile;
4318 if (Opcode != ISD::ATOMIC_STORE)
4319 Flags |= MachineMemOperand::MOLoad;
4320 if (Opcode != ISD::ATOMIC_LOAD)
4321 Flags |= MachineMemOperand::MOStore;
4323 MachineMemOperand *MMO =
4324 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4325 MemVT.getStoreSize(), Alignment);
4327 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4328 Ordering, SynchScope);
4331 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4333 SDValue Ptr, SDValue Val,
4334 MachineMemOperand *MMO,
4335 AtomicOrdering Ordering,
4336 SynchronizationScope SynchScope) {
4337 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4338 Opcode == ISD::ATOMIC_LOAD_SUB ||
4339 Opcode == ISD::ATOMIC_LOAD_AND ||
4340 Opcode == ISD::ATOMIC_LOAD_OR ||
4341 Opcode == ISD::ATOMIC_LOAD_XOR ||
4342 Opcode == ISD::ATOMIC_LOAD_NAND ||
4343 Opcode == ISD::ATOMIC_LOAD_MIN ||
4344 Opcode == ISD::ATOMIC_LOAD_MAX ||
4345 Opcode == ISD::ATOMIC_LOAD_UMIN ||
4346 Opcode == ISD::ATOMIC_LOAD_UMAX ||
4347 Opcode == ISD::ATOMIC_SWAP ||
4348 Opcode == ISD::ATOMIC_STORE) &&
4349 "Invalid Atomic Op");
4351 EVT VT = Val.getValueType();
4353 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4354 getVTList(VT, MVT::Other);
4355 SDValue Ops[] = {Chain, Ptr, Val};
4356 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 3, MMO, Ordering, SynchScope);
4359 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4360 EVT VT, SDValue Chain,
4362 const Value* PtrVal,
4364 AtomicOrdering Ordering,
4365 SynchronizationScope SynchScope) {
4366 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4367 Alignment = getEVTAlignment(MemVT);
4369 MachineFunction &MF = getMachineFunction();
4370 // An atomic store does not load. An atomic load does not store.
4371 // (An atomicrmw obviously both loads and stores.)
4372 // For now, atomics are considered to be volatile always, and they are
4373 // chained as such.
4374 // FIXME: Volatile isn't really correct; we should keep track of atomic
4375 // orderings in the memoperand.
4376 unsigned Flags = MachineMemOperand::MOVolatile;
4377 if (Opcode != ISD::ATOMIC_STORE)
4378 Flags |= MachineMemOperand::MOLoad;
4379 if (Opcode != ISD::ATOMIC_LOAD)
4380 Flags |= MachineMemOperand::MOStore;
4382 MachineMemOperand *MMO =
4383 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4384 MemVT.getStoreSize(), Alignment);
4386 return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO,
4387 Ordering, SynchScope);
4390 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4391 EVT VT, SDValue Chain,
4393 MachineMemOperand *MMO,
4394 AtomicOrdering Ordering,
4395 SynchronizationScope SynchScope) {
4396 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4398 SDVTList VTs = getVTList(VT, MVT::Other);
4399 SDValue Ops[] = {Chain, Ptr};
4400 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 2, MMO, Ordering, SynchScope);
4403 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
4404 SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
4409 SmallVector<EVT, 4> VTs;
4410 VTs.reserve(NumOps);
4411 for (unsigned i = 0; i < NumOps; ++i)
4412 VTs.push_back(Ops[i].getValueType());
4413 return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps),
4418 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl,
4419 const EVT *VTs, unsigned NumVTs,
4420 const SDValue *Ops, unsigned NumOps,
4421 EVT MemVT, MachinePointerInfo PtrInfo,
4422 unsigned Align, bool Vol,
4423 bool ReadMem, bool WriteMem) {
4424 return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
4425 MemVT, PtrInfo, Align, Vol,
4426 ReadMem, WriteMem);
4430 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4431 const SDValue *Ops, unsigned NumOps,
4432 EVT MemVT, MachinePointerInfo PtrInfo,
4433 unsigned Align, bool Vol,
4434 bool ReadMem, bool WriteMem) {
4435 if (Align == 0) // Ensure that codegen never sees alignment 0
4436 Align = getEVTAlignment(MemVT);
4438 MachineFunction &MF = getMachineFunction();
4439 unsigned Flags = 0;
4440 if (WriteMem)
4441 Flags |= MachineMemOperand::MOStore;
4442 if (ReadMem)
4443 Flags |= MachineMemOperand::MOLoad;
4444 if (Vol)
4445 Flags |= MachineMemOperand::MOVolatile;
4446 MachineMemOperand *MMO =
4447 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
4449 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
4453 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4454 const SDValue *Ops, unsigned NumOps,
4455 EVT MemVT, MachineMemOperand *MMO) {
4456 assert((Opcode == ISD::INTRINSIC_VOID ||
4457 Opcode == ISD::INTRINSIC_W_CHAIN ||
4458 Opcode == ISD::PREFETCH ||
4459 Opcode == ISD::LIFETIME_START ||
4460 Opcode == ISD::LIFETIME_END ||
4461 (Opcode <= INT_MAX &&
4462 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4463 "Opcode is not a memory-accessing opcode!");
4465 // Memoize the node unless it returns a flag.
4466 MemIntrinsicSDNode *N;
4467 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4468 FoldingSetNodeID ID;
4469 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4470 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4472 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4473 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4474 return SDValue(E, 0);
4477 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4478 dl.getDebugLoc(), VTList, Ops,
4479 NumOps, MemVT, MMO);
4480 CSEMap.InsertNode(N, IP);
4481 } else {
4482 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4483 dl.getDebugLoc(), VTList, Ops,
4484 NumOps, MemVT, MMO);
4486 AllNodes.push_back(N);
4487 return SDValue(N, 0);
4490 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4491 /// MachinePointerInfo record from it. This is particularly useful because the
4492 /// code generator has many cases where it doesn't bother passing in a
4493 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4494 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4495 // If this is FI+Offset, we can model it.
4496 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4497 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4499 // If this is (FI+Offset1)+Offset2, we can model it.
4500 if (Ptr.getOpcode() != ISD::ADD ||
4501 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4502 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4503 return MachinePointerInfo();
4505 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4506 return MachinePointerInfo::getFixedStack(FI, Offset+
4507 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
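// Example: for Ptr == (ISD::ADD (FrameIndex 3), (Constant 8)) and
// Offset == 4, this returns MachinePointerInfo::getFixedStack(3, 12),
// letting alias analysis see through the pointer arithmetic.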
4510 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4511 /// MachinePointerInfo record from it. This is particularly useful because the
4512 /// code generator has many cases where it doesn't bother passing in a
4513 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4514 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4515 // If the 'Offset' value isn't a constant, we can't handle this.
4516 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4517 return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4518 if (OffsetOp.getOpcode() == ISD::UNDEF)
4519 return InferPointerInfo(Ptr);
4520 return MachinePointerInfo();
4525 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4526 EVT VT, SDLoc dl, SDValue Chain,
4527 SDValue Ptr, SDValue Offset,
4528 MachinePointerInfo PtrInfo, EVT MemVT,
4529 bool isVolatile, bool isNonTemporal, bool isInvariant,
4530 unsigned Alignment, const MDNode *TBAAInfo,
4531 const MDNode *Ranges) {
4532 assert(Chain.getValueType() == MVT::Other &&
4533 "Invalid chain type");
4534 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4535 Alignment = getEVTAlignment(VT);
4537 unsigned Flags = MachineMemOperand::MOLoad;
4538 if (isVolatile)
4539 Flags |= MachineMemOperand::MOVolatile;
4540 if (isNonTemporal)
4541 Flags |= MachineMemOperand::MONonTemporal;
4542 if (isInvariant)
4543 Flags |= MachineMemOperand::MOInvariant;
4545 // If we don't have a PtrInfo, infer the trivial frame index case to
4546 // simplify the code below.
4547 if (PtrInfo.V == 0)
4548 PtrInfo = InferPointerInfo(Ptr, Offset);
4550 MachineFunction &MF = getMachineFunction();
4551 MachineMemOperand *MMO =
4552 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
4554 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
4558 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4559 EVT VT, SDLoc dl, SDValue Chain,
4560 SDValue Ptr, SDValue Offset, EVT MemVT,
4561 MachineMemOperand *MMO) {
4562 if (VT == MemVT) {
4563 ExtType = ISD::NON_EXTLOAD;
4564 } else if (ExtType == ISD::NON_EXTLOAD) {
4565 assert(VT == MemVT && "Non-extending load from different memory type!");
4568 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
4569 "Should only be an extending load, not truncating!");
4570 assert(VT.isInteger() == MemVT.isInteger() &&
4571 "Cannot convert from FP to Int or Int to FP!");
4572 assert(VT.isVector() == MemVT.isVector() &&
4573 "Cannot use an extending load to convert to or from a vector!");
4574 assert((!VT.isVector() ||
4575 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
4576 "Cannot use an extending load to change the number of vector elements!");
4579 bool Indexed = AM != ISD::UNINDEXED;
4580 assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
4581 "Unindexed load with an offset!");
4583 SDVTList VTs = Indexed ?
4584 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4585 SDValue Ops[] = { Chain, Ptr, Offset };
4586 FoldingSetNodeID ID;
4587 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
4588 ID.AddInteger(MemVT.getRawBits());
4589 ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4590 MMO->isNonTemporal(),
4591 MMO->isInvariant()));
4592 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4594 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4595 cast<LoadSDNode>(E)->refineAlignment(MMO);
4596 return SDValue(E, 0);
4598 SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
4599 dl.getDebugLoc(), VTs, AM, ExtType,
4601 CSEMap.InsertNode(N, IP);
4602 AllNodes.push_back(N);
4603 return SDValue(N, 0);
4606 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4607 SDValue Chain, SDValue Ptr,
4608 MachinePointerInfo PtrInfo,
4609 bool isVolatile, bool isNonTemporal,
4610 bool isInvariant, unsigned Alignment,
4611 const MDNode *TBAAInfo,
4612 const MDNode *Ranges) {
4613 SDValue Undef = getUNDEF(Ptr.getValueType());
4614 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4615 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4619 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4620 SDValue Chain, SDValue Ptr,
4621 MachineMemOperand *MMO) {
4622 SDValue Undef = getUNDEF(Ptr.getValueType());
4623 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4627 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4628 SDValue Chain, SDValue Ptr,
4629 MachinePointerInfo PtrInfo, EVT MemVT,
4630 bool isVolatile, bool isNonTemporal,
4631 unsigned Alignment, const MDNode *TBAAInfo) {
4632 SDValue Undef = getUNDEF(Ptr.getValueType());
4633 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4634 PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
4639 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4640 SDValue Chain, SDValue Ptr, EVT MemVT,
4641 MachineMemOperand *MMO) {
4642 SDValue Undef = getUNDEF(Ptr.getValueType());
4643 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4648 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
4649 SDValue Offset, ISD::MemIndexedMode AM) {
4650 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4651 assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4652 "Load is already an indexed load!");
4653 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4654 LD->getChain(), Base, Offset, LD->getPointerInfo(),
4655 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4656 false, LD->getAlignment());
4659 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4660 SDValue Ptr, MachinePointerInfo PtrInfo,
4661 bool isVolatile, bool isNonTemporal,
4662 unsigned Alignment, const MDNode *TBAAInfo) {
4663 assert(Chain.getValueType() == MVT::Other &&
4664 "Invalid chain type");
4665 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4666 Alignment = getEVTAlignment(Val.getValueType());
4668 unsigned Flags = MachineMemOperand::MOStore;
4669 if (isVolatile)
4670 Flags |= MachineMemOperand::MOVolatile;
4671 if (isNonTemporal)
4672 Flags |= MachineMemOperand::MONonTemporal;
4674 if (PtrInfo.V == 0)
4675 PtrInfo = InferPointerInfo(Ptr);
4677 MachineFunction &MF = getMachineFunction();
4678 MachineMemOperand *MMO =
4679 MF.getMachineMemOperand(PtrInfo, Flags,
4680 Val.getValueType().getStoreSize(), Alignment,
4683 return getStore(Chain, dl, Val, Ptr, MMO);
4686 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4687 SDValue Ptr, MachineMemOperand *MMO) {
4688 assert(Chain.getValueType() == MVT::Other &&
4689 "Invalid chain type");
4690 EVT VT = Val.getValueType();
4691 SDVTList VTs = getVTList(MVT::Other);
4692 SDValue Undef = getUNDEF(Ptr.getValueType());
4693 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4694 FoldingSetNodeID ID;
4695 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4696 ID.AddInteger(VT.getRawBits());
4697 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4698 MMO->isNonTemporal(), MMO->isInvariant()));
4699 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4701 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4702 cast<StoreSDNode>(E)->refineAlignment(MMO);
4703 return SDValue(E, 0);
4705 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4706 dl.getDebugLoc(), VTs,
4707 ISD::UNINDEXED, false, VT, MMO);
4708 CSEMap.InsertNode(N, IP);
4709 AllNodes.push_back(N);
4710 return SDValue(N, 0);
4713 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4714 SDValue Ptr, MachinePointerInfo PtrInfo,
4715 EVT SVT,bool isVolatile, bool isNonTemporal,
4717 const MDNode *TBAAInfo) {
4718 assert(Chain.getValueType() == MVT::Other &&
4719 "Invalid chain type");
4720 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4721 Alignment = getEVTAlignment(SVT);
4723 unsigned Flags = MachineMemOperand::MOStore;
4724 if (isVolatile)
4725 Flags |= MachineMemOperand::MOVolatile;
4726 if (isNonTemporal)
4727 Flags |= MachineMemOperand::MONonTemporal;
4729 if (PtrInfo.V == 0)
4730 PtrInfo = InferPointerInfo(Ptr);
4732 MachineFunction &MF = getMachineFunction();
4733 MachineMemOperand *MMO =
4734 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
4737 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
4740 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4741 SDValue Ptr, EVT SVT,
4742 MachineMemOperand *MMO) {
4743 EVT VT = Val.getValueType();
4745 assert(Chain.getValueType() == MVT::Other &&
4746 "Invalid chain type");
4748 return getStore(Chain, dl, Val, Ptr, MMO);
4750 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
4751 "Should only be a truncating store, not extending!");
4752 assert(VT.isInteger() == SVT.isInteger() &&
4753 "Can't do FP-INT conversion!");
4754 assert(VT.isVector() == SVT.isVector() &&
4755 "Cannot use trunc store to convert to or from a vector!");
4756 assert((!VT.isVector() ||
4757 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
4758 "Cannot use trunc store to change the number of vector elements!");
4760 SDVTList VTs = getVTList(MVT::Other);
4761 SDValue Undef = getUNDEF(Ptr.getValueType());
4762 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4763 FoldingSetNodeID ID;
4764 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4765 ID.AddInteger(SVT.getRawBits());
4766 ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
4767 MMO->isNonTemporal(), MMO->isInvariant()));
4768 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4770 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4771 cast<StoreSDNode>(E)->refineAlignment(MMO);
4772 return SDValue(E, 0);
4774 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4775 dl.getDebugLoc(), VTs,
4776 ISD::UNINDEXED, true, SVT, MMO);
4777 CSEMap.InsertNode(N, IP);
4778 AllNodes.push_back(N);
4779 return SDValue(N, 0);
4783 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
4784 SDValue Offset, ISD::MemIndexedMode AM) {
4785 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
4786 assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4787 "Store is already an indexed store!");
4788 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4789 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4790 FoldingSetNodeID ID;
4791 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4792 ID.AddInteger(ST->getMemoryVT().getRawBits());
4793 ID.AddInteger(ST->getRawSubclassData());
4794 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4796 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4797 return SDValue(E, 0);
4799 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4800 dl.getDebugLoc(), VTs, AM,
4801 ST->isTruncatingStore(),
4803 ST->getMemOperand());
4804 CSEMap.InsertNode(N, IP);
4805 AllNodes.push_back(N);
4806 return SDValue(N, 0);
4809 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
4810 SDValue Chain, SDValue Ptr,
4811 SDValue SV,
4812 unsigned Align) {
4813 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4814 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
4817 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4818 const SDUse *Ops, unsigned NumOps) {
4819 switch (NumOps) {
4820 case 0: return getNode(Opcode, DL, VT);
4821 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4822 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4823 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4827 // Copy from an SDUse array into an SDValue array for use with
4828 // the regular getNode logic.
4829 SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
4830 return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
4833 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4834 const SDValue *Ops, unsigned NumOps) {
4835 switch (NumOps) {
4836 case 0: return getNode(Opcode, DL, VT);
4837 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4838 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4839 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4845 case ISD::SELECT_CC: {
4846 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4847 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4848 "LHS and RHS of condition must have same type!");
4849 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4850 "True and False arms of SelectCC must have same type!");
4851 assert(Ops[2].getValueType() == VT &&
4852 "select_cc node must be of same type as true and false value!");
4856 assert(NumOps == 5 && "BR_CC takes 5 operands!");
4857 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4858 "LHS/RHS of comparison should match types!");
4865 SDVTList VTs = getVTList(VT);
4866 SDNode *N;
4867 if (VT != MVT::Glue) {
4868 FoldingSetNodeID ID;
4869 AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
4872 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4873 return SDValue(E, 0);
4875 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4877 CSEMap.InsertNode(N, IP);
4878 } else {
4879 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4883 AllNodes.push_back(N);
4887 return SDValue(N, 0);
4890 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4891 ArrayRef<EVT> ResultTys,
4892 const SDValue *Ops, unsigned NumOps) {
4893 return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
4897 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4898 const EVT *VTs, unsigned NumVTs,
4899 const SDValue *Ops, unsigned NumOps) {
4901 return getNode(Opcode, DL, VTs[0], Ops, NumOps);
4902 return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
4905 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4906 const SDValue *Ops, unsigned NumOps) {
4907 if (VTList.NumVTs == 1)
4908 return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
4912 // FIXME: figure out how to safely handle things like
4913 // int foo(int x) { return 1 << (x & 255); }
4914 // int bar() { return foo(256); }
4915 case ISD::SRA_PARTS:
4916 case ISD::SRL_PARTS:
4917 case ISD::SHL_PARTS:
4918 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4919 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
4920 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4921 else if (N3.getOpcode() == ISD::AND)
4922 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
4923 // If the AND is only masking out bits that cannot affect the shift,
4924 // eliminate the AND.
4925 unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
4926 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
4927 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
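// Worked example: for i32 SHL_PARTS the combined shift is 64 bits wide, so
// NumBits == 64 and a shift amount of (and x, 63) masks off nothing that
// could influence the result; the AND is dropped and x is used directly.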
4933 // Memoize the node unless it returns a flag.
4934 SDNode *N;
4935 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4936 FoldingSetNodeID ID;
4937 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4939 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4940 return SDValue(E, 0);
4942 if (NumOps == 1) {
4943 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
4944 DL.getDebugLoc(), VTList, Ops[0]);
4945 } else if (NumOps == 2) {
4946 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
4947 DL.getDebugLoc(), VTList, Ops[0],
4949 } else if (NumOps == 3) {
4950 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
4951 DL.getDebugLoc(), VTList, Ops[0],
4954 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4955 VTList, Ops, NumOps);
4957 CSEMap.InsertNode(N, IP);
4958 } else {
4959 if (NumOps == 1) {
4960 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
4961 DL.getDebugLoc(), VTList, Ops[0]);
4962 } else if (NumOps == 2) {
4963 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
4964 DL.getDebugLoc(), VTList, Ops[0],
4966 } else if (NumOps == 3) {
4967 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
4968 DL.getDebugLoc(), VTList, Ops[0],
4971 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4972 VTList, Ops, NumOps);
4975 AllNodes.push_back(N);
4979 return SDValue(N, 0);
4982 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
4983 return getNode(Opcode, DL, VTList, 0, 0);
4986 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4988 SDValue Ops[] = { N1 };
4989 return getNode(Opcode, DL, VTList, Ops, 1);
4992 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4993 SDValue N1, SDValue N2) {
4994 SDValue Ops[] = { N1, N2 };
4995 return getNode(Opcode, DL, VTList, Ops, 2);
4998 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4999 SDValue N1, SDValue N2, SDValue N3) {
5000 SDValue Ops[] = { N1, N2, N3 };
5001 return getNode(Opcode, DL, VTList, Ops, 3);
5004 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5005 SDValue N1, SDValue N2, SDValue N3,
5007 SDValue Ops[] = { N1, N2, N3, N4 };
5008 return getNode(Opcode, DL, VTList, Ops, 4);
5011 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
5012 SDValue N1, SDValue N2, SDValue N3,
5013 SDValue N4, SDValue N5) {
5014 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5015 return getNode(Opcode, DL, VTList, Ops, 5);
5018 SDVTList SelectionDAG::getVTList(EVT VT) {
5019 return makeVTList(SDNode::getValueTypeList(VT), 1);
5022 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
5023 FoldingSetNodeID ID;
5025 ID.AddInteger(VT1.getRawBits());
5026 ID.AddInteger(VT2.getRawBits());
5029 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5030 if (Result == NULL) {
5031 EVT *Array = Allocator.Allocate<EVT>(2);
5034 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
5035 VTListMap.InsertNode(Result, IP);
5037 return Result->getSDVTList();
5040 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
5041 FoldingSetNodeID ID;
5043 ID.AddInteger(VT1.getRawBits());
5044 ID.AddInteger(VT2.getRawBits());
5045 ID.AddInteger(VT3.getRawBits());
5048 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5049 if (Result == NULL) {
5050 EVT *Array = Allocator.Allocate<EVT>(3);
5054 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5055 VTListMap.InsertNode(Result, IP);
5057 return Result->getSDVTList();
5060 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
5061 FoldingSetNodeID ID;
5063 ID.AddInteger(VT1.getRawBits());
5064 ID.AddInteger(VT2.getRawBits());
5065 ID.AddInteger(VT3.getRawBits());
5066 ID.AddInteger(VT4.getRawBits());
5069 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5070 if (Result == NULL) {
5071 EVT *Array = Allocator.Allocate<EVT>(4);
5076 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5077 VTListMap.InsertNode(Result, IP);
5079 return Result->getSDVTList();
5082 SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
5083 FoldingSetNodeID ID;
5084 ID.AddInteger(NumVTs);
5085 for (unsigned index = 0; index < NumVTs; index++) {
5086 ID.AddInteger(VTs[index].getRawBits());
5090 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5091 if (Result == NULL) {
5092 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5093 std::copy(VTs, VTs + NumVTs, Array);
5094 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5095 VTListMap.InsertNode(Result, IP);
5097 return Result->getSDVTList();
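// Note: every VT list is interned in VTListMap above, so requesting the same
// sequence of types twice returns the same underlying EVT array. Node
// identity hashing can therefore add the VTs pointer to the FoldingSetNodeID
// rather than hashing each type individually.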
5101 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5102 /// specified operands. If the resultant node already exists in the DAG,
5103 /// this does not modify the specified node, instead it returns the node that
5104 /// already exists. If the resultant node does not exist in the DAG, the
5105 /// input node is returned. As a degenerate case, if you specify the same
5106 /// input operands as the node already has, the input node is returned.
5107 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
5108 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5110 // Check to see if there is no change.
5111 if (Op == N->getOperand(0)) return N;
5113 // See if the modified node already exists.
5114 void *InsertPos = 0;
5115 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
5118 // Nope, it doesn't. Remove the node from its current place in the maps.
5120 if (!RemoveNodeFromCSEMaps(N))
5123 // Now we update the operands.
5124 N->OperandList[0].set(Op);
5126 // If this gets put into a CSE map, add it.
5127 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5131 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
5132 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5134 // Check to see if there is no change.
5135 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5136 return N; // No operands changed, just return the input node.
5138 // See if the modified node already exists.
5139 void *InsertPos = 0;
5140 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
5143 // Nope, it doesn't. Remove the node from its current place in the maps.
5145 if (!RemoveNodeFromCSEMaps(N))
5148 // Now we update the operands.
5149 if (N->OperandList[0] != Op1)
5150 N->OperandList[0].set(Op1);
5151 if (N->OperandList[1] != Op2)
5152 N->OperandList[1].set(Op2);
5154 // If this gets put into a CSE map, add it.
5155 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5159 SDNode *SelectionDAG::
5160 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
5161 SDValue Ops[] = { Op1, Op2, Op3 };
5162 return UpdateNodeOperands(N, Ops, 3);
5165 SDNode *SelectionDAG::
5166 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5167 SDValue Op3, SDValue Op4) {
5168 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
5169 return UpdateNodeOperands(N, Ops, 4);
5172 SDNode *SelectionDAG::
5173 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5174 SDValue Op3, SDValue Op4, SDValue Op5) {
5175 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
5176 return UpdateNodeOperands(N, Ops, 5);
5179 SDNode *SelectionDAG::
5180 UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
5181 assert(N->getNumOperands() == NumOps &&
5182 "Update with wrong number of operands");
5184 // Check to see if there is no change.
5185 bool AnyChange = false;
5186 for (unsigned i = 0; i != NumOps; ++i) {
5187 if (Ops[i] != N->getOperand(i)) {
5188 AnyChange = true;
5189 break;
5193 // No operands changed, just return the input node.
5194 if (!AnyChange) return N;
5196 // See if the modified node already exists.
5197 void *InsertPos = 0;
5198 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
5199 return Existing;
5201 // Nope it doesn't. Remove the node from its current place in the maps.
5202 if (InsertPos)
5203 if (!RemoveNodeFromCSEMaps(N))
5204 InsertPos = 0;
5206 // Now we update the operands.
5207 for (unsigned i = 0; i != NumOps; ++i)
5208 if (N->OperandList[i] != Ops[i])
5209 N->OperandList[i].set(Ops[i]);
5211 // If this gets put into a CSE map, add it.
5212 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5213 return N;
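// Illustrative sketch, not part of the original file ('DAG', 'N', 'Lo', and
// 'Hi' are assumed): callers must use the returned node, which may differ
// from N when CSE finds an equivalent node already in the DAG.
//
//   SDValue NewOps[] = { Lo, Hi };
//   SDNode *Res = DAG.UpdateNodeOperands(N, NewOps, 2);
//   if (Res != N) {
//     // N was left unmodified; an existing equivalent node was returned.
//   }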
5216 /// DropOperands - Release the operands and set this node to have
5217 /// zero operands.
5218 void SDNode::DropOperands() {
5219 // Unlike the code in MorphNodeTo that does this, we don't need to
5220 // watch for dead nodes here.
5221 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5222 SDUse &Use = *I++;
5223 Use.set(SDValue());
5227 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
5228 /// machine opcode.
5230 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5232 SDVTList VTs = getVTList(VT);
5233 return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
5236 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5237 EVT VT, SDValue Op1) {
5238 SDVTList VTs = getVTList(VT);
5239 SDValue Ops[] = { Op1 };
5240 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5243 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5244 EVT VT, SDValue Op1,
5246 SDVTList VTs = getVTList(VT);
5247 SDValue Ops[] = { Op1, Op2 };
5248 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5251 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5252 EVT VT, SDValue Op1,
5253 SDValue Op2, SDValue Op3) {
5254 SDVTList VTs = getVTList(VT);
5255 SDValue Ops[] = { Op1, Op2, Op3 };
5256 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5259 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5260 EVT VT, const SDValue *Ops,
5262 SDVTList VTs = getVTList(VT);
5263 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5266 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5267 EVT VT1, EVT VT2, const SDValue *Ops,
5269 SDVTList VTs = getVTList(VT1, VT2);
5270 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5273 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5275 SDVTList VTs = getVTList(VT1, VT2);
5276 return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
5279 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5280 EVT VT1, EVT VT2, EVT VT3,
5281 const SDValue *Ops, unsigned NumOps) {
5282 SDVTList VTs = getVTList(VT1, VT2, VT3);
5283 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5286 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5287 EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5288 const SDValue *Ops, unsigned NumOps) {
5289 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5290 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5293 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5296 SDVTList VTs = getVTList(VT1, VT2);
5297 SDValue Ops[] = { Op1 };
5298 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5301 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5303 SDValue Op1, SDValue Op2) {
5304 SDVTList VTs = getVTList(VT1, VT2);
5305 SDValue Ops[] = { Op1, Op2 };
5306 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5309 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5311 SDValue Op1, SDValue Op2,
5313 SDVTList VTs = getVTList(VT1, VT2);
5314 SDValue Ops[] = { Op1, Op2, Op3 };
5315 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5318 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5319 EVT VT1, EVT VT2, EVT VT3,
5320 SDValue Op1, SDValue Op2,
5322 SDVTList VTs = getVTList(VT1, VT2, VT3);
5323 SDValue Ops[] = { Op1, Op2, Op3 };
5324 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5327 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5328 SDVTList VTs, const SDValue *Ops,
5330 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
5331 // Reset the NodeID to -1.
5332 N->setNodeId(-1);
5333 return N;
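// Illustrative sketch, not part of the original file (the target opcode
// 'MyTarget::ADDrr' and the node 'N' are assumptions): morph an existing
// two-operand node into a machine instruction in place.
//
//   SDNode *Res = DAG.SelectNodeTo(N, MyTarget::ADDrr, MVT::i32,
//                                  N->getOperand(0), N->getOperand(1));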
5336 /// UpdateSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
5337 /// the line number information on the merged node, since it is not possible
5338 /// to preserve the information that the operation is associated with multiple
5339 /// lines. This makes the debugger work better at -O0, where there is a higher
5340 /// probability of having other instructions associated with that line.
5342 /// For IROrder, we keep the smaller of the two.
5343 SDNode *SelectionDAG::UpdateSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
5344 DebugLoc NLoc = N->getDebugLoc();
5345 if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
5346 (OLoc.getDebugLoc() != NLoc)) {
5347 N->setDebugLoc(DebugLoc());
5349 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
5350 N->setIROrder(Order);
5354 /// MorphNodeTo - This *mutates* the specified node to have the specified
5355 /// return type, opcode, and operands.
5357 /// Note that MorphNodeTo returns the resultant node. If there is already a
5358 /// node of the specified opcode and operands, it returns that node instead of
5359 /// the current one. Note that the SDLoc need not be the same.
5361 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5362 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5363 /// node, and because it doesn't require CSE recalculation for any of
5364 /// the node's users.
5366 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5367 SDVTList VTs, const SDValue *Ops,
5369 // If an identical node already exists, use it.
5370 void *IP = 0;
5371 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5372 FoldingSetNodeID ID;
5373 AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
5374 if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5375 return UpdateSDLocOnMergedSDNode(ON, SDLoc(N));
5378 if (!RemoveNodeFromCSEMaps(N))
5379 IP = 0;
5381 // Start the morphing.
5382 N->NodeType = Opc;
5383 N->ValueList = VTs.VTs;
5384 N->NumValues = VTs.NumVTs;
5386 // Clear the operands list, updating used nodes to remove this from their
5387 // use list. Keep track of any operands that become dead as a result.
5388 SmallPtrSet<SDNode*, 16> DeadNodeSet;
5389 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5390 SDUse &Use = *I++;
5391 SDNode *Used = Use.getNode();
5392 Use.set(SDValue());
5393 if (Used->use_empty())
5394 DeadNodeSet.insert(Used);
5397 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5398 // Initialize the memory references information.
5399 MN->setMemRefs(0, 0);
5400 // If NumOps is larger than the # of operands we can have in a
5401 // MachineSDNode, reallocate the operand list.
5402 if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5403 if (MN->OperandsNeedDelete)
5404 delete[] MN->OperandList;
5405 if (NumOps > array_lengthof(MN->LocalOperands))
5406 // We're creating a final node that will live unmorphed for the
5407 // remainder of the current SelectionDAG iteration, so we can allocate
5408 // the operands directly out of a pool with no recycling metadata.
5409 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5410 Ops, NumOps);
5411 else
5412 MN->InitOperands(MN->LocalOperands, Ops, NumOps);
5413 MN->OperandsNeedDelete = false;
5415 MN->InitOperands(MN->OperandList, Ops, NumOps);
5417 // If NumOps is larger than the # of operands we currently have, reallocate
5418 // the operand list.
5419 if (NumOps > N->NumOperands) {
5420 if (N->OperandsNeedDelete)
5421 delete[] N->OperandList;
5422 N->InitOperands(new SDUse[NumOps], Ops, NumOps);
5423 N->OperandsNeedDelete = true;
5425 N->InitOperands(N->OperandList, Ops, NumOps);
5428 // Delete any nodes that are still dead after adding the uses for the
5429 // new operands.
5430 if (!DeadNodeSet.empty()) {
5431 SmallVector<SDNode *, 16> DeadNodes;
5432 for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
5433 E = DeadNodeSet.end(); I != E; ++I)
5434 if ((*I)->use_empty())
5435 DeadNodes.push_back(*I);
5436 RemoveDeadNodes(DeadNodes);
5440 CSEMap.InsertNode(N, IP); // Memoize the new node.
5441 return N;
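// Illustrative sketch, not part of the original file ('NewOpc', 'Ops', and
// 'NumOps' are assumptions): because MorphNodeTo mutates N in place when
// possible, N's users observe the new opcode and types without any
// ReplaceAllUsesWith traffic; only a CSE hit yields a different node.
//
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
//   SDNode *Res = DAG.MorphNodeTo(N, NewOpc, VTs, Ops, NumOps);
//   if (Res != N) {
//     // An identical node already existed; the caller must redirect
//     // N's users to Res.
//   }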
5445 /// getMachineNode - These are used for target selectors to create a new node
5446 /// with specified return type(s), MachineInstr opcode, and operands.
5448 /// Note that getMachineNode returns the resultant node. If there is already a
5449 /// node of the specified opcode and operands, it returns that node instead of
5450 /// the current one.
5452 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
5453 SDVTList VTs = getVTList(VT);
5454 return getMachineNode(Opcode, dl, VTs, None);
5458 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
5459 SDVTList VTs = getVTList(VT);
5460 SDValue Ops[] = { Op1 };
5461 return getMachineNode(Opcode, dl, VTs, Ops);
5465 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5466 SDValue Op1, SDValue Op2) {
5467 SDVTList VTs = getVTList(VT);
5468 SDValue Ops[] = { Op1, Op2 };
5469 return getMachineNode(Opcode, dl, VTs, Ops);
5473 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5474 SDValue Op1, SDValue Op2, SDValue Op3) {
5475 SDVTList VTs = getVTList(VT);
5476 SDValue Ops[] = { Op1, Op2, Op3 };
5477 return getMachineNode(Opcode, dl, VTs, Ops);
5481 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
5482 ArrayRef<SDValue> Ops) {
5483 SDVTList VTs = getVTList(VT);
5484 return getMachineNode(Opcode, dl, VTs, Ops);
5488 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
5489 SDVTList VTs = getVTList(VT1, VT2);
5490 return getMachineNode(Opcode, dl, VTs, None);
5494 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5495 EVT VT1, EVT VT2, SDValue Op1) {
5496 SDVTList VTs = getVTList(VT1, VT2);
5497 SDValue Ops[] = { Op1 };
5498 return getMachineNode(Opcode, dl, VTs, Ops);
5502 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5503 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
5504 SDVTList VTs = getVTList(VT1, VT2);
5505 SDValue Ops[] = { Op1, Op2 };
5506 return getMachineNode(Opcode, dl, VTs, Ops);
5510 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5511 EVT VT1, EVT VT2, SDValue Op1,
5512 SDValue Op2, SDValue Op3) {
5513 SDVTList VTs = getVTList(VT1, VT2);
5514 SDValue Ops[] = { Op1, Op2, Op3 };
5515 return getMachineNode(Opcode, dl, VTs, Ops);
5519 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5521 ArrayRef<SDValue> Ops) {
5522 SDVTList VTs = getVTList(VT1, VT2);
5523 return getMachineNode(Opcode, dl, VTs, Ops);
5527 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5528 EVT VT1, EVT VT2, EVT VT3,
5529 SDValue Op1, SDValue Op2) {
5530 SDVTList VTs = getVTList(VT1, VT2, VT3);
5531 SDValue Ops[] = { Op1, Op2 };
5532 return getMachineNode(Opcode, dl, VTs, Ops);
5536 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5537 EVT VT1, EVT VT2, EVT VT3,
5538 SDValue Op1, SDValue Op2, SDValue Op3) {
5539 SDVTList VTs = getVTList(VT1, VT2, VT3);
5540 SDValue Ops[] = { Op1, Op2, Op3 };
5541 return getMachineNode(Opcode, dl, VTs, Ops);
5545 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5546 EVT VT1, EVT VT2, EVT VT3,
5547 ArrayRef<SDValue> Ops) {
5548 SDVTList VTs = getVTList(VT1, VT2, VT3);
5549 return getMachineNode(Opcode, dl, VTs, Ops);
5553 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
5554 EVT VT2, EVT VT3, EVT VT4,
5555 ArrayRef<SDValue> Ops) {
5556 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5557 return getMachineNode(Opcode, dl, VTs, Ops);
5561 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
5562 ArrayRef<EVT> ResultTys,
5563 ArrayRef<SDValue> Ops) {
5564 SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
5565 return getMachineNode(Opcode, dl, VTs, Ops);
5569 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
5570 ArrayRef<SDValue> OpsArray) {
5571 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
5572 MachineSDNode *N;
5573 void *IP = 0;
5574 const SDValue *Ops = OpsArray.data();
5575 unsigned NumOps = OpsArray.size();
5577 if (DoCSE) {
5578 FoldingSetNodeID ID;
5579 AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
5581 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5582 return cast<MachineSDNode>(UpdateSDLocOnMergedSDNode(E, DL));
5586 // Allocate a new MachineSDNode.
5587 N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
5588 DL.getDebugLoc(), VTs);
5590 // Initialize the operands list.
5591 if (NumOps > array_lengthof(N->LocalOperands))
5592 // We're creating a final node that will live unmorphed for the
5593 // remainder of the current SelectionDAG iteration, so we can allocate
5594 // the operands directly out of a pool with no recycling metadata.
5595 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5596 Ops, NumOps);
5597 else
5598 N->InitOperands(N->LocalOperands, Ops, NumOps);
5599 N->OperandsNeedDelete = false;
5601 if (DoCSE)
5602 CSEMap.InsertNode(N, IP);
5604 AllNodes.push_back(N);
5606 VerifyMachineNode(N);
5608 return N;
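// Illustrative sketch, not part of the original file ('MyTarget::LDri',
// 'dl', 'BasePtr', and 'OffsetImm' are assumptions): create, or CSE to, a
// machine node with a value result and a chain result.
//
//   MachineSDNode *Load =
//       DAG.getMachineNode(MyTarget::LDri, dl, MVT::i32, MVT::Other,
//                          BasePtr, OffsetImm);
//   SDValue Value(Load, 0), Chain(Load, 1);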
5611 /// getTargetExtractSubreg - A convenience function for creating
5612 /// TargetOpcode::EXTRACT_SUBREG nodes.
5613 SDValue
5614 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
5615 SDValue Operand) {
5616 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5617 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5618 VT, Operand, SRIdxVal);
5619 return SDValue(Subreg, 0);
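// Illustrative sketch, not part of the original file (the subregister index
// 'MyTarget::sub_lo' and the value 'Val64' are assumptions): extract the low
// half of a 64-bit value as an i32.
//
//   SDValue Lo = DAG.getTargetExtractSubreg(MyTarget::sub_lo, DL, MVT::i32,
//                                           Val64);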
5622 /// getTargetInsertSubreg - A convenience function for creating
5623 /// TargetOpcode::INSERT_SUBREG nodes.
5625 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
5626 SDValue Operand, SDValue Subreg) {
5627 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5628 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5629 VT, Operand, Subreg, SRIdxVal);
5630 return SDValue(Result, 0);
5633 /// getNodeIfExists - Get the specified node if it's already available, or
5634 /// else return NULL.
5635 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5636 const SDValue *Ops, unsigned NumOps) {
5637 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5638 FoldingSetNodeID ID;
5639 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
5640 void *IP = 0;
5641 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5642 return E;
5647 /// getDbgValue - Creates an SDDbgValue node.
5650 SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
5651 DebugLoc DL, unsigned O) {
5652 return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
5656 SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
5657 DebugLoc DL, unsigned O) {
5658 return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
5662 SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
5663 DebugLoc DL, unsigned O) {
5664 return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
5669 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5670 /// pointed to by a use iterator is deleted, increment the use iterator
5671 /// so that it doesn't dangle.
5673 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5674 SDNode::use_iterator &UI;
5675 SDNode::use_iterator &UE;
5677 virtual void NodeDeleted(SDNode *N, SDNode *E) {
5678 // Increment the iterator as needed.
5679 while (UI != UE && N == *UI)
5680 ++UI;
5683 public:
5684 RAUWUpdateListener(SelectionDAG &d,
5685 SDNode::use_iterator &ui,
5686 SDNode::use_iterator &ue)
5687 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5692 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5693 /// This can cause recursive merging of nodes in the DAG.
5695 /// This version assumes From has a single result value.
5697 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5698 SDNode *From = FromN.getNode();
5699 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5700 "Cannot replace with this method!");
5701 assert(From != To.getNode() && "Cannot replace uses of with self");
5703 // Iterate over all the existing uses of From. New uses will be added
5704 // to the beginning of the use list, which we avoid visiting.
5705 // This specifically avoids visiting uses of From that arise while the
5706 // replacement is happening, because any such uses would be the result
5707 // of CSE: If an existing node looks like From after one of its operands
5708 // is replaced by To, we don't want to replace all of its users with To
5709 // too. See PR3018 for more info.
5710 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5711 RAUWUpdateListener Listener(*this, UI, UE);
5712 while (UI != UE) {
5713 SDNode *User = *UI;
5715 // This node is about to morph, remove its old self from the CSE maps.
5716 RemoveNodeFromCSEMaps(User);
5718 // A user can appear in a use list multiple times, and when this
5719 // happens the uses are usually next to each other in the list.
5720 // To help reduce the number of CSE recomputations, process all
5721 // the uses of this user that we can find this way.
5722 do {
5723 SDUse &Use = UI.getUse();
5724 ++UI;
5725 Use.set(To);
5726 } while (UI != UE && *UI == User);
5728 // Now that we have modified User, add it back to the CSE maps. If it
5729 // already exists there, recursively merge the results together.
5730 AddModifiedNodeToCSEMaps(User);
5733 // If we just RAUW'd the root, take note.
5734 if (FromN == getRoot())
5735 setRoot(To);
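// Illustrative sketch, not part of the original file (hypothetical combiner
// code): fold a node proven to be a no-op by redirecting every use to its
// input; the dead node can then be reclaimed by the DAG.
//
//   DAG.ReplaceAllUsesWith(SDValue(N, 0), N->getOperand(0));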
5738 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5739 /// This can cause recursive merging of nodes in the DAG.
5741 /// This version assumes that for each value of From, there is a
5742 /// corresponding value in To in the same position with the same type.
5744 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
5746 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
5747 assert((!From->hasAnyUseOfValue(i) ||
5748 From->getValueType(i) == To->getValueType(i)) &&
5749 "Cannot use this version of ReplaceAllUsesWith!");
5752 // Handle the trivial case.
5756 // Iterate over just the existing users of From. See the comments in
5757 // the ReplaceAllUsesWith above.
5758 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5759 RAUWUpdateListener Listener(*this, UI, UE);
5760 while (UI != UE) {
5761 SDNode *User = *UI;
5763 // This node is about to morph, remove its old self from the CSE maps.
5764 RemoveNodeFromCSEMaps(User);
5766 // A user can appear in a use list multiple times, and when this
5767 // happens the uses are usually next to each other in the list.
5768 // To help reduce the number of CSE recomputations, process all
5769 // the uses of this user that we can find this way.
5770 do {
5771 SDUse &Use = UI.getUse();
5772 ++UI;
5773 Use.setNode(To);
5774 } while (UI != UE && *UI == User);
5776 // Now that we have modified User, add it back to the CSE maps. If it
5777 // already exists there, recursively merge the results together.
5778 AddModifiedNodeToCSEMaps(User);
5781 // If we just RAUW'd the root, take note.
5782 if (From == getRoot().getNode())
5783 setRoot(SDValue(To, getRoot().getResNo()));
5786 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5787 /// This can cause recursive merging of nodes in the DAG.
5789 /// This version can replace From with any result values. To must match the
5790 /// number and types of values returned by From.
5791 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
5792 if (From->getNumValues() == 1) // Handle the simple case efficiently.
5793 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
5795 // Iterate over just the existing users of From. See the comments in
5796 // the ReplaceAllUsesWith above.
5797 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5798 RAUWUpdateListener Listener(*this, UI, UE);
5799 while (UI != UE) {
5800 SDNode *User = *UI;
5802 // This node is about to morph, remove its old self from the CSE maps.
5803 RemoveNodeFromCSEMaps(User);
5805 // A user can appear in a use list multiple times, and when this
5806 // happens the uses are usually next to each other in the list.
5807 // To help reduce the number of CSE recomputations, process all
5808 // the uses of this user that we can find this way.
5809 do {
5810 SDUse &Use = UI.getUse();
5811 const SDValue &ToOp = To[Use.getResNo()];
5812 ++UI;
5813 Use.set(ToOp);
5814 } while (UI != UE && *UI == User);
5816 // Now that we have modified User, add it back to the CSE maps. If it
5817 // already exists there, recursively merge the results together.
5818 AddModifiedNodeToCSEMaps(User);
5821 // If we just RAUW'd the root, take note.
5822 if (From == getRoot().getNode())
5823 setRoot(SDValue(To[getRoot().getResNo()]));
5826 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
5827 /// uses of other values produced by From.getNode() alone.
5829 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
5830 // Handle the really simple, really trivial case efficiently.
5831 if (From == To) return;
5833 // Handle the simple, trivial case efficiently.
5834 if (From.getNode()->getNumValues() == 1) {
5835 ReplaceAllUsesWith(From, To);
5839 // Iterate over just the existing users of From. See the comments in
5840 // the ReplaceAllUsesWith above.
5841 SDNode::use_iterator UI = From.getNode()->use_begin(),
5842 UE = From.getNode()->use_end();
5843 RAUWUpdateListener Listener(*this, UI, UE);
5844 while (UI != UE) {
5845 SDNode *User = *UI;
5846 bool UserRemovedFromCSEMaps = false;
5848 // A user can appear in a use list multiple times, and when this
5849 // happens the uses are usually next to each other in the list.
5850 // To help reduce the number of CSE recomputations, process all
5851 // the uses of this user that we can find this way.
5852 do {
5853 SDUse &Use = UI.getUse();
5855 // Skip uses of different values from the same node.
5856 if (Use.getResNo() != From.getResNo()) {
5857 ++UI;
5858 continue;
5861 // If this node hasn't been modified yet, it's still in the CSE maps,
5862 // so remove its old self from the CSE maps.
5863 if (!UserRemovedFromCSEMaps) {
5864 RemoveNodeFromCSEMaps(User);
5865 UserRemovedFromCSEMaps = true;
5868 ++UI;
5869 Use.set(To);
5870 } while (UI != UE && *UI == User);
5872 // We are iterating over all uses of the From node, so if a use
5873 // doesn't use the specific value, no changes are made.
5874 if (!UserRemovedFromCSEMaps)
5877 // Now that we have modified User, add it back to the CSE maps. If it
5878 // already exists there, recursively merge the results together.
5879 AddModifiedNodeToCSEMaps(User);
5882 // If we just RAUW'd the root, take note.
5883 if (From == getRoot())
5884 setRoot(To);
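// Illustrative sketch, not part of the original file: bypass only the chain
// result of a load that is being removed, leaving uses of its value result
// untouched.
//
//   // LD is a hypothetical LoadSDNode* that is about to go away.
//   DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), LD->getChain());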
5888 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
5889 /// to record information about a use.
5890 struct UseMemo {
5891 SDNode *User;
5892 unsigned Index;
5893 SDUse *Use;
5894 };
5896 /// operator< - Sort Memos by User.
5897 bool operator<(const UseMemo &L, const UseMemo &R) {
5898 return (intptr_t)L.User < (intptr_t)R.User;
5902 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
5903 /// uses of other values produced by From.getNode() alone. The same value
5904 /// may appear in both the From and To list.
5906 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
5907 const SDValue *To,
5908 unsigned Num) {
5909 // Handle the simple, trivial case efficiently.
5910 if (Num == 1)
5911 return ReplaceAllUsesOfValueWith(*From, *To);
5913 // Read up all the uses and make records of them. This helps
5914 // processing new uses that are introduced during the
5915 // replacement process.
5916 SmallVector<UseMemo, 4> Uses;
5917 for (unsigned i = 0; i != Num; ++i) {
5918 unsigned FromResNo = From[i].getResNo();
5919 SDNode *FromNode = From[i].getNode();
5920 for (SDNode::use_iterator UI = FromNode->use_begin(),
5921 E = FromNode->use_end(); UI != E; ++UI) {
5922 SDUse &Use = UI.getUse();
5923 if (Use.getResNo() == FromResNo) {
5924 UseMemo Memo = { *UI, i, &Use };
5925 Uses.push_back(Memo);
5930 // Sort the uses, so that all the uses from a given User are together.
5931 std::sort(Uses.begin(), Uses.end());
5933 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
5934 UseIndex != UseIndexEnd; ) {
5935 // We know that this user uses some value of From. If it is the right
5936 // value, update it.
5937 SDNode *User = Uses[UseIndex].User;
5939 // This node is about to morph, remove its old self from the CSE maps.
5940 RemoveNodeFromCSEMaps(User);
5942 // The Uses array is sorted, so all the uses for a given User
5943 // are next to each other in the list.
5944 // To help reduce the number of CSE recomputations, process all
5945 // the uses of this user that we can find this way.
5946 do {
5947 unsigned i = Uses[UseIndex].Index;
5948 SDUse &Use = *Uses[UseIndex].Use;
5949 ++UseIndex;
5951 Use.set(To[i]);
5952 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
5954 // Now that we have modified User, add it back to the CSE maps. If it
5955 // already exists there, recursively merge the results together.
5956 AddModifiedNodeToCSEMaps(User);
5960 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
5961 /// based on their topological order. It returns the maximum id.
5963 unsigned SelectionDAG::AssignTopologicalOrder() {
5965 unsigned DAGSize = 0;
5967 // SortedPos tracks the progress of the algorithm. Nodes before it are
5968 // sorted, nodes after it are unsorted. When the algorithm completes
5969 // it is at the end of the list.
5970 allnodes_iterator SortedPos = allnodes_begin();
5972 // Visit all the nodes. Move nodes with no operands to the front of
5973 // the list immediately. Annotate nodes that do have operands with their
5974 // operand count. Before we do this, the Node Id fields of the nodes
5975 // may contain arbitrary values. After, the Node Id fields for nodes
5976 // before SortedPos will contain the topological sort index, and the
5977 // Node Id fields for nodes at SortedPos and after will contain the
5978 // count of outstanding operands.
5979 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
5980 SDNode *N = I++;
5981 checkForCycles(N);
5982 unsigned Degree = N->getNumOperands();
5983 if (Degree == 0) {
5984 // A node with no operands: add it to the result array immediately.
5985 N->setNodeId(DAGSize++);
5986 allnodes_iterator Q = N;
5988 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
5989 assert(SortedPos != AllNodes.end() && "Overran node list");
5992 // Temporarily use the Node Id as scratch space for the degree count.
5993 N->setNodeId(Degree);
5997 // Visit all the nodes. As we iterate, move nodes into sorted order,
5998 // such that by the time the end is reached all nodes will be sorted.
5999 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
6000 SDNode *N = I;
6001 checkForCycles(N);
6002 // N is in sorted position, so all its uses have one less operand
6003 // that needs to be sorted.
6004 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
6005 UI != UE; ++UI) {
6006 SDNode *P = *UI;
6007 unsigned Degree = P->getNodeId();
6008 assert(Degree != 0 && "Invalid node degree");
6009 --Degree;
6010 if (Degree == 0) {
6011 // All of P's operands are sorted, so P may be sorted now.
6012 P->setNodeId(DAGSize++);
6014 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
6015 assert(SortedPos != AllNodes.end() && "Overran node list");
6018 // Update P's outstanding operand count.
6019 P->setNodeId(Degree);
6022 if (I == SortedPos) {
6025 dbgs() << "Overran sorted position:\n";
6028 llvm_unreachable(0);
6032 assert(SortedPos == AllNodes.end() &&
6033 "Topological sort incomplete!");
6034 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
6035 "First node in topological sort is not the entry token!");
6036 assert(AllNodes.front().getNodeId() == 0 &&
6037 "First node in topological sort has non-zero id!");
6038 assert(AllNodes.front().getNumOperands() == 0 &&
6039 "First node in topological sort has operands!");
6040 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
6041 "Last node in topologic sort has unexpected id!");
6042 assert(AllNodes.back().use_empty() &&
6043 "Last node in topologic sort has users!");
6044 assert(DAGSize == allnodes_size() && "Node count mismatch!");
6046 return DAGSize;
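// Illustrative sketch, not part of the original file: after the sort, every
// node's id exceeds the ids of all of its operands, so one forward walk over
// the node list visits definitions before uses.
//
//   DAG.AssignTopologicalOrder();
//   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
//        E = DAG.allnodes_end(); I != E; ++I) {
//     // Operands of *I have already been visited here.
//   }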
6048 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null, the
6049 /// value is produced by SD.
6050 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
6051 DbgInfo->add(DB, SD, isParameter);
6052 if (SD)
6053 SD->setHasDebugValue(true);
6056 /// TransferDbgValues - Transfer SDDbgValues.
6057 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
6058 if (From == To || !From.getNode()->getHasDebugValue())
6059 return;
6060 SDNode *FromNode = From.getNode();
6061 SDNode *ToNode = To.getNode();
6062 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
6063 SmallVector<SDDbgValue *, 2> ClonedDVs;
6064 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
6066 SDDbgValue *Dbg = *I;
6067 if (Dbg->getKind() == SDDbgValue::SDNODE) {
6068 SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
6069 Dbg->getOffset(), Dbg->getDebugLoc(),
6070 Dbg->getOrder());
6071 ClonedDVs.push_back(Clone);
6074 for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
6075 E = ClonedDVs.end(); I != E; ++I)
6076 AddDbgValue(*I, ToNode, false);
6079 //===----------------------------------------------------------------------===//
6081 //===----------------------------------------------------------------------===//
6083 HandleSDNode::~HandleSDNode() {
6084 DropOperands();
6087 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
6088 DebugLoc DL, const GlobalValue *GA,
6089 EVT VT, int64_t o, unsigned char TF)
6090 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
6094 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
6095 SDValue X, unsigned SrcAS,
6097 : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
6098 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
6100 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6101 EVT memvt, MachineMemOperand *mmo)
6102 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
6103 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6104 MMO->isNonTemporal(), MMO->isInvariant());
6105 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6106 assert(isNonTemporal() == MMO->isNonTemporal() &&
6107 "Non-temporal encoding error!");
6108 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
6111 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
6112 const SDValue *Ops, unsigned NumOps, EVT memvt,
6113 MachineMemOperand *mmo)
6114 : SDNode(Opc, Order, dl, VTs, Ops, NumOps),
6115 MemoryVT(memvt), MMO(mmo) {
6116 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
6117 MMO->isNonTemporal(), MMO->isInvariant());
6118 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
6119 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
6122 /// Profile - Gather unique data for the node.
6124 void SDNode::Profile(FoldingSetNodeID &ID) const {
6125 AddNodeIDNode(ID, this);
6130 std::vector<EVT> VTs;
6133 VTs.reserve(MVT::LAST_VALUETYPE);
6134 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
6135 VTs.push_back(MVT((MVT::SimpleValueType)i));
6140 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
6141 static ManagedStatic<EVTArray> SimpleVTArray;
6142 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
6144 /// getValueTypeList - Return a pointer to the specified value type.
6146 const EVT *SDNode::getValueTypeList(EVT VT) {
6147 if (VT.isExtended()) {
6148 sys::SmartScopedLock<true> Lock(*VTMutex);
6149 return &(*EVTs->insert(VT).first);
6151 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
6152 "Value type out of range!");
6153 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
6157 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
6158 /// indicated value. This method ignores uses of other values defined by this
6159 /// operation.
6160 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
6161 assert(Value < getNumValues() && "Bad value!");
6163 // TODO: Only iterate over uses of a given value of the node
6164 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
6165 if (UI.getUse().getResNo() == Value) {
6166 if (NUses == 0)
6167 return false;
6168 --NUses;
6172 // Found exactly the right number of uses?
6173 return NUses == 0;
6177 /// hasAnyUseOfValue - Return true if there are any use of the indicated
6178 /// value. This method ignores uses of other values defined by this operation.
6179 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
6180 assert(Value < getNumValues() && "Bad value!");
6182 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
6183 if (UI.getUse().getResNo() == Value)
6184 return true;
6186 return false;
6190 /// isOnlyUserOf - Return true if this node is the only user of N.
6192 bool SDNode::isOnlyUserOf(SDNode *N) const {
6194 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
6205 /// isOperandOf - Return true if this node is an operand of N.
6207 bool SDValue::isOperandOf(SDNode *N) const {
6208 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6209 if (*this == N->getOperand(i))
6210 return true;
6211 return false;
6214 bool SDNode::isOperandOf(SDNode *N) const {
6215 for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6216 if (this == N->OperandList[i].getNode())
6217 return true;
6218 return false;
6221 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6222 /// be a chain) reaches the specified operand without crossing any
6223 /// side-effecting instructions on any chain path. In practice, this looks
6224 /// through token factors and non-volatile loads. In order to remain efficient,
6225 /// this only looks a couple of nodes in; it does not do an exhaustive search.
6226 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6227 unsigned Depth) const {
6228 if (*this == Dest) return true;
6230 // Don't search too deeply, we just want to be able to see through
6231 // TokenFactors, etc.
6232 if (Depth == 0) return false;
6234 // If this is a token factor, all inputs to the TF happen in parallel. If any
6235 // of the operands of the TF does not reach Dest, then we cannot do the xform.
6236 if (getOpcode() == ISD::TokenFactor) {
6237 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6238 if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6239 return false;
6240 return true;
6243 // Loads don't have side effects, look through them.
6244 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6245 if (!Ld->isVolatile())
6246 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
6251 /// hasPredecessor - Return true if N is a predecessor of this node.
6252 /// N is either an operand of this node, or can be reached by recursively
6253 /// traversing up the operands.
6254 /// NOTE: This is an expensive method. Use it carefully.
6255 bool SDNode::hasPredecessor(const SDNode *N) const {
6256 SmallPtrSet<const SDNode *, 32> Visited;
6257 SmallVector<const SDNode *, 16> Worklist;
6258 return hasPredecessorHelper(N, Visited, Worklist);
6262 SDNode::hasPredecessorHelper(const SDNode *N,
6263 SmallPtrSet<const SDNode *, 32> &Visited,
6264 SmallVectorImpl<const SDNode *> &Worklist) const {
6265 if (Visited.empty()) {
6266 Worklist.push_back(this);
6268 // Take a look in the visited set. If we've already encountered this node
6269 // we needn't search further.
6270 if (Visited.count(N))
6271 return true;
6274 // Haven't visited N yet. Continue the search.
6275 while (!Worklist.empty()) {
6276 const SDNode *M = Worklist.pop_back_val();
6277 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6278 SDNode *Op = M->getOperand(i).getNode();
6279 if (Visited.insert(Op))
6280 Worklist.push_back(Op);
6281 if (Op == N)
6282 return true;
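// Illustrative sketch, not part of the original file ('User', 'A', and 'B'
// are assumed nodes): holding the Visited set and Worklist externally lets
// repeated hasPredecessorHelper queries reuse the portion of the DAG that has
// already been explored.
//
//   SmallPtrSet<const SDNode *, 32> Visited;
//   SmallVector<const SDNode *, 16> Worklist;
//   bool AIsPred = User->hasPredecessorHelper(A, Visited, Worklist);
//   bool BIsPred = User->hasPredecessorHelper(B, Visited, Worklist);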
6289 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6290 assert(Num < NumOperands && "Invalid child # of SDNode!");
6291 return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
6294 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6295 assert(N->getNumValues() == 1 &&
6296 "Can't unroll a vector with multiple results!");
6298 EVT VT = N->getValueType(0);
6299 unsigned NE = VT.getVectorNumElements();
6300 EVT EltVT = VT.getVectorElementType();
6301 SDLoc dl(N);
6302 unsigned i;
6303 SmallVector<SDValue, 8> Scalars;
6304 SmallVector<SDValue, 4> Operands(N->getNumOperands());
6306 // If ResNE is 0, fully unroll the vector op.
6307 if (ResNE == 0)
6308 ResNE = NE;
6309 else if (NE > ResNE)
6310 NE = ResNE;
6313 for (i= 0; i != NE; ++i) {
6314 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6315 SDValue Operand = N->getOperand(j);
6316 EVT OperandVT = Operand.getValueType();
6317 if (OperandVT.isVector()) {
6318 // A vector operand; extract a single element.
6319 const TargetLowering *TLI = TM.getTargetLowering();
6320 EVT OperandEltVT = OperandVT.getVectorElementType();
6321 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6322 OperandEltVT,
6323 Operand,
6324 getConstant(i, TLI->getVectorIdxTy()));
6326 // A scalar operand; just use it as is.
6327 Operands[j] = Operand;
6331 switch (N->getOpcode()) {
6332 default:
6333 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6334 &Operands[0], Operands.size()));
6335 break;
6336 case ISD::VSELECT:
6337 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT,
6338 &Operands[0], Operands.size()));
6339 break;
6340 case ISD::SHL:
6341 case ISD::SRA:
6342 case ISD::SRL:
6343 case ISD::ROTL:
6344 case ISD::ROTR:
6345 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6346 getShiftAmountOperand(Operands[0].getValueType(),
6347 Operands[1])));
6348 break;
6349 case ISD::SIGN_EXTEND_INREG:
6350 case ISD::FP_ROUND_INREG: {
6351 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6352 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6353 Operands[0],
6354 getValueType(ExtVT)));
6359 for (; i < ResNE; ++i)
6360 Scalars.push_back(getUNDEF(EltVT));
6362 return getNode(ISD::BUILD_VECTOR, dl,
6363 EVT::getVectorVT(*getContext(), EltVT, ResNE),
6364 &Scalars[0], Scalars.size());
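// Illustrative sketch, not part of the original file (hypothetical
// legalization code): fully scalarize a vector operation; passing ResNE == 0
// unrolls to the full element count of N's vector type.
//
//   SDValue Unrolled = DAG.UnrollVectorOp(N, 0);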
6368 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
6369 /// location that is 'Dist' units away from the location that the 'Base' load
6370 /// is loading from.
6371 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
6372 unsigned Bytes, int Dist) const {
6373 if (LD->getChain() != Base->getChain())
6374 return false;
6375 EVT VT = LD->getValueType(0);
6376 if (VT.getSizeInBits() / 8 != Bytes)
6377 return false;
6379 SDValue Loc = LD->getOperand(1);
6380 SDValue BaseLoc = Base->getOperand(1);
6381 if (Loc.getOpcode() == ISD::FrameIndex) {
6382 if (BaseLoc.getOpcode() != ISD::FrameIndex)
6383 return false;
6384 const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
6385 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
6386 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6387 int FS = MFI->getObjectSize(FI);
6388 int BFS = MFI->getObjectSize(BFI);
6389 if (FS != BFS || FS != (int)Bytes) return false;
6390 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6394 if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
6395 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
6396 return true;
6398 const GlobalValue *GV1 = NULL;
6399 const GlobalValue *GV2 = NULL;
6400 int64_t Offset1 = 0;
6401 int64_t Offset2 = 0;
6402 const TargetLowering *TLI = TM.getTargetLowering();
6403 bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6404 bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6405 if (isGA1 && isGA2 && GV1 == GV2)
6406 return Offset1 == (Offset2 + Dist*Bytes);
6411 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
6412 /// it cannot be inferred.
6413 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
6414 // If this is a GlobalAddress + cst, return the alignment.
6415 const GlobalValue *GV;
6416 int64_t GVOffset = 0;
6417 const TargetLowering *TLI = TM.getTargetLowering();
6418 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
6419 unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
6420 APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
6421 llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
6422 TLI->getDataLayout());
6423 unsigned AlignBits = KnownZero.countTrailingOnes();
6424 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
6425 if (Align)
6426 return MinAlign(Align, GVOffset);
6429 // If this is a direct reference to a stack slot, use information about the
6430 // stack slot's alignment.
6431 int FrameIdx = 1 << 31;
6432 int64_t FrameOffset = 0;
6433 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6434 FrameIdx = FI->getIndex();
6435 } else if (isBaseWithConstantOffset(Ptr) &&
6436 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6438 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6439 FrameOffset = Ptr.getConstantOperandVal(1);
6442 if (FrameIdx != (1 << 31)) {
6443 const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6444 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6445 FrameOffset);
6446 return FIInfoAlign;
6452 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
6453 /// which is split (or expanded) into two not necessarily identical pieces.
6454 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
6455 // Currently all types are split in half.
6456 EVT LoVT, HiVT;
6457 if (!VT.isVector()) {
6458 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
6460 unsigned NumElements = VT.getVectorNumElements();
6461 assert(!(NumElements & 1) && "Splitting vector, but not in half!");
6462 LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
6465 return std::make_pair(LoVT, HiVT);
6468 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
6469 /// low/high part.
6470 std::pair<SDValue, SDValue>
6471 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
6472 const EVT &HiVT) {
6473 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
6474 N.getValueType().getVectorNumElements() &&
6475 "More vector elements requested than available!");
6476 SDValue Lo, Hi;
6477 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
6478 getConstant(0, TLI->getVectorIdxTy()));
6479 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
6480 getConstant(LoVT.getVectorNumElements(), TLI->getVectorIdxTy()));
6481 return std::make_pair(Lo, Hi);
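// Illustrative sketch, not part of the original file ('N' is an assumed
// vector SDValue and 'DL' an SDLoc): split a vector value into its two
// halves using the two helpers above.
//
//   std::pair<EVT, EVT> VTs = DAG.GetSplitDestVTs(N.getValueType());
//   std::pair<SDValue, SDValue> Halves =
//       DAG.SplitVector(N, DL, VTs.first, VTs.second);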
6484 // getAddressSpace - Return the address space this GlobalAddress belongs to.
6485 unsigned GlobalAddressSDNode::getAddressSpace() const {
6486 return getGlobal()->getType()->getAddressSpace();
6490 Type *ConstantPoolSDNode::getType() const {
6491 if (isMachineConstantPoolEntry())
6492 return Val.MachineCPVal->getType();
6493 return Val.ConstVal->getType();
6496 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6497 APInt &SplatUndef,
6498 unsigned &SplatBitSize,
6499 bool &HasAnyUndefs,
6500 unsigned MinSplatBits,
6501 bool isBigEndian) const {
6502 EVT VT = getValueType(0);
6503 assert(VT.isVector() && "Expected a vector type");
6504 unsigned sz = VT.getSizeInBits();
6505 if (MinSplatBits > sz)
6506 return false;
6508 SplatValue = APInt(sz, 0);
6509 SplatUndef = APInt(sz, 0);
6511 // Get the bits. Bits with undefined values (when the corresponding element
6512 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6513 // in SplatValue. If any of the values are not constant, give up and return
6514 // false.
6515 unsigned int nOps = getNumOperands();
6516 assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6517 unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6519 for (unsigned j = 0; j < nOps; ++j) {
6520 unsigned i = isBigEndian ? nOps-1-j : j;
6521 SDValue OpVal = getOperand(i);
6522 unsigned BitPos = j * EltBitSize;
6524 if (OpVal.getOpcode() == ISD::UNDEF)
6525 SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6526 else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6527 SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6528 zextOrTrunc(sz) << BitPos;
6529 else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6530 SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
6535 // The build_vector is all constants or undefs. Find the smallest element
6536 // size that splats the vector.
6538 HasAnyUndefs = (SplatUndef != 0);
6540 while (sz > 8) {
6541 unsigned HalfSize = sz / 2;
6542 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6543 APInt LowValue = SplatValue.trunc(HalfSize);
6544 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6545 APInt LowUndef = SplatUndef.trunc(HalfSize);
6547 // If the two halves do not match (ignoring undef bits), stop here.
6548 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6549 MinSplatBits > HalfSize)
6552 SplatValue = HighValue | LowValue;
6553 SplatUndef = HighUndef & LowUndef;
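// Illustrative sketch, not part of the original file ('BV' is an assumed
// BuildVectorSDNode*): test whether a build_vector splats one constant byte.
//
//   APInt SplatValue, SplatUndef;
//   unsigned SplatBitSize;
//   bool HasAnyUndefs;
//   if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
//                           HasAnyUndefs, 8, /*isBigEndian=*/false) &&
//       SplatBitSize <= 8) {
//     // SplatValue now holds the splatted constant.
//   }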
6562 bool BuildVectorSDNode::isConstant() const {
6563 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
6564 unsigned Opc = getOperand(i).getOpcode();
6565 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
6571 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6572 // Find the first non-undef value in the shuffle mask.
6573 unsigned i, e;
6574 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6575 /* search */;
6577 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6579 // Make sure all remaining elements are either undef or the same as the first
6580 // non-undef value.
6581 for (int Idx = Mask[i]; i != e; ++i)
6582 if (Mask[i] >= 0 && Mask[i] != Idx)
6583 return false;
6588 static void checkForCyclesHelper(const SDNode *N,
6589 SmallPtrSet<const SDNode*, 32> &Visited,
6590 SmallPtrSet<const SDNode*, 32> &Checked) {
6591 // If this node has already been checked, don't check it again.
6592 if (Checked.count(N))
6593 return;
6595 // If a node has already been visited on this depth-first walk, reject it as
6596 // a cycle.
6597 if (!Visited.insert(N)) {
6598 dbgs() << "Offending node:\n";
6600 errs() << "Detected cycle in SelectionDAG\n";
6604 for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6605 checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
6612 void llvm::checkForCycles(const llvm::SDNode *N) {
6614 assert(N && "Checking nonexistent SDNode");
6615 SmallPtrSet<const SDNode*, 32> visited;
6616 SmallPtrSet<const SDNode*, 32> checked;
6617 checkForCyclesHelper(N, visited, checked);
6621 void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
6622 checkForCycles(DAG->getRoot().getNode());