X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FSelectionDAG%2FLegalizeDAG.cpp;h=94080a04e39efb70587413b50bc91271b5eb17ca;hb=6154f6c9292179fab6346ae8336f2ad790b52028;hp=51404dcb5c4c437c7d69adfab1fe383b77eb100b;hpb=cfa7ca19974a463b47f16a61bfecf7f532dca07d;p=oota-llvm.git diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index 51404dcb5c4..94080a04e39 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -11,20 +11,19 @@ // //===----------------------------------------------------------------------===// -#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/Analysis/DebugInfo.h" +#include "llvm/CodeGen/Analysis.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineJumpTableInfo.h" #include "llvm/CodeGen/MachineModuleInfo.h" -#include "llvm/CodeGen/DwarfWriter.h" -#include "llvm/Analysis/DebugInfo.h" #include "llvm/CodeGen/PseudoSourceValue.h" -#include "llvm/Target/TargetFrameInfo.h" +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetData.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" -#include "llvm/Target/TargetSubtarget.h" #include "llvm/CallingConv.h" #include "llvm/Constants.h" #include "llvm/DerivedTypes.h" @@ -39,7 +38,6 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/SmallPtrSet.h" -#include using namespace llvm; //===----------------------------------------------------------------------===// @@ -56,7 +54,8 @@ using namespace llvm; /// namespace { class SelectionDAGLegalize { - TargetLowering &TLI; + const TargetMachine &TM; + const TargetLowering &TLI; SelectionDAG &DAG; CodeGenOpt::Level OptLevel; @@ -67,11 +66,6 @@ class SelectionDAGLegalize { /// against each other, including inserted libcalls. SDValue LastCALLSEQ_END; - /// IsLegalizingCall - This member is used *only* for purposes of providing - /// helpful assertions that a libcall isn't created while another call is - /// being legalized (which could lead to non-serialized call sequences). - bool IsLegalizingCall; - enum LegalizeAction { Legal, // The target natively supports this operation. Promote, // This operation should be executed in a larger type. @@ -93,6 +87,9 @@ class SelectionDAGLegalize { // If someone requests legalization of the new node, return itself. if (From != To) LegalizedNodes.insert(std::make_pair(To, To)); + + // Transfer SDDbgValues. + DAG.TransferDbgValues(From, To); } public: @@ -102,8 +99,7 @@ public: /// it is already legal or we need to expand it into multiple registers of /// smaller integer type, or we need to promote it to a larger type. LegalizeAction getTypeAction(EVT VT) const { - return - (LegalizeAction)ValueTypeActions.getTypeAction(*DAG.getContext(), VT); + return (LegalizeAction)ValueTypeActions.getTypeAction(VT); } /// isTypeLegal - Return true if this type is legal on this target. @@ -136,7 +132,7 @@ private: /// whose vector element type is narrower than the original shuffle type. /// e.g. 
<0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3> SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, - SDValue N1, SDValue N2, + SDValue N1, SDValue N2, SmallVectorImpl &Mask) const; bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest, @@ -146,6 +142,8 @@ private: DebugLoc dl); SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned); + std::pair ExpandChainLibCall(RTLIB::Libcall LC, + SDNode *Node, bool isSigned); SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32, RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80, RTLIB::Libcall Call_PPCF128); @@ -173,8 +171,11 @@ private: SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl); SDValue ExpandExtractFromVectorThroughStack(SDValue Op); + SDValue ExpandInsertToVectorThroughStack(SDValue Op); SDValue ExpandVectorBuildThroughStack(SDNode* Node); + std::pair ExpandAtomic(SDNode *Node); + void ExpandNode(SDNode *Node, SmallVectorImpl &Results); void PromoteNode(SDNode *Node, SmallVectorImpl &Results); }; @@ -184,8 +185,8 @@ private: /// performs the same shuffe in terms of order or result bytes, but on a type /// whose vector element type is narrower than the original shuffle type. /// e.g. <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3> -SDValue -SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, +SDValue +SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, SDValue N1, SDValue N2, SmallVectorImpl &Mask) const { unsigned NumMaskElts = VT.getVectorNumElements(); @@ -196,12 +197,12 @@ SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, if (NumEltsGrowth == 1) return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]); - + SmallVector NewMask; for (unsigned i = 0; i != NumMaskElts; ++i) { int Idx = Mask[i]; for (unsigned j = 0; j != NumEltsGrowth; ++j) { - if (Idx < 0) + if (Idx < 0) NewMask.push_back(-1); else NewMask.push_back(Idx * NumEltsGrowth + j); @@ -214,7 +215,8 @@ SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag, CodeGenOpt::Level ol) - : TLI(dag.getTargetLoweringInfo()), DAG(dag), OptLevel(ol), + : TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()), + DAG(dag), OptLevel(ol), ValueTypeActions(TLI.getValueTypeActions()) { assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE && "Too many value types for ValueTypeActions to hold!"); @@ -222,7 +224,6 @@ SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag, void SelectionDAGLegalize::LegalizeDAG() { LastCALLSEQ_END = DAG.getEntryNode(); - IsLegalizingCall = false; // The legalize process is inherently a bottom-up recursive process (users // legalize their uses before themselves). Given infinite stack space, we @@ -249,9 +250,16 @@ void SelectionDAGLegalize::LegalizeDAG() { /// FindCallEndFromCallStart - Given a chained node that is part of a call /// sequence, find the CALLSEQ_END node that terminates the call sequence. -static SDNode *FindCallEndFromCallStart(SDNode *Node) { - if (Node->getOpcode() == ISD::CALLSEQ_END) - return Node; +static SDNode *FindCallEndFromCallStart(SDNode *Node, int depth = 0) { + // Nested CALLSEQ_START/END constructs aren't yet legal, + // but we can DTRT and handle them correctly here. 
+ if (Node->getOpcode() == ISD::CALLSEQ_START) + depth++; + else if (Node->getOpcode() == ISD::CALLSEQ_END) { + depth--; + if (depth == 0) + return Node; + } if (Node->use_empty()) return 0; // No CallSeqEnd @@ -281,7 +289,7 @@ static SDNode *FindCallEndFromCallStart(SDNode *Node) { SDNode *User = *UI; for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) if (User->getOperand(i) == TheChain) - if (SDNode *Result = FindCallEndFromCallStart(User)) + if (SDNode *Result = FindCallEndFromCallStart(User, depth)) return Result; } return 0; @@ -290,12 +298,26 @@ static SDNode *FindCallEndFromCallStart(SDNode *Node) { /// FindCallStartFromCallEnd - Given a chained node that is part of a call /// sequence, find the CALLSEQ_START node that initiates the call sequence. static SDNode *FindCallStartFromCallEnd(SDNode *Node) { + int nested = 0; assert(Node && "Didn't find callseq_start for a call??"); - if (Node->getOpcode() == ISD::CALLSEQ_START) return Node; - - assert(Node->getOperand(0).getValueType() == MVT::Other && - "Node doesn't have a token chain argument!"); - return FindCallStartFromCallEnd(Node->getOperand(0).getNode()); + while (Node->getOpcode() != ISD::CALLSEQ_START || nested) { + Node = Node->getOperand(0).getNode(); + assert(Node->getOperand(0).getValueType() == MVT::Other && + "Node doesn't have a token chain argument!"); + switch (Node->getOpcode()) { + default: + break; + case ISD::CALLSEQ_START: + if (!nested) + return Node; + nested--; + break; + case ISD::CALLSEQ_END: + nested++; + break; + } + } + return 0; } /// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to @@ -322,7 +344,8 @@ bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest, bool OperandsLeadToDest = false; for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) OperandsLeadToDest |= // If an operand leads to Dest, so do we. - LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest, NodesLeadingTo); + LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest, + NodesLeadingTo); if (OperandsLeadToDest) { NodesLeadingTo.insert(N); @@ -359,7 +382,7 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP, EVT SVT = VT; while (SVT != MVT::f32) { SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1); - if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) && + if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) && // Only do this if the target has a native EXTLOAD instruction from // smaller type. 
TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) && @@ -374,12 +397,12 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP, SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy()); unsigned Alignment = cast(CPIdx)->getAlignment(); if (Extend) - return DAG.getExtLoad(ISD::EXTLOAD, dl, - OrigVT, DAG.getEntryNode(), - CPIdx, PseudoSourceValue::getConstantPool(), - 0, VT, false, false, Alignment); + return DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT, + DAG.getEntryNode(), + CPIdx, MachinePointerInfo::getConstantPool(), + VT, false, false, Alignment); return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx, - PseudoSourceValue::getConstantPool(), 0, false, false, + MachinePointerInfo::getConstantPool(), false, false, Alignment); } @@ -392,7 +415,6 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, SDValue Val = ST->getValue(); EVT VT = Val.getValueType(); int Alignment = ST->getAlignment(); - int SVOffset = ST->getSrcValueOffset(); DebugLoc dl = ST->getDebugLoc(); if (ST->getMemoryVT().isFloatingPoint() || ST->getMemoryVT().isVector()) { @@ -401,16 +423,17 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, // Expand to a bitconvert of the value to the integer type of the // same size, then a (misaligned) int store. // FIXME: Does not handle truncating floating point stores! - SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, intVT, Val); - return DAG.getStore(Chain, dl, Result, Ptr, ST->getSrcValue(), - SVOffset, ST->isVolatile(), ST->isNonTemporal(), - Alignment); + SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); + return DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), + ST->isVolatile(), ST->isNonTemporal(), Alignment); } else { // Do a (aligned) store to a stack slot, then copy from the stack slot // to the final destination using (unaligned) integer loads and stores. EVT StoredVT = ST->getMemoryVT(); EVT RegVT = - TLI.getRegisterType(*DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(), StoredVT.getSizeInBits())); + TLI.getRegisterType(*DAG.getContext(), + EVT::getIntegerVT(*DAG.getContext(), + StoredVT.getSizeInBits())); unsigned StoredBytes = StoredVT.getSizeInBits() / 8; unsigned RegBytes = RegVT.getSizeInBits() / 8; unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; @@ -420,8 +443,8 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, // Perform the original store, only redirected to the stack slot. SDValue Store = DAG.getTruncStore(Chain, dl, - Val, StackPtr, NULL, 0, StoredVT, - false, false, 0); + Val, StackPtr, MachinePointerInfo(), + StoredVT, false, false, 0); SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); SmallVector Stores; unsigned Offset = 0; @@ -429,11 +452,12 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, // Do all but one copies using the full register width. for (unsigned i = 1; i < NumRegs; i++) { // Load one integer register's worth from the stack slot. - SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr, NULL, 0, + SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr, + MachinePointerInfo(), false, false, 0); // Store it to the final location. Remember the store. Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, - ST->getSrcValue(), SVOffset + Offset, + ST->getPointerInfo().getWithOffset(Offset), ST->isVolatile(), ST->isNonTemporal(), MinAlign(ST->getAlignment(), Offset))); // Increment the pointers. @@ -446,14 +470,17 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, // The last store may be partial. 
Do a truncating store. On big-endian // machines this requires an extending load from the stack slot to ensure // that the bits are in the right place. - EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset)); + EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), + 8 * (StoredBytes - Offset)); // Load from the stack slot. SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr, - NULL, 0, MemVT, false, false, 0); + MachinePointerInfo(), + MemVT, false, false, 0); Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, - ST->getSrcValue(), SVOffset + Offset, + ST->getPointerInfo() + .getWithOffset(Offset), MemVT, ST->isVolatile(), ST->isNonTemporal(), MinAlign(ST->getAlignment(), Offset))); @@ -471,20 +498,21 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, int IncrementSize = NumBits / 8; // Divide the stored value in two parts. - SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy()); + SDValue ShiftAmount = DAG.getConstant(NumBits, + TLI.getShiftAmountTy(Val.getValueType())); SDValue Lo = Val; SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); // Store the two parts SDValue Store1, Store2; Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr, - ST->getSrcValue(), SVOffset, NewStoredVT, + ST->getPointerInfo(), NewStoredVT, ST->isVolatile(), ST->isNonTemporal(), Alignment); Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, DAG.getConstant(IncrementSize, TLI.getPointerTy())); Alignment = MinAlign(Alignment, IncrementSize); Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr, - ST->getSrcValue(), SVOffset + IncrementSize, + ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, ST->isVolatile(), ST->isNonTemporal(), Alignment); @@ -495,7 +523,6 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, static SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, const TargetLowering &TLI) { - int SVOffset = LD->getSrcValueOffset(); SDValue Chain = LD->getChain(); SDValue Ptr = LD->getBasePtr(); EVT VT = LD->getValueType(0); @@ -506,73 +533,75 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, if (TLI.isTypeLegal(intVT)) { // Expand to a (misaligned) integer load of the same size, // then bitconvert to floating point or vector. - SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getSrcValue(), - SVOffset, LD->isVolatile(), + SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(), + LD->isVolatile(), LD->isNonTemporal(), LD->getAlignment()); - SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, LoadedVT, newLoad); + SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); if (VT.isFloatingPoint() && LoadedVT != VT) Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result); SDValue Ops[] = { Result, Chain }; return DAG.getMergeValues(Ops, 2, dl); - } else { - // Copy the value to a (aligned) stack slot using (unaligned) integer - // loads and stores, then do a (aligned) load from the stack slot. - EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT); - unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8; - unsigned RegBytes = RegVT.getSizeInBits() / 8; - unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; - - // Make sure the stack slot is also aligned for the register type. 
- SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); - - SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); - SmallVector Stores; - SDValue StackPtr = StackBase; - unsigned Offset = 0; - - // Do all but one copies using the full register width. - for (unsigned i = 1; i < NumRegs; i++) { - // Load one integer register's worth from the original location. - SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr, LD->getSrcValue(), - SVOffset + Offset, LD->isVolatile(), - LD->isNonTemporal(), - MinAlign(LD->getAlignment(), Offset)); - // Follow the load with a store to the stack slot. Remember the store. - Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr, - NULL, 0, false, false, 0)); - // Increment the pointers. - Offset += RegBytes; - Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); - StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, - Increment); - } + } - // The last copy may be partial. Do an extending load. - EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 8 * (LoadedBytes - Offset)); - SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, - LD->getSrcValue(), SVOffset + Offset, - MemVT, LD->isVolatile(), - LD->isNonTemporal(), - MinAlign(LD->getAlignment(), Offset)); + // Copy the value to a (aligned) stack slot using (unaligned) integer + // loads and stores, then do a (aligned) load from the stack slot. + EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT); + unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8; + unsigned RegBytes = RegVT.getSizeInBits() / 8; + unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; + + // Make sure the stack slot is also aligned for the register type. + SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); + + SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); + SmallVector Stores; + SDValue StackPtr = StackBase; + unsigned Offset = 0; + + // Do all but one copies using the full register width. + for (unsigned i = 1; i < NumRegs; i++) { + // Load one integer register's worth from the original location. + SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr, + LD->getPointerInfo().getWithOffset(Offset), + LD->isVolatile(), LD->isNonTemporal(), + MinAlign(LD->getAlignment(), Offset)); // Follow the load with a store to the stack slot. Remember the store. - // On big-endian machines this requires a truncating store to ensure - // that the bits end up in the right place. - Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr, - NULL, 0, MemVT, false, false, 0)); - - // The order of the stores doesn't matter - say it with a TokenFactor. - SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], - Stores.size()); - - // Finally, perform the original load only redirected to the stack slot. - Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, - NULL, 0, LoadedVT, false, false, 0); - - // Callers expect a MERGE_VALUES node. - SDValue Ops[] = { Load, TF }; - return DAG.getMergeValues(Ops, 2, dl); + Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr, + MachinePointerInfo(), false, false, 0)); + // Increment the pointers. + Offset += RegBytes; + Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); + StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, + Increment); } + + // The last copy may be partial. Do an extending load. 
+ EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), + 8 * (LoadedBytes - Offset)); + SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, + LD->getPointerInfo().getWithOffset(Offset), + MemVT, LD->isVolatile(), + LD->isNonTemporal(), + MinAlign(LD->getAlignment(), Offset)); + // Follow the load with a store to the stack slot. Remember the store. + // On big-endian machines this requires a truncating store to ensure + // that the bits end up in the right place. + Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr, + MachinePointerInfo(), MemVT, + false, false, 0)); + + // The order of the stores doesn't matter - say it with a TokenFactor. + SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], + Stores.size()); + + // Finally, perform the original load only redirected to the stack slot. + Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, + MachinePointerInfo(), LoadedVT, false, false, 0); + + // Callers expect a MERGE_VALUES node. + SDValue Ops[] = { Load, TF }; + return DAG.getMergeValues(Ops, 2, dl); } assert(LoadedVT.isInteger() && !LoadedVT.isVector() && "Unaligned load of unsupported type."); @@ -595,27 +624,30 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, // Load the value in two parts SDValue Lo, Hi; if (TLI.isLittleEndian()) { - Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(), - SVOffset, NewLoadedVT, LD->isVolatile(), + Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), + NewLoadedVT, LD->isVolatile(), LD->isNonTemporal(), Alignment); Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, DAG.getConstant(IncrementSize, TLI.getPointerTy())); - Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(), - SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(), - LD->isNonTemporal(), MinAlign(Alignment, IncrementSize)); + Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, + LD->getPointerInfo().getWithOffset(IncrementSize), + NewLoadedVT, LD->isVolatile(), + LD->isNonTemporal(), MinAlign(Alignment,IncrementSize)); } else { - Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(), - SVOffset, NewLoadedVT, LD->isVolatile(), + Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), + NewLoadedVT, LD->isVolatile(), LD->isNonTemporal(), Alignment); Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, DAG.getConstant(IncrementSize, TLI.getPointerTy())); - Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(), - SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(), - LD->isNonTemporal(), MinAlign(Alignment, IncrementSize)); + Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, + LD->getPointerInfo().getWithOffset(IncrementSize), + NewLoadedVT, LD->isVolatile(), + LD->isNonTemporal(), MinAlign(Alignment,IncrementSize)); } // aggregate the two parts - SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy()); + SDValue ShiftAmount = DAG.getConstant(NumBits, + TLI.getShiftAmountTy(Hi.getValueType())); SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); @@ -653,7 +685,7 @@ PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx, // Store the vector. SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr, - PseudoSourceValue::getFixedStack(SPFI), 0, + MachinePointerInfo::getFixedStack(SPFI), false, false, 0); // Truncate or zero extend offset to target pointer type. 
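Note on the ExpandUnalignedLoad fallback above: when the integer type of the loaded width is not legal, the rewritten code copies the misaligned value into an aligned stack temporary in register-sized pieces, with a final partial piece, and then reloads it with one aligned load. A minimal standalone sketch of that chunking scheme in plain C++ (not the SelectionDAG API; the buffer, sizes, and memcpy calls are illustrative stand-ins for the per-register loads and stores the legalizer emits):

#include <cstdio>
#include <cstring>

// Copy `loadedBytes` from a possibly misaligned `src` into the aligned
// buffer `dst`, `regBytes` at a time; the last piece may be partial,
// mirroring the NumRegs loop plus the trailing extending-load /
// truncating-store pair in ExpandUnalignedLoad.
static void copyThroughStackSlot(void *dst, const void *src,
                                 unsigned loadedBytes, unsigned regBytes) {
  unsigned numRegs = (loadedBytes + regBytes - 1) / regBytes;
  unsigned offset = 0;
  for (unsigned i = 1; i < numRegs; ++i) {   // all but the last piece
    std::memcpy((char *)dst + offset, (const char *)src + offset, regBytes);
    offset += regBytes;
  }
  // The last piece may be partial.
  std::memcpy((char *)dst + offset, (const char *)src + offset,
              loadedBytes - offset);
}

int main() {
  char raw[16] = "abcdefghijklmno";
  alignas(8) char slot[12];
  copyThroughStackSlot(slot, raw + 1, 12, 8);  // misaligned 12-byte source
  std::printf("%.12s\n", slot);                // prints: bcdefghijklm
}

Each piece corresponds to a load/store of a legal register type, so only the final reload from the stack slot needs the natural alignment of the loaded type.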
@@ -664,13 +696,11 @@ PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx, Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT)); SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr); // Store the scalar value. - Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, - PseudoSourceValue::getFixedStack(SPFI), 0, EltVT, + Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT, false, false, 0); // Load the updated vector. return DAG.getLoad(VT, dl, Ch, StackPtr, - PseudoSourceValue::getFixedStack(SPFI), 0, - false, false, 0); + MachinePointerInfo::getFixedStack(SPFI), false, false, 0); } @@ -712,7 +742,6 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) { SDValue Tmp1 = ST->getChain(); SDValue Tmp2 = ST->getBasePtr(); SDValue Tmp3; - int SVOffset = ST->getSrcValueOffset(); unsigned Alignment = ST->getAlignment(); bool isVolatile = ST->isVolatile(); bool isNonTemporal = ST->isNonTemporal(); @@ -723,29 +752,34 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) { Tmp3 = DAG.getConstant(CFP->getValueAPF(). bitcastToAPInt().zextOrTrunc(32), MVT::i32); - return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), - SVOffset, isVolatile, isNonTemporal, Alignment); - } else if (CFP->getValueType(0) == MVT::f64) { + return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), + isVolatile, isNonTemporal, Alignment); + } + + if (CFP->getValueType(0) == MVT::f64) { // If this target supports 64-bit registers, do a single 64-bit store. if (getTypeAction(MVT::i64) == Legal) { Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt(). zextOrTrunc(64), MVT::i64); - return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), - SVOffset, isVolatile, isNonTemporal, Alignment); - } else if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) { + return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), + isVolatile, isNonTemporal, Alignment); + } + + if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) { // Otherwise, if the target supports 32-bit registers, use 2 32-bit // stores. If the target supports neither 32- nor 64-bits, this // xform is certainly not worth it. 
const APInt &IntVal =CFP->getValueAPF().bitcastToAPInt(); - SDValue Lo = DAG.getConstant(APInt(IntVal).trunc(32), MVT::i32); + SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32); SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32); if (TLI.isBigEndian()) std::swap(Lo, Hi); - Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getSrcValue(), - SVOffset, isVolatile, isNonTemporal, Alignment); + Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getPointerInfo(), isVolatile, + isNonTemporal, Alignment); Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, DAG.getIntPtrConstant(4)); - Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(), SVOffset+4, + Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2, + ST->getPointerInfo().getWithOffset(4), isVolatile, isNonTemporal, MinAlign(Alignment, 4U)); return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); @@ -771,7 +805,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { "Unexpected illegal type!"); for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) - assert((isTypeLegal(Node->getOperand(i).getValueType()) || + assert((isTypeLegal(Node->getOperand(i).getValueType()) || Node->getOperand(i).getOpcode() == ISD::TargetConstant) && "Unexpected illegal type!"); @@ -785,7 +819,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { bool isCustom = false; // Figure out the correct action; the way to query this varies by opcode - TargetLowering::LegalizeAction Action; + TargetLowering::LegalizeAction Action = TargetLowering::Legal; bool SimpleFinishLegalizing = true; switch (Node->getOpcode()) { case ISD::INTRINSIC_W_CHAIN: @@ -851,6 +885,9 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { case ISD::MERGE_VALUES: case ISD::EH_RETURN: case ISD::FRAME_TO_ARGS_OFFSET: + case ISD::EH_SJLJ_SETJMP: + case ISD::EH_SJLJ_LONGJMP: + case ISD::EH_SJLJ_DISPATCHSETUP: // These operations lie about being legal: when they claim to be legal, // they should actually be expanded. Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); @@ -911,7 +948,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { // Legalizing shifts/rotates requires adjusting the shift amount // to the appropriate width. if (!Ops[1].getValueType().isVector()) - Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[1])); + Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(), + Ops[1])); break; case ISD::SRL_PARTS: case ISD::SRA_PARTS: @@ -919,12 +957,13 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { // Legalizing shifts/rotates requires adjusting the shift amount // to the appropriate width. 
if (!Ops[2].getValueType().isVector()) - Ops[2] = LegalizeOp(DAG.getShiftAmountOperand(Ops[2])); + Ops[2] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(), + Ops[2])); break; } - Result = DAG.UpdateNodeOperands(Result.getValue(0), Ops.data(), - Ops.size()); + Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), Ops.data(), + Ops.size()), 0); switch (Action) { case TargetLowering::Legal: for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) @@ -969,11 +1008,11 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { Node->dump( &DAG); dbgs() << "\n"; #endif - llvm_unreachable("Do not know how to legalize this operator!"); + assert(0 && "Do not know how to legalize this operator!"); case ISD::BUILD_VECTOR: switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) { - default: llvm_unreachable("This action is not supported yet!"); + default: assert(0 && "This action is not supported yet!"); case TargetLowering::Custom: Tmp3 = TLI.LowerOperation(Result, DAG); if (Tmp3.getNode()) { @@ -987,6 +1026,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { } break; case ISD::CALLSEQ_START: { + static int depth = 0; SDNode *CallEnd = FindCallEndFromCallStart(Node); // Recursively Legalize all of the inputs of the call end that do not lead @@ -998,13 +1038,13 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { NodesLeadingTo); } - // Now that we legalized all of the inputs (which may have inserted - // libcalls) create the new CALLSEQ_START node. + // Now that we have legalized all of the inputs (which may have inserted + // libcalls), create the new CALLSEQ_START node. Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain. - // Merge in the last call, to ensure that this call start after the last + // Merge in the last call to ensure that this call starts after the last // call ended. - if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken) { + if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken && depth == 0) { Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Tmp1, LastCALLSEQ_END); Tmp1 = LegalizeOp(Tmp1); @@ -1014,7 +1054,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { if (Tmp1 != Node->getOperand(0)) { SmallVector Ops(Node->op_begin(), Node->op_end()); Ops[0] = Tmp1; - Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size()); + Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), &Ops[0], + Ops.size()), Result.getResNo()); } // Remember that the CALLSEQ_START is legalized. @@ -1026,14 +1067,18 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { // sequence have been legalized, legalize the call itself. During this // process, no libcalls can/will be inserted, guaranteeing that no calls // can overlap. - assert(!IsLegalizingCall && "Inconsistent sequentialization of calls!"); + + SDValue Saved_LastCALLSEQ_END = LastCALLSEQ_END ; // Note that we are selecting this call! LastCALLSEQ_END = SDValue(CallEnd, 0); - IsLegalizingCall = true; + depth++; // Legalize the call, starting from the CALLSEQ_END. LegalizeOp(LastCALLSEQ_END); - assert(!IsLegalizingCall && "CALLSEQ_END should have cleared this!"); + depth--; + assert(depth >= 0 && "Un-matched CALLSEQ_START?"); + if (depth > 0) + LastCALLSEQ_END = Saved_LastCALLSEQ_END; return Result; } case ISD::CALLSEQ_END: @@ -1052,11 +1097,13 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain. // Do not try to legalize the target-specific arguments (#1+), except for // an optional flag input. 
- if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Flag){ + if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Glue){ if (Tmp1 != Node->getOperand(0)) { SmallVector Ops(Node->op_begin(), Node->op_end()); Ops[0] = Tmp1; - Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size()); + Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), + &Ops[0], Ops.size()), + Result.getResNo()); } } else { Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1)); @@ -1065,13 +1112,12 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { SmallVector Ops(Node->op_begin(), Node->op_end()); Ops[0] = Tmp1; Ops.back() = Tmp2; - Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size()); + Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), + &Ops[0], Ops.size()), + Result.getResNo()); } } - assert(IsLegalizingCall && "Call sequence imbalance between start/end?"); // This finishes up call legalization. - IsLegalizingCall = false; - // If the CALLSEQ_END node has a flag, remember that we legalized it. AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); if (Node->getNumValues() == 2) @@ -1085,12 +1131,14 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { ISD::LoadExtType ExtType = LD->getExtensionType(); if (ExtType == ISD::NON_EXTLOAD) { EVT VT = Node->getValueType(0); - Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset()); + Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), + Tmp1, Tmp2, LD->getOffset()), + Result.getResNo()); Tmp3 = Result.getValue(0); Tmp4 = Result.getValue(1); switch (TLI.getOperationAction(Node->getOpcode(), VT)) { - default: llvm_unreachable("This action is not supported yet!"); + default: assert(0 && "This action is not supported yet!"); case TargetLowering::Legal: // If this is an unaligned load and the target doesn't support it, // expand it. @@ -1098,7 +1146,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); if (LD->getAlignment() < ABIAlignment){ - Result = ExpandUnalignedLoad(cast(Result.getNode()), + Result = ExpandUnalignedLoad(cast(Result.getNode()), DAG, TLI); Tmp3 = Result.getOperand(0); Tmp4 = Result.getOperand(1); @@ -1120,11 +1168,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { // Change base type to a different vector type. EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); - Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getSrcValue(), - LD->getSrcValueOffset(), + Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(), LD->getAlignment()); - Tmp3 = LegalizeOp(DAG.getNode(ISD::BIT_CONVERT, dl, VT, Tmp1)); + Tmp3 = LegalizeOp(DAG.getNode(ISD::BITCAST, dl, VT, Tmp1)); Tmp4 = LegalizeOp(Tmp1.getValue(1)); break; } @@ -1134,213 +1181,226 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { AddLegalizedOperand(SDValue(Node, 0), Tmp3); AddLegalizedOperand(SDValue(Node, 1), Tmp4); return Op.getResNo() ? Tmp4 : Tmp3; - } else { - EVT SrcVT = LD->getMemoryVT(); - unsigned SrcWidth = SrcVT.getSizeInBits(); - int SVOffset = LD->getSrcValueOffset(); - unsigned Alignment = LD->getAlignment(); - bool isVolatile = LD->isVolatile(); - bool isNonTemporal = LD->isNonTemporal(); - - if (SrcWidth != SrcVT.getStoreSizeInBits() && - // Some targets pretend to have an i1 loading operation, and actually - // load an i8. 
This trick is correct for ZEXTLOAD because the top 7 - // bits are guaranteed to be zero; it helps the optimizers understand - // that these bits are zero. It is also useful for EXTLOAD, since it - // tells the optimizers that those bits are undefined. It would be - // nice to have an effective generic way of getting these benefits... - // Until such a way is found, don't insist on promoting i1 here. - (SrcVT != MVT::i1 || - TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) { - // Promote to a byte-sized load if not loading an integral number of - // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24. - unsigned NewWidth = SrcVT.getStoreSizeInBits(); - EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth); - SDValue Ch; - - // The extra bits are guaranteed to be zero, since we stored them that - // way. A zext load from NVT thus automatically gives zext from SrcVT. - - ISD::LoadExtType NewExtType = - ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD; - - Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0), - Tmp1, Tmp2, LD->getSrcValue(), SVOffset, - NVT, isVolatile, isNonTemporal, Alignment); - - Ch = Result.getValue(1); // The chain. + } - if (ExtType == ISD::SEXTLOAD) - // Having the top bits zero doesn't help when sign extending. - Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, - Result.getValueType(), - Result, DAG.getValueType(SrcVT)); - else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType()) - // All the top bits are guaranteed to be zero - inform the optimizers. - Result = DAG.getNode(ISD::AssertZext, dl, - Result.getValueType(), Result, - DAG.getValueType(SrcVT)); - - Tmp1 = LegalizeOp(Result); - Tmp2 = LegalizeOp(Ch); - } else if (SrcWidth & (SrcWidth - 1)) { - // If not loading a power-of-2 number of bits, expand as two loads. - assert(!SrcVT.isVector() && "Unsupported extload!"); - unsigned RoundWidth = 1 << Log2_32(SrcWidth); - assert(RoundWidth < SrcWidth); - unsigned ExtraWidth = SrcWidth - RoundWidth; - assert(ExtraWidth < RoundWidth); - assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && - "Load size not an integral number of bytes!"); - EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); - EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); - SDValue Lo, Hi, Ch; - unsigned IncrementSize; + EVT SrcVT = LD->getMemoryVT(); + unsigned SrcWidth = SrcVT.getSizeInBits(); + unsigned Alignment = LD->getAlignment(); + bool isVolatile = LD->isVolatile(); + bool isNonTemporal = LD->isNonTemporal(); + + if (SrcWidth != SrcVT.getStoreSizeInBits() && + // Some targets pretend to have an i1 loading operation, and actually + // load an i8. This trick is correct for ZEXTLOAD because the top 7 + // bits are guaranteed to be zero; it helps the optimizers understand + // that these bits are zero. It is also useful for EXTLOAD, since it + // tells the optimizers that those bits are undefined. It would be + // nice to have an effective generic way of getting these benefits... + // Until such a way is found, don't insist on promoting i1 here. + (SrcVT != MVT::i1 || + TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) { + // Promote to a byte-sized load if not loading an integral number of + // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24. + unsigned NewWidth = SrcVT.getStoreSizeInBits(); + EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth); + SDValue Ch; + + // The extra bits are guaranteed to be zero, since we stored them that + // way. 
A zext load from NVT thus automatically gives zext from SrcVT. + + ISD::LoadExtType NewExtType = + ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD; + + Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0), + Tmp1, Tmp2, LD->getPointerInfo(), + NVT, isVolatile, isNonTemporal, Alignment); + + Ch = Result.getValue(1); // The chain. + + if (ExtType == ISD::SEXTLOAD) + // Having the top bits zero doesn't help when sign extending. + Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, + Result.getValueType(), + Result, DAG.getValueType(SrcVT)); + else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType()) + // All the top bits are guaranteed to be zero - inform the optimizers. + Result = DAG.getNode(ISD::AssertZext, dl, + Result.getValueType(), Result, + DAG.getValueType(SrcVT)); + + Tmp1 = LegalizeOp(Result); + Tmp2 = LegalizeOp(Ch); + } else if (SrcWidth & (SrcWidth - 1)) { + // If not loading a power-of-2 number of bits, expand as two loads. + assert(!SrcVT.isVector() && "Unsupported extload!"); + unsigned RoundWidth = 1 << Log2_32(SrcWidth); + assert(RoundWidth < SrcWidth); + unsigned ExtraWidth = SrcWidth - RoundWidth; + assert(ExtraWidth < RoundWidth); + assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && + "Load size not an integral number of bytes!"); + EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); + EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); + SDValue Lo, Hi, Ch; + unsigned IncrementSize; + + if (TLI.isLittleEndian()) { + // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16) + // Load the bottom RoundWidth bits. + Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), + Tmp1, Tmp2, + LD->getPointerInfo(), RoundVT, isVolatile, + isNonTemporal, Alignment); + + // Load the remaining ExtraWidth bits. + IncrementSize = RoundWidth / 8; + Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, + DAG.getIntPtrConstant(IncrementSize)); + Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, + LD->getPointerInfo().getWithOffset(IncrementSize), + ExtraVT, isVolatile, isNonTemporal, + MinAlign(Alignment, IncrementSize)); + + // Build a factor node to remember that this load is independent of + // the other one. + Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), + Hi.getValue(1)); + + // Move the top bits to the right place. + Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, + DAG.getConstant(RoundWidth, + TLI.getShiftAmountTy(Hi.getValueType()))); + + // Join the hi and lo parts. + Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); + } else { + // Big endian - avoid unaligned loads. + // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8 + // Load the top RoundWidth bits. + Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, + LD->getPointerInfo(), RoundVT, isVolatile, + isNonTemporal, Alignment); + + // Load the remaining ExtraWidth bits. + IncrementSize = RoundWidth / 8; + Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, + DAG.getIntPtrConstant(IncrementSize)); + Lo = DAG.getExtLoad(ISD::ZEXTLOAD, + dl, Node->getValueType(0), Tmp1, Tmp2, + LD->getPointerInfo().getWithOffset(IncrementSize), + ExtraVT, isVolatile, isNonTemporal, + MinAlign(Alignment, IncrementSize)); + + // Build a factor node to remember that this load is independent of + // the other one. + Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), + Hi.getValue(1)); + + // Move the top bits to the right place. 
+ Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, + DAG.getConstant(ExtraWidth, + TLI.getShiftAmountTy(Hi.getValueType()))); + + // Join the hi and lo parts. + Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); + } - if (TLI.isLittleEndian()) { - // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16) - // Load the bottom RoundWidth bits. - Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, - Node->getValueType(0), Tmp1, Tmp2, - LD->getSrcValue(), SVOffset, RoundVT, isVolatile, - isNonTemporal, Alignment); - - // Load the remaining ExtraWidth bits. - IncrementSize = RoundWidth / 8; - Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, - DAG.getIntPtrConstant(IncrementSize)); - Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, - LD->getSrcValue(), SVOffset + IncrementSize, - ExtraVT, isVolatile, isNonTemporal, - MinAlign(Alignment, IncrementSize)); - - // Build a factor node to remember that this load is independent of the - // other one. - Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), - Hi.getValue(1)); - - // Move the top bits to the right place. - Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, - DAG.getConstant(RoundWidth, TLI.getShiftAmountTy())); - - // Join the hi and lo parts. - Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); + Tmp1 = LegalizeOp(Result); + Tmp2 = LegalizeOp(Ch); + } else { + switch (TLI.getLoadExtAction(ExtType, SrcVT)) { + default: assert(0 && "This action is not supported yet!"); + case TargetLowering::Custom: + isCustom = true; + // FALLTHROUGH + case TargetLowering::Legal: + Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), + Tmp1, Tmp2, LD->getOffset()), + Result.getResNo()); + Tmp1 = Result.getValue(0); + Tmp2 = Result.getValue(1); + + if (isCustom) { + Tmp3 = TLI.LowerOperation(Result, DAG); + if (Tmp3.getNode()) { + Tmp1 = LegalizeOp(Tmp3); + Tmp2 = LegalizeOp(Tmp3.getValue(1)); + } } else { - // Big endian - avoid unaligned loads. - // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8 - // Load the top RoundWidth bits. - Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, - LD->getSrcValue(), SVOffset, RoundVT, isVolatile, - isNonTemporal, Alignment); - - // Load the remaining ExtraWidth bits. - IncrementSize = RoundWidth / 8; - Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, - DAG.getIntPtrConstant(IncrementSize)); - Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, - Node->getValueType(0), Tmp1, Tmp2, - LD->getSrcValue(), SVOffset + IncrementSize, - ExtraVT, isVolatile, isNonTemporal, - MinAlign(Alignment, IncrementSize)); - - // Build a factor node to remember that this load is independent of the - // other one. - Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), - Hi.getValue(1)); - - // Move the top bits to the right place. - Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, - DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy())); - - // Join the hi and lo parts. 
- Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); - } - - Tmp1 = LegalizeOp(Result); - Tmp2 = LegalizeOp(Ch); - } else { - switch (TLI.getLoadExtAction(ExtType, SrcVT)) { - default: llvm_unreachable("This action is not supported yet!"); - case TargetLowering::Custom: - isCustom = true; - // FALLTHROUGH - case TargetLowering::Legal: - Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset()); - Tmp1 = Result.getValue(0); - Tmp2 = Result.getValue(1); - - if (isCustom) { - Tmp3 = TLI.LowerOperation(Result, DAG); - if (Tmp3.getNode()) { - Tmp1 = LegalizeOp(Tmp3); - Tmp2 = LegalizeOp(Tmp3.getValue(1)); - } - } else { - // If this is an unaligned load and the target doesn't support it, - // expand it. - if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { - const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); - if (LD->getAlignment() < ABIAlignment){ - Result = ExpandUnalignedLoad(cast(Result.getNode()), - DAG, TLI); - Tmp1 = Result.getOperand(0); - Tmp2 = Result.getOperand(1); - Tmp1 = LegalizeOp(Tmp1); - Tmp2 = LegalizeOp(Tmp2); - } + // If this is an unaligned load and the target doesn't support it, + // expand it. + if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { + const Type *Ty = + LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); + unsigned ABIAlignment = + TLI.getTargetData()->getABITypeAlignment(Ty); + if (LD->getAlignment() < ABIAlignment){ + Result = ExpandUnalignedLoad(cast(Result.getNode()), + DAG, TLI); + Tmp1 = Result.getOperand(0); + Tmp2 = Result.getOperand(1); + Tmp1 = LegalizeOp(Tmp1); + Tmp2 = LegalizeOp(Tmp2); } } - break; - case TargetLowering::Expand: - // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND - // f128 = EXTLOAD {f32,f64} too - if ((SrcVT == MVT::f32 && (Node->getValueType(0) == MVT::f64 || - Node->getValueType(0) == MVT::f128)) || - (SrcVT == MVT::f64 && Node->getValueType(0) == MVT::f128)) { - SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, LD->getSrcValue(), - LD->getSrcValueOffset(), - LD->isVolatile(), LD->isNonTemporal(), - LD->getAlignment()); - Result = DAG.getNode(ISD::FP_EXTEND, dl, - Node->getValueType(0), Load); - Tmp1 = LegalizeOp(Result); // Relegalize new nodes. - Tmp2 = LegalizeOp(Load.getValue(1)); + } + break; + case TargetLowering::Expand: + if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && isTypeLegal(SrcVT)) { + SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, + LD->getPointerInfo(), + LD->isVolatile(), LD->isNonTemporal(), + LD->getAlignment()); + unsigned ExtendOp; + switch (ExtType) { + case ISD::EXTLOAD: + ExtendOp = (SrcVT.isFloatingPoint() ? + ISD::FP_EXTEND : ISD::ANY_EXTEND); break; + case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break; + case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break; + default: llvm_unreachable("Unexpected extend load type!"); } - assert(ExtType != ISD::EXTLOAD &&"EXTLOAD should always be supported!"); - // Turn the unsupported load into an EXTLOAD followed by an explicit - // zero/sign extend inreg. 
- Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0), - Tmp1, Tmp2, LD->getSrcValue(), - LD->getSrcValueOffset(), SrcVT, - LD->isVolatile(), LD->isNonTemporal(), - LD->getAlignment()); - SDValue ValRes; - if (ExtType == ISD::SEXTLOAD) - ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, - Result.getValueType(), - Result, DAG.getValueType(SrcVT)); - else - ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT); - Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes. - Tmp2 = LegalizeOp(Result.getValue(1)); // Relegalize new nodes. + Result = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load); + Tmp1 = LegalizeOp(Result); // Relegalize new nodes. + Tmp2 = LegalizeOp(Load.getValue(1)); break; } + // FIXME: This does not work for vectors on most targets. Sign- and + // zero-extend operations are currently folded into extending loads, + // whether they are legal or not, and then we end up here without any + // support for legalizing them. + assert(ExtType != ISD::EXTLOAD && + "EXTLOAD should always be supported!"); + // Turn the unsupported load into an EXTLOAD followed by an explicit + // zero/sign extend inreg. + Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0), + Tmp1, Tmp2, LD->getPointerInfo(), SrcVT, + LD->isVolatile(), LD->isNonTemporal(), + LD->getAlignment()); + SDValue ValRes; + if (ExtType == ISD::SEXTLOAD) + ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, + Result.getValueType(), + Result, DAG.getValueType(SrcVT)); + else + ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType()); + Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes. + Tmp2 = LegalizeOp(Result.getValue(1)); // Relegalize new nodes. + break; } - - // Since loads produce two values, make sure to remember that we legalized - // both of them. - AddLegalizedOperand(SDValue(Node, 0), Tmp1); - AddLegalizedOperand(SDValue(Node, 1), Tmp2); - return Op.getResNo() ? Tmp2 : Tmp1; } + + // Since loads produce two values, make sure to remember that we legalized + // both of them. + AddLegalizedOperand(SDValue(Node, 0), Tmp1); + AddLegalizedOperand(SDValue(Node, 1), Tmp2); + return Op.getResNo() ? Tmp2 : Tmp1; } case ISD::STORE: { StoreSDNode *ST = cast(Node); Tmp1 = LegalizeOp(ST->getChain()); // Legalize the chain. Tmp2 = LegalizeOp(ST->getBasePtr()); // Legalize the pointer. - int SVOffset = ST->getSrcValueOffset(); unsigned Alignment = ST->getAlignment(); bool isVolatile = ST->isVolatile(); bool isNonTemporal = ST->isNonTemporal(); @@ -1353,18 +1413,20 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { { Tmp3 = LegalizeOp(ST->getValue()); - Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2, - ST->getOffset()); + Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), + Tmp1, Tmp3, Tmp2, + ST->getOffset()), + Result.getResNo()); EVT VT = Tmp3.getValueType(); switch (TLI.getOperationAction(ISD::STORE, VT)) { - default: llvm_unreachable("This action is not supported yet!"); + default: assert(0 && "This action is not supported yet!"); case TargetLowering::Legal: // If this is an unaligned store and the target doesn't support it, // expand it. 
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); + unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); if (ST->getAlignment() < ABIAlignment) Result = ExpandUnalignedStore(cast(Result.getNode()), DAG, TLI); @@ -1376,10 +1438,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { break; case TargetLowering::Promote: assert(VT.isVector() && "Unknown legal promote case!"); - Tmp3 = DAG.getNode(ISD::BIT_CONVERT, dl, + Tmp3 = DAG.getNode(ISD::BITCAST, dl, TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3); Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, - ST->getSrcValue(), SVOffset, isVolatile, + ST->getPointerInfo(), isVolatile, isNonTemporal, Alignment); break; } @@ -1395,11 +1457,11 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { // Promote to a byte-sized store with upper bits zero if not // storing an integral number of bytes. For example, promote // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1) - EVT NVT = EVT::getIntegerVT(*DAG.getContext(), StVT.getStoreSizeInBits()); + EVT NVT = EVT::getIntegerVT(*DAG.getContext(), + StVT.getStoreSizeInBits()); Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT); - Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), - SVOffset, NVT, isVolatile, isNonTemporal, - Alignment); + Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), + NVT, isVolatile, isNonTemporal, Alignment); } else if (StWidth & (StWidth - 1)) { // If not storing a power-of-2 number of bits, expand as two stores. assert(!StVT.isVector() && "Unsupported truncstore!"); @@ -1417,8 +1479,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { if (TLI.isLittleEndian()) { // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16) // Store the bottom RoundWidth bits. - Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), - SVOffset, RoundVT, + Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), + RoundVT, isVolatile, isNonTemporal, Alignment); // Store the remaining ExtraWidth bits. @@ -1426,28 +1488,29 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, DAG.getIntPtrConstant(IncrementSize)); Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, - DAG.getConstant(RoundWidth, TLI.getShiftAmountTy())); - Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(), - SVOffset + IncrementSize, ExtraVT, isVolatile, - isNonTemporal, + DAG.getConstant(RoundWidth, + TLI.getShiftAmountTy(Tmp3.getValueType()))); + Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, + ST->getPointerInfo().getWithOffset(IncrementSize), + ExtraVT, isVolatile, isNonTemporal, MinAlign(Alignment, IncrementSize)); } else { // Big endian - avoid unaligned stores. // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X // Store the top RoundWidth bits. Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, - DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy())); - Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(), - SVOffset, RoundVT, isVolatile, isNonTemporal, - Alignment); + DAG.getConstant(ExtraWidth, + TLI.getShiftAmountTy(Tmp3.getValueType()))); + Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getPointerInfo(), + RoundVT, isVolatile, isNonTemporal, Alignment); // Store the remaining ExtraWidth bits. 
IncrementSize = RoundWidth / 8; Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, DAG.getIntPtrConstant(IncrementSize)); - Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), - SVOffset + IncrementSize, ExtraVT, isVolatile, - isNonTemporal, + Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, + ST->getPointerInfo().getWithOffset(IncrementSize), + ExtraVT, isVolatile, isNonTemporal, MinAlign(Alignment, IncrementSize)); } @@ -1456,17 +1519,19 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { } else { if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() || Tmp2 != ST->getBasePtr()) - Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2, - ST->getOffset()); + Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), + Tmp1, Tmp3, Tmp2, + ST->getOffset()), + Result.getResNo()); switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) { - default: llvm_unreachable("This action is not supported yet!"); + default: assert(0 && "This action is not supported yet!"); case TargetLowering::Legal: // If this is an unaligned store and the target doesn't support it, // expand it. if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); + unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); if (ST->getAlignment() < ABIAlignment) Result = ExpandUnalignedStore(cast(Result.getNode()), DAG, TLI); @@ -1479,9 +1544,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { // TRUNCSTORE:i16 i32 -> STORE i16 assert(isTypeLegal(StVT) && "Do not know how to expand this store!"); Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3); - Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), - SVOffset, isVolatile, isNonTemporal, - Alignment); + Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), + isVolatile, isNonTemporal, Alignment); break; } } @@ -1508,8 +1572,8 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { DebugLoc dl = Op.getDebugLoc(); // Store the value to a temporary stack slot, then LOAD the returned part. SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); - SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, NULL, 0, - false, false, 0); + SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, + MachinePointerInfo(), false, false, 0); // Add the offset to the index. unsigned EltSize = @@ -1525,12 +1589,56 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr); if (Op.getValueType().isVector()) - return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, NULL, 0, + return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,MachinePointerInfo(), false, false, 0); + return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, + MachinePointerInfo(), + Vec.getValueType().getVectorElementType(), + false, false, 0); +} + +SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) { + assert(Op.getValueType().isVector() && "Non-vector insert subvector!"); + + SDValue Vec = Op.getOperand(0); + SDValue Part = Op.getOperand(1); + SDValue Idx = Op.getOperand(2); + DebugLoc dl = Op.getDebugLoc(); + + // Store the value to a temporary stack slot, then LOAD the returned part. 
+ + SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); + int FI = cast(StackPtr.getNode())->getIndex(); + MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); + + // First store the whole vector. + SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, + false, false, 0); + + // Then store the inserted part. + + // Add the offset to the index. + unsigned EltSize = + Vec.getValueType().getVectorElementType().getSizeInBits()/8; + + Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, + DAG.getConstant(EltSize, Idx.getValueType())); + + if (Idx.getValueType().bitsGT(TLI.getPointerTy())) + Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); else - return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, - NULL, 0, Vec.getValueType().getVectorElementType(), - false, false, 0); + Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); + + SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, + StackPtr); + + // Store the subvector. + Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr, + MachinePointerInfo(), false, false, 0); + + // Finally, load the updated vector. + return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo, + false, false, 0); } SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { @@ -1543,7 +1651,7 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { DebugLoc dl = Node->getDebugLoc(); SDValue FIPtr = DAG.CreateStackTemporary(VT); int FI = cast(FIPtr.getNode())->getIndex(); - const Value *SV = PseudoSourceValue::getFixedStack(FI); + MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); // Emit a store of each element to the stack slot. SmallVector Stores; @@ -1562,11 +1670,13 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { // element type, only store the bits necessary. if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, - Node->getOperand(i), Idx, SV, Offset, + Node->getOperand(i), Idx, + PtrInfo.getWithOffset(Offset), EltVT, false, false, 0)); } else - Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, - Node->getOperand(i), Idx, SV, Offset, + Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, + Node->getOperand(i), Idx, + PtrInfo.getWithOffset(Offset), false, false, 0)); } @@ -1578,42 +1688,61 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { StoreChain = DAG.getEntryNode(); // Result is a load from the stack slot. - return DAG.getLoad(VT, dl, StoreChain, FIPtr, SV, 0, false, false, 0); + return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, false, false, 0); } SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) { DebugLoc dl = Node->getDebugLoc(); SDValue Tmp1 = Node->getOperand(0); SDValue Tmp2 = Node->getOperand(1); - assert((Tmp2.getValueType() == MVT::f32 || - Tmp2.getValueType() == MVT::f64) && - "Ugly special-cased code!"); - // Get the sign bit of the RHS. + + // Get the sign bit of the RHS. First obtain a value that has the same + // sign as the sign bit, i.e. negative if and only if the sign bit is 1. SDValue SignBit; - EVT IVT = Tmp2.getValueType() == MVT::f64 ? MVT::i64 : MVT::i32; + EVT FloatVT = Tmp2.getValueType(); + EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits()); if (isTypeLegal(IVT)) { - SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, IVT, Tmp2); + // Convert to an integer with the same sign bit. 
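// Illustrative sketch, not part of the patch: the same stack-slot trick that
// ExpandInsertToVectorThroughStack uses above, in plain C++: spill the whole
// vector, overwrite the inserted part at Idx elements in, then reload the
// result.  insertSubvector is a made-up helper; the caller must guarantee
// Idx + PartLen <= VecLen, just as the DAG node does.
#include <cstddef>
#include <cstring>

template <typename Elt, std::size_t VecLen, std::size_t PartLen>
static void insertSubvector(Elt (&Vec)[VecLen], const Elt (&Part)[PartLen],
                            std::size_t Idx) {
  Elt Slot[VecLen];                              // stack temporary
  std::memcpy(Slot, Vec, sizeof(Slot));          // first store the whole vector
  std::memcpy(Slot + Idx, Part, sizeof(Part));   // then store the inserted part
  std::memcpy(Vec, Slot, sizeof(Slot));          // finally, reload the vector
}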
+ SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2); } else { - assert(isTypeLegal(TLI.getPointerTy()) && - (TLI.getPointerTy() == MVT::i32 || - TLI.getPointerTy() == MVT::i64) && - "Legal type for load?!"); - SDValue StackPtr = DAG.CreateStackTemporary(Tmp2.getValueType()); - SDValue StorePtr = StackPtr, LoadPtr = StackPtr; + // Store the float to memory, then load the sign part out as an integer. + MVT LoadTy = TLI.getPointerTy(); + // First create a temporary that is aligned for both the load and store. + SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy); + // Then store the float to it. SDValue Ch = - DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StorePtr, NULL, 0, + DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(), false, false, 0); - if (Tmp2.getValueType() == MVT::f64 && TLI.isLittleEndian()) - LoadPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), - LoadPtr, DAG.getIntPtrConstant(4)); - SignBit = DAG.getExtLoad(ISD::SEXTLOAD, dl, TLI.getPointerTy(), - Ch, LoadPtr, NULL, 0, MVT::i32, - false, false, 0); - } - SignBit = - DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()), - SignBit, DAG.getConstant(0, SignBit.getValueType()), - ISD::SETLT); + if (TLI.isBigEndian()) { + assert(FloatVT.isByteSized() && "Unsupported floating point type!"); + // Load out a legal integer with the same sign bit as the float. + SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(), + false, false, 0); + } else { // Little endian + SDValue LoadPtr = StackPtr; + // The float may be wider than the integer we are going to load. Advance + // the pointer so that the loaded integer will contain the sign bit. + unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits(); + unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8; + LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), + LoadPtr, DAG.getIntPtrConstant(ByteOffset)); + // Load a legal integer containing the sign bit. + SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(), + false, false, 0); + // Move the sign bit to the top bit of the loaded integer. + unsigned BitShift = LoadTy.getSizeInBits() - + (FloatVT.getSizeInBits() - 8 * ByteOffset); + assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?"); + if (BitShift) + SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit, + DAG.getConstant(BitShift, + TLI.getShiftAmountTy(SignBit.getValueType()))); + } + } + // Now get the sign bit proper, by seeing whether the value is negative. + SignBit = DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()), + SignBit, DAG.getConstant(0, SignBit.getValueType()), + ISD::SETLT); // Get the absolute value of the result. 
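// Illustrative sketch, not part of the patch: what the FCOPYSIGN expansion
// above computes for f64 when a same-width integer type is legal.  The sign
// operand is bitcast to i64, tested for "less than zero", and the result is
// selected between fabs and -fabs of the magnitude operand.  copySignF64 is
// a made-up name, not an LLVM API.
#include <cmath>
#include <cstdint>
#include <cstring>

static double copySignF64(double Mag, double Sgn) {
  uint64_t Bits;
  std::memcpy(&Bits, &Sgn, sizeof(Bits));          // ISD::BITCAST f64 -> i64
  bool SignBit = static_cast<int64_t>(Bits) < 0;   // SETLT against zero
  double AbsVal = std::fabs(Mag);                  // ISD::FABS
  return SignBit ? -AbsVal : AbsVal;               // select nabs/abs on the sign
}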
SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1); // Select between the nabs and abs value based on the sign bit of @@ -1643,8 +1772,7 @@ void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node, SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); Chain = SP.getValue(1); unsigned Align = cast(Tmp3)->getZExtValue(); - unsigned StackAlign = - TLI.getTargetMachine().getFrameInfo()->getStackAlignment(); + unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); if (Align > StackAlign) SP = DAG.getNode(ISD::AND, dl, VT, SP, DAG.getConstant(-(uint64_t)Align, VT)); @@ -1668,7 +1796,7 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, EVT OpVT = LHS.getValueType(); ISD::CondCode CCCode = cast(CC)->get(); switch (TLI.getCondCodeAction(CCCode, OpVT)) { - default: llvm_unreachable("Unknown condition code action!"); + default: assert(0 && "Unknown condition code action!"); case TargetLowering::Legal: // Nothing to do. break; @@ -1676,7 +1804,7 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; unsigned Opc = 0; switch (CCCode) { - default: llvm_unreachable("Don't know how to expand this condition!"); + default: assert(0 && "Don't know how to expand this condition!"); case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break; case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break; case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break; @@ -1718,13 +1846,13 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, FrameIndexSDNode *StackPtrFI = cast(FIPtr); int SPFI = StackPtrFI->getIndex(); - const Value *SV = PseudoSourceValue::getFixedStack(SPFI); + MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI); unsigned SrcSize = SrcOp.getValueType().getSizeInBits(); unsigned SlotSize = SlotVT.getSizeInBits(); unsigned DestSize = DestVT.getSizeInBits(); - unsigned DestAlign = - TLI.getTargetData()->getPrefTypeAlignment(DestVT.getTypeForEVT(*DAG.getContext())); + const Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); + unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType); // Emit a store to the stack slot. Use a truncstore if the input value is // later than DestVT. @@ -1732,21 +1860,21 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, if (SrcSize > SlotSize) Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, - SV, 0, SlotVT, false, false, SrcAlign); + PtrInfo, SlotVT, false, false, SrcAlign); else { assert(SrcSize == SlotSize && "Invalid store"); Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, - SV, 0, false, false, SrcAlign); + PtrInfo, false, false, SrcAlign); } // Result is a load from the stack slot. 
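// Illustrative sketch, not part of the patch: the condition-code expansion
// above for a target without ordered FP compares, e.g. SETOEQ becomes
// SETEQ(a,b) AND SETO(a,b), where SETO means "neither operand is NaN".
// (In C, == and > are already ordered, so the extra term is redundant here;
// it is spelled out only to mirror the two-setcc form the DAG builds.)
#include <cmath>

static bool setO(double A, double B) {             // ISD::SETO
  return !std::isnan(A) && !std::isnan(B);
}
static bool setOEQ(double A, double B) {           // SETEQ and-ed with SETO
  return (A == B) && setO(A, B);
}
static bool setOGT(double A, double B) {           // SETGT and-ed with SETO
  return (A > B) && setO(A, B);
}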
if (SlotSize == DestSize) - return DAG.getLoad(DestVT, dl, Store, FIPtr, SV, 0, false, false, - DestAlign); + return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, + false, false, DestAlign); assert(SlotSize < DestSize && "Unknown extension!"); - return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, SV, 0, SlotVT, - false, false, DestAlign); + return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, + PtrInfo, SlotVT, false, false, DestAlign); } SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { @@ -1760,11 +1888,11 @@ SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0), StackPtr, - PseudoSourceValue::getFixedStack(SPFI), 0, + MachinePointerInfo::getFixedStack(SPFI), Node->getValueType(0).getVectorElementType(), false, false, 0); return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr, - PseudoSourceValue::getFixedStack(SPFI), 0, + MachinePointerInfo::getFixedStack(SPFI), false, false, 0); } @@ -1838,7 +1966,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); unsigned Alignment = cast(CPIdx)->getAlignment(); return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, - PseudoSourceValue::getConstantPool(), 0, + MachinePointerInfo::getConstantPool(), false, false, Alignment); } @@ -1874,7 +2002,6 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { // and leave the Hi part unset. SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned) { - assert(!IsLegalizingCall && "Cannot overlap legalization of calls!"); // The input chain to this libcall is the entry node of the function. // Legalizing the call will automatically add the previous call to the // dependence. @@ -1895,12 +2022,20 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, // Splice the libcall in wherever FindInputOutputChains tells us to. const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); + + // isTailCall may be true since the callee does not reference caller stack + // frame. Check if it's in the right position. + bool isTailCall = isInTailCallPosition(DAG, Node, TLI); std::pair CallInfo = TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, - 0, TLI.getLibcallCallingConv(LC), false, + 0, TLI.getLibcallCallingConv(LC), isTailCall, /*isReturnValueUsed=*/true, Callee, Args, DAG, Node->getDebugLoc()); + if (!CallInfo.second.getNode()) + // It's a tailcall, return the chain (which is the DAG root). + return DAG.getRoot(); + // Legalize the call sequence, starting with the chain. This will advance // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that // was added by LowerCallTo (guaranteeing proper serialization of calls). @@ -1908,6 +2043,43 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, return CallInfo.first; } +// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to +// ExpandLibCall except that the first operand is the in-chain. 
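// Illustrative sketch, not part of the patch: the same-size case of
// EmitStackConvert above, which performs an in-memory bitcast by storing the
// source into a stack slot and loading it back as the destination type.
// bitcastF32ToI32 is a made-up helper for the f32 -> i32 case.
#include <cstdint>
#include <cstring>

static uint32_t bitcastF32ToI32(float F) {
  unsigned char Slot[sizeof(float)];      // stack temporary
  std::memcpy(Slot, &F, sizeof(F));       // emit a store to the stack slot
  uint32_t I;
  std::memcpy(&I, Slot, sizeof(I));       // result is a load from the slot
  return I;
}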
+std::pair +SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC, + SDNode *Node, + bool isSigned) { + SDValue InChain = Node->getOperand(0); + + TargetLowering::ArgListTy Args; + TargetLowering::ArgListEntry Entry; + for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) { + EVT ArgVT = Node->getOperand(i).getValueType(); + const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); + Entry.Node = Node->getOperand(i); + Entry.Ty = ArgTy; + Entry.isSExt = isSigned; + Entry.isZExt = !isSigned; + Args.push_back(Entry); + } + SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), + TLI.getPointerTy()); + + // Splice the libcall in wherever FindInputOutputChains tells us to. + const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); + std::pair CallInfo = + TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, + 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, + /*isReturnValueUsed=*/true, + Callee, Args, DAG, Node->getDebugLoc()); + + // Legalize the call sequence, starting with the chain. This will advance + // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that + // was added by LowerCallTo (guaranteeing proper serialization of calls). + LegalizeOp(CallInfo.second); + return CallInfo; +} + SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, RTLIB::Libcall Call_F32, RTLIB::Libcall Call_F64, @@ -1915,7 +2087,7 @@ SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, RTLIB::Libcall Call_PPCF128) { RTLIB::Libcall LC; switch (Node->getValueType(0).getSimpleVT().SimpleTy) { - default: llvm_unreachable("Unexpected request for libcall!"); + default: assert(0 && "Unexpected request for libcall!"); case MVT::f32: LC = Call_F32; break; case MVT::f64: LC = Call_F64; break; case MVT::f80: LC = Call_F80; break; @@ -1932,7 +2104,7 @@ SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned, RTLIB::Libcall Call_I128) { RTLIB::Libcall LC; switch (Node->getValueType(0).getSimpleVT().SimpleTy) { - default: llvm_unreachable("Unexpected request for libcall!"); + default: assert(0 && "Unexpected request for libcall!"); case MVT::i8: LC = Call_I8; break; case MVT::i16: LC = Call_I16; break; case MVT::i32: LC = Call_I32; break; @@ -1976,16 +2148,17 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, } // store the lo of the constructed double - based on integer input SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, - Op0Mapped, Lo, NULL, 0, + Op0Mapped, Lo, MachinePointerInfo(), false, false, 0); // initial hi portion of constructed double SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); // store the hi of the constructed double - biased exponent - SDValue Store2=DAG.getStore(Store1, dl, InitialHi, Hi, NULL, 0, - false, false, 0); + SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, + MachinePointerInfo(), + false, false, 0); // load the constructed double - SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, NULL, 0, - false, false, 0); + SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, + MachinePointerInfo(), false, false, 0); // FP constant to bias correct the final result SDValue Bias = DAG.getConstantFP(isSigned ? BitsToDouble(0x4330000080000000ULL) : @@ -2008,24 +2181,133 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, return Result; } assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); + // Code below here assumes !isSigned without checking again. 
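// Illustrative sketch, not part of the patch: the 32-bit INT_TO_FP trick used
// above.  The input is placed in the low word of a double whose high word is
// 0x43300000, giving exactly 2^52 + x; subtracting the bias leaves x.  For the
// signed case the input is first xor'ed with 0x80000000 and the bias gains an
// extra 2^31.  The helper names are made up.
#include <cstdint>
#include <cstring>

static double bitsAsDouble(uint64_t Bits) {
  double D;
  std::memcpy(&D, &Bits, sizeof(D));
  return D;
}

static double uint32ToDouble(uint32_t X) {
  double D = bitsAsDouble((uint64_t(0x43300000u) << 32) | X);  // 2^52 + X
  return D - bitsAsDouble(UINT64_C(0x4330000000000000));       // minus 2^52
}

static double sint32ToDouble(int32_t X) {
  uint32_t Mapped = uint32_t(X) ^ 0x80000000u;                 // X + 2^31
  double D = bitsAsDouble((uint64_t(0x43300000u) << 32) | Mapped);
  return D - bitsAsDouble(UINT64_C(0x4330000080000000));       // minus (2^52 + 2^31)
}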
+ + // Implementation of unsigned i64 to f64 following the algorithm in + // __floatundidf in compiler_rt. This implementation has the advantage + // of performing rounding correctly, both in the default rounding mode + // and in all alternate rounding modes. + // TODO: Generalize this for use with other types. + if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) { + SDValue TwoP52 = + DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64); + SDValue TwoP84PlusTwoP52 = + DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64); + SDValue TwoP84 = + DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64); + + SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32); + SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, + DAG.getConstant(32, MVT::i64)); + SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); + SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); + SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr); + SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr); + SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, + TwoP84PlusTwoP52); + return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); + } + + // Implementation of unsigned i64 to f32. + // TODO: Generalize this for use with other types. + if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) { + // For unsigned conversions, convert them to signed conversions using the + // algorithm from the x86_64 __floatundidf in compiler_rt. + if (!isSigned) { + SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0); + + SDValue ShiftConst = + DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType())); + SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst); + SDValue AndConst = DAG.getConstant(1, MVT::i64); + SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst); + SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr); + + SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or); + SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt); + + // TODO: This really should be implemented using a branch rather than a + // select. We happen to get lucky and machinesink does the right + // thing most of the time. This would be a good candidate for a + //pseudo-op, or, even better, for whole-function isel. + SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), + Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT); + return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast); + } + + // Otherwise, implement the fully general conversion. 
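// Illustrative sketch, not part of the patch: the __floatundidf-style u64 ->
// f64 expansion added above, in plain C++.  Each 32-bit half is OR'ed into
// the mantissa of a known power of two, the implicit parts are cancelled, and
// the final addition performs the single, correctly rounded step.
// floatUnsignedToDouble and bitsAsDouble64 are made-up names.
#include <cstdint>
#include <cstring>

static double bitsAsDouble64(uint64_t Bits) {
  double D;
  std::memcpy(&D, &Bits, sizeof(D));
  return D;
}

static double floatUnsignedToDouble(uint64_t X) {
  uint64_t Lo = X & 0xffffffffu;                                     // zext i32
  uint64_t Hi = X >> 32;
  double LoFlt = bitsAsDouble64(Lo | UINT64_C(0x4330000000000000));  // 2^52 + Lo
  double HiFlt = bitsAsDouble64(Hi | UINT64_C(0x4530000000000000));  // 2^84 + Hi*2^32
  double TwoP84PlusTwoP52 = bitsAsDouble64(UINT64_C(0x4530000000100000));
  return LoFlt + (HiFlt - TwoP84PlusTwoP52);   // exact FSUB, then FADD rounds once
}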
+ + SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, + DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64)); + SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, + DAG.getConstant(UINT64_C(0x800), MVT::i64)); + SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, + DAG.getConstant(UINT64_C(0x7ff), MVT::i64)); + SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), + And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE); + SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0); + SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), + Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64), + ISD::SETUGE); + SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0); + EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType()); + + SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2, + DAG.getConstant(32, SHVT)); + SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh); + SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc); + SDValue TwoP32 = + DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64); + SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt); + SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2); + SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo); + SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2); + return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd, + DAG.getIntPtrConstant(0)); + } + + SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); + + SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()), + Op0, DAG.getConstant(0, Op0.getValueType()), + ISD::SETLT); + SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); + SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), + SignSet, Four, Zero); + + // If the sign bit of the integer is set, the large number will be treated + // as a negative number. To counteract this, the dynamic code adds an + // offset depending on the data type. + uint64_t FF; + switch (Op0.getValueType().getSimpleVT().SimpleTy) { + default: assert(0 && "Unsupported integer type!"); + case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) + case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) + case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) + case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) + } + if (TLI.isLittleEndian()) FF <<= 32; + Constant *FudgeFactor = ConstantInt::get( + Type::getInt64Ty(*DAG.getContext()), FF); + + SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); + unsigned Alignment = cast(CPIdx)->getAlignment(); + CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset); + Alignment = std::min(Alignment, 4u); + SDValue FudgeInReg; + if (DestVT == MVT::f32) + FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx, + MachinePointerInfo::getConstantPool(), + false, false, Alignment); + else { + FudgeInReg = + LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, + DAG.getEntryNode(), CPIdx, + MachinePointerInfo::getConstantPool(), + MVT::f32, false, false, Alignment)); + } - // Implementation following __floatundidf in compiler_rt. 
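// Illustrative sketch, not part of the patch: the generic fallback at the end
// of the expansion above.  The value is converted as if it were signed, and
// when the sign bit was set the "fudge factor" 2^N (loaded from the constant
// pool in the DAG form) is added back.  Shown for i32 -> f64 with made-up
// names.
#include <cstdint>

static double uintToFPFallback(uint32_t X) {
  double Converted = double(int32_t(X));     // SINT_TO_FP
  if (int32_t(X) < 0)                        // SETLT 0: sign bit was set
    Converted += 4294967296.0;               // FF = 0x4F800000, i.e. 2^32 as a float
  return Converted;
}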
- SDValue TwoP52 = - DAG.getConstant(0x4330000000000000, MVT::i64); - SDValue TwoP84PlusTwoP52 = - DAG.getConstantFP(BitsToDouble(0x4530000000100000), MVT::f64); - SDValue TwoP84 = - DAG.getConstant(0x4530000000000000, MVT::i64); - - SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32); - SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, - DAG.getConstant(32, MVT::i64)); - SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); - SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); - SDValue LoFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, LoOr); - SDValue HiFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, HiOr); - SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, TwoP84PlusTwoP52); - return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); + return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg); } /// PromoteLegalINT_TO_FP - This function is responsible for legalizing a @@ -2115,10 +2397,10 @@ SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, /// SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) { EVT VT = Op.getValueType(); - EVT SHVT = TLI.getShiftAmountTy(); + EVT SHVT = TLI.getShiftAmountTy(VT); SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; switch (VT.getSimpleVT().SimpleTy) { - default: llvm_unreachable("Unhandled Expand type in BSWAP!"); + default: assert(0 && "Unhandled Expand type in BSWAP!"); case MVT::i16: Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); @@ -2158,33 +2440,64 @@ SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) { } } +/// SplatByte - Distribute ByteVal over NumBits bits. +// FIXME: Move this helper to a common place. +static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) { + APInt Val = APInt(NumBits, ByteVal); + unsigned Shift = 8; + for (unsigned i = NumBits; i > 8; i >>= 1) { + Val = (Val << Shift) | Val; + Shift <<= 1; + } + return Val; +} + /// ExpandBitCount - Expand the specified bitcount instruction into operations. /// SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl) { switch (Opc) { - default: llvm_unreachable("Cannot expand this yet!"); + default: assert(0 && "Cannot expand this yet!"); case ISD::CTPOP: { - static const uint64_t mask[6] = { - 0x5555555555555555ULL, 0x3333333333333333ULL, - 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, - 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL - }; EVT VT = Op.getValueType(); - EVT ShVT = TLI.getShiftAmountTy(); - unsigned len = VT.getSizeInBits(); - for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { - //x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8]) - unsigned EltSize = VT.isVector() ? 
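// Illustrative sketch, not part of the patch: the shift-and-mask code that
// ExpandBSWAP emits, written out for a 32-bit value (the i16 case above is a
// single shift pair; i64 uses the same pattern with more masks).  bswap32 is
// a made-up name.
#include <cstdint>

static uint32_t bswap32(uint32_t X) {
  uint32_t B3 = X << 24;                    // byte 0 -> byte 3
  uint32_t B2 = (X << 8) & 0x00ff0000u;     // byte 1 -> byte 2
  uint32_t B1 = (X >> 8) & 0x0000ff00u;     // byte 2 -> byte 1
  uint32_t B0 = X >> 24;                    // byte 3 -> byte 0
  return B3 | B2 | B1 | B0;
}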
- VT.getVectorElementType().getSizeInBits() : len; - SDValue Tmp2 = DAG.getConstant(APInt(EltSize, mask[i]), VT); - SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); - Op = DAG.getNode(ISD::ADD, dl, VT, - DAG.getNode(ISD::AND, dl, VT, Op, Tmp2), - DAG.getNode(ISD::AND, dl, VT, - DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3), - Tmp2)); - } + EVT ShVT = TLI.getShiftAmountTy(VT); + unsigned Len = VT.getSizeInBits(); + + assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 && + "CTPOP not implemented for this type."); + + // This is the "best" algorithm from + // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel + + SDValue Mask55 = DAG.getConstant(SplatByte(Len, 0x55), VT); + SDValue Mask33 = DAG.getConstant(SplatByte(Len, 0x33), VT); + SDValue Mask0F = DAG.getConstant(SplatByte(Len, 0x0F), VT); + SDValue Mask01 = DAG.getConstant(SplatByte(Len, 0x01), VT); + + // v = v - ((v >> 1) & 0x55555555...) + Op = DAG.getNode(ISD::SUB, dl, VT, Op, + DAG.getNode(ISD::AND, dl, VT, + DAG.getNode(ISD::SRL, dl, VT, Op, + DAG.getConstant(1, ShVT)), + Mask55)); + // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) + Op = DAG.getNode(ISD::ADD, dl, VT, + DAG.getNode(ISD::AND, dl, VT, Op, Mask33), + DAG.getNode(ISD::AND, dl, VT, + DAG.getNode(ISD::SRL, dl, VT, Op, + DAG.getConstant(2, ShVT)), + Mask33)); + // v = (v + (v >> 4)) & 0x0F0F0F0F... + Op = DAG.getNode(ISD::AND, dl, VT, + DAG.getNode(ISD::ADD, dl, VT, Op, + DAG.getNode(ISD::SRL, dl, VT, Op, + DAG.getConstant(4, ShVT))), + Mask0F); + // v = (v * 0x01010101...) >> (Len - 8) + Op = DAG.getNode(ISD::SRL, dl, VT, + DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), + DAG.getConstant(Len - 8, ShVT)); + return Op; } case ISD::CTLZ: { @@ -2198,7 +2511,7 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, // // but see also: http://www.hackersdelight.org/HDcode/nlz.cc EVT VT = Op.getValueType(); - EVT ShVT = TLI.getShiftAmountTy(); + EVT ShVT = TLI.getShiftAmountTy(VT); unsigned len = VT.getSizeInBits(); for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); @@ -2229,6 +2542,92 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, } } +std::pair SelectionDAGLegalize::ExpandAtomic(SDNode *Node) { + unsigned Opc = Node->getOpcode(); + MVT VT = cast(Node)->getMemoryVT().getSimpleVT(); + RTLIB::Libcall LC; + + switch (Opc) { + default: + llvm_unreachable("Unhandled atomic intrinsic Expand!"); + break; + case ISD::ATOMIC_SWAP: + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type for atomic!"); + case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break; + case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break; + case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break; + case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break; + } + break; + case ISD::ATOMIC_CMP_SWAP: + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type for atomic!"); + case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break; + case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break; + case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break; + case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break; + } + break; + case ISD::ATOMIC_LOAD_ADD: + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type for atomic!"); + case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break; + case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break; + case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break; + case MVT::i64: LC = 
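// Illustrative sketch, not part of the patch: the new CTPOP expansion above,
// written out for a 64-bit value.  The constants are what SplatByte(64, 0x55),
// SplatByte(64, 0x33), SplatByte(64, 0x0F) and SplatByte(64, 0x01) produce;
// popcount64 is a made-up name.
#include <cstdint>

static unsigned popcount64(uint64_t V) {
  V = V - ((V >> 1) & UINT64_C(0x5555555555555555));          // 2-bit sums
  V = (V & UINT64_C(0x3333333333333333)) +
      ((V >> 2) & UINT64_C(0x3333333333333333));              // 4-bit sums
  V = (V + (V >> 4)) & UINT64_C(0x0F0F0F0F0F0F0F0F);          // byte sums
  return unsigned((V * UINT64_C(0x0101010101010101)) >> 56);  // shift by Len - 8
}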
RTLIB::SYNC_FETCH_AND_ADD_8; break; + } + break; + case ISD::ATOMIC_LOAD_SUB: + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type for atomic!"); + case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break; + case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break; + case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break; + case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break; + } + break; + case ISD::ATOMIC_LOAD_AND: + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type for atomic!"); + case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break; + case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break; + case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break; + case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break; + } + break; + case ISD::ATOMIC_LOAD_OR: + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type for atomic!"); + case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break; + case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break; + case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break; + case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break; + } + break; + case ISD::ATOMIC_LOAD_XOR: + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type for atomic!"); + case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break; + case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break; + case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break; + case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break; + } + break; + case ISD::ATOMIC_LOAD_NAND: + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type for atomic!"); + case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break; + case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break; + case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break; + case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break; + } + break; + } + + return ExpandChainLibCall(LC, Node, false); +} + void SelectionDAGLegalize::ExpandNode(SDNode *Node, SmallVectorImpl &Results) { DebugLoc dl = Node->getDebugLoc(); @@ -2254,10 +2653,53 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, case ISD::EH_RETURN: case ISD::EH_LABEL: case ISD::PREFETCH: - case ISD::MEMBARRIER: case ISD::VAEND: + case ISD::EH_SJLJ_LONGJMP: + case ISD::EH_SJLJ_DISPATCHSETUP: + // If the target didn't expand these, there's nothing to do, so just + // preserve the chain and be done. Results.push_back(Node->getOperand(0)); break; + case ISD::EH_SJLJ_SETJMP: + // If the target didn't expand this, just return 'zero' and preserve the + // chain. + Results.push_back(DAG.getConstant(0, MVT::i32)); + Results.push_back(Node->getOperand(0)); + break; + case ISD::MEMBARRIER: { + // If the target didn't lower this, lower it to '__sync_synchronize()' call + TargetLowering::ArgListTy Args; + std::pair CallResult = + TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), + false, false, false, false, 0, CallingConv::C, + /*isTailCall=*/false, + /*isReturnValueUsed=*/true, + DAG.getExternalSymbol("__sync_synchronize", + TLI.getPointerTy()), + Args, DAG, dl); + Results.push_back(CallResult.second); + break; + } + // By default, atomic intrinsics are marked Legal and lowered. Targets + // which don't support them directly, however, may want libcalls, in which + // case they mark them Expand, and we get here. 
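// Illustrative sketch, not part of the patch: what the Expand path above
// amounts to for ATOMIC_LOAD_ADD on i32 when a target has no native atomics:
// the node is rewritten into a call to the width-suffixed __sync_* helper
// (RTLIB::SYNC_FETCH_AND_ADD_4 here).  The GCC/Clang builtin below stands in
// for that libcall; atomicAddViaLibcall is a made-up name.
#include <cstdint>

static int32_t atomicAddViaLibcall(int32_t *Ptr, int32_t Val) {
  // Equivalent to the emitted call to the __sync_fetch_and_add_4 helper.
  return __sync_fetch_and_add(Ptr, Val);
}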
+ case ISD::ATOMIC_SWAP: + case ISD::ATOMIC_LOAD_ADD: + case ISD::ATOMIC_LOAD_SUB: + case ISD::ATOMIC_LOAD_AND: + case ISD::ATOMIC_LOAD_OR: + case ISD::ATOMIC_LOAD_XOR: + case ISD::ATOMIC_LOAD_NAND: + case ISD::ATOMIC_LOAD_MIN: + case ISD::ATOMIC_LOAD_MAX: + case ISD::ATOMIC_LOAD_UMIN: + case ISD::ATOMIC_LOAD_UMAX: + case ISD::ATOMIC_CMP_SWAP: { + std::pair Tmp = ExpandAtomic(Node); + Results.push_back(Tmp.first); + Results.push_back(Tmp.second); + break; + } case ISD::DYNAMIC_STACKALLOC: ExpandDYNAMIC_STACKALLOC(Node, Results); break; @@ -2269,10 +2711,10 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, EVT VT = Node->getValueType(0); if (VT.isInteger()) Results.push_back(DAG.getConstant(0, VT)); - else if (VT.isFloatingPoint()) + else { + assert(VT.isFloatingPoint() && "Unknown value type!"); Results.push_back(DAG.getConstantFP(0, VT)); - else - llvm_unreachable("Unknown value type!"); + } break; } case ISD::TRAP: { @@ -2280,7 +2722,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, TargetLowering::ArgListTy Args; std::pair CallResult = TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), - false, false, false, false, 0, CallingConv::C, false, + false, false, false, false, 0, CallingConv::C, + /*isTailCall=*/false, /*isReturnValueUsed=*/true, DAG.getExternalSymbol("abort", TLI.getPointerTy()), Args, DAG, dl); @@ -2288,7 +2731,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, break; } case ISD::FP_ROUND: - case ISD::BIT_CONVERT: + case ISD::BITCAST: Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), Node->getValueType(0), dl); Results.push_back(Tmp1); @@ -2304,7 +2747,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, // SAR. However, it is doubtful that any exist. EVT ExtraVT = cast(Node->getOperand(1))->getVT(); EVT VT = Node->getValueType(0); - EVT ShiftAmountTy = TLI.getShiftAmountTy(); + EVT ShiftAmountTy = TLI.getShiftAmountTy(VT); if (VT.isVector()) ShiftAmountTy = VT; unsigned BitsDiff = VT.getScalarType().getSizeInBits() - @@ -2339,8 +2782,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, SDValue True, False; EVT VT = Node->getOperand(0).getValueType(); EVT NVT = Node->getValueType(0); - const uint64_t zero[] = {0, 0}; - APFloat apf = APFloat(APInt(VT.getSizeInBits(), 2, zero)); + APFloat apf(APInt::getNullValue(VT.getSizeInBits())); APInt x = APInt::getSignBit(NVT.getSizeInBits()); (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven); Tmp1 = DAG.getConstantFP(apf, VT); @@ -2362,18 +2804,34 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, EVT VT = Node->getValueType(0); Tmp1 = Node->getOperand(0); Tmp2 = Node->getOperand(1); - SDValue VAList = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0, - false, false, 0); + unsigned Align = Node->getConstantOperandVal(3); + + SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, + MachinePointerInfo(V), false, false, 0); + SDValue VAList = VAListLoad; + + if (Align > TLI.getMinStackArgumentAlignment()) { + assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); + + VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, + DAG.getConstant(Align - 1, + TLI.getPointerTy())); + + VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList, + DAG.getConstant(-(int64_t)Align, + TLI.getPointerTy())); + } + // Increment the pointer, VAList, to the next vaarg Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, DAG.getConstant(TLI.getTargetData()-> - getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), + 
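// Illustrative sketch, not part of the patch: the SIGN_EXTEND_INREG expansion
// referenced above: shift the narrow value up by BitsDiff so its sign bit
// lands in the register's sign bit, then arithmetic-shift it back down.
// signExtendInReg is a made-up name and assumes a two's-complement arithmetic
// right shift, which is what the SRA node provides.
#include <cstdint>

static int32_t signExtendInReg(int32_t X, unsigned FromBits) {
  unsigned BitsDiff = 32 - FromBits;             // VT width minus ExtraVT width
  uint32_t Shifted = uint32_t(X) << BitsDiff;    // SHL by BitsDiff
  return int32_t(Shifted) >> BitsDiff;           // SRA by BitsDiff
}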
getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), TLI.getPointerTy())); // Store the incremented VAList to the legalized pointer - Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0, - false, false, 0); + Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, + MachinePointerInfo(V), false, false, 0); // Load the actual argument out of the pointer VAList - Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0, + Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(), false, false, 0)); Results.push_back(Results[0].getValue(1)); break; @@ -2384,16 +2842,17 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, const Value *VD = cast(Node->getOperand(3))->getValue(); const Value *VS = cast(Node->getOperand(4))->getValue(); Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0), - Node->getOperand(2), VS, 0, false, false, 0); - Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), VD, 0, - false, false, 0); + Node->getOperand(2), MachinePointerInfo(VS), + false, false, 0); + Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), + MachinePointerInfo(VD), false, false, 0); Results.push_back(Tmp1); break; } case ISD::EXTRACT_VECTOR_ELT: if (Node->getOperand(0).getValueType().getVectorNumElements() == 1) // This must be an access of the only element. Return it. - Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0), + Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Node->getOperand(0)); else Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0)); @@ -2402,6 +2861,9 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, case ISD::EXTRACT_SUBVECTOR: Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0))); break; + case ISD::INSERT_SUBVECTOR: + Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0))); + break; case ISD::CONCAT_VECTORS: { Results.push_back(ExpandVectorBuildThroughStack(Node)); break; @@ -2420,6 +2882,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, EVT VT = Node->getValueType(0); EVT EltVT = VT.getVectorElementType(); + if (getTypeAction(EltVT) == Promote) + EltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT); unsigned NumElems = VT.getVectorNumElements(); SmallVector Ops; for (unsigned i = 0; i != NumElems; ++i) { @@ -2447,7 +2911,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, // 1 -> Hi Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0), DAG.getConstant(OpTy.getSizeInBits()/2, - TLI.getShiftAmountTy())); + TLI.getShiftAmountTy(Node->getOperand(0).getValueType()))); Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1); } else { // 0 -> Lo @@ -2571,6 +3035,12 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64, RTLIB::REM_F80, RTLIB::REM_PPCF128)); break; + case ISD::FP16_TO_FP32: + Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false)); + break; + case ISD::FP32_TO_FP16: + Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false)); + break; case ISD::ConstantFP: { ConstantFPSDNode *CFP = cast(Node); // Check to see if this FP immediate is already legal. 
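// Illustrative sketch, not part of the patch: the VAARG expansion above for a
// simple pointer-bump va_list.  The saved pointer is loaded, rounded up with
// (p + Align - 1) & -Align when extra alignment is requested, the argument is
// loaded from the aligned slot, and the incremented pointer is stored back.
// vaArgI64 is a made-up name.
#include <cstdint>
#include <cstring>

static int64_t vaArgI64(char *&VAList, unsigned Align) {
  uintptr_t P = reinterpret_cast<uintptr_t>(VAList);            // load the va_list pointer
  P = (P + Align - 1) & ~uintptr_t(Align - 1);                  // ADD(Align-1), AND(-Align)
  int64_t Val;
  std::memcpy(&Val, reinterpret_cast<void *>(P), sizeof(Val));  // load the argument
  VAList = reinterpret_cast<char *>(P + sizeof(Val));           // store the incremented pointer
  return Val;
}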
@@ -2772,7 +3242,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, RHS); TopHalf = BottomHalf.getValue(1); - } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2))) { + } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(), + VT.getSizeInBits() * 2))) { EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2); LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); @@ -2782,14 +3253,33 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, DAG.getIntPtrConstant(1)); } else { - // FIXME: We should be able to fall back to a libcall with an illegal - // type in some cases. + // We can fall back to a libcall with an illegal type for the MUL if we + // have a libcall big enough. // Also, we can fall back to a division in some cases, but that's a big // performance hit in the general case. - llvm_unreachable("Don't know how to expand this operation yet!"); + EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2); + RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; + if (WideVT == MVT::i16) + LC = RTLIB::MUL_I16; + else if (WideVT == MVT::i32) + LC = RTLIB::MUL_I32; + else if (WideVT == MVT::i64) + LC = RTLIB::MUL_I64; + else if (WideVT == MVT::i128) + LC = RTLIB::MUL_I128; + assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!"); + LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); + RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); + + SDValue Ret = ExpandLibCall(LC, Node, isSigned); + BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Ret); + TopHalf = DAG.getNode(ISD::SRL, dl, Ret.getValueType(), Ret, + DAG.getConstant(VT.getSizeInBits(), TLI.getPointerTy())); + TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, TopHalf); } if (isSigned) { - Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, TLI.getShiftAmountTy()); + Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, + TLI.getShiftAmountTy(BottomHalf.getValueType())); Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1); TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1, ISD::SETNE); @@ -2807,7 +3297,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1)); Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2, DAG.getConstant(PairTy.getSizeInBits()/2, - TLI.getShiftAmountTy())); + TLI.getShiftAmountTy(PairTy))); Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2)); break; } @@ -2836,17 +3326,17 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, const TargetData &TD = *TLI.getTargetData(); unsigned EntrySize = DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD); - + Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(EntrySize, PTy)); SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8); SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr, - PseudoSourceValue::getJumpTable(), 0, MemVT, + MachinePointerInfo::getJumpTable(), MemVT, false, false, 0); Addr = LD; - if (TLI.getTargetMachine().getRelocationModel() == Reloc::PIC_) { + if (TM.getRelocationModel() == Reloc::PIC_) { // For PIC, the sequence is: // BRIND(load(Jumptable + index) + RelocBase) // RelocBase can be JumpTable, GOT or some sort of global base. 
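// Illustrative sketch, not part of the patch: the [SU]MULO expansion above for
// i32 when the double-width type is legal.  The operands are widened, the
// product is split, and overflow is the top half differing from zero
// (unsigned) or from the sign-spread of the bottom half (signed).  The helper
// names are made up; the signed version assumes an arithmetic right shift.
#include <cstdint>

static bool umulo32(uint32_t L, uint32_t R, uint32_t &Bottom) {
  uint64_t Wide = uint64_t(L) * uint64_t(R);     // multiply in the wide type
  Bottom = uint32_t(Wide);                       // TRUNCATE
  return uint32_t(Wide >> 32) != 0;              // SETNE top half, 0
}

static bool smulo32(int32_t L, int32_t R, int32_t &Bottom) {
  int64_t Wide = int64_t(L) * int64_t(R);
  Bottom = int32_t(Wide);
  int32_t Top = int32_t(uint64_t(Wide) >> 32);
  return Top != (Bottom >> 31);                  // SETNE top half, SRA(bottom, 31)
}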
@@ -2985,7 +3475,7 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node, Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1); Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1, - DAG.getConstant(DiffBits, TLI.getShiftAmountTy())); + DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT))); Results.push_back(Tmp1); break; } @@ -3006,13 +3496,12 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node, case ISD::XOR: { unsigned ExtOp, TruncOp; if (OVT.isVector()) { - ExtOp = ISD::BIT_CONVERT; - TruncOp = ISD::BIT_CONVERT; - } else if (OVT.isInteger()) { + ExtOp = ISD::BITCAST; + TruncOp = ISD::BITCAST; + } else { + assert(OVT.isInteger() && "Cannot promote logic operation"); ExtOp = ISD::ANY_EXTEND; TruncOp = ISD::TRUNCATE; - } else { - llvm_report_error("Cannot promote logic operation"); } // Promote each of the values to the new type. Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0)); @@ -3025,8 +3514,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node, case ISD::SELECT: { unsigned ExtOp, TruncOp; if (Node->getValueType(0).isVector()) { - ExtOp = ISD::BIT_CONVERT; - TruncOp = ISD::BIT_CONVERT; + ExtOp = ISD::BITCAST; + TruncOp = ISD::BITCAST; } else if (Node->getValueType(0).isInteger()) { ExtOp = ISD::ANY_EXTEND; TruncOp = ISD::TRUNCATE; @@ -3053,12 +3542,12 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node, cast(Node)->getMask(Mask); // Cast the two input vectors. - Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(0)); - Tmp2 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(1)); + Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0)); + Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1)); // Convert the shuffle mask to the right # elements. Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask); - Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, OVT, Tmp1); + Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1); Results.push_back(Tmp1); break; }
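// Illustrative sketch, not part of the patch: the BSWAP promotion at the top
// of PromoteNode above.  Swapping an i16 inside an i32 register leaves the
// bytes in the upper half, so the result is shifted right by DiffBits (16)
// afterwards.  bswap16ViaI32 is a made-up name.
#include <cstdint>

static uint16_t bswap16ViaI32(uint16_t X) {
  uint32_t Wide = X;                                           // ZERO_EXTEND
  uint32_t Swapped = (Wide << 24) | ((Wide & 0xff00u) << 8) |
                     ((Wide >> 8) & 0xff00u) | (Wide >> 24);   // BSWAP i32
  return uint16_t(Swapped >> 16);                              // SRL by DiffBits
}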