#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
SDOperand ExpandEXTRACT_SUBVECTOR(SDOperand Op);
SDOperand ExpandEXTRACT_VECTOR_ELT(SDOperand Op);
-
- SDOperand getIntPtrConstant(uint64_t Val) {
- return DAG.getConstant(Val, TLI.getPointerTy());
- }
};
}
// the constant pool as a float, even if it is statically typed as a
// double.
MVT::ValueType VT = CFP->getValueType(0);
- bool isDouble = VT == MVT::f64;
ConstantFP *LLVMC = ConstantFP::get(MVT::getTypeForValueType(VT),
CFP->getValueAPF());
if (!UseCP) {
if (VT!=MVT::f64 && VT!=MVT::f32)
assert(0 && "Invalid type expansion");
return DAG.getConstant(LLVMC->getValueAPF().convertToAPInt().getZExtValue(),
- isDouble ? MVT::i64 : MVT::i32);
+ (VT == MVT::f64) ? MVT::i64 : MVT::i32);
}
- if (isDouble && CFP->isValueValidForType(MVT::f32, CFP->getValueAPF()) &&
- // Only do this if the target has a native EXTLOAD instruction from f32.
- // Do not try to be clever about long doubles (so far)
- TLI.isLoadXLegal(ISD::EXTLOAD, MVT::f32)) {
- LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC,Type::FloatTy));
- VT = MVT::f32;
- Extend = true;
+ MVT::ValueType OrigVT = VT;
+ MVT::ValueType SVT = VT;
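+  // Walk down through the smaller FP types: if the constant's value survives
+  // conversion to a smaller type and the target has a legal extending load
+  // from it, emit the constant-pool entry in that type and extend on load.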
+ while (SVT != MVT::f32) {
+ SVT = (unsigned)SVT - 1;
+ if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
+ // Only do this if the target has a native EXTLOAD instruction from
+        // a smaller type.
+ TLI.isLoadXLegal(ISD::EXTLOAD, SVT)) {
+ const Type *SType = MVT::getTypeForValueType(SVT);
+ LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
+ VT = SVT;
+ Extend = true;
+ }
}
SDOperand CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
- if (Extend) {
- return DAG.getExtLoad(ISD::EXTLOAD, MVT::f64, DAG.getEntryNode(),
- CPIdx, NULL, 0, MVT::f32);
- } else {
- return DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0);
- }
+ if (Extend)
+ return DAG.getExtLoad(ISD::EXTLOAD, OrigVT, DAG.getEntryNode(),
+ CPIdx, PseudoSourceValue::getConstantPool(),
+ 0, VT);
+ return DAG.getLoad(OrigVT, DAG.getEntryNode(), CPIdx,
+ PseudoSourceValue::getConstantPool(), 0);
}
MVT::ValueType VT = Val.getValueType();
int Alignment = ST->getAlignment();
int SVOffset = ST->getSrcValueOffset();
- if (MVT::isFloatingPoint(ST->getStoredVT())) {
+ if (MVT::isFloatingPoint(ST->getMemoryVT()) ||
+ MVT::isVector(ST->getMemoryVT())) {
// Expand to a bitconvert of the value to the integer type of the
// same size, then a (misaligned) int store.
MVT::ValueType intVT;
- if (VT==MVT::f64)
+ if (MVT::is128BitVector(VT) || VT == MVT::ppcf128 || VT == MVT::f128)
+ intVT = MVT::i128;
+ else if (MVT::is64BitVector(VT) || VT==MVT::f64)
intVT = MVT::i64;
else if (VT==MVT::f32)
intVT = MVT::i32;
else
- assert(0 && "Unaligned load of unsupported floating point type");
+ assert(0 && "Unaligned store of unsupported type");
SDOperand Result = DAG.getNode(ISD::BIT_CONVERT, intVT, Val);
return DAG.getStore(Chain, Result, Ptr, ST->getSrcValue(),
SVOffset, ST->isVolatile(), Alignment);
}
- assert(MVT::isInteger(ST->getStoredVT()) &&
+ assert(MVT::isInteger(ST->getMemoryVT()) &&
+ !MVT::isVector(ST->getMemoryVT()) &&
"Unaligned store of unknown type.");
// Get the half-size VT
- MVT::ValueType NewStoredVT = ST->getStoredVT() - 1;
+ MVT::ValueType NewStoredVT = ST->getMemoryVT() - 1;
int NumBits = MVT::getSizeInBits(NewStoredVT);
int IncrementSize = NumBits / 8;
SDOperand Chain = LD->getChain();
SDOperand Ptr = LD->getBasePtr();
MVT::ValueType VT = LD->getValueType(0);
- MVT::ValueType LoadedVT = LD->getLoadedVT();
- if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT)) {
+ MVT::ValueType LoadedVT = LD->getMemoryVT();
+ if (MVT::isFloatingPoint(VT) || MVT::isVector(VT)) {
// Expand to a (misaligned) integer load of the same size,
- // then bitconvert to floating point.
+ // then bitconvert to floating point or vector.
MVT::ValueType intVT;
- if (LoadedVT == MVT::f64)
+ if (MVT::is128BitVector(LoadedVT) ||
+ LoadedVT == MVT::ppcf128 || LoadedVT == MVT::f128)
+ intVT = MVT::i128;
+ else if (MVT::is64BitVector(LoadedVT) || LoadedVT == MVT::f64)
intVT = MVT::i64;
else if (LoadedVT == MVT::f32)
intVT = MVT::i32;
else
- assert(0 && "Unaligned load of unsupported floating point type");
+ assert(0 && "Unaligned load of unsupported type");
SDOperand newLoad = DAG.getLoad(intVT, Chain, Ptr, LD->getSrcValue(),
SVOffset, LD->isVolatile(),
LD->getAlignment());
SDOperand Result = DAG.getNode(ISD::BIT_CONVERT, LoadedVT, newLoad);
- if (LoadedVT != VT)
+ if (MVT::isFloatingPoint(VT) && LoadedVT != VT)
Result = DAG.getNode(ISD::FP_EXTEND, VT, Result);
SDOperand Ops[] = { Result, Chain };
return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, MVT::Other),
Ops, 2);
}
- assert((MVT::isInteger(LoadedVT) || MVT::isVector(LoadedVT)) &&
+ assert(MVT::isInteger(LoadedVT) && !MVT::isVector(LoadedVT) &&
"Unaligned load of unsupported type.");
- // Compute the new VT that is half the size of the old one. We either have an
- // integer MVT or we have a vector MVT.
+ // Compute the new VT that is half the size of the old one. This is an
+ // integer MVT.
unsigned NumBits = MVT::getSizeInBits(LoadedVT);
MVT::ValueType NewLoadedVT;
- if (!MVT::isVector(LoadedVT)) {
- NewLoadedVT = MVT::getIntegerType(NumBits/2);
- } else {
- // FIXME: This is not right for <1 x anything> it is also not right for
- // non-power-of-two vectors.
- NewLoadedVT = MVT::getVectorType(MVT::getVectorElementType(LoadedVT),
- MVT::getVectorNumElements(LoadedVT)/2);
- }
+ NewLoadedVT = MVT::getIntegerType(NumBits/2);
NumBits >>= 1;
unsigned Alignment = LD->getAlignment();
case ISD::TargetExternalSymbol:
case ISD::VALUETYPE:
case ISD::SRCVALUE:
+ case ISD::MEMOPERAND:
case ISD::STRING:
case ISD::CONDCODE:
// Primitives must all be legal.
} else {
unsigned Line = cast<ConstantSDNode>(LineOp)->getValue();
unsigned Col = cast<ConstantSDNode>(ColOp)->getValue();
- unsigned ID = MMI->RecordLabel(Line, Col, SrcFile);
+ unsigned ID = MMI->RecordSourceLine(Line, Col, SrcFile);
Ops.push_back(DAG.getConstant(ID, MVT::i32));
- Result = DAG.getNode(ISD::LABEL, MVT::Other,&Ops[0],Ops.size());
+ Ops.push_back(DAG.getConstant(0, MVT::i32)); // a debug label
+ Result = DAG.getNode(ISD::LABEL, MVT::Other, &Ops[0], Ops.size());
}
} else {
Result = Tmp1; // chain
break;
}
break;
+
+ case ISD::DECLARE:
+ assert(Node->getNumOperands() == 3 && "Invalid DECLARE node!");
+ switch (TLI.getOperationAction(ISD::DECLARE, MVT::Other)) {
+ default: assert(0 && "This action is not supported yet!");
+ case TargetLowering::Legal:
+ Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain.
+ Tmp2 = LegalizeOp(Node->getOperand(1)); // Legalize the address.
+ Tmp3 = LegalizeOp(Node->getOperand(2)); // Legalize the variable.
+ Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, Tmp3);
+ break;
+ case TargetLowering::Expand:
+ Result = LegalizeOp(Node->getOperand(0));
+ break;
+ }
+ break;
case ISD::DEBUG_LOC:
assert(Node->getNumOperands() == 4 && "Invalid DEBUG_LOC node!");
break;
case ISD::LABEL:
- assert(Node->getNumOperands() == 2 && "Invalid LABEL node!");
+ assert(Node->getNumOperands() == 3 && "Invalid LABEL node!");
switch (TLI.getOperationAction(ISD::LABEL, MVT::Other)) {
default: assert(0 && "This action is not supported yet!");
case TargetLowering::Legal:
Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain.
Tmp2 = LegalizeOp(Node->getOperand(1)); // Legalize the label id.
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2);
+ Tmp3 = LegalizeOp(Node->getOperand(2)); // Legalize the "flavor" operand.
+ Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, Tmp3);
break;
case TargetLowering::Expand:
Result = LegalizeOp(Node->getOperand(0));
}
break;
+ case ISD::MEMBARRIER: {
+ assert(Node->getNumOperands() == 6 && "Invalid MemBarrier node!");
+ switch (TLI.getOperationAction(ISD::MEMBARRIER, MVT::Other)) {
+ default: assert(0 && "This action is not supported yet!");
+ case TargetLowering::Legal: {
+ SDOperand Ops[6];
+ Ops[0] = LegalizeOp(Node->getOperand(0)); // Legalize the chain.
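+      // The remaining five operands are membarrier flag values; promote any
+      // whose type (typically i1) is not legal on this target.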
+ for (int x = 1; x < 6; ++x) {
+ Ops[x] = Node->getOperand(x);
+ if (!isTypeLegal(Ops[x].getValueType()))
+ Ops[x] = PromoteOp(Ops[x]);
+ }
+ Result = DAG.UpdateNodeOperands(Result, &Ops[0], 6);
+ break;
+ }
+ case TargetLowering::Expand:
+      // There is no libgcc call for this op
+ Result = Node->getOperand(0); // Noop
+ break;
+ }
+ break;
+ }
+
+ case ISD::ATOMIC_LCS:
+ case ISD::ATOMIC_LAS:
+ case ISD::ATOMIC_SWAP: {
+ assert(((Node->getNumOperands() == 4 && Node->getOpcode() == ISD::ATOMIC_LCS) ||
+ (Node->getNumOperands() == 3 && Node->getOpcode() == ISD::ATOMIC_LAS) ||
+ (Node->getNumOperands() == 3 && Node->getOpcode() == ISD::ATOMIC_SWAP)) &&
+ "Invalid Atomic node!");
+ int num = Node->getOpcode() == ISD::ATOMIC_LCS ? 4 : 3;
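+    // ATOMIC_LCS (compare-and-swap) carries chain, pointer, comparison value
+    // and new value; ATOMIC_LAS and ATOMIC_SWAP carry chain, pointer and a
+    // single value operand.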
+ SDOperand Ops[4];
+ for (int x = 0; x < num; ++x)
+ Ops[x] = LegalizeOp(Node->getOperand(x));
+ Result = DAG.UpdateNodeOperands(Result, &Ops[0], num);
+
+ switch (TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0))) {
+ default: assert(0 && "This action is not supported yet!");
+ case TargetLowering::Custom:
+ Result = TLI.LowerOperation(Result, DAG);
+ break;
+ case TargetLowering::Legal:
+ break;
+ }
+ AddLegalizedOperand(SDOperand(Node, 0), Result.getValue(0));
+ AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1));
+ return Result.getValue(Op.ResNo);
+ }
+
case ISD::Constant: {
ConstantSDNode *CN = cast<ConstantSDNode>(Node);
unsigned opAction =
// leave these constants as ConstantFP nodes for the target to deal with.
ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
- // Check to see if this FP immediate is already legal.
- bool isLegal = false;
- for (TargetLowering::legal_fpimm_iterator I = TLI.legal_fpimm_begin(),
- E = TLI.legal_fpimm_end(); I != E; ++I)
- if (CFP->isExactlyValue(*I)) {
- isLegal = true;
- break;
- }
-
- // If this is a legal constant, turn it into a TargetConstantFP node.
- if (isLegal) {
- Result = DAG.getTargetConstantFP(CFP->getValueAPF(),
- CFP->getValueType(0));
- break;
- }
-
switch (TLI.getOperationAction(ISD::ConstantFP, CFP->getValueType(0))) {
default: assert(0 && "This action is not supported yet!");
+ case TargetLowering::Legal:
+ break;
case TargetLowering::Custom:
Tmp3 = TLI.LowerOperation(Result, DAG);
if (Tmp3.Val) {
break;
}
// FALLTHROUGH
- case TargetLowering::Expand:
+ case TargetLowering::Expand: {
+ // Check to see if this FP immediate is already legal.
+ bool isLegal = false;
+ for (TargetLowering::legal_fpimm_iterator I = TLI.legal_fpimm_begin(),
+ E = TLI.legal_fpimm_end(); I != E; ++I) {
+ if (CFP->isExactlyValue(*I)) {
+ isLegal = true;
+ break;
+ }
+ }
+ // If this is a legal constant, turn it into a TargetConstantFP node.
+ if (isLegal)
+ break;
Result = ExpandConstantFP(CFP, true, DAG, TLI);
}
+ }
break;
}
case ISD::TokenFactor:
break;
case ISD::INSERT_VECTOR_ELT:
Tmp1 = LegalizeOp(Node->getOperand(0)); // InVec
- Tmp2 = LegalizeOp(Node->getOperand(1)); // InVal
Tmp3 = LegalizeOp(Node->getOperand(2)); // InEltNo
+
+ // The type of the value to insert may not be legal, even though the vector
+ // type is legal. Legalize/Promote accordingly. We do not handle Expand
+ // here.
+ switch (getTypeAction(Node->getOperand(1).getValueType())) {
+ default: assert(0 && "Cannot expand insert element operand");
+ case Legal: Tmp2 = LegalizeOp(Node->getOperand(1)); break;
+ case Promote: Tmp2 = PromoteOp(Node->getOperand(1)); break;
+ }
Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, Tmp3);
switch (TLI.getOperationAction(ISD::INSERT_VECTOR_ELT,
// If the insert index is a constant, codegen this as a scalar_to_vector,
// then a shuffle that inserts it into the right position in the vector.
if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Tmp3)) {
- SDOperand ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR,
- Tmp1.getValueType(), Tmp2);
-
- unsigned NumElts = MVT::getVectorNumElements(Tmp1.getValueType());
- MVT::ValueType ShufMaskVT = MVT::getIntVectorWithNumElements(NumElts);
- MVT::ValueType ShufMaskEltVT = MVT::getVectorElementType(ShufMaskVT);
-
- // We generate a shuffle of InVec and ScVec, so the shuffle mask should
- // be 0,1,2,3,4,5... with the appropriate element replaced with elt 0 of
- // the RHS.
- SmallVector<SDOperand, 8> ShufOps;
- for (unsigned i = 0; i != NumElts; ++i) {
- if (i != InsertPos->getValue())
- ShufOps.push_back(DAG.getConstant(i, ShufMaskEltVT));
- else
- ShufOps.push_back(DAG.getConstant(NumElts, ShufMaskEltVT));
+ // SCALAR_TO_VECTOR requires that the type of the value being inserted
+ // match the element type of the vector being created.
+ if (Tmp2.getValueType() ==
+ MVT::getVectorElementType(Op.getValueType())) {
+ SDOperand ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR,
+ Tmp1.getValueType(), Tmp2);
+
+ unsigned NumElts = MVT::getVectorNumElements(Tmp1.getValueType());
+ MVT::ValueType ShufMaskVT = MVT::getIntVectorWithNumElements(NumElts);
+ MVT::ValueType ShufMaskEltVT = MVT::getVectorElementType(ShufMaskVT);
+
+ // We generate a shuffle of InVec and ScVec, so the shuffle mask
+ // should be 0,1,2,3,4,5... with the appropriate element replaced with
+ // elt 0 of the RHS.
+ SmallVector<SDOperand, 8> ShufOps;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ if (i != InsertPos->getValue())
+ ShufOps.push_back(DAG.getConstant(i, ShufMaskEltVT));
+ else
+ ShufOps.push_back(DAG.getConstant(NumElts, ShufMaskEltVT));
+ }
+ SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, ShufMaskVT,
+ &ShufOps[0], ShufOps.size());
+
+ Result = DAG.getNode(ISD::VECTOR_SHUFFLE, Tmp1.getValueType(),
+ Tmp1, ScVec, ShufMask);
+ Result = LegalizeOp(Result);
+ break;
}
- SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, ShufMaskVT,
- &ShufOps[0], ShufOps.size());
-
- Result = DAG.getNode(ISD::VECTOR_SHUFFLE, Tmp1.getValueType(),
- Tmp1, ScVec, ShufMask);
- Result = LegalizeOp(Result);
- break;
}
// If the target doesn't support this, we have to spill the input vector
// permute it into place, if the idx is a constant and if the idx is
// supported by the target.
MVT::ValueType VT = Tmp1.getValueType();
- MVT::ValueType EltVT = Tmp2.getValueType();
+ MVT::ValueType EltVT = MVT::getVectorElementType(VT);
MVT::ValueType IdxVT = Tmp3.getValueType();
MVT::ValueType PtrVT = TLI.getPointerTy();
SDOperand StackPtr = DAG.CreateStackTemporary(VT);
+
+ FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr.Val);
+ int SPFI = StackPtrFI->getIndex();
+
// Store the vector.
- SDOperand Ch = DAG.getStore(DAG.getEntryNode(), Tmp1, StackPtr, NULL, 0);
+ SDOperand Ch = DAG.getStore(DAG.getEntryNode(), Tmp1, StackPtr,
+ PseudoSourceValue::getFixedStack(),
+ SPFI);
// Truncate or zero extend offset to target pointer type.
unsigned CastOpc = (IdxVT > PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
Tmp3 = DAG.getNode(ISD::MUL, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT));
SDOperand StackPtr2 = DAG.getNode(ISD::ADD, IdxVT, Tmp3, StackPtr);
// Store the scalar value.
- Ch = DAG.getStore(Ch, Tmp2, StackPtr2, NULL, 0);
+ Ch = DAG.getTruncStore(Ch, Tmp2, StackPtr2,
+ PseudoSourceValue::getFixedStack(), SPFI, EltVT);
// Load the updated vector.
- Result = DAG.getLoad(VT, Ch, StackPtr, NULL, 0);
+ Result = DAG.getLoad(VT, Ch, StackPtr,
+ PseudoSourceValue::getFixedStack(), SPFI);
break;
}
}
SDOperand LD;
switch (EntrySize) {
default: assert(0 && "Size of jump table not supported yet."); break;
- case 4: LD = DAG.getLoad(MVT::i32, Chain, Addr, NULL, 0); break;
- case 8: LD = DAG.getLoad(MVT::i64, Chain, Addr, NULL, 0); break;
+ case 4: LD = DAG.getLoad(MVT::i32, Chain, Addr,
+ PseudoSourceValue::getJumpTable(), 0); break;
+ case 8: LD = DAG.getLoad(MVT::i64, Chain, Addr,
+ PseudoSourceValue::getJumpTable(), 0); break;
}
Addr = LD;
case Legal:
Tmp2 = LegalizeOp(Node->getOperand(1)); // Legalize the condition.
break;
- case Promote:
+ case Promote: {
Tmp2 = PromoteOp(Node->getOperand(1)); // Promote the condition.
// The top bits of the promoted condition are not necessarily zero, ensure
// that the value is properly zero extended.
+ unsigned BitWidth = Tmp2.getValueSizeInBits();
if (!DAG.MaskedValueIsZero(Tmp2,
- MVT::getIntVTBitMask(Tmp2.getValueType())^1))
+ APInt::getHighBitsSet(BitWidth, BitWidth-1)))
Tmp2 = DAG.getZeroExtendInReg(Tmp2, MVT::i1);
break;
}
+ }
// Basic block destination (Op#2) is always legal.
Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, Node->getOperand(2));
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses()) {
unsigned ABIAlignment = TLI.getTargetData()->
- getABITypeAlignment(MVT::getTypeForValueType(LD->getLoadedVT()));
+ getABITypeAlignment(MVT::getTypeForValueType(LD->getMemoryVT()));
if (LD->getAlignment() < ABIAlignment){
Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG,
TLI);
AddLegalizedOperand(SDOperand(Node, 1), Tmp4);
return Op.ResNo ? Tmp4 : Tmp3;
} else {
- MVT::ValueType SrcVT = LD->getLoadedVT();
- switch (TLI.getLoadXAction(ExtType, SrcVT)) {
- default: assert(0 && "This action is not supported yet!");
- case TargetLowering::Promote:
- assert(SrcVT == MVT::i1 &&
- "Can only promote extending LOAD from i1 -> i8!");
- Result = DAG.getExtLoad(ExtType, Node->getValueType(0), Tmp1, Tmp2,
- LD->getSrcValue(), LD->getSrcValueOffset(),
- MVT::i8, LD->isVolatile(), LD->getAlignment());
- Tmp1 = Result.getValue(0);
- Tmp2 = Result.getValue(1);
- break;
- case TargetLowering::Custom:
- isCustom = true;
- // FALLTHROUGH
- case TargetLowering::Legal:
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
- Tmp1 = Result.getValue(0);
- Tmp2 = Result.getValue(1);
-
- if (isCustom) {
- Tmp3 = TLI.LowerOperation(Result, DAG);
- if (Tmp3.Val) {
- Tmp1 = LegalizeOp(Tmp3);
- Tmp2 = LegalizeOp(Tmp3.getValue(1));
- }
+ MVT::ValueType SrcVT = LD->getMemoryVT();
+ unsigned SrcWidth = MVT::getSizeInBits(SrcVT);
+ int SVOffset = LD->getSrcValueOffset();
+ unsigned Alignment = LD->getAlignment();
+ bool isVolatile = LD->isVolatile();
+
+ if (SrcWidth != MVT::getStoreSizeInBits(SrcVT) &&
+ // Some targets pretend to have an i1 loading operation, and actually
+ // load an i8. This trick is correct for ZEXTLOAD because the top 7
+ // bits are guaranteed to be zero; it helps the optimizers understand
+ // that these bits are zero. It is also useful for EXTLOAD, since it
+ // tells the optimizers that those bits are undefined. It would be
+ // nice to have an effective generic way of getting these benefits...
+ // Until such a way is found, don't insist on promoting i1 here.
+ (SrcVT != MVT::i1 ||
+ TLI.getLoadXAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
+ // Promote to a byte-sized load if not loading an integral number of
+ // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
+ unsigned NewWidth = MVT::getStoreSizeInBits(SrcVT);
+ MVT::ValueType NVT = MVT::getIntegerType(NewWidth);
+ SDOperand Ch;
+
+ // The extra bits are guaranteed to be zero, since we stored them that
+ // way. A zext load from NVT thus automatically gives zext from SrcVT.
+
+ ISD::LoadExtType NewExtType =
+ ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
+
+ Result = DAG.getExtLoad(NewExtType, Node->getValueType(0),
+ Tmp1, Tmp2, LD->getSrcValue(), SVOffset,
+ NVT, isVolatile, Alignment);
+
+ Ch = Result.getValue(1); // The chain.
+
+ if (ExtType == ISD::SEXTLOAD)
+ // Having the top bits zero doesn't help when sign extending.
+ Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, Result.getValueType(),
+ Result, DAG.getValueType(SrcVT));
+ else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
+ // All the top bits are guaranteed to be zero - inform the optimizers.
+ Result = DAG.getNode(ISD::AssertZext, Result.getValueType(), Result,
+ DAG.getValueType(SrcVT));
+
+ Tmp1 = LegalizeOp(Result);
+ Tmp2 = LegalizeOp(Ch);
+ } else if (SrcWidth & (SrcWidth - 1)) {
+ // If not loading a power-of-2 number of bits, expand as two loads.
+ assert(MVT::isExtendedVT(SrcVT) && !MVT::isVector(SrcVT) &&
+ "Unsupported extload!");
+ unsigned RoundWidth = 1 << Log2_32(SrcWidth);
+ assert(RoundWidth < SrcWidth);
+ unsigned ExtraWidth = SrcWidth - RoundWidth;
+ assert(ExtraWidth < RoundWidth);
+ assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
+ "Load size not an integral number of bytes!");
+ MVT::ValueType RoundVT = MVT::getIntegerType(RoundWidth);
+ MVT::ValueType ExtraVT = MVT::getIntegerType(ExtraWidth);
+ SDOperand Lo, Hi, Ch;
+ unsigned IncrementSize;
+
+ if (TLI.isLittleEndian()) {
+ // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
+ // Load the bottom RoundWidth bits.
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, Node->getValueType(0), Tmp1, Tmp2,
+ LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
+ Alignment);
+
+ // Load the remaining ExtraWidth bits.
+ IncrementSize = RoundWidth / 8;
+ Tmp2 = DAG.getNode(ISD::ADD, Tmp2.getValueType(), Tmp2,
+ DAG.getIntPtrConstant(IncrementSize));
+ Hi = DAG.getExtLoad(ExtType, Node->getValueType(0), Tmp1, Tmp2,
+ LD->getSrcValue(), SVOffset + IncrementSize,
+ ExtraVT, isVolatile,
+ MinAlign(Alignment, IncrementSize));
+
+ // Build a factor node to remember that this load is independent of the
+ // other one.
+ Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
+ Hi.getValue(1));
+
+ // Move the top bits to the right place.
+ Hi = DAG.getNode(ISD::SHL, Hi.getValueType(), Hi,
+ DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));
+
+ // Join the hi and lo parts.
+ Result = DAG.getNode(ISD::OR, Node->getValueType(0), Lo, Hi);
} else {
- // If this is an unaligned load and the target doesn't support it,
- // expand it.
- if (!TLI.allowsUnalignedMemoryAccesses()) {
- unsigned ABIAlignment = TLI.getTargetData()->
- getABITypeAlignment(MVT::getTypeForValueType(LD->getLoadedVT()));
- if (LD->getAlignment() < ABIAlignment){
- Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG,
- TLI);
- Tmp1 = Result.getOperand(0);
- Tmp2 = Result.getOperand(1);
- Tmp1 = LegalizeOp(Tmp1);
- Tmp2 = LegalizeOp(Tmp2);
+ // Big endian - avoid unaligned loads.
+ // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
+ // Load the top RoundWidth bits.
+ Hi = DAG.getExtLoad(ExtType, Node->getValueType(0), Tmp1, Tmp2,
+ LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
+ Alignment);
+
+ // Load the remaining ExtraWidth bits.
+ IncrementSize = RoundWidth / 8;
+ Tmp2 = DAG.getNode(ISD::ADD, Tmp2.getValueType(), Tmp2,
+ DAG.getIntPtrConstant(IncrementSize));
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, Node->getValueType(0), Tmp1, Tmp2,
+ LD->getSrcValue(), SVOffset + IncrementSize,
+ ExtraVT, isVolatile,
+ MinAlign(Alignment, IncrementSize));
+
+ // Build a factor node to remember that this load is independent of the
+ // other one.
+ Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
+ Hi.getValue(1));
+
+ // Move the top bits to the right place.
+ Hi = DAG.getNode(ISD::SHL, Hi.getValueType(), Hi,
+ DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));
+
+ // Join the hi and lo parts.
+ Result = DAG.getNode(ISD::OR, Node->getValueType(0), Lo, Hi);
+ }
+
+ Tmp1 = LegalizeOp(Result);
+ Tmp2 = LegalizeOp(Ch);
+ } else {
+ switch (TLI.getLoadXAction(ExtType, SrcVT)) {
+ default: assert(0 && "This action is not supported yet!");
+ case TargetLowering::Custom:
+ isCustom = true;
+ // FALLTHROUGH
+ case TargetLowering::Legal:
+ Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
+ Tmp1 = Result.getValue(0);
+ Tmp2 = Result.getValue(1);
+
+ if (isCustom) {
+ Tmp3 = TLI.LowerOperation(Result, DAG);
+ if (Tmp3.Val) {
+ Tmp1 = LegalizeOp(Tmp3);
+ Tmp2 = LegalizeOp(Tmp3.getValue(1));
+ }
+ } else {
+ // If this is an unaligned load and the target doesn't support it,
+ // expand it.
+ if (!TLI.allowsUnalignedMemoryAccesses()) {
+ unsigned ABIAlignment = TLI.getTargetData()->
+ getABITypeAlignment(MVT::getTypeForValueType(LD->getMemoryVT()));
+ if (LD->getAlignment() < ABIAlignment){
+ Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG,
+ TLI);
+ Tmp1 = Result.getOperand(0);
+ Tmp2 = Result.getOperand(1);
+ Tmp1 = LegalizeOp(Tmp1);
+ Tmp2 = LegalizeOp(Tmp2);
+ }
}
}
- }
- break;
- case TargetLowering::Expand:
- // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND
- if (SrcVT == MVT::f32 && Node->getValueType(0) == MVT::f64) {
- SDOperand Load = DAG.getLoad(SrcVT, Tmp1, Tmp2, LD->getSrcValue(),
- LD->getSrcValueOffset(),
- LD->isVolatile(), LD->getAlignment());
- Result = DAG.getNode(ISD::FP_EXTEND, Node->getValueType(0), Load);
- Tmp1 = LegalizeOp(Result); // Relegalize new nodes.
- Tmp2 = LegalizeOp(Load.getValue(1));
+ break;
+ case TargetLowering::Expand:
+ // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND
+ if (SrcVT == MVT::f32 && Node->getValueType(0) == MVT::f64) {
+ SDOperand Load = DAG.getLoad(SrcVT, Tmp1, Tmp2, LD->getSrcValue(),
+ LD->getSrcValueOffset(),
+ LD->isVolatile(), LD->getAlignment());
+ Result = DAG.getNode(ISD::FP_EXTEND, Node->getValueType(0), Load);
+ Tmp1 = LegalizeOp(Result); // Relegalize new nodes.
+ Tmp2 = LegalizeOp(Load.getValue(1));
+ break;
+ }
+      assert(ExtType != ISD::EXTLOAD &&
+             "EXTLOAD should always be supported!");
+ // Turn the unsupported load into an EXTLOAD followed by an explicit
+ // zero/sign extend inreg.
+ Result = DAG.getExtLoad(ISD::EXTLOAD, Node->getValueType(0),
+ Tmp1, Tmp2, LD->getSrcValue(),
+ LD->getSrcValueOffset(), SrcVT,
+ LD->isVolatile(), LD->getAlignment());
+ SDOperand ValRes;
+ if (ExtType == ISD::SEXTLOAD)
+ ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, Result.getValueType(),
+ Result, DAG.getValueType(SrcVT));
+ else
+ ValRes = DAG.getZeroExtendInReg(Result, SrcVT);
+ Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes.
+ Tmp2 = LegalizeOp(Result.getValue(1)); // Relegalize new nodes.
break;
}
- assert(ExtType != ISD::EXTLOAD &&"EXTLOAD should always be supported!");
- // Turn the unsupported load into an EXTLOAD followed by an explicit
- // zero/sign extend inreg.
- Result = DAG.getExtLoad(ISD::EXTLOAD, Node->getValueType(0),
- Tmp1, Tmp2, LD->getSrcValue(),
- LD->getSrcValueOffset(), SrcVT,
- LD->isVolatile(), LD->getAlignment());
- SDOperand ValRes;
- if (ExtType == ISD::SEXTLOAD)
- ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, Result.getValueType(),
- Result, DAG.getValueType(SrcVT));
- else
- ValRes = DAG.getZeroExtendInReg(Result, SrcVT);
- Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes.
- Tmp2 = LegalizeOp(Result.getValue(1)); // Relegalize new nodes.
- break;
}
+
// Since loads produce two values, make sure to remember that we legalized
// both of them.
AddLegalizedOperand(SDOperand(Node, 0), Tmp1);
ExpandOp(Tmp2, Lo, Hi);
// Big endian systems want the hi reg first.
- if (!TLI.isLittleEndian())
+ if (TLI.isBigEndian())
std::swap(Lo, Hi);
if (Hi.Val)
uint64_t IntVal =CFP->getValueAPF().convertToAPInt().getZExtValue();
SDOperand Lo = DAG.getConstant(uint32_t(IntVal), MVT::i32);
SDOperand Hi = DAG.getConstant(uint32_t(IntVal >>32), MVT::i32);
- if (!TLI.isLittleEndian()) std::swap(Lo, Hi);
+ if (TLI.isBigEndian()) std::swap(Lo, Hi);
Lo = DAG.getStore(Tmp1, Lo, Tmp2, ST->getSrcValue(),
SVOffset, isVolatile, Alignment);
Tmp2 = DAG.getNode(ISD::ADD, Tmp2.getValueType(), Tmp2,
- getIntPtrConstant(4));
+ DAG.getIntPtrConstant(4));
Hi = DAG.getStore(Tmp1, Hi, Tmp2, ST->getSrcValue(), SVOffset+4,
isVolatile, MinAlign(Alignment, 4U));
}
}
- switch (getTypeAction(ST->getStoredVT())) {
+ switch (getTypeAction(ST->getMemoryVT())) {
case Legal: {
Tmp3 = LegalizeOp(ST->getValue());
Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses()) {
unsigned ABIAlignment = TLI.getTargetData()->
- getABITypeAlignment(MVT::getTypeForValueType(ST->getStoredVT()));
+ getABITypeAlignment(MVT::getTypeForValueType(ST->getMemoryVT()));
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.Val), DAG,
TLI);
// Truncate the value and store the result.
Tmp3 = PromoteOp(ST->getValue());
Result = DAG.getTruncStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
- SVOffset, ST->getStoredVT(),
+ SVOffset, ST->getMemoryVT(),
isVolatile, Alignment);
break;
if (MVT::isVector(ST->getValue().getValueType())) {
SDNode *InVal = ST->getValue().Val;
int InIx = ST->getValue().ResNo;
- unsigned NumElems = MVT::getVectorNumElements(InVal->getValueType(InIx));
- MVT::ValueType EVT = MVT::getVectorElementType(InVal->getValueType(InIx));
+ MVT::ValueType InVT = InVal->getValueType(InIx);
+ unsigned NumElems = MVT::getVectorNumElements(InVT);
+ MVT::ValueType EVT = MVT::getVectorElementType(InVT);
// Figure out if there is a simple type corresponding to this Vector
// type. If so, convert to the vector type.
MVT::ValueType TVT = MVT::getVectorType(EVT, NumElems);
if (TLI.isTypeLegal(TVT)) {
// Turn this into a normal store of the vector type.
- Tmp3 = LegalizeOp(Node->getOperand(1));
+ Tmp3 = LegalizeOp(ST->getValue());
Result = DAG.getStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
SVOffset, isVolatile, Alignment);
Result = LegalizeOp(Result);
break;
} else if (NumElems == 1) {
// Turn this into a normal store of the scalar type.
- Tmp3 = ScalarizeVectorOp(Node->getOperand(1));
+ Tmp3 = ScalarizeVectorOp(ST->getValue());
Result = DAG.getStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
SVOffset, isVolatile, Alignment);
// The scalarized value type may not be legal, e.g. it might require
Result = LegalizeOp(Result);
break;
} else {
- SplitVectorOp(Node->getOperand(1), Lo, Hi);
+ SplitVectorOp(ST->getValue(), Lo, Hi);
IncrementSize = MVT::getVectorNumElements(Lo.Val->getValueType(0)) *
MVT::getSizeInBits(EVT)/8;
}
} else {
- ExpandOp(Node->getOperand(1), Lo, Hi);
+ ExpandOp(ST->getValue(), Lo, Hi);
IncrementSize = Hi.Val ? MVT::getSizeInBits(Hi.getValueType())/8 : 0;
- if (!TLI.isLittleEndian())
+ if (TLI.isBigEndian())
std::swap(Lo, Hi);
}
}
Tmp2 = DAG.getNode(ISD::ADD, Tmp2.getValueType(), Tmp2,
- getIntPtrConstant(IncrementSize));
+ DAG.getIntPtrConstant(IncrementSize));
assert(isTypeLegal(Tmp2.getValueType()) &&
"Pointers must be legal!");
SVOffset += IncrementSize;
break;
}
} else {
- // Truncating store
- assert(isTypeLegal(ST->getValue().getValueType()) &&
- "Cannot handle illegal TRUNCSTORE yet!");
- Tmp3 = LegalizeOp(ST->getValue());
-
- // The only promote case we handle is TRUNCSTORE:i1 X into
- // -> TRUNCSTORE:i8 (and X, 1)
- if (ST->getStoredVT() == MVT::i1 &&
- TLI.getStoreXAction(MVT::i1) == TargetLowering::Promote) {
- // Promote the bool to a mask then store.
- Tmp3 = DAG.getNode(ISD::AND, Tmp3.getValueType(), Tmp3,
- DAG.getConstant(1, Tmp3.getValueType()));
- Result = DAG.getTruncStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
- SVOffset, MVT::i8,
- isVolatile, Alignment);
- } else if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() ||
- Tmp2 != ST->getBasePtr()) {
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
- ST->getOffset());
+ switch (getTypeAction(ST->getValue().getValueType())) {
+ case Legal:
+ Tmp3 = LegalizeOp(ST->getValue());
+ break;
+ case Promote:
+ // We can promote the value, the truncstore will still take care of it.
+ Tmp3 = PromoteOp(ST->getValue());
+ break;
+ case Expand:
+ // Just store the low part. This may become a non-trunc store, so make
+ // sure to use getTruncStore, not UpdateNodeOperands below.
+ ExpandOp(ST->getValue(), Tmp3, Tmp4);
+ return DAG.getTruncStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
+ SVOffset, MVT::i8, isVolatile, Alignment);
}
- MVT::ValueType StVT = cast<StoreSDNode>(Result.Val)->getStoredVT();
- switch (TLI.getStoreXAction(StVT)) {
- default: assert(0 && "This action is not supported yet!");
- case TargetLowering::Legal:
- // If this is an unaligned store and the target doesn't support it,
- // expand it.
- if (!TLI.allowsUnalignedMemoryAccesses()) {
- unsigned ABIAlignment = TLI.getTargetData()->
- getABITypeAlignment(MVT::getTypeForValueType(ST->getStoredVT()));
- if (ST->getAlignment() < ABIAlignment)
- Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.Val), DAG,
- TLI);
+ MVT::ValueType StVT = ST->getMemoryVT();
+ unsigned StWidth = MVT::getSizeInBits(StVT);
+
+ if (StWidth != MVT::getStoreSizeInBits(StVT)) {
+ // Promote to a byte-sized store with upper bits zero if not
+ // storing an integral number of bytes. For example, promote
+ // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
+ MVT::ValueType NVT = MVT::getIntegerType(MVT::getStoreSizeInBits(StVT));
+ Tmp3 = DAG.getZeroExtendInReg(Tmp3, StVT);
+ Result = DAG.getTruncStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
+ SVOffset, NVT, isVolatile, Alignment);
+ } else if (StWidth & (StWidth - 1)) {
+ // If not storing a power-of-2 number of bits, expand as two stores.
+ assert(MVT::isExtendedVT(StVT) && !MVT::isVector(StVT) &&
+ "Unsupported truncstore!");
+ unsigned RoundWidth = 1 << Log2_32(StWidth);
+ assert(RoundWidth < StWidth);
+ unsigned ExtraWidth = StWidth - RoundWidth;
+ assert(ExtraWidth < RoundWidth);
+ assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
+ "Store size not an integral number of bytes!");
+ MVT::ValueType RoundVT = MVT::getIntegerType(RoundWidth);
+ MVT::ValueType ExtraVT = MVT::getIntegerType(ExtraWidth);
+ SDOperand Lo, Hi;
+ unsigned IncrementSize;
+
+ if (TLI.isLittleEndian()) {
+ // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
+ // Store the bottom RoundWidth bits.
+ Lo = DAG.getTruncStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
+ SVOffset, RoundVT,
+ isVolatile, Alignment);
+
+ // Store the remaining ExtraWidth bits.
+ IncrementSize = RoundWidth / 8;
+ Tmp2 = DAG.getNode(ISD::ADD, Tmp2.getValueType(), Tmp2,
+ DAG.getIntPtrConstant(IncrementSize));
+ Hi = DAG.getNode(ISD::SRL, Tmp3.getValueType(), Tmp3,
+ DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));
+ Hi = DAG.getTruncStore(Tmp1, Hi, Tmp2, ST->getSrcValue(),
+ SVOffset + IncrementSize, ExtraVT, isVolatile,
+ MinAlign(Alignment, IncrementSize));
+ } else {
+ // Big endian - avoid unaligned stores.
+ // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
+ // Store the top RoundWidth bits.
+ Hi = DAG.getNode(ISD::SRL, Tmp3.getValueType(), Tmp3,
+ DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));
+ Hi = DAG.getTruncStore(Tmp1, Hi, Tmp2, ST->getSrcValue(), SVOffset,
+ RoundVT, isVolatile, Alignment);
+
+ // Store the remaining ExtraWidth bits.
+ IncrementSize = RoundWidth / 8;
+ Tmp2 = DAG.getNode(ISD::ADD, Tmp2.getValueType(), Tmp2,
+ DAG.getIntPtrConstant(IncrementSize));
+ Lo = DAG.getTruncStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
+ SVOffset + IncrementSize, ExtraVT, isVolatile,
+ MinAlign(Alignment, IncrementSize));
+ }
+
+ // The order of the stores doesn't matter.
+ Result = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi);
+ } else {
+ if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() ||
+ Tmp2 != ST->getBasePtr())
+ Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
+ ST->getOffset());
+
+ switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
+ default: assert(0 && "This action is not supported yet!");
+ case TargetLowering::Legal:
+ // If this is an unaligned store and the target doesn't support it,
+ // expand it.
+ if (!TLI.allowsUnalignedMemoryAccesses()) {
+ unsigned ABIAlignment = TLI.getTargetData()->
+ getABITypeAlignment(MVT::getTypeForValueType(ST->getMemoryVT()));
+ if (ST->getAlignment() < ABIAlignment)
+ Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.Val), DAG,
+ TLI);
+ }
+ break;
+ case TargetLowering::Custom:
+ Result = TLI.LowerOperation(Result, DAG);
+ break;
+        case TargetLowering::Expand:
+ // TRUNCSTORE:i16 i32 -> STORE i16
+ assert(isTypeLegal(StVT) && "Do not know how to expand this store!");
+ Tmp3 = DAG.getNode(ISD::TRUNCATE, StVT, Tmp3);
+ Result = DAG.getStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(), SVOffset,
+ isVolatile, Alignment);
+ break;
}
- break;
- case TargetLowering::Custom:
- Tmp1 = TLI.LowerOperation(Result, DAG);
- if (Tmp1.Val) Result = Tmp1;
- break;
}
}
break;
case Legal:
Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the condition.
break;
- case Promote:
+ case Promote: {
Tmp1 = PromoteOp(Node->getOperand(0)); // Promote the condition.
// Make sure the condition is either zero or one.
+ unsigned BitWidth = Tmp1.getValueSizeInBits();
if (!DAG.MaskedValueIsZero(Tmp1,
- MVT::getIntVTBitMask(Tmp1.getValueType())^1))
+ APInt::getHighBitsSet(BitWidth, BitWidth-1)))
Tmp1 = DAG.getZeroExtendInReg(Tmp1, MVT::i1);
break;
}
+ }
Tmp2 = LegalizeOp(Node->getOperand(1)); // TrueVal
Tmp3 = LegalizeOp(Node->getOperand(2)); // FalseVal
Tmp3 = DAG.getNode(ExtOp, NVT, Tmp3);
// Perform the larger operation, then round down.
Result = DAG.getNode(ISD::SELECT, NVT, Tmp1, Tmp2,Tmp3);
- Result = DAG.getNode(TruncOp, Node->getValueType(0), Result);
+ if (TruncOp != ISD::FP_ROUND)
+ Result = DAG.getNode(TruncOp, Node->getValueType(0), Result);
+ else
+ Result = DAG.getNode(TruncOp, Node->getValueType(0), Result,
+ DAG.getIntPtrConstant(0));
break;
}
}
}
std::pair<SDOperand,SDOperand> CallResult =
- TLI.LowerCallTo(Tmp1, Type::VoidTy, false, false, CallingConv::C, false,
+ TLI.LowerCallTo(Tmp1, Type::VoidTy,
+ false, false, false, CallingConv::C, false,
DAG.getExternalSymbol(FnName, IntPtr), Args, DAG);
Result = CallResult.second;
break;
}
break;
case TargetLowering::Expand: {
- SrcValueSDNode *SV = cast<SrcValueSDNode>(Node->getOperand(2));
- SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2,
- SV->getValue(), SV->getOffset());
+ const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
+ SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0);
// Increment the pointer, VAList, to the next vaarg
Tmp3 = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList,
DAG.getConstant(MVT::getSizeInBits(VT)/8,
TLI.getPointerTy()));
// Store the incremented VAList to the legalized pointer
- Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Tmp2, SV->getValue(),
- SV->getOffset());
+ Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Tmp2, V, 0);
// Load the actual argument out of the pointer VAList
Result = DAG.getLoad(VT, Tmp3, VAList, NULL, 0);
Tmp1 = LegalizeOp(Result.getValue(1));
case TargetLowering::Expand:
// This defaults to loading a pointer from the input and storing it to the
// output, returning the chain.
- SrcValueSDNode *SVD = cast<SrcValueSDNode>(Node->getOperand(3));
- SrcValueSDNode *SVS = cast<SrcValueSDNode>(Node->getOperand(4));
- Tmp4 = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp3, SVD->getValue(),
- SVD->getOffset());
- Result = DAG.getStore(Tmp4.getValue(1), Tmp4, Tmp2, SVS->getValue(),
- SVS->getOffset());
+ const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
+ const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
+ Tmp4 = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp3, VD, 0);
+ Result = DAG.getStore(Tmp4.getValue(1), Tmp4, Tmp2, VS, 0);
break;
}
break;
SDOperand True, False;
MVT::ValueType VT = Node->getOperand(0).getValueType();
MVT::ValueType NVT = Node->getValueType(0);
- unsigned ShiftAmt = MVT::getSizeInBits(NVT)-1;
const uint64_t zero[] = {0, 0};
APFloat apf = APFloat(APInt(MVT::getSizeInBits(VT), 2, zero));
- uint64_t x = 1ULL << ShiftAmt;
- (void)apf.convertFromZeroExtendedInteger
- (&x, MVT::getSizeInBits(NVT), false, APFloat::rmNearestTiesToEven);
+ APInt x = APInt::getSignBit(MVT::getSizeInBits(NVT));
+ (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
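+    // x is 2^(NVTBits-1) as a float: inputs below it fit in a signed
+    // conversion directly; larger inputs have x subtracted first and the sign
+    // bit of the integer result flipped back afterwards.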
Tmp2 = DAG.getConstantFP(apf, VT);
Tmp3 = DAG.getSetCC(TLI.getSetCCResultTy(),
Node->getOperand(0), Tmp2, ISD::SETLT);
DAG.getNode(ISD::FSUB, VT, Node->getOperand(0),
Tmp2));
False = DAG.getNode(ISD::XOR, NVT, False,
- DAG.getConstant(1ULL << ShiftAmt, NVT));
+ DAG.getConstant(x, NVT));
Result = DAG.getNode(ISD::SELECT, NVT, Tmp3, True, False);
break;
} else {
MVT::ValueType OVT = Node->getOperand(0).getValueType();
// Convert ppcf128 to i32
if (OVT == MVT::ppcf128 && VT == MVT::i32) {
- if (Node->getOpcode()==ISD::FP_TO_SINT)
- Result = DAG.getNode(ISD::FP_TO_SINT, VT,
- DAG.getNode(ISD::FP_ROUND, MVT::f64,
- (DAG.getNode(ISD::FP_ROUND_INREG,
- MVT::ppcf128, Node->getOperand(0),
- DAG.getValueType(MVT::f64)))));
- else {
+ if (Node->getOpcode() == ISD::FP_TO_SINT) {
+ Result = DAG.getNode(ISD::FP_ROUND_INREG, MVT::ppcf128,
+ Node->getOperand(0), DAG.getValueType(MVT::f64));
+ Result = DAG.getNode(ISD::FP_ROUND, MVT::f64, Result,
+ DAG.getIntPtrConstant(1));
+ Result = DAG.getNode(ISD::FP_TO_SINT, VT, Result);
+ } else {
const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
APFloat apf = APFloat(APInt(128, 2, TwoE31));
Tmp2 = DAG.getConstantFP(apf, OVT);
break;
case ISD::FP_EXTEND: {
- MVT::ValueType newVT = Op.getValueType();
- MVT::ValueType oldVT = Op.getOperand(0).getValueType();
- if (TLI.getConvertAction(oldVT, newVT) == TargetLowering::Expand) {
- // The only other way we can lower this is to turn it into a STORE,
- // LOAD pair, targetting a temporary location (a stack slot).
-
- // NOTE: there is a choice here between constantly creating new stack
- // slots and always reusing the same one. We currently always create
- // new ones, as reuse may inhibit scheduling.
- SDOperand StackSlot = DAG.CreateStackTemporary(oldVT);
- Result = DAG.getStore(DAG.getEntryNode(), Node->getOperand(0),
- StackSlot, NULL, 0);
- Result = DAG.getExtLoad(ISD::EXTLOAD, newVT,
- Result, StackSlot, NULL, 0, oldVT);
- break;
- }
+ MVT::ValueType DstVT = Op.getValueType();
+ MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
+ if (TLI.getConvertAction(SrcVT, DstVT) == TargetLowering::Expand) {
+ // The only other way we can lower this is to turn it into a STORE,
+      // LOAD pair, targeting a temporary location (a stack slot).
+ Result = EmitStackConvert(Node->getOperand(0), SrcVT, DstVT);
+ break;
}
switch (getTypeAction(Node->getOperand(0).getValueType())) {
case Expand: assert(0 && "Shouldn't need to expand other operators here!");
break;
}
break;
+ }
case ISD::FP_ROUND: {
- MVT::ValueType newVT = Op.getValueType();
- MVT::ValueType oldVT = Op.getOperand(0).getValueType();
- if (TLI.getConvertAction(oldVT, newVT) == TargetLowering::Expand) {
- if (oldVT == MVT::ppcf128) {
- SDOperand Lo, Hi;
- ExpandOp(Node->getOperand(0), Lo, Hi);
- Result = DAG.getNode(ISD::FP_ROUND, newVT, Hi);
- break;
- } else {
- // The only other way we can lower this is to turn it into a STORE,
- // LOAD pair, targetting a temporary location (a stack slot).
-
- // NOTE: there is a choice here between constantly creating new stack
- // slots and always reusing the same one. We currently always create
- // new ones, as reuse may inhibit scheduling.
- SDOperand StackSlot = DAG.CreateStackTemporary(newVT);
- Result = DAG.getTruncStore(DAG.getEntryNode(), Node->getOperand(0),
- StackSlot, NULL, 0, newVT);
- Result = DAG.getLoad(newVT, Result, StackSlot, NULL, 0);
- break;
- }
+ MVT::ValueType DstVT = Op.getValueType();
+ MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
+ if (TLI.getConvertAction(SrcVT, DstVT) == TargetLowering::Expand) {
+ if (SrcVT == MVT::ppcf128) {
+ SDOperand Lo;
+ ExpandOp(Node->getOperand(0), Lo, Result);
+ // Round it the rest of the way (e.g. to f32) if needed.
+ if (DstVT!=MVT::f64)
+ Result = DAG.getNode(ISD::FP_ROUND, DstVT, Result, Op.getOperand(1));
+ break;
}
+ // The only other way we can lower this is to turn it into a STORE,
+      // LOAD pair, targeting a temporary location (a stack slot).
+ Result = EmitStackConvert(Node->getOperand(0), DstVT, DstVT);
+ break;
}
switch (getTypeAction(Node->getOperand(0).getValueType())) {
case Expand: assert(0 && "Shouldn't need to expand other operators here!");
case Legal:
Tmp1 = LegalizeOp(Node->getOperand(0));
- Result = DAG.UpdateNodeOperands(Result, Tmp1);
+ Result = DAG.UpdateNodeOperands(Result, Tmp1, Node->getOperand(1));
break;
case Promote:
Tmp1 = PromoteOp(Node->getOperand(0));
- Result = DAG.getNode(ISD::FP_ROUND, Op.getValueType(), Tmp1);
+ Result = DAG.getNode(ISD::FP_ROUND, Op.getValueType(), Tmp1,
+ Node->getOperand(1));
break;
}
break;
+ }
case ISD::ANY_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::SIGN_EXTEND:
case Expand: assert(0 && "Shouldn't need to expand other operators here!");
case Legal:
Tmp1 = LegalizeOp(Node->getOperand(0));
+ if (TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)) ==
+ TargetLowering::Custom) {
+ Tmp2 = TLI.LowerOperation(Result, DAG);
+ if (Tmp2.Val) {
+ Tmp1 = Tmp2;
+ }
+ }
Result = DAG.UpdateNodeOperands(Result, Tmp1);
break;
case Promote:
// NOTE: there is a choice here between constantly creating new stack
// slots and always reusing the same one. We currently always create
// new ones, as reuse may inhibit scheduling.
- SDOperand StackSlot = DAG.CreateStackTemporary(ExtraVT);
- Result = DAG.getTruncStore(DAG.getEntryNode(), Node->getOperand(0),
- StackSlot, NULL, 0, ExtraVT);
- Result = DAG.getExtLoad(ISD::EXTLOAD, Node->getValueType(0),
- Result, StackSlot, NULL, 0, ExtraVT);
+ Result = EmitStackConvert(Node->getOperand(0), ExtraVT,
+ Node->getValueType(0));
} else {
assert(0 && "Unknown op");
}
AddLegalizedOperand(SDOperand(Node, 1), Tmp1);
return Op.ResNo ? Tmp1 : Result;
}
- case ISD::FLT_ROUNDS: {
+ case ISD::FLT_ROUNDS_: {
MVT::ValueType VT = Node->getValueType(0);
switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
default: assert(0 && "This action not supported for this op yet!");
Tmp1 = LegalizeOp(Node->getOperand(0));
TargetLowering::ArgListTy Args;
std::pair<SDOperand,SDOperand> CallResult =
- TLI.LowerCallTo(Tmp1, Type::VoidTy, false, false, CallingConv::C, false,
+ TLI.LowerCallTo(Tmp1, Type::VoidTy,
+ false, false, false, CallingConv::C, false,
DAG.getExternalSymbol("abort", TLI.getPointerTy()),
Args, DAG);
Result = CallResult.second;
case Expand: assert(0 && "BUG: Cannot expand FP regs!");
case Promote: assert(0 && "Unreachable with 2 FP types!");
case Legal:
- // Input is legal? Do an FP_ROUND_INREG.
- Result = DAG.getNode(ISD::FP_ROUND_INREG, NVT, Node->getOperand(0),
- DAG.getValueType(VT));
+ if (Node->getConstantOperandVal(1) == 0) {
+ // Input is legal? Do an FP_ROUND_INREG.
+ Result = DAG.getNode(ISD::FP_ROUND_INREG, NVT, Node->getOperand(0),
+ DAG.getValueType(VT));
+ } else {
+ // Just remove the truncate, it isn't affecting the value.
+ Result = DAG.getNode(ISD::FP_ROUND, NVT, Node->getOperand(0),
+ Node->getOperand(1));
+ }
break;
}
break;
-
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
switch (getTypeAction(Node->getOperand(0).getValueType())) {
break;
}
+ case ISD::ATOMIC_LCS: {
+ Tmp2 = PromoteOp(Node->getOperand(2));
+ Tmp3 = PromoteOp(Node->getOperand(3));
+ Result = DAG.getAtomic(Node->getOpcode(), Node->getOperand(0),
+ Node->getOperand(1), Tmp2, Tmp3,
+ cast<AtomicSDNode>(Node)->getVT());
+ // Remember that we legalized the chain.
+ AddLegalizedOperand(Op.getValue(1), LegalizeOp(Result.getValue(1)));
+ break;
+ }
+ case ISD::ATOMIC_LAS:
+ case ISD::ATOMIC_SWAP: {
+ Tmp2 = PromoteOp(Node->getOperand(2));
+ Result = DAG.getAtomic(Node->getOpcode(), Node->getOperand(0),
+ Node->getOperand(1), Tmp2,
+ cast<AtomicSDNode>(Node)->getVT());
+ // Remember that we legalized the chain.
+ AddLegalizedOperand(Op.getValue(1), LegalizeOp(Result.getValue(1)));
+ break;
+ }
+
case ISD::AND:
case ISD::OR:
case ISD::XOR:
case ISD::FCOPYSIGN:
// These operators require that their input be fp extended.
switch (getTypeAction(Node->getOperand(0).getValueType())) {
- case Legal:
- Tmp1 = LegalizeOp(Node->getOperand(0));
- break;
- case Promote:
- Tmp1 = PromoteOp(Node->getOperand(0));
- break;
- case Expand:
- assert(0 && "not implemented");
+ case Expand: assert(0 && "not implemented");
+ case Legal: Tmp1 = LegalizeOp(Node->getOperand(0)); break;
+ case Promote: Tmp1 = PromoteOp(Node->getOperand(0)); break;
}
switch (getTypeAction(Node->getOperand(1).getValueType())) {
- case Legal:
- Tmp2 = LegalizeOp(Node->getOperand(1));
- break;
- case Promote:
- Tmp2 = PromoteOp(Node->getOperand(1));
- break;
- case Expand:
- assert(0 && "not implemented");
+ case Expand: assert(0 && "not implemented");
+ case Legal: Tmp2 = LegalizeOp(Node->getOperand(1)); break;
+ case Promote: Tmp2 = PromoteOp(Node->getOperand(1)); break;
}
Result = DAG.getNode(Node->getOpcode(), NVT, Tmp1, Tmp2);
Tmp3 = DAG.getVAArg(VT, Tmp1, Tmp2, Node->getOperand(2));
Result = TLI.CustomPromoteOperation(Tmp3, DAG);
} else {
- SrcValueSDNode *SV = cast<SrcValueSDNode>(Node->getOperand(2));
- SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2,
- SV->getValue(), SV->getOffset());
+ const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
+ SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0);
// Increment the pointer, VAList, to the next vaarg
Tmp3 = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList,
DAG.getConstant(MVT::getSizeInBits(VT)/8,
TLI.getPointerTy()));
// Store the incremented VAList to the legalized pointer
- Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Tmp2, SV->getValue(),
- SV->getOffset());
+ Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Tmp2, V, 0);
// Load the actual argument out of the pointer VAList
Result = DAG.getExtLoad(ISD::EXTLOAD, NVT, Tmp3, VAList, NULL, 0, VT);
}
Result = DAG.getExtLoad(ExtType, NVT,
LD->getChain(), LD->getBasePtr(),
LD->getSrcValue(), LD->getSrcValueOffset(),
- LD->getLoadedVT(),
+ LD->getMemoryVT(),
LD->isVolatile(),
LD->getAlignment());
// Remember that we legalized the chain.
// This must be an access of the only element. Return it.
Op = ScalarizeVectorOp(Vec);
} else if (!TLI.isTypeLegal(TVT) && isa<ConstantSDNode>(Idx)) {
+ unsigned NumLoElts = 1 << Log2_32(NumElems-1);
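+    // Split at the largest power of two below NumElems (matching
+    // SplitVectorOp's split point) rather than at NumElems/2.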
ConstantSDNode *CIdx = cast<ConstantSDNode>(Idx);
SDOperand Lo, Hi;
SplitVectorOp(Vec, Lo, Hi);
- if (CIdx->getValue() < NumElems/2) {
+ if (CIdx->getValue() < NumLoElts) {
Vec = Lo;
} else {
Vec = Hi;
- Idx = DAG.getConstant(CIdx->getValue() - NumElems/2,
+ Idx = DAG.getConstant(CIdx->getValue() - NumLoElts,
Idx.getValueType());
}
// Create the stack frame object.
SDOperand FIPtr = DAG.CreateStackTemporary(SlotVT);
+ FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
+ int SPFI = StackPtrFI->getIndex();
+
unsigned SrcSize = MVT::getSizeInBits(SrcOp.getValueType());
unsigned SlotSize = MVT::getSizeInBits(SlotVT);
unsigned DestSize = MVT::getSizeInBits(DestVT);
// later than DestVT.
SDOperand Store;
if (SrcSize > SlotSize)
- Store = DAG.getTruncStore(DAG.getEntryNode(), SrcOp, FIPtr, NULL, 0,SlotVT);
+ Store = DAG.getTruncStore(DAG.getEntryNode(), SrcOp, FIPtr,
+ PseudoSourceValue::getFixedStack(),
+ SPFI, SlotVT);
else {
assert(SrcSize == SlotSize && "Invalid store");
- Store = DAG.getStore(DAG.getEntryNode(), SrcOp, FIPtr, NULL, 0);
+ Store = DAG.getStore(DAG.getEntryNode(), SrcOp, FIPtr,
+ PseudoSourceValue::getFixedStack(),
+                         SPFI);
}
// Result is a load from the stack slot.
// Create a vector sized/aligned stack slot, store the value to element #0,
// then load the whole vector back out.
SDOperand StackPtr = DAG.CreateStackTemporary(Node->getValueType(0));
+
+ FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr);
+ int SPFI = StackPtrFI->getIndex();
+
SDOperand Ch = DAG.getStore(DAG.getEntryNode(), Node->getOperand(0), StackPtr,
- NULL, 0);
- return DAG.getLoad(Node->getValueType(0), Ch, StackPtr, NULL, 0);
+ PseudoSourceValue::getFixedStack(), SPFI);
+ return DAG.getLoad(Node->getValueType(0), Ch, StackPtr,
+ PseudoSourceValue::getFixedStack(), SPFI);
}
}
Constant *CP = ConstantVector::get(CV);
SDOperand CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy());
- return DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0);
+ return DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
+ PseudoSourceValue::getConstantPool(), 0);
}
if (SplatValue.Val) { // Splat of one value?
MVT::ValueType NVT = TLI.getTypeToTransformTo(Op.getValueType());
SDOperand ShAmt = LegalizeOp(Amt);
MVT::ValueType ShTy = ShAmt.getValueType();
+ unsigned ShBits = MVT::getSizeInBits(ShTy);
unsigned VTBits = MVT::getSizeInBits(Op.getValueType());
unsigned NVTBits = MVT::getSizeInBits(NVT);
// Okay, the shift amount isn't constant. However, if we can tell that it is
// >= 32 or < 32, we can still simplify it, without knowing the actual value.
- uint64_t Mask = NVTBits, KnownZero, KnownOne;
+ APInt Mask = APInt::getHighBitsSet(ShBits, ShBits - Log2_32(NVTBits));
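+  // Mask selects the bits of the shift amount at or above log2(NVTBits): if
+  // any of them is one the amount is >= NVTBits, if all are zero it is less.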
+ APInt KnownZero, KnownOne;
DAG.ComputeMaskedBits(Amt, Mask, KnownZero, KnownOne);
- // If we know that the high bit of the shift amount is one, then we can do
- // this as a couple of simple shifts.
- if (KnownOne & Mask) {
+  // If we know that any of the high bits of the shift amount are one, then we
+  // can do this as a couple of simple shifts.
+ if (KnownOne.intersects(Mask)) {
// Mask out the high bit, which we know is set.
Amt = DAG.getNode(ISD::AND, Amt.getValueType(), Amt,
- DAG.getConstant(NVTBits-1, Amt.getValueType()));
+ DAG.getConstant(~Mask, Amt.getValueType()));
// Expand the incoming operand to be shifted, so that we have its parts
SDOperand InL, InH;
}
}
- // If we know that the high bit of the shift amount is zero, then we can do
- // this as a couple of simple shifts.
- if (KnownZero & Mask) {
+ // If we know that the high bits of the shift amount are all zero, then we can
+ // do this as a couple of simple shifts.
+ if ((KnownZero & Mask) == Mask) {
// Compute 32-amt.
SDOperand Amt2 = DAG.getNode(ISD::SUB, Amt.getValueType(),
DAG.getConstant(NVTBits, Amt.getValueType()),
const Type *ArgTy = MVT::getTypeForValueType(ArgVT);
Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
+ Entry.isZExt = !isSigned;
Args.push_back(Entry);
}
SDOperand Callee = DAG.getExternalSymbol(Name, TLI.getPointerTy());
// Splice the libcall in wherever FindInputOutputChains tells us to.
const Type *RetTy = MVT::getTypeForValueType(Node->getValueType(0));
std::pair<SDOperand,SDOperand> CallInfo =
- TLI.LowerCallTo(InChain, RetTy, isSigned, false, CallingConv::C, false,
- Callee, Args, DAG);
+ TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, CallingConv::C,
+ false, Callee, Args, DAG);
// Legalize the call sequence, starting with the chain. This will advance
// the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
SDOperand SignSet = DAG.getSetCC(TLI.getSetCCResultTy(), Hi,
DAG.getConstant(0, Hi.getValueType()),
ISD::SETLT);
- SDOperand Zero = getIntPtrConstant(0), Four = getIntPtrConstant(4);
+ SDOperand Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
SDOperand CstOffset = DAG.getNode(ISD::SELECT, Zero.getValueType(),
SignSet, Four, Zero);
uint64_t FF = 0x5f800000ULL;
CPIdx = DAG.getNode(ISD::ADD, TLI.getPointerTy(), CPIdx, CstOffset);
SDOperand FudgeInReg;
if (DestTy == MVT::f32)
- FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx, NULL, 0);
+ FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx,
+ PseudoSourceValue::getConstantPool(), 0);
else if (MVT::getSizeInBits(DestTy) > MVT::getSizeInBits(MVT::f32))
// FIXME: Avoid the extend by construction the right constantpool?
FudgeInReg = DAG.getExtLoad(ISD::EXTLOAD, DestTy, DAG.getEntryNode(),
- CPIdx, NULL, 0, MVT::f32);
+ CPIdx,
+ PseudoSourceValue::getConstantPool(), 0,
+ MVT::f32);
else
assert(0 && "Unexpected conversion");
// do nothing
Result = Sub;
} else if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(MVT::f64)) {
- Result = DAG.getNode(ISD::FP_ROUND, DestVT, Sub);
+ Result = DAG.getNode(ISD::FP_ROUND, DestVT, Sub,
+ DAG.getIntPtrConstant(0));
} else if (MVT::getSizeInBits(DestVT) > MVT::getSizeInBits(MVT::f64)) {
Result = DAG.getNode(ISD::FP_EXTEND, DestVT, Sub);
}
SDOperand SignSet = DAG.getSetCC(TLI.getSetCCResultTy(), Op0,
DAG.getConstant(0, Op0.getValueType()),
ISD::SETLT);
- SDOperand Zero = getIntPtrConstant(0), Four = getIntPtrConstant(4);
+ SDOperand Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
SDOperand CstOffset = DAG.getNode(ISD::SELECT, Zero.getValueType(),
SignSet, Four, Zero);
CPIdx = DAG.getNode(ISD::ADD, TLI.getPointerTy(), CPIdx, CstOffset);
SDOperand FudgeInReg;
if (DestVT == MVT::f32)
- FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx, NULL, 0);
+ FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx,
+ PseudoSourceValue::getConstantPool(), 0);
else {
- FudgeInReg = LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, DestVT,
- DAG.getEntryNode(), CPIdx,
- NULL, 0, MVT::f32));
+ FudgeInReg =
+ LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, DestVT,
+ DAG.getEntryNode(), CPIdx,
+ PseudoSourceValue::getConstantPool(), 0,
+ MVT::f32));
}
return DAG.getNode(ISD::FADD, DestVT, Tmp1, FudgeInReg);
#endif
assert(0 && "Do not know how to expand this operator!");
abort();
+ case ISD::EXTRACT_ELEMENT:
+ ExpandOp(Node->getOperand(0), Lo, Hi);
+ if (cast<ConstantSDNode>(Node->getOperand(1))->getValue())
+ return ExpandOp(Hi, Lo, Hi);
+ return ExpandOp(Lo, Lo, Hi);
case ISD::EXTRACT_VECTOR_ELT:
assert(VT==MVT::i64 && "Do not know how to expand this operator!");
// ExpandEXTRACT_VECTOR_ELT tolerates invalid result types.
Hi = DAG.getNode(ISD::UNDEF, NVT);
break;
case ISD::Constant: {
- uint64_t Cst = cast<ConstantSDNode>(Node)->getValue();
- Lo = DAG.getConstant(Cst, NVT);
- Hi = DAG.getConstant(Cst >> MVT::getSizeInBits(NVT), NVT);
+ unsigned NVTBits = MVT::getSizeInBits(NVT);
+ const APInt &Cst = cast<ConstantSDNode>(Node)->getAPIntValue();
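+    // The low part is the bottom NVTBits of the constant; the high part is
+    // the remaining bits shifted down into place.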
+ Lo = DAG.getConstant(APInt(Cst).trunc(NVTBits), NVT);
+ Hi = DAG.getConstant(Cst.lshr(NVTBits).trunc(NVTBits), NVT);
break;
}
case ISD::ConstantFP: {
// Remember that we legalized the chain.
Hi = LegalizeOp(Hi);
AddLegalizedOperand(Op.getValue(1), Hi.getValue(1));
- if (!TLI.isLittleEndian())
+ if (TLI.isBigEndian())
std::swap(Lo, Hi);
break;
}
// Increment the pointer to the other half.
unsigned IncrementSize = MVT::getSizeInBits(Lo.getValueType())/8;
Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
- getIntPtrConstant(IncrementSize));
+ DAG.getIntPtrConstant(IncrementSize));
SVOffset += IncrementSize;
Alignment = MinAlign(Alignment, IncrementSize);
Hi = DAG.getLoad(NVT, Ch, Ptr, LD->getSrcValue(), SVOffset,
// Remember that we legalized the chain.
AddLegalizedOperand(Op.getValue(1), LegalizeOp(TF));
- if (!TLI.isLittleEndian())
+ if (TLI.isBigEndian())
std::swap(Lo, Hi);
} else {
- MVT::ValueType EVT = LD->getLoadedVT();
+ MVT::ValueType EVT = LD->getMemoryVT();
if ((VT == MVT::f64 && EVT == MVT::f32) ||
(VT == MVT::ppcf128 && (EVT==MVT::f64 || EVT==MVT::f32))) {
SDOperand LL, LH, RL, RH;
ExpandOp(Node->getOperand(0), LL, LH);
ExpandOp(Node->getOperand(1), RL, RH);
- unsigned BitSize = MVT::getSizeInBits(RH.getValueType());
+ unsigned OuterBitSize = Op.getValueSizeInBits();
+ unsigned InnerBitSize = RH.getValueSizeInBits();
unsigned LHSSB = DAG.ComputeNumSignBits(Op.getOperand(0));
unsigned RHSSB = DAG.ComputeNumSignBits(Op.getOperand(1));
- // FIXME: generalize this to handle other bit sizes
- if (LHSSB == 32 && RHSSB == 32 &&
- DAG.MaskedValueIsZero(Op.getOperand(0), 0xFFFFFFFF00000000ULL) &&
- DAG.MaskedValueIsZero(Op.getOperand(1), 0xFFFFFFFF00000000ULL)) {
+ if (DAG.MaskedValueIsZero(Op.getOperand(0),
+ APInt::getHighBitsSet(OuterBitSize, LHSSB)) &&
+ DAG.MaskedValueIsZero(Op.getOperand(1),
+ APInt::getHighBitsSet(OuterBitSize, RHSSB))) {
// The inputs are both zero-extended.
if (HasUMUL_LOHI) {
// We can emit a umul_lohi.
break;
}
}
- if (LHSSB > BitSize && RHSSB > BitSize) {
+ if (LHSSB > InnerBitSize && RHSSB > InnerBitSize) {
// The input values are both sign-extended.
if (HasSMUL_LOHI) {
// We can emit a smul_lohi.
bool isSigned = Node->getOpcode() == ISD::SINT_TO_FP;
MVT::ValueType SrcVT = Node->getOperand(0).getValueType();
if (VT == MVT::ppcf128 && SrcVT != MVT::i64) {
- static uint64_t zero = 0;
+ static const uint64_t zero = 0;
if (isSigned) {
Hi = LegalizeOp(DAG.getNode(ISD::SINT_TO_FP, MVT::f64,
Node->getOperand(0)));
Lo = DAG.getConstantFP(APFloat(APInt(64, 1, &zero)), MVT::f64);
} else {
- static uint64_t TwoE32[] = { 0x41f0000000000000LL, 0 };
+ static const uint64_t TwoE32[] = { 0x41f0000000000000LL, 0 };
Hi = LegalizeOp(DAG.getNode(ISD::SINT_TO_FP, MVT::f64,
Node->getOperand(0)));
Lo = DAG.getConstantFP(APFloat(APInt(64, 1, &zero)), MVT::f64);
}
if (VT == MVT::ppcf128 && SrcVT == MVT::i64 && !isSigned) {
// si64->ppcf128 done by libcall, below
- static uint64_t TwoE64[] = { 0x43f0000000000000LL, 0 };
+ static const uint64_t TwoE64[] = { 0x43f0000000000000LL, 0 };
ExpandOp(DAG.getNode(ISD::SINT_TO_FP, MVT::ppcf128, Node->getOperand(0)),
Lo, Hi);
Hi = DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi);
Lo = DAG.getLoad(NewVT_Lo, Ch, Ptr, SV, SVOffset, isVolatile, Alignment);
unsigned IncrementSize = NewNumElts_Lo * MVT::getSizeInBits(NewEltVT)/8;
Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
- getIntPtrConstant(IncrementSize));
+ DAG.getIntPtrConstant(IncrementSize));
SVOffset += IncrementSize;
Alignment = MinAlign(Alignment, IncrementSize);
Hi = DAG.getLoad(NewVT_Hi, Ch, Ptr, SV, SVOffset, isVolatile, Alignment);
// Lower to a store/load so that it can be split.
// FIXME: this could be improved probably.
SDOperand Ptr = DAG.CreateStackTemporary(InOp.getValueType());
+ FrameIndexSDNode *FI = cast<FrameIndexSDNode>(Ptr.Val);
SDOperand St = DAG.getStore(DAG.getEntryNode(),
- InOp, Ptr, NULL, 0);
- InOp = DAG.getLoad(Op.getValueType(), St, Ptr, NULL, 0);
+ InOp, Ptr,
+ PseudoSourceValue::getFixedStack(),
+ FI->getIndex());
+ InOp = DAG.getLoad(Op.getValueType(), St, Ptr,
+ PseudoSourceValue::getFixedStack(),
+ FI->getIndex());
}
// Split the vector and convert each of the pieces now.
SplitVectorOp(InOp, Lo, Hi);