#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
-#include "llvm/CodeGen/MachineDebugInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
-#include <iostream>
#include <algorithm>
using namespace llvm;
const Type *Ty = AI->getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
unsigned Align =
- std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
+ std::max((unsigned)TLI.getTargetData()->getTypeAlignmentPref(Ty),
AI->getAlignment());
- // If the alignment of the value is smaller than the size of the
- // value, and if the size of the value is particularly small
- // (<= 8 bytes), round up to the size of the value for potentially
- // better performance.
- //
- // FIXME: This could be made better with a preferred alignment hook in
- // TargetData. It serves primarily to 8-byte align doubles for X86.
- if (Align < TySize && TySize <= 8) Align = TySize;
TySize *= CUI->getZExtValue(); // Get total allocated size.
if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
StaticAllocaMap[AI] =
// If this value is represented with multiple target registers, make sure
// to create enough consecutive registers of the right (smaller) type.
- unsigned NT = VT-1; // Find the type to use.
- while (TLI.getNumElements((MVT::ValueType)NT) != 1)
- --NT;
-
- unsigned R = MakeReg((MVT::ValueType)NT);
+ VT = TLI.getTypeToExpandTo(VT);
+ unsigned R = MakeReg(VT);
for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
- MakeReg((MVT::ValueType)NT);
+ MakeReg(VT);
return R;
}
/// The comparison function for sorting Case values.
struct CaseCmp {
bool operator () (const Case& C1, const Case& C2) {
- if (const ConstantInt* I1 = dyn_cast<const ConstantInt>(C1.first))
- if (I1->getType()->isUnsigned())
- return I1->getZExtValue() <
- cast<const ConstantInt>(C2.first)->getZExtValue();
-
+ assert(isa<ConstantInt>(C1.first) && isa<ConstantInt>(C2.first));
return cast<const ConstantInt>(C1.first)->getSExtValue() <
- cast<const ConstantInt>(C2.first)->getSExtValue();
+ cast<const ConstantInt>(C2.first)->getSExtValue();
}
};
void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }
- void visitIntBinary(User &I, unsigned IntOp, unsigned VecOp);
- void visitFPBinary(User &I, unsigned FPOp, unsigned VecOp);
+ void visitScalarBinary(User &I, unsigned OpCode);
+ void visitVectorBinary(User &I, unsigned OpCode);
+ void visitEitherBinary(User &I, unsigned ScalarOp, unsigned VectorOp);
void visitShift(User &I, unsigned Opcode);
void visitAdd(User &I) {
- if (I.getType()->isFloatingPoint())
- visitFPBinary(I, ISD::FADD, ISD::VADD);
+ if (isa<PackedType>(I.getType()))
+ visitVectorBinary(I, ISD::VADD);
+ else if (I.getType()->isFloatingPoint())
+ visitScalarBinary(I, ISD::FADD);
else
- visitIntBinary(I, ISD::ADD, ISD::VADD);
+ visitScalarBinary(I, ISD::ADD);
}
void visitSub(User &I);
void visitMul(User &I) {
- if (I.getType()->isFloatingPoint())
- visitFPBinary(I, ISD::FMUL, ISD::VMUL);
+ if (isa<PackedType>(I.getType()))
+ visitVectorBinary(I, ISD::VMUL);
+ else if (I.getType()->isFloatingPoint())
+ visitScalarBinary(I, ISD::FMUL);
else
- visitIntBinary(I, ISD::MUL, ISD::VMUL);
+ visitScalarBinary(I, ISD::MUL);
}
- void visitURem(User &I) { visitIntBinary(I, ISD::UREM, 0); }
- void visitSRem(User &I) { visitIntBinary(I, ISD::SREM, 0); }
- void visitFRem(User &I) { visitFPBinary (I, ISD::FREM, 0); }
- void visitUDiv(User &I) { visitIntBinary(I, ISD::UDIV, ISD::VUDIV); }
- void visitSDiv(User &I) { visitIntBinary(I, ISD::SDIV, ISD::VSDIV); }
- void visitFDiv(User &I) { visitFPBinary (I, ISD::FDIV, ISD::VSDIV); }
- void visitAnd(User &I) { visitIntBinary(I, ISD::AND, ISD::VAND); }
- void visitOr (User &I) { visitIntBinary(I, ISD::OR, ISD::VOR); }
- void visitXor(User &I) { visitIntBinary(I, ISD::XOR, ISD::VXOR); }
- void visitShl(User &I) { visitShift(I, ISD::SHL); }
+ void visitURem(User &I) { visitScalarBinary(I, ISD::UREM); }
+ void visitSRem(User &I) { visitScalarBinary(I, ISD::SREM); }
+ void visitFRem(User &I) { visitScalarBinary(I, ISD::FREM); }
+ void visitUDiv(User &I) { visitEitherBinary(I, ISD::UDIV, ISD::VUDIV); }
+ void visitSDiv(User &I) { visitEitherBinary(I, ISD::SDIV, ISD::VSDIV); }
+ void visitFDiv(User &I) { visitEitherBinary(I, ISD::FDIV, ISD::VSDIV); }
+ void visitAnd (User &I) { visitEitherBinary(I, ISD::AND, ISD::VAND ); }
+ void visitOr (User &I) { visitEitherBinary(I, ISD::OR, ISD::VOR ); }
+ void visitXor (User &I) { visitEitherBinary(I, ISD::XOR, ISD::VXOR ); }
+ void visitShl (User &I) { visitShift(I, ISD::SHL); }
void visitLShr(User &I) { visitShift(I, ISD::SRL); }
void visitAShr(User &I) { visitShift(I, ISD::SRA); }
void visitICmp(User &I);
void visitFCmp(User &I);
- void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc,
- ISD::CondCode FPOpc);
- void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ,
- ISD::SETOEQ); }
- void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE,
- ISD::SETUNE); }
- void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE,
- ISD::SETOLE); }
- void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE,
- ISD::SETOGE); }
- void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT,
- ISD::SETOLT); }
- void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT,
- ISD::SETOGT); }
// Visit the conversion instructions
void visitTrunc(User &I);
void visitZExt(User &I);
void visitVAArg(VAArgInst &I);
void visitVAEnd(CallInst &I);
void visitVACopy(CallInst &I);
- void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);
void visitMemIntrinsic(CallInst &I, unsigned Op);
return N = DAG.getNode(ISD::VBUILD_VECTOR,MVT::Vector,&Ops[0],Ops.size());
} else {
// Canonicalize all constant ints to be unsigned.
- return N = DAG.getConstant(cast<ConstantIntegral>(C)->getZExtValue(),VT);
+ return N = DAG.getConstant(cast<ConstantInt>(C)->getZExtValue(),VT);
}
}
// If this type is not legal, make it so now.
if (VT != MVT::Vector) {
- MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
-
- N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
- if (DestVT < VT) {
+ if (TLI.getTypeAction(VT) == TargetLowering::Expand) {
// Source must be expanded. This input value is actually coming from the
// register pair VMI->second and VMI->second+1.
- N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
- DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
- } else if (DestVT > VT) { // Promotion case
- if (MVT::isFloatingPoint(VT))
- N = DAG.getNode(ISD::FP_ROUND, VT, N);
- else
- N = DAG.getNode(ISD::TRUNCATE, VT, N);
+ MVT::ValueType DestVT = TLI.getTypeToExpandTo(VT);
+ unsigned NumVals = TLI.getNumElements(VT);
+ N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
+ if (NumVals == 1)
+ N = DAG.getNode(ISD::BIT_CONVERT, VT, N);
+ else {
+ assert(NumVals == 2 && "1 to 4 (and more) expansion not implemented!");
+ N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
+ DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
+ }
+ } else {
+ MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
+ N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
+ if (TLI.getTypeAction(VT) == TargetLowering::Promote) // Promotion case
+ N = MVT::isFloatingPoint(VT)
+ ? DAG.getNode(ISD::FP_ROUND, VT, N)
+ : DAG.getNode(ISD::TRUNCATE, VT, N);
}
} else {
// Otherwise, if this is a vector, make it available as a generic vector
NewValues.push_back(getRoot());
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
SDOperand RetOp = getValue(I.getOperand(i));
- bool isSigned = I.getOperand(i)->getType()->isSigned();
// If this is an integer return value, we need to promote it ourselves to
// the full width of a register, since LegalizeOp will use ANY_EXTEND rather
TmpVT = TLI.getTypeToTransformTo(MVT::i32);
else
TmpVT = MVT::i32;
-
- if (isSigned)
- RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
- else
- RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
+ const FunctionType *FTy = I.getParent()->getParent()->getFunctionType();
+ ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+ if (FTy->paramHasAttr(0, FunctionType::SExtAttribute))
+ ExtendKind = ISD::SIGN_EXTEND;
+ if (FTy->paramHasAttr(0, FunctionType::ZExtAttribute))
+ ExtendKind = ISD::ZERO_EXTEND;
+ RetOp = DAG.getNode(ExtendKind, TmpVT, RetOp);
}
NewValues.push_back(RetOp);
- NewValues.push_back(DAG.getConstant(isSigned, MVT::i32));
+ NewValues.push_back(DAG.getConstant(false, MVT::i32));
}
DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other,
&NewValues[0], NewValues.size()));
MachineBasicBlock *CurBB,
unsigned Opc) {
// If this node is not part of the or/and tree, emit it as a branch.
- BinaryOperator *BOp = dyn_cast<BinaryOperator>(Cond);
+ Instruction *BOp = dyn_cast<Instruction>(Cond);
- if (!BOp || (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
+ if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
+ (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
BOp->getParent() != CurBB->getBasicBlock() ||
!InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
!InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
const BasicBlock *BB = CurBB->getBasicBlock();
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cond))
- if ((II->getIntrinsicID() == Intrinsic::isunordered_f32 ||
- II->getIntrinsicID() == Intrinsic::isunordered_f64) &&
- // The operands of the setcc have to be in this block. We don't know
- // how to export them from some other block. If this is the first
- // block of the sequence, no exporting is needed.
- (CurBB == CurMBB ||
- (isExportableFromCurrentBlock(II->getOperand(1), BB) &&
- isExportableFromCurrentBlock(II->getOperand(2), BB)))) {
- SelectionDAGISel::CaseBlock CB(ISD::SETUO, II->getOperand(1),
- II->getOperand(2), TBB, FBB, CurBB);
- SwitchCases.push_back(CB);
- return;
- }
-
-
- // If the leaf of the tree is a setcond inst, merge the condition into the
- // caseblock.
- if (BOp && isa<SetCondInst>(BOp) &&
- // The operands of the setcc have to be in this block. We don't know
+ // If the leaf of the tree is a comparison, merge the condition into
+ // the caseblock.
+ if ((isa<ICmpInst>(Cond) || isa<FCmpInst>(Cond)) &&
+ // The operands of the cmp have to be in this block. We don't know
// how to export them from some other block. If this is the first block
// of the sequence, no exporting is needed.
(CurBB == CurMBB ||
(isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
isExportableFromCurrentBlock(BOp->getOperand(1), BB)))) {
- ISD::CondCode SignCond, UnsCond, FPCond, Condition;
- switch (BOp->getOpcode()) {
- default: assert(0 && "Unknown setcc opcode!");
- case Instruction::SetEQ:
- SignCond = ISD::SETEQ;
- UnsCond = ISD::SETEQ;
- FPCond = ISD::SETOEQ;
- break;
- case Instruction::SetNE:
- SignCond = ISD::SETNE;
- UnsCond = ISD::SETNE;
- FPCond = ISD::SETUNE;
- break;
- case Instruction::SetLE:
- SignCond = ISD::SETLE;
- UnsCond = ISD::SETULE;
- FPCond = ISD::SETOLE;
- break;
- case Instruction::SetGE:
- SignCond = ISD::SETGE;
- UnsCond = ISD::SETUGE;
- FPCond = ISD::SETOGE;
- break;
- case Instruction::SetLT:
- SignCond = ISD::SETLT;
- UnsCond = ISD::SETULT;
- FPCond = ISD::SETOLT;
- break;
- case Instruction::SetGT:
- SignCond = ISD::SETGT;
- UnsCond = ISD::SETUGT;
- FPCond = ISD::SETOGT;
- break;
+ BOp = cast<Instruction>(Cond);
+ ISD::CondCode Condition;
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
+ switch (IC->getPredicate()) {
+ default: assert(0 && "Unknown icmp predicate opcode!");
+ case ICmpInst::ICMP_EQ: Condition = ISD::SETEQ; break;
+ case ICmpInst::ICMP_NE: Condition = ISD::SETNE; break;
+ case ICmpInst::ICMP_SLE: Condition = ISD::SETLE; break;
+ case ICmpInst::ICMP_ULE: Condition = ISD::SETULE; break;
+ case ICmpInst::ICMP_SGE: Condition = ISD::SETGE; break;
+ case ICmpInst::ICMP_UGE: Condition = ISD::SETUGE; break;
+ case ICmpInst::ICMP_SLT: Condition = ISD::SETLT; break;
+ case ICmpInst::ICMP_ULT: Condition = ISD::SETULT; break;
+ case ICmpInst::ICMP_SGT: Condition = ISD::SETGT; break;
+ case ICmpInst::ICMP_UGT: Condition = ISD::SETUGT; break;
+ }
+ } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
+ ISD::CondCode FPC, FOC;
+ switch (FC->getPredicate()) {
+ default: assert(0 && "Unknown fcmp predicate opcode!");
+ case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
+ case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
+ case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
+ case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
+ case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
+ case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
+ case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
+ case FCmpInst::FCMP_ORD: FOC = ISD::SETEQ; FPC = ISD::SETO; break;
+ case FCmpInst::FCMP_UNO: FOC = ISD::SETNE; FPC = ISD::SETUO; break;
+ case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
+ case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
+ case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
+ case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
+ case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
+ case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
+ case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
+ }
+ if (FiniteOnlyFPMath())
+ Condition = FOC;
+ else
+ Condition = FPC;
+ } else {
+ assert(0 && "Unknown compare instruction");
}
- const Type *OpType = BOp->getOperand(0)->getType();
- if (const PackedType *PTy = dyn_cast<PackedType>(OpType))
- OpType = PTy->getElementType();
-
- if (!FiniteOnlyFPMath() && OpType->isFloatingPoint())
- Condition = FPCond;
- else if (OpType->isUnsigned())
- Condition = UnsCond;
- else
- Condition = SignCond;
-
SelectionDAGISel::CaseBlock CB(Condition, BOp->getOperand(0),
BOp->getOperand(1), TBB, FBB, CurBB);
SwitchCases.push_back(CB);
}
// Create a CaseBlock record representing this branch.
- SelectionDAGISel::CaseBlock CB(ISD::SETEQ, Cond, ConstantBool::getTrue(),
+ SelectionDAGISel::CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
TBB, FBB, CurBB);
SwitchCases.push_back(CB);
return;
}
// Create a CaseBlock record representing this branch.
- SelectionDAGISel::CaseBlock CB(ISD::SETEQ, CondVal, ConstantBool::getTrue(),
+ SelectionDAGISel::CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
Succ0MBB, Succ1MBB, CurMBB);
// Use visitSwitchCase to actually insert the fast branch sequence for this
// cond branch.
// Build the setcc now, fold "(X == true)" to X and "(X == false)" to !X to
// handle common cases produced by branch lowering.
- if (CB.CmpRHS == ConstantBool::getTrue() && CB.CC == ISD::SETEQ)
+ if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
Cond = CondLHS;
- else if (CB.CmpRHS == ConstantBool::getFalse() && CB.CC == ISD::SETEQ) {
+ else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
SDOperand True = DAG.getConstant(1, CondLHS.getValueType());
Cond = DAG.getNode(ISD::XOR, CondLHS.getValueType(), CondLHS, True);
} else
if ((TLI.isOperationLegal(ISD::BR_JT, MVT::Other) ||
TLI.isOperationLegal(ISD::BRIND, MVT::Other)) &&
Cases.size() > 5) {
- uint64_t First =cast<ConstantIntegral>(Cases.front().first)->getZExtValue();
- uint64_t Last = cast<ConstantIntegral>(Cases.back().first)->getZExtValue();
+ uint64_t First =cast<ConstantInt>(Cases.front().first)->getZExtValue();
+ uint64_t Last = cast<ConstantInt>(Cases.back().first)->getZExtValue();
double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);
if (Density >= 0.3125) {
std::vector<MachineBasicBlock*> DestBBs;
uint64_t TEI = First;
for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI)
- if (cast<ConstantIntegral>(ii->first)->getZExtValue() == TEI) {
+ if (cast<ConstantInt>(ii->first)->getZExtValue() == TEI) {
DestBBs.push_back(ii->second);
++ii;
} else {
// rather than creating a leaf node for it.
if ((LHSR.second - LHSR.first) == 1 &&
LHSR.first->first == CR.GE &&
- cast<ConstantIntegral>(C)->getZExtValue() ==
- (cast<ConstantIntegral>(CR.GE)->getZExtValue() + 1ULL)) {
+ cast<ConstantInt>(C)->getZExtValue() ==
+ (cast<ConstantInt>(CR.GE)->getZExtValue() + 1ULL)) {
TrueBB = LHSR.first->second;
} else {
TrueBB = new MachineBasicBlock(LLVMBB);
// is CR.LT - 1, then we can branch directly to the target block for
// the current Case Value, rather than emitting a RHS leaf node for it.
if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
- cast<ConstantIntegral>(RHSR.first->first)->getZExtValue() ==
- (cast<ConstantIntegral>(CR.LT)->getZExtValue() - 1ULL)) {
+ cast<ConstantInt>(RHSR.first->first)->getZExtValue() ==
+ (cast<ConstantInt>(CR.LT)->getZExtValue() - 1ULL)) {
FalseBB = RHSR.first->second;
} else {
FalseBB = new MachineBasicBlock(LLVMBB);
// Create a CaseBlock record representing a conditional branch to
// the LHS node if the value being switched on SV is less than C.
// Otherwise, branch to RHS.
- ISD::CondCode CC = C->getType()->isSigned() ? ISD::SETLT : ISD::SETULT;
+ ISD::CondCode CC = ISD::SETLT;
SelectionDAGISel::CaseBlock CB(CC, SV, C, TrueBB, FalseBB, CR.CaseBB);
if (CR.CaseBB == CurMBB)
void SelectionDAGLowering::visitSub(User &I) {
// -0.0 - X --> fneg
- if (I.getType()->isFloatingPoint()) {
+ const Type *Ty = I.getType();
+ if (isa<PackedType>(Ty)) {
+ visitVectorBinary(I, ISD::VSUB);
+ } else if (Ty->isFloatingPoint()) {
if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
if (CFP->isExactlyValue(-0.0)) {
SDOperand Op2 = getValue(I.getOperand(1));
setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
return;
}
- visitFPBinary(I, ISD::FSUB, ISD::VSUB);
+ visitScalarBinary(I, ISD::FSUB);
} else
- visitIntBinary(I, ISD::SUB, ISD::VSUB);
+ visitScalarBinary(I, ISD::SUB);
}
-void
-SelectionDAGLowering::visitIntBinary(User &I, unsigned IntOp, unsigned VecOp) {
- const Type *Ty = I.getType();
+void SelectionDAGLowering::visitScalarBinary(User &I, unsigned OpCode) {
SDOperand Op1 = getValue(I.getOperand(0));
SDOperand Op2 = getValue(I.getOperand(1));
-
- if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
- SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
- SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
- setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
- } else {
- setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
- }
+
+ setValue(&I, DAG.getNode(OpCode, Op1.getValueType(), Op1, Op2));
}
-void
-SelectionDAGLowering::visitFPBinary(User &I, unsigned FPOp, unsigned VecOp) {
- const Type *Ty = I.getType();
- SDOperand Op1 = getValue(I.getOperand(0));
- SDOperand Op2 = getValue(I.getOperand(1));
+void
+SelectionDAGLowering::visitVectorBinary(User &I, unsigned OpCode) {
+ assert(isa<PackedType>(I.getType()));
+ const PackedType *Ty = cast<PackedType>(I.getType());
+ SDOperand Typ = DAG.getValueType(TLI.getValueType(Ty->getElementType()));
+
+ setValue(&I, DAG.getNode(OpCode, MVT::Vector,
+ getValue(I.getOperand(0)),
+ getValue(I.getOperand(1)),
+ DAG.getConstant(Ty->getNumElements(), MVT::i32),
+ Typ));
+}
- if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
- SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
- SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
- setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
- } else {
- setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
- }
+void SelectionDAGLowering::visitEitherBinary(User &I, unsigned ScalarOp,
+ unsigned VectorOp) {
+ if (isa<PackedType>(I.getType()))
+ visitVectorBinary(I, VectorOp);
+ else
+ visitScalarBinary(I, ScalarOp);
}
void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
}
void SelectionDAGLowering::visitICmp(User &I) {
- ICmpInst *IC = cast<ICmpInst>(&I);
- SDOperand Op1 = getValue(IC->getOperand(0));
- SDOperand Op2 = getValue(IC->getOperand(1));
+ ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
+ predicate = IC->getPredicate();
+ else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
+ predicate = ICmpInst::Predicate(IC->getPredicate());
+ SDOperand Op1 = getValue(I.getOperand(0));
+ SDOperand Op2 = getValue(I.getOperand(1));
ISD::CondCode Opcode;
- switch (IC->getPredicate()) {
+ switch (predicate) {
case ICmpInst::ICMP_EQ : Opcode = ISD::SETEQ; break;
case ICmpInst::ICMP_NE : Opcode = ISD::SETNE; break;
case ICmpInst::ICMP_UGT : Opcode = ISD::SETUGT; break;
}
void SelectionDAGLowering::visitFCmp(User &I) {
- FCmpInst *FC = cast<FCmpInst>(&I);
- SDOperand Op1 = getValue(FC->getOperand(0));
- SDOperand Op2 = getValue(FC->getOperand(1));
- ISD::CondCode Opcode;
- switch (FC->getPredicate()) {
- case FCmpInst::FCMP_FALSE : Opcode = ISD::SETFALSE;
- case FCmpInst::FCMP_OEQ : Opcode = ISD::SETOEQ;
- case FCmpInst::FCMP_OGT : Opcode = ISD::SETOGT;
- case FCmpInst::FCMP_OGE : Opcode = ISD::SETOGE;
- case FCmpInst::FCMP_OLT : Opcode = ISD::SETOLT;
- case FCmpInst::FCMP_OLE : Opcode = ISD::SETOLE;
- case FCmpInst::FCMP_ONE : Opcode = ISD::SETONE;
- case FCmpInst::FCMP_ORD : Opcode = ISD::SETO;
- case FCmpInst::FCMP_UNO : Opcode = ISD::SETUO;
- case FCmpInst::FCMP_UEQ : Opcode = ISD::SETUEQ;
- case FCmpInst::FCMP_UGT : Opcode = ISD::SETUGT;
- case FCmpInst::FCMP_UGE : Opcode = ISD::SETUGE;
- case FCmpInst::FCMP_ULT : Opcode = ISD::SETULT;
- case FCmpInst::FCMP_ULE : Opcode = ISD::SETULE;
- case FCmpInst::FCMP_UNE : Opcode = ISD::SETUNE;
- case FCmpInst::FCMP_TRUE : Opcode = ISD::SETTRUE;
+ FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
+ if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
+ predicate = FC->getPredicate();
+ else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
+ predicate = FCmpInst::Predicate(FC->getPredicate());
+ SDOperand Op1 = getValue(I.getOperand(0));
+ SDOperand Op2 = getValue(I.getOperand(1));
+ ISD::CondCode Condition, FOC, FPC;
+ switch (predicate) {
+ case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
+ case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
+ case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
+ case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
+ case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
+ case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
+ case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
+ case FCmpInst::FCMP_ORD: FOC = ISD::SETEQ; FPC = ISD::SETO; break;
+ case FCmpInst::FCMP_UNO: FOC = ISD::SETNE; FPC = ISD::SETUO; break;
+ case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
+ case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
+ case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
+ case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
+ case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
+ case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
+ case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
default:
assert(!"Invalid FCmp predicate value");
- Opcode = ISD::SETFALSE;
+ FOC = FPC = ISD::SETFALSE;
break;
}
- setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
-}
-
-void SelectionDAGLowering::visitSetCC(User &I,ISD::CondCode SignedOpcode,
- ISD::CondCode UnsignedOpcode,
- ISD::CondCode FPOpcode) {
- SDOperand Op1 = getValue(I.getOperand(0));
- SDOperand Op2 = getValue(I.getOperand(1));
- ISD::CondCode Opcode = SignedOpcode;
- if (!FiniteOnlyFPMath() && I.getOperand(0)->getType()->isFloatingPoint())
- Opcode = FPOpcode;
- else if (I.getOperand(0)->getType()->isUnsigned())
- Opcode = UnsignedOpcode;
- setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
+ if (FiniteOnlyFPMath())
+ Condition = FOC;
+ else
+ Condition = FPC;
+ setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Condition));
}
void SelectionDAGLowering::visitSelect(User &I) {
// If this is a constant subscript, handle it quickly.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
- uint64_t Offs;
- if (CI->getType()->isSigned())
- Offs = (int64_t)
+ uint64_t Offs =
TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
- else
- Offs =
- TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getZExtValue();
N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
continue;
}
// If the index is smaller or larger than intptr_t, truncate or extend
// it.
if (IdxN.getValueType() < N.getValueType()) {
- if (Idx->getType()->isSigned())
- IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
- else
- IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
+ IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
} else if (IdxN.getValueType() > N.getValueType())
IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);
const Type *Ty = I.getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
- unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
- I.getAlignment());
+ unsigned Align =
+ std::max((unsigned)TLI.getTargetData()->getTypeAlignmentPref(Ty),
+ I.getAlignment());
SDOperand AllocSize = getValue(I.getArraySize());
MVT::ValueType IntPtr = TLI.getPointerTy();
L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr,
DAG.getSrcValue(SV));
} else {
- L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SV, isVolatile);
+ L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SV, 0, isVolatile);
}
if (isVolatile)
Value *SrcV = I.getOperand(0);
SDOperand Src = getValue(SrcV);
SDOperand Ptr = getValue(I.getOperand(1));
- DAG.setRoot(DAG.getStore(getRoot(), Src, Ptr, I.getOperand(1),
+ DAG.setRoot(DAG.getStore(getRoot(), Src, Ptr, I.getOperand(1), 0,
I.isVolatile()));
}
case Intrinsic::vastart: visitVAStart(I); return 0;
case Intrinsic::vaend: visitVAEnd(I); return 0;
case Intrinsic::vacopy: visitVACopy(I); return 0;
- case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
- case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0;
+ case Intrinsic::returnaddress:
+ setValue(&I, DAG.getNode(ISD::RETURNADDR, TLI.getPointerTy(),
+ getValue(I.getOperand(1))));
+ return 0;
+ case Intrinsic::frameaddress:
+ setValue(&I, DAG.getNode(ISD::FRAMEADDR, TLI.getPointerTy(),
+ getValue(I.getOperand(1))));
+ return 0;
case Intrinsic::setjmp:
- return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
+ return "_setjmp"+!TLI.usesUnderscoreSetJmp();
break;
case Intrinsic::longjmp:
- return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
+ return "_longjmp"+!TLI.usesUnderscoreLongJmp();
break;
case Intrinsic::memcpy_i32:
case Intrinsic::memcpy_i64:
return 0;
case Intrinsic::dbg_stoppoint: {
- MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
- if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
+ if (MMI && SPI.getContext() && MMI->Verify(SPI.getContext())) {
SDOperand Ops[5];
Ops[0] = getRoot();
Ops[1] = getValue(SPI.getLineValue());
Ops[2] = getValue(SPI.getColumnValue());
- DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
+ DebugInfoDesc *DD = MMI->getDescFor(SPI.getContext());
assert(DD && "Not a debug information descriptor");
CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
return 0;
}
case Intrinsic::dbg_region_start: {
- MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
- if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
- unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());
- DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, getRoot(),
+ if (MMI && RSI.getContext() && MMI->Verify(RSI.getContext())) {
+ unsigned LabelID = MMI->RecordRegionStart(RSI.getContext());
+ DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, getRoot(),
DAG.getConstant(LabelID, MVT::i32)));
}
return 0;
}
case Intrinsic::dbg_region_end: {
- MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
- if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
- unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());
- DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other,
+ if (MMI && REI.getContext() && MMI->Verify(REI.getContext())) {
+ unsigned LabelID = MMI->RecordRegionEnd(REI.getContext());
+ DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other,
getRoot(), DAG.getConstant(LabelID, MVT::i32)));
}
return 0;
}
case Intrinsic::dbg_func_start: {
- MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
- if (DebugInfo && FSI.getSubprogram() &&
- DebugInfo->Verify(FSI.getSubprogram())) {
- unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());
- DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other,
+ if (MMI && FSI.getSubprogram() &&
+ MMI->Verify(FSI.getSubprogram())) {
+ unsigned LabelID = MMI->RecordRegionStart(FSI.getSubprogram());
+ DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other,
getRoot(), DAG.getConstant(LabelID, MVT::i32)));
}
return 0;
}
case Intrinsic::dbg_declare: {
- MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
- if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
+ if (MMI && DI.getVariable() && MMI->Verify(DI.getVariable())) {
SDOperand AddressOp = getValue(DI.getAddress());
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp))
- DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex());
+ MMI->RecordVariable(DI.getVariable(), FI->getIndex());
}
return 0;
}
- case Intrinsic::isunordered_f32:
- case Intrinsic::isunordered_f64:
- setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)),
- getValue(I.getOperand(2)), ISD::SETUO));
- return 0;
-
case Intrinsic::sqrt_f32:
case Intrinsic::sqrt_f64:
setValue(&I, DAG.getNode(ISD::FSQRT,
return;
}
+ const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
+ const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+
SDOperand Callee;
if (!RenameFn)
Callee = getValue(I.getOperand(0));
else
Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
- std::vector<std::pair<SDOperand, const Type*> > Args;
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
Args.reserve(I.getNumOperands());
for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
Value *Arg = I.getOperand(i);
SDOperand ArgNode = getValue(Arg);
- Args.push_back(std::make_pair(ArgNode, Arg->getType()));
+ Entry.Node = ArgNode; Entry.Ty = Arg->getType();
+ Entry.isSigned = FTy->paramHasAttr(i, FunctionType::SExtAttribute);
+ Entry.isInReg = FTy->paramHasAttr(i, FunctionType::InRegAttribute);
+ Entry.isSRet = FTy->paramHasAttr(i, FunctionType::StructRetAttribute);
+ Args.push_back(Entry);
}
- const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
- const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
-
std::pair<SDOperand,SDOperand> Result =
- TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
- I.isTailCall(), Callee, Args, DAG);
+ TLI.LowerCallTo(getRoot(), I.getType(),
+ FTy->paramHasAttr(0,FunctionType::SExtAttribute),
+ FTy->isVarArg(), I.getCallingConv(), I.isTailCall(),
+ Callee, Args, DAG);
if (I.getType() != Type::VoidTy)
setValue(&I, Result.first);
DAG.setRoot(Result.second);
return RegsForValue();
}
+/// getConstraintGenerality - Return an integer indicating how general CT is.
+static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
+ switch (CT) {
+ default: assert(0 && "Unknown constraint type!");
+ case TargetLowering::C_Other:
+ case TargetLowering::C_Unknown:
+ return 0;
+ case TargetLowering::C_Register:
+ return 1;
+ case TargetLowering::C_RegisterClass:
+ return 2;
+ case TargetLowering::C_Memory:
+ return 3;
+ }
+}
+
+static std::string GetMostGeneralConstraint(std::vector<std::string> &C,
+ const TargetLowering &TLI) {
+ assert(!C.empty() && "Must have at least one constraint");
+ if (C.size() == 1) return C[0];
+
+ std::string *Current = &C[0];
+ // If we have multiple constraints, try to pick the most general one ahead
+ // of time. This isn't a wonderful solution, but handles common cases.
+ TargetLowering::ConstraintType Flavor = TLI.getConstraintType(Current[0][0]);
+ for (unsigned j = 1, e = C.size(); j != e; ++j) {
+ TargetLowering::ConstraintType ThisFlavor = TLI.getConstraintType(C[j][0]);
+ if (getConstraintGenerality(ThisFlavor) >
+ getConstraintGenerality(Flavor)) {
+ // This constraint letter is more general than the previous one,
+ // use it.
+ Flavor = ThisFlavor;
+ Current = &C[j];
+ }
+ }
+ return *Current;
+}
+
/// visitInlineAsm - Handle a call to an InlineAsm object.
///
std::set<unsigned> OutputRegs, InputRegs;
unsigned OpNum = 1;
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
- assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
- std::string &ConstraintCode = Constraints[i].Codes[0];
+ std::string ConstraintCode =
+ GetMostGeneralConstraint(Constraints[i].Codes, TLI);
MVT::ValueType OpVT;
OpNum = 1;
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
- assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
- std::string &ConstraintCode = Constraints[i].Codes[0];
+ std::string ConstraintCode =
+ GetMostGeneralConstraint(Constraints[i].Codes, TLI);
switch (Constraints[i].Type) {
case InlineAsm::isOutput: {
true, UsesInputRegister,
OutputRegs, InputRegs);
if (Regs.Regs.empty()) {
- std::cerr << "Couldn't allocate output reg for contraint '"
- << ConstraintCode << "'!\n";
+ cerr << "Couldn't allocate output reg for constraint '"
+ << ConstraintCode << "'!\n";
exit(1);
}
InOperandVal = TLI.isOperandValidForConstraint(InOperandVal,
ConstraintCode[0], DAG);
if (!InOperandVal.Val) {
- std::cerr << "Invalid operand for inline asm constraint '"
- << ConstraintCode << "'!\n";
+ cerr << "Invalid operand for inline asm constraint '"
+ << ConstraintCode << "'!\n";
exit(1);
}
Src = DAG.getNode(ISD::MUL, Src.getValueType(),
Src, getIntPtrConstant(ElementSize));
- std::vector<std::pair<SDOperand, const Type*> > Args;
- Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType()));
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Node = Src;
+ Entry.Ty = TLI.getTargetData()->getIntPtrType();
+ Entry.isSigned = false;
+ Entry.isInReg = false;
+ Entry.isSRet = false;
+ Args.push_back(Entry);
std::pair<SDOperand,SDOperand> Result =
- TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
+ TLI.LowerCallTo(getRoot(), I.getType(), false, false, CallingConv::C, true,
DAG.getExternalSymbol("malloc", IntPtr),
Args, DAG);
setValue(&I, Result.first); // Pointers always fit in registers
}
void SelectionDAGLowering::visitFree(FreeInst &I) {
- std::vector<std::pair<SDOperand, const Type*> > Args;
- Args.push_back(std::make_pair(getValue(I.getOperand(0)),
- TLI.getTargetData()->getIntPtrType()));
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Node = getValue(I.getOperand(0));
+ Entry.Ty = TLI.getTargetData()->getIntPtrType();
+ Entry.isSigned = false;
+ Entry.isInReg = false;
+ Entry.isSRet = false;
+ Args.push_back(Entry);
MVT::ValueType IntPtr = TLI.getPointerTy();
std::pair<SDOperand,SDOperand> Result =
- TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
+ TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, CallingConv::C, true,
DAG.getExternalSymbol("free", IntPtr), Args, DAG);
DAG.setRoot(Result.second);
}
// basic blocks, and the scheduler passes ownership of it to this method.
MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
MachineBasicBlock *MBB) {
- std::cerr << "If a target marks an instruction with "
- "'usesCustomDAGSchedInserter', it must implement "
- "TargetLowering::InsertAtEndOfBasicBlock!\n";
+ cerr << "If a target marks an instruction with "
+ << "'usesCustomDAGSchedInserter', it must implement "
+ << "TargetLowering::InsertAtEndOfBasicBlock!\n";
abort();
return 0;
}
DAG.getSrcValue(I.getOperand(2))));
}
+/// ExpandScalarFormalArgs - Recursively expand the formal_argument node, either
+/// bit_converting it or joining a pair of them with a BUILD_PAIR when appropriate.
+static SDOperand ExpandScalarFormalArgs(MVT::ValueType VT, SDNode *Arg,
+ unsigned &i, SelectionDAG &DAG,
+ TargetLowering &TLI) {
+ if (TLI.getTypeAction(VT) != TargetLowering::Expand)
+ return SDOperand(Arg, i++);
+
+ MVT::ValueType EVT = TLI.getTypeToTransformTo(VT);
+ unsigned NumVals = MVT::getSizeInBits(VT) / MVT::getSizeInBits(EVT);
+ if (NumVals == 1) {
+ return DAG.getNode(ISD::BIT_CONVERT, VT,
+ ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI));
+ } else if (NumVals == 2) {
+ SDOperand Lo = ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI);
+ SDOperand Hi = ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI);
+ if (!TLI.isLittleEndian())
+ std::swap(Lo, Hi);
+ return DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi);
+ } else {
+ // Value scalarized into many values. Unimp for now.
+ assert(0 && "Cannot expand i64 -> i16 yet!");
+ }
+ return SDOperand();
+}
+
/// TargetLowering::LowerArguments - This is the default LowerArguments
/// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
/// integrated into SDISel.
std::vector<SDOperand>
TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
+ const FunctionType *FTy = F.getFunctionType();
// Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
std::vector<SDOperand> Ops;
Ops.push_back(DAG.getRoot());
// Add one result value for each formal argument.
std::vector<MVT::ValueType> RetVals;
- for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
+ unsigned j = 1;
+ for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
+ I != E; ++I, ++j) {
MVT::ValueType VT = getValueType(I->getType());
+ bool isInReg = FTy->paramHasAttr(j, FunctionType::InRegAttribute);
+ bool isSRet = FTy->paramHasAttr(j, FunctionType::StructRetAttribute);
+ unsigned Flags = (isInReg << 1) | (isSRet << 2);
switch (getTypeAction(VT)) {
default: assert(0 && "Unknown type action!");
case Legal:
RetVals.push_back(VT);
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
break;
case Promote:
RetVals.push_back(getTypeToTransformTo(VT));
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
break;
case Expand:
if (VT != MVT::Vector) {
// If this is a large integer, it needs to be broken up into small
// integers. Figure out what the destination type is and how many small
// integers it turns into.
- MVT::ValueType NVT = getTypeToTransformTo(VT);
- unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
- for (unsigned i = 0; i != NumVals; ++i)
+ MVT::ValueType NVT = getTypeToExpandTo(VT);
+ unsigned NumVals = getNumElements(VT);
+ for (unsigned i = 0; i != NumVals; ++i) {
RetVals.push_back(NVT);
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
+ }
} else {
// Otherwise, this is a vector type. We only support legal vectors
// right now.
MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
if (TVT != MVT::Other && isTypeLegal(TVT)) {
RetVals.push_back(TVT);
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
} else {
assert(0 && "Don't support illegal by-val vector arguments yet!");
}
// Set up the return result vector.
Ops.clear();
unsigned i = 0;
- for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
+ unsigned Idx = 1;
+ for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
+ ++I, ++Idx) {
MVT::ValueType VT = getValueType(I->getType());
switch (getTypeAction(VT)) {
case Promote: {
SDOperand Op(Result, i++);
if (MVT::isInteger(VT)) {
- unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
- : ISD::AssertZext;
- Op = DAG.getNode(AssertOp, Op.getValueType(), Op, DAG.getValueType(VT));
+ if (FTy->paramHasAttr(Idx, FunctionType::SExtAttribute))
+ Op = DAG.getNode(ISD::AssertSext, Op.getValueType(), Op,
+ DAG.getValueType(VT));
+ else if (FTy->paramHasAttr(Idx, FunctionType::ZExtAttribute))
+ Op = DAG.getNode(ISD::AssertZext, Op.getValueType(), Op,
+ DAG.getValueType(VT));
Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
} else {
assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
}
case Expand:
if (VT != MVT::Vector) {
- // If this is a large integer, it needs to be reassembled from small
- // integers. Figure out what the source elt type is and how many small
- // integers it is.
- MVT::ValueType NVT = getTypeToTransformTo(VT);
- unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
- if (NumVals == 2) {
- SDOperand Lo = SDOperand(Result, i++);
- SDOperand Hi = SDOperand(Result, i++);
-
- if (!isLittleEndian())
- std::swap(Lo, Hi);
-
- Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi));
- } else {
- // Value scalarized into many values. Unimp for now.
- assert(0 && "Cannot expand i64 -> i16 yet!");
- }
+ // If this is a large integer or a floating point node that needs to be
+ // expanded, it needs to be reassembled from small integers. Figure out
+ // what the source elt type is and how many small integers it is.
+ Ops.push_back(ExpandScalarFormalArgs(VT, Result, i, DAG, *this));
} else {
// Otherwise, this is a vector type. We only support legal vectors
// right now.
}
+/// ExpandScalarCallArgs - Recursively expand call argument node by
+/// bit_converting it or extracting a pair of elements from the larger node.
+static void ExpandScalarCallArgs(MVT::ValueType VT, SDOperand Arg,
+ unsigned Flags,
+ SmallVector<SDOperand, 32> &Ops,
+ SelectionDAG &DAG,
+ TargetLowering &TLI) {
+ if (TLI.getTypeAction(VT) != TargetLowering::Expand) {
+ Ops.push_back(Arg);
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
+ return;
+ }
+
+ MVT::ValueType EVT = TLI.getTypeToTransformTo(VT);
+ unsigned NumVals = MVT::getSizeInBits(VT) / MVT::getSizeInBits(EVT);
+ if (NumVals == 1) {
+ Arg = DAG.getNode(ISD::BIT_CONVERT, EVT, Arg);
+ ExpandScalarCallArgs(EVT, Arg, Flags, Ops, DAG, TLI);
+ } else if (NumVals == 2) {
+ SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, EVT, Arg,
+ DAG.getConstant(0, TLI.getPointerTy()));
+ SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, EVT, Arg,
+ DAG.getConstant(1, TLI.getPointerTy()));
+ if (!TLI.isLittleEndian())
+ std::swap(Lo, Hi);
+ ExpandScalarCallArgs(EVT, Lo, Flags, Ops, DAG, TLI);
+ ExpandScalarCallArgs(EVT, Hi, Flags, Ops, DAG, TLI);
+ } else {
+ // Value scalarized into many values. Unimp for now.
+ assert(0 && "Cannot expand i64 -> i16 yet!");
+ }
+}
+
/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just inserts an ISD::CALL node, which is later custom
/// lowered by the target to something concrete. FIXME: When all targets are
/// migrated to using ISD::CALL, this hook should be integrated into SDISel.
std::pair<SDOperand, SDOperand>
-TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
+TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
+ bool RetTyIsSigned, bool isVarArg,
unsigned CallingConv, bool isTailCall,
SDOperand Callee,
ArgListTy &Args, SelectionDAG &DAG) {
// Handle all of the outgoing arguments.
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
- MVT::ValueType VT = getValueType(Args[i].second);
- SDOperand Op = Args[i].first;
- bool isSigned = Args[i].second->isSigned();
+ MVT::ValueType VT = getValueType(Args[i].Ty);
+ SDOperand Op = Args[i].Node;
+ bool isSigned = Args[i].isSigned;
+ bool isInReg = Args[i].isInReg;
+ bool isSRet = Args[i].isSRet;
+ unsigned Flags = (isSRet << 2) | (isInReg << 1) | isSigned;
switch (getTypeAction(VT)) {
default: assert(0 && "Unknown type action!");
case Legal:
Ops.push_back(Op);
- Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
break;
case Promote:
if (MVT::isInteger(VT)) {
Op = DAG.getNode(ISD::FP_EXTEND, getTypeToTransformTo(VT), Op);
}
Ops.push_back(Op);
- Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
break;
case Expand:
if (VT != MVT::Vector) {
// If this is a large integer, it needs to be broken down into small
// integers. Figure out what the source elt type is and how many small
// integers it is.
- MVT::ValueType NVT = getTypeToTransformTo(VT);
- unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
- if (NumVals == 2) {
- SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op,
- DAG.getConstant(0, getPointerTy()));
- SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op,
- DAG.getConstant(1, getPointerTy()));
- if (!isLittleEndian())
- std::swap(Lo, Hi);
-
- Ops.push_back(Lo);
- Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
- Ops.push_back(Hi);
- Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
- } else {
- // Value scalarized into many values. Unimp for now.
- assert(0 && "Cannot expand i64 -> i16 yet!");
- }
+ ExpandScalarCallArgs(VT, Op, Flags, Ops, DAG, *this);
} else {
// Otherwise, this is a vector type. We only support legal vectors
// right now.
- const PackedType *PTy = cast<PackedType>(Args[i].second);
+ const PackedType *PTy = cast<PackedType>(Args[i].Ty);
unsigned NumElems = PTy->getNumElements();
const Type *EltTy = PTy->getElementType();
// Insert a VBIT_CONVERT of the MVT::Vector type to the packed type.
Op = DAG.getNode(ISD::VBIT_CONVERT, TVT, Op);
Ops.push_back(Op);
- Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
} else {
assert(0 && "Don't support illegal by-val vector call args yet!");
abort();
// If this is a large integer, it needs to be reassembled from small
// integers. Figure out what the source elt type is and how many small
// integers it is.
- MVT::ValueType NVT = getTypeToTransformTo(VT);
- unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
+ MVT::ValueType NVT = getTypeToExpandTo(VT);
+ unsigned NumVals = getNumElements(VT);
for (unsigned i = 0; i != NumVals; ++i)
RetTys.push_back(NVT);
} else {
abort();
}
} else if (MVT::isInteger(VT)) {
- unsigned AssertOp = RetTy->isSigned() ?
- ISD::AssertSext : ISD::AssertZext;
+ unsigned AssertOp = ISD::AssertSext;
+ if (!RetTyIsSigned)
+ AssertOp = ISD::AssertZext;
ResVal = DAG.getNode(AssertOp, ResVal.getValueType(), ResVal,
DAG.getValueType(VT));
ResVal = DAG.getNode(ISD::TRUNCATE, VT, ResVal);
} else {
assert(MVT::isFloatingPoint(VT));
- ResVal = DAG.getNode(ISD::FP_ROUND, VT, ResVal);
+ if (getTypeAction(VT) == Expand)
+ ResVal = DAG.getNode(ISD::BIT_CONVERT, VT, ResVal);
+ else
+ ResVal = DAG.getNode(ISD::FP_ROUND, VT, ResVal);
}
}
} else if (RetTys.size() == 3) {
return std::make_pair(ResVal, Res.getValue(Res.Val->getNumValues()-1));
}
-
-
-// It is always conservatively correct for llvm.returnaddress and
-// llvm.frameaddress to return 0.
-//
-// FIXME: Change this to insert a FRAMEADDR/RETURNADDR node, and have that be
-// expanded to 0 if the target wants.
-std::pair<SDOperand, SDOperand>
-TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
- unsigned Depth, SelectionDAG &DAG) {
- return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
-}
-
SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
assert(0 && "LowerOperation not implemented for this target!");
abort();
return SDOperand();
}
-void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
- unsigned Depth = (unsigned)cast<ConstantInt>(I.getOperand(1))->getZExtValue();
- std::pair<SDOperand,SDOperand> Result =
- TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
- setValue(&I, Result.first);
- DAG.setRoot(Result.second);
-}
-
/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
if (TLI.isLittleEndian())
Offset = Offset + MSB - 1;
for (unsigned i = 0; i != MSB; ++i) {
- Val = (Val << 8) | Str[Offset];
+ Val = (Val << 8) | (unsigned char)Str[Offset];
Offset += TLI.isLittleEndian() ? -1 : 1;
}
return DAG.getConstant(Val, VT);
}
if (G) {
GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
- if (GV) {
+ if (GV && GV->isConstant()) {
Str = GV->getStringValue(false);
if (!Str.empty()) {
CopyFromStr = true;
while (isa<PHINode>(InsertPt)) ++InsertPt;
InsertedCast =
- CastInst::createInferredCast(CI->getOperand(0), CI->getType(), "",
- InsertPt);
+ CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
+ InsertPt);
MadeChange = true;
}
// operand).
if (CastInst *CI = dyn_cast<CastInst>(Ptr))
if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
- Ptr = CastInst::createInferredCast(CI->getOperand(0), CI->getType(), "",
- InsertPt);
+ Ptr = CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(),
+ "", InsertPt);
// Add the offset, cast it to the right type.
Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
// Handle constant subscripts.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
- if (CI->getType()->isSigned())
- ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CI->getSExtValue();
- else
- ConstantOffset += TD->getTypeSize(Ty)*CI->getZExtValue();
+ ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CI->getSExtValue();
continue;
}
// Ptr = Ptr + Idx * ElementSize;
// Cast Idx to UIntPtrTy if needed.
- Idx = CastInst::createInferredCast(Idx, UIntPtrTy, "", GEPI);
+ Idx = CastInst::createIntegerCast(Idx, UIntPtrTy, true/*SExt*/, "", GEPI);
uint64_t ElementSize = TD->getTypeSize(Ty);
// Mask off bits that should not be set.
bool SelectionDAGISel::runOnFunction(Function &Fn) {
MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
RegMap = MF.getSSARegMap();
- DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
+ DOUT << "\n\n\n=== " << Fn.getName() << "\n";
// First, split all critical edges.
//
}
return DAG.getNode(ISD::TokenFactor, MVT::Other,
&OutChains[0], OutChains.size());
- } else if (SrcVT < DestVT) {
+ } else if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote) {
// The src value is promoted to the register.
if (MVT::isFloatingPoint(SrcVT))
Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
return DAG.getCopyToReg(getRoot(), Reg, Op);
} else {
+ DestVT = TLI.getTypeToExpandTo(SrcVT);
+ unsigned NumVals = TLI.getNumElements(SrcVT);
+ if (NumVals == 1)
+ return DAG.getCopyToReg(getRoot(), Reg,
+ DAG.getNode(ISD::BIT_CONVERT, DestVT, Op));
+ assert(NumVals == 2 && "1 to 4 (and more) expansion not implemented!");
// The src value is expanded into multiple registers.
SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
Op, DAG.getConstant(0, TLI.getPointerTy()));
// Run the DAG combiner in pre-legalize mode.
DAG.Combine(false, AA);
- DEBUG(std::cerr << "Lowered selection DAG:\n");
+ DOUT << "Lowered selection DAG:\n";
DEBUG(DAG.dump());
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
DAG.Legalize();
- DEBUG(std::cerr << "Legalized selection DAG:\n");
+ DOUT << "Legalized selection DAG:\n";
DEBUG(DAG.dump());
// Run the DAG combiner in post-legalize mode.
// code to the MachineBasicBlock.
InstructionSelectBasicBlock(DAG);
- DEBUG(std::cerr << "Selected machine code:\n");
+ DOUT << "Selected machine code:\n";
DEBUG(BB->dump());
}
FunctionLoweringInfo &FuncInfo) {
std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
{
- SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
CurDAG = &DAG;
// First step, lower LLVM code to some DAG. This DAG may use operations and
// whether the PHI is a successor of the range check MBB or the jump table MBB
if (JT.Reg) {
assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
- SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
CurDAG = &SDAG;
SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
MachineBasicBlock *RangeBB = BB;
// If we generated any switch lowering information, build and codegen any
// additional DAGs necessary.
for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
- SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
CurDAG = &SDAG;
SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
// Otherwise, this is a memory operand. Ask the target to select it.
std::vector<SDOperand> SelOps;
if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
- std::cerr << "Could not match memory address. Inline asm failure!\n";
+ cerr << "Could not match memory address. Inline asm failure!\n";
exit(1);
}
// Add this to the output node.
- Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32));
+ Ops.push_back(DAG.getTargetConstant(4/*MEM*/ | (SelOps.size() << 3),
+ MVT::i32));
Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
i += 2;
}