#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
+#include "ARMConstantPoolValue.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
static cl::opt<bool>
EnableARMFastISel("arm-fast-isel",
- cl::desc("Turn on experimental ARM fast-isel support"),
- cl::init(false), cl::Hidden);
+ cl::desc("Turn on experimental ARM fast-isel support"),
+ cl::init(false), cl::Hidden);
namespace {
const TargetMachine &TM;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
- const ARMFunctionInfo *AFI;
+ ARMFunctionInfo *AFI;
- // Convenience variable to avoid checking all the time.
+ // Convenience variables to avoid some queries.
bool isThumb;
+ LLVMContext *Context;
public:
explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
Subtarget = &TM.getSubtarget<ARMSubtarget>();
AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
isThumb = AFI->isThumbFunction();
+ Context = &funcInfo.Fn->getContext();
}
// Code from FastISel.cpp.
// Backend specific FastISel code.
virtual bool TargetSelectInstruction(const Instruction *I);
virtual unsigned TargetMaterializeConstant(const Constant *C);
+ virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
#include "ARMGenFastISel.inc"
virtual bool SelectSIToFP(const Instruction *I);
virtual bool SelectFPToSI(const Instruction *I);
virtual bool SelectSDiv(const Instruction *I);
+ virtual bool SelectSRem(const Instruction *I);
+ virtual bool SelectCall(const Instruction *I);
+ virtual bool SelectSelect(const Instruction *I);
// Utility routines.
private:
bool isLoadTypeLegal(const Type *Ty, EVT &VT);
bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Reg, int Offset);
bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Reg, int Offset);
- bool ARMLoadAlloca(const Instruction *I, EVT VT);
- bool ARMStoreAlloca(const Instruction *I, unsigned SrcReg, EVT VT);
bool ARMComputeRegOffset(const Value *Obj, unsigned &Reg, int &Offset);
unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
- unsigned ARMMaterializeInt(const Constant *C);
+ unsigned ARMMaterializeInt(const Constant *C, EVT VT);
+ unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
// Call handling routines.
private:
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
- bool ARMEmitLibcall(const Instruction *I, Function *F);
+ bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
+ SmallVectorImpl<unsigned> &ArgRegs,
+ SmallVectorImpl<EVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
+ SmallVectorImpl<unsigned> &RegArgs,
+ CallingConv::ID CC,
+ unsigned &NumBytes);
+ bool FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
+ const Instruction *I, CallingConv::ID CC,
+ unsigned &NumBytes);
+ bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);
// OptionalDef handling routines.
private:
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
if (VT.getSimpleVT().SimpleTy == MVT::f64) return 0;
-
+
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::VMOVSR), MoveReg)
unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
if (VT.getSimpleVT().SimpleTy == MVT::i64) return 0;
-
+
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::VMOVRS), MoveReg)
.addFPImm(CFP));
return DestReg;
}
-
+
// Require VFP2 for loading fp constants.
if (!Subtarget->hasVFP2()) return false;
-
+
// MachineConstantPool wants an explicit alignment.
unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
if (Align == 0) {
unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
-
+
// The extra reg is for addrmode5.
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
- .addReg(DestReg).addConstantPoolIndex(Idx)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
+ DestReg)
+ .addConstantPoolIndex(Idx)
.addReg(0));
return DestReg;
}
-// TODO: Verify 64-bit.
-unsigned ARMFastISel::ARMMaterializeInt(const Constant *C) {
+unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
+  // For now 32-bit only.
+  if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0;
+
// MachineConstantPool wants an explicit alignment.
unsigned Align = TD.getPrefTypeAlignment(C->getType());
if (Align == 0) {
Align = TD.getTypeAllocSize(C->getType());
}
unsigned Idx = MCP.getConstantPoolIndex(C, Align);
- unsigned DestReg = createResultReg(TLI.getRegClassFor(MVT::i32));
-
+ unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+
if (isThumb)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(ARM::t2LDRpci))
- .addReg(DestReg).addConstantPoolIndex(Idx));
+ TII.get(ARM::t2LDRpci), DestReg)
+ .addConstantPoolIndex(Idx));
else
// The extra reg and immediate are for addrmode2.
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(ARM::LDRcp))
- .addReg(DestReg).addConstantPoolIndex(Idx)
+ TII.get(ARM::LDRcp), DestReg)
+ .addConstantPoolIndex(Idx)
.addReg(0).addImm(0));
return DestReg;
}
+unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
+ // For now 32-bit only.
+ if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0;
+
+ Reloc::Model RelocM = TM.getRelocationModel();
+
+  // TODO: No indirect (e.g. external) globals for now.
+ if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;
+
+ // TODO: Need more magic for ARM PIC.
+ if (!isThumb && (RelocM == Reloc::PIC_)) return 0;
+
+ // MachineConstantPool wants an explicit alignment.
+ unsigned Align = TD.getPrefTypeAlignment(GV->getType());
+ if (Align == 0) {
+ // TODO: Figure out if this is correct.
+ Align = TD.getTypeAllocSize(GV->getType());
+ }
+
+ // Grab index.
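+  // PC-relative loads read the PC 8 bytes ahead in ARM mode and 4 in Thumb
+  // mode, so PIC references fold that adjustment into the pool entry.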
+ unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
+ unsigned Id = AFI->createConstPoolEntryUId();
+ ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id,
+ ARMCP::CPValue, PCAdj);
+ unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
+
+ // Load value.
+ MachineInstrBuilder MIB;
+ unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ if (isThumb) {
+ unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ .addConstantPoolIndex(Idx);
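+    // The PIC pseudo also needs the pc-label id used to form the pc-relative
+    // address.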
+ if (RelocM == Reloc::PIC_)
+ MIB.addImm(Id);
+ } else {
+ // The extra reg and immediate are for addrmode2.
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
+ DestReg)
+ .addConstantPoolIndex(Idx)
+ .addReg(0).addImm(0);
+ }
+ AddOptionalDefs(MIB);
+ return DestReg;
+}
+
unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
EVT VT = TLI.getValueType(C->getType(), true);
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
return ARMMaterializeFP(CFP, VT);
- return ARMMaterializeInt(C);
+ else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+ return ARMMaterializeGV(GV, VT);
+ else if (isa<ConstantInt>(C))
+ return ARMMaterializeInt(C, VT);
+
+ return 0;
+}
+
+unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
+ // Don't handle dynamic allocas.
+ if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
+
+ EVT VT;
+  if (!isTypeLegal(AI->getType(), VT)) return 0;
+
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+
+ // This will get lowered later into the correct offsets and registers
+ // via rewriteXFrameIndex.
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ TargetRegisterClass* RC = TLI.getRegClassFor(VT);
+ unsigned ResultReg = createResultReg(RC);
+ unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg)
+ .addFrameIndex(SI->second)
+ .addImm(0));
+ return ResultReg;
+ }
+
+ return 0;
}
bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
switch (Opcode) {
default:
break;
+ case Instruction::BitCast: {
+ // Look through bitcasts.
+ return ARMComputeRegOffset(U->getOperand(0), Reg, Offset);
+ }
+ case Instruction::IntToPtr: {
+ // Look past no-op inttoptrs.
+ if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
+ return ARMComputeRegOffset(U->getOperand(0), Reg, Offset);
+ break;
+ }
+ case Instruction::PtrToInt: {
+ // Look past no-op ptrtoints.
+ if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
+ return ARMComputeRegOffset(U->getOperand(0), Reg, Offset);
+ break;
+ }
case Instruction::Alloca: {
- assert(false && "Alloca should have been handled earlier!");
+ const AllocaInst *AI = cast<AllocaInst>(Obj);
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ Reg = ARM::SP;
+ Offset = SI->second;
+ return true;
+ }
+ // Don't handle dynamic allocas.
+ assert(!FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Obj)) &&
+ "Alloca should have been handled earlier!");
return false;
}
}
return true;
}
-bool ARMFastISel::ARMLoadAlloca(const Instruction *I, EVT VT) {
- Value *Op0 = I->getOperand(0);
-
- // Verify it's an alloca.
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op0)) {
- DenseMap<const AllocaInst*, int>::iterator SI =
- FuncInfo.StaticAllocaMap.find(AI);
-
- if (SI != FuncInfo.StaticAllocaMap.end()) {
- TargetRegisterClass* RC = TLI.getRegClassFor(VT);
- unsigned ResultReg = createResultReg(RC);
- TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
- ResultReg, SI->second, RC,
- TM.getRegisterInfo());
- UpdateValueMap(I, ResultReg);
- return true;
- }
- }
- return false;
-}
-
bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
unsigned Reg, int Offset) {
assert(VT.isSimple() && "Non-simple types are invalid here!");
unsigned Opc;
+ TargetRegisterClass *RC;
bool isFloat = false;
switch (VT.getSimpleVT().SimpleTy) {
default:
- assert(false && "Trying to emit for an unhandled type!");
+ // This is mostly going to be Neon/vector support.
return false;
case MVT::i16:
- Opc = isThumb ? ARM::tLDRH : ARM::LDRH;
+ Opc = isThumb ? ARM::t2LDRHi8 : ARM::LDRH;
+ RC = ARM::GPRRegisterClass;
VT = MVT::i32;
break;
case MVT::i8:
- Opc = isThumb ? ARM::tLDRB : ARM::LDRB;
+ Opc = isThumb ? ARM::t2LDRBi8 : ARM::LDRB;
+ RC = ARM::GPRRegisterClass;
VT = MVT::i32;
break;
case MVT::i32:
- Opc = isThumb ? ARM::tLDR : ARM::LDR;
+ Opc = isThumb ? ARM::t2LDRi8 : ARM::LDR;
+ RC = ARM::GPRRegisterClass;
break;
case MVT::f32:
Opc = ARM::VLDRS;
+ RC = TLI.getRegClassFor(VT);
isFloat = true;
break;
case MVT::f64:
Opc = ARM::VLDRD;
+ RC = TLI.getRegClassFor(VT);
isFloat = true;
break;
}
- ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ ResultReg = createResultReg(RC);
- // TODO: Fix the Addressing modes so that these can share some code.
- // Since this is a Thumb1 load this will work in Thumb1 or 2 mode.
- // The thumb addressing mode has operands swapped from the arm addressing
- // mode, the floating point one only has two operands.
- if (isFloat)
+  // With the address computation above, any non-stack load has a zero offset
+  // and thus always fits in the i8 immediate forms used here.
+ assert((Reg == ARM::SP || Offset == 0) &&
+ "Offset not zero and not a stack load!");
+
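+  // When the base register is SP, Offset actually holds a frame index from
+  // ARMComputeRegOffset; emit a frame load and let the index be rewritten.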
+ if (Reg == ARM::SP)
+ TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
+ ResultReg, Offset, RC,
+ TM.getRegisterInfo());
+ // The thumb and floating point instructions both take 2 operands, ARM takes
+ // another register.
+ else if (isFloat || isThumb)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg)
.addReg(Reg).addImm(Offset));
- else if (isThumb)
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc), ResultReg)
- .addReg(Reg).addImm(Offset).addReg(0));
else
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg)
if (!isLoadTypeLegal(I->getType(), VT))
return false;
- // If we're an alloca we know we have a frame index and can emit the load
- // directly in short order.
- if (ARMLoadAlloca(I, VT))
- return true;
-
// Our register and offset with innocuous defaults.
unsigned Reg = 0;
int Offset = 0;
return true;
}
-bool ARMFastISel::ARMStoreAlloca(const Instruction *I, unsigned SrcReg, EVT VT){
- Value *Op1 = I->getOperand(1);
-
- // Verify it's an alloca.
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
- DenseMap<const AllocaInst*, int>::iterator SI =
- FuncInfo.StaticAllocaMap.find(AI);
-
- if (SI != FuncInfo.StaticAllocaMap.end()) {
- TargetRegisterClass* RC = TLI.getRegClassFor(VT);
- assert(SrcReg != 0 && "Nothing to store!");
- TII.storeRegToStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
- SrcReg, true /*isKill*/, SI->second, RC,
- TM.getRegisterInfo());
- return true;
- }
- }
- return false;
-}
-
bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
unsigned DstReg, int Offset) {
unsigned StrOpc;
bool isFloat = false;
+  // VT is only mutated here for use by the stack-slot stores below; those are
+  // always promoted to register size.
switch (VT.getSimpleVT().SimpleTy) {
default: return false;
case MVT::i1:
- case MVT::i8: StrOpc = isThumb ? ARM::tSTRB : ARM::STRB; break;
- case MVT::i16: StrOpc = isThumb ? ARM::tSTRH : ARM::STRH; break;
- case MVT::i32: StrOpc = isThumb ? ARM::tSTR : ARM::STR; break;
+ case MVT::i8:
+ VT = MVT::i32;
+ StrOpc = isThumb ? ARM::t2STRBi8 : ARM::STRB;
+ break;
+ case MVT::i16:
+ VT = MVT::i32;
+ StrOpc = isThumb ? ARM::t2STRHi8 : ARM::STRH;
+ break;
+ case MVT::i32: StrOpc = isThumb ? ARM::t2STRi8 : ARM::STR; break;
case MVT::f32:
if (!Subtarget->hasVFP2()) return false;
StrOpc = ARM::VSTRS;
break;
}
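+  // As with loads, an SP base means Offset actually holds a frame index from
+  // ARMComputeRegOffset; emit a frame store and let the index be rewritten.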
+  if (DstReg == ARM::SP)
+    TII.storeRegToStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
+                            SrcReg, true /*isKill*/, Offset,
+                            TLI.getRegClassFor(VT), TM.getRegisterInfo());
-  // The thumb addressing mode has operands swapped from the arm addressing
-  // mode, the floating point one only has two operands.
-  if (isFloat)
+  // The thumb and floating point instructions both take 2 operands, ARM takes
+  // another register.
+  else if (isFloat || isThumb)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(StrOpc), SrcReg)
- .addReg(DstReg).addImm(Offset));
- else if (isThumb)
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(StrOpc), SrcReg)
- .addReg(DstReg).addImm(Offset).addReg(0));
-
+ TII.get(StrOpc))
+ .addReg(SrcReg).addReg(DstReg).addImm(Offset));
else
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(StrOpc), SrcReg)
- .addReg(DstReg).addReg(0).addImm(Offset));
+ TII.get(StrOpc))
+ .addReg(SrcReg).addReg(DstReg).addReg(0).addImm(Offset));
return true;
}
if (SrcReg == 0)
return false;
- // If we're an alloca we know we have a frame index and can emit the store
- // quickly.
- if (ARMStoreAlloca(I, SrcReg, VT))
- return true;
-
// Our register and offset with innocuous defaults.
unsigned Reg = 0;
int Offset = 0;
switch (Pred) {
// Needs two compares...
case CmpInst::FCMP_ONE:
- case CmpInst::FCMP_UEQ:
+ case CmpInst::FCMP_UEQ:
default:
assert(false && "Unhandled CmpInst::Predicate!");
return ARMCC::AL;
return ARMCC::PL;
case CmpInst::ICMP_SLT:
case CmpInst::FCMP_ULT:
- return ARMCC::LT;
+ return ARMCC::LT;
case CmpInst::ICMP_SLE:
case CmpInst::FCMP_ULE:
return ARMCC::LE;
MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
// Simple branch support.
- // TODO: Hopefully we've already handled the condition since we won't
- // have left an update in the value map. See the TODO below in SelectCMP.
- Value *Cond = BI->getCondition();
- unsigned CondReg = getRegForValue(Cond);
+ // TODO: Try to avoid the re-computation in some places.
+ unsigned CondReg = getRegForValue(BI->getCondition());
if (CondReg == 0) return false;
- ARMCC::CondCodes ARMPred = ARMCC::NE;
- CmpInst *CI = dyn_cast<CmpInst>(Cond);
- if (!CI) return false;
-
- // Get the compare predicate.
- ARMPred = getComparePred(CI->getPredicate());
-
- // We may not handle every CC for now.
- if (ARMPred == ARMCC::AL) return false;
+  // Re-set the flags just in case: compare the i1 condition against 1, so
+  // that EQ means the branch is taken.
+ unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
+ .addReg(CondReg).addImm(1));
unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
- .addMBB(TBB).addImm(ARMPred).addReg(CondReg);
+ .addMBB(TBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
FastEmitBranch(FBB, DL);
FuncInfo.MBB->addSuccessor(TBB);
- return true;
+ return true;
}
bool ARMFastISel::SelectCmp(const Instruction *I) {
return false;
unsigned CmpOpc;
- unsigned DestReg;
+ unsigned CondReg;
switch (VT.getSimpleVT().SimpleTy) {
default: return false;
// TODO: Verify compares.
case MVT::f32:
CmpOpc = ARM::VCMPES;
- DestReg = ARM::FPSCR;
+ CondReg = ARM::FPSCR;
break;
case MVT::f64:
CmpOpc = ARM::VCMPED;
- DestReg = ARM::FPSCR;
+ CondReg = ARM::FPSCR;
break;
case MVT::i32:
CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
- DestReg = ARM::CPSR;
+ CondReg = ARM::CPSR;
break;
}
+ // Get the compare predicate.
+ ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
+
+ // We may not handle every CC for now.
+ if (ARMPred == ARMCC::AL) return false;
+
unsigned Arg1 = getRegForValue(CI->getOperand(0));
if (Arg1 == 0) return false;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::FMSTAT)));
- // Update the value to the implicit def reg.
+ // Now set a register based on the comparison. Explicitly set the predicates
+ // here.
+ unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
+ TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
+ : ARM::GPRRegisterClass;
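+  // Thumb2 predicated moves can't write SP or PC, hence rGPR.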
+ unsigned DestReg = createResultReg(RC);
+ Constant *Zero
+ = ConstantInt::get(Type::getInt32Ty(*Context), 0);
+ unsigned ZeroReg = TargetMaterializeConstant(Zero);
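+  // DestReg is tied to ZeroReg, so it reads 0 and is conditionally rewritten
+  // to 1 when ARMPred holds.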
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
+ .addReg(ZeroReg).addImm(1)
+ .addImm(ARMPred).addReg(CondReg);
+
UpdateValueMap(I, DestReg);
return true;
}
if (!Subtarget->hasVFP2()) return false;
Value *V = I->getOperand(0);
- if (!I->getType()->isFloatTy() ||
- !V->getType()->isDoubleTy()) return false;
+ if (!(I->getType()->isFloatTy() &&
+ V->getType()->isDoubleTy())) return false;
unsigned Op = getRegForValue(V);
if (Op == 0) return false;
bool ARMFastISel::SelectSIToFP(const Instruction *I) {
// Make sure we have VFP.
if (!Subtarget->hasVFP2()) return false;
-
+
EVT DstVT;
const Type *Ty = I->getType();
if (!isTypeLegal(Ty, DstVT))
return false;
-
+
unsigned Op = getRegForValue(I->getOperand(0));
if (Op == 0) return false;
-
+
// The conversion routine works on fp-reg to fp-reg and the operand above
// was an integer, move it to the fp registers if possible.
- unsigned FP = ARMMoveToFPReg(DstVT, Op);
+ unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
if (FP == 0) return false;
-
+
unsigned Opc;
if (Ty->isFloatTy()) Opc = ARM::VSITOS;
else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
else return false;
-
+
unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
ResultReg)
bool ARMFastISel::SelectFPToSI(const Instruction *I) {
// Make sure we have VFP.
if (!Subtarget->hasVFP2()) return false;
-
+
EVT DstVT;
const Type *RetTy = I->getType();
if (!isTypeLegal(RetTy, DstVT))
return false;
-
+
unsigned Op = getRegForValue(I->getOperand(0));
if (Op == 0) return false;
-
+
unsigned Opc;
const Type *OpTy = I->getOperand(0)->getType();
if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
else return false;
- EVT OpVT = TLI.getValueType(OpTy, true);
-
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(OpVT));
+
+ // f64->s32 or f32->s32 both need an intermediate f32 reg.
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
ResultReg)
.addReg(Op));
-
+
// This result needs to be in an integer register, but the conversion only
// takes place in fp-regs.
unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
if (IntReg == 0) return false;
-
+
UpdateValueMap(I, IntReg);
return true;
}
+bool ARMFastISel::SelectSelect(const Instruction *I) {
+ EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
+ if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
+ return false;
+
+ // Things need to be register sized for register moves.
+ if (VT.getSimpleVT().SimpleTy != MVT::i32) return false;
+ const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
+
+ unsigned CondReg = getRegForValue(I->getOperand(0));
+ if (CondReg == 0) return false;
+ unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ if (Op1Reg == 0) return false;
+ unsigned Op2Reg = getRegForValue(I->getOperand(2));
+ if (Op2Reg == 0) return false;
+
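+  // Test bit 0 of the condition; Z (EQ) set means the condition was false.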
+ unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
+ .addReg(CondReg).addImm(1));
+ unsigned ResultReg = createResultReg(RC);
+ unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
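+  // ResultReg is tied to Op1Reg and keeps it unless EQ holds (condition
+  // false), in which case MOVCC writes Op2Reg instead.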
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
+ .addReg(Op1Reg).addReg(Op2Reg)
+ .addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool ARMFastISel::SelectSDiv(const Instruction *I) {
+ EVT VT;
+ const Type *Ty = I->getType();
+ if (!isTypeLegal(Ty, VT))
+ return false;
+
+  // If we have integer div support we should have selected this automagically.
+  // If this is a real miss, return false and it will be picked up later.
+ if (Subtarget->hasDivide()) return false;
+
+ // Otherwise emit a libcall.
+ RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+ if (VT == MVT::i8)
+ LC = RTLIB::SDIV_I8;
+ else if (VT == MVT::i16)
+ LC = RTLIB::SDIV_I16;
+ else if (VT == MVT::i32)
+ LC = RTLIB::SDIV_I32;
+ else if (VT == MVT::i64)
+ LC = RTLIB::SDIV_I64;
+ else if (VT == MVT::i128)
+ LC = RTLIB::SDIV_I128;
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
+
+ return ARMEmitLibcall(I, LC);
+}
+
+bool ARMFastISel::SelectSRem(const Instruction *I) {
+ EVT VT;
+ const Type *Ty = I->getType();
+ if (!isTypeLegal(Ty, VT))
+ return false;
+
+ RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+ if (VT == MVT::i8)
+ LC = RTLIB::SREM_I8;
+ else if (VT == MVT::i16)
+ LC = RTLIB::SREM_I16;
+ else if (VT == MVT::i32)
+ LC = RTLIB::SREM_I32;
+ else if (VT == MVT::i64)
+ LC = RTLIB::SREM_I64;
+ else if (VT == MVT::i128)
+ LC = RTLIB::SREM_I128;
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
+
+ return ARMEmitLibcall(I, LC);
+}
+
bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
EVT VT = TLI.getValueType(I->getType(), true);
}
}
+bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
+ SmallVectorImpl<unsigned> &ArgRegs,
+ SmallVectorImpl<EVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
+ SmallVectorImpl<unsigned> &RegArgs,
+ CallingConv::ID CC,
+ unsigned &NumBytes) {
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, false, TM, ArgLocs, *Context);
+ CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ NumBytes = CCInfo.getNextStackOffset();
+
+ // Issue CALLSEQ_START
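+  // so frame lowering knows the extent of the outgoing argument area.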
+ unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(AdjStackDown))
+ .addImm(NumBytes));
+
+ // Process the args.
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ unsigned Arg = ArgRegs[VA.getValNo()];
+ EVT ArgVT = ArgVTs[VA.getValNo()];
+
+ // Handle arg promotion, etc.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::Full: break;
+ default:
+ // TODO: Handle arg promotion.
+ return false;
+ }
+
+ // Now copy/store arg to correct locations.
+ // TODO: We need custom lowering for f64 args.
+ if (VA.isRegLoc() && !VA.needsCustom()) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ VA.getLocReg())
+ .addReg(Arg);
+ RegArgs.push_back(VA.getLocReg());
+ } else {
+ // Need to store
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool ARMFastISel::FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
+ const Instruction *I, CallingConv::ID CC,
+ unsigned &NumBytes) {
+ // Issue CALLSEQ_END
+ unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
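+  // The trailing 0 immediate is the callee-popped byte count.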
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(AdjStackUp))
+ .addImm(NumBytes).addImm(0));
+
+ // Now the return value.
+ if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CC, false, TM, RVLocs, *Context);
+ CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));
+
+ // Copy all of the result registers out of their specified physreg.
+ if (RVLocs.size() == 2 && RetVT.getSimpleVT().SimpleTy == MVT::f64) {
+ // For this move we copy into two registers and then move into the
+ // double fp reg we want.
+ // TODO: Are the copies necessary?
+ TargetRegisterClass *CopyRC = TLI.getRegClassFor(MVT::i32);
+ unsigned Copy1 = createResultReg(CopyRC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ Copy1).addReg(RVLocs[0].getLocReg());
+ UsedRegs.push_back(RVLocs[0].getLocReg());
+
+ unsigned Copy2 = createResultReg(CopyRC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ Copy2).addReg(RVLocs[1].getLocReg());
+ UsedRegs.push_back(RVLocs[1].getLocReg());
+
+ EVT DestVT = RVLocs[0].getValVT();
+ TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
+ unsigned ResultReg = createResultReg(DstRC);
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::VMOVDRR), ResultReg)
+ .addReg(Copy1).addReg(Copy2));
+
+ // Finally update the result.
+ UpdateValueMap(I, ResultReg);
+ } else {
+ assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
+ EVT CopyVT = RVLocs[0].getValVT();
+ TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
+
+ unsigned ResultReg = createResultReg(DstRC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(RVLocs[0].getLocReg());
+ UsedRegs.push_back(RVLocs[0].getLocReg());
+
+ // Finally update the result.
+ UpdateValueMap(I, ResultReg);
+ }
+ }
+
+ return true;
+}
+
-// A quick function that will emit a call for a named libcall in F with the
+// A quick function that will emit a call for the named libcall with the
// vector of passed arguments for the Instruction in I. We can assume that we
-// can emit a call for any libcall we can produce. This is an abridged version
-// of the full call infrastructure since we won't need to worry about things
+// can emit a call for any libcall we can produce. This is an abridged version
+// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
-bool ARMFastISel::ARMEmitLibcall(const Instruction *I, Function *F) {
- CallingConv::ID CC = F->getCallingConv();
-
+bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
+ CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
+
// Handle *simple* calls for now.
- const Type *RetTy = F->getReturnType();
+ const Type *RetTy = I->getType();
EVT RetVT;
if (RetTy->isVoidTy())
RetVT = MVT::isVoid;
else if (!isTypeLegal(RetTy, RetVT))
return false;
-
- assert(!F->isVarArg() && "Vararg libcall?!");
- // Abridged from the X86 FastISel call selection mechanism
+ // For now we're using BLX etc on the assumption that we have v5t ops.
+ if (!Subtarget->hasV5TOps()) return false;
+
+ // Set up the argument vectors.
SmallVector<Value*, 8> Args;
SmallVector<unsigned, 8> ArgRegs;
SmallVector<EVT, 8> ArgVTs;
ArgRegs.reserve(I->getNumOperands());
ArgVTs.reserve(I->getNumOperands());
ArgFlags.reserve(I->getNumOperands());
- for (unsigned i = 0; i < Args.size(); ++i) {
+ for (unsigned i = 0; i < I->getNumOperands(); ++i) {
Value *Op = I->getOperand(i);
unsigned Arg = getRegForValue(Op);
if (Arg == 0) return false;
-
+
const Type *ArgTy = Op->getType();
EVT ArgVT;
if (!isTypeLegal(ArgTy, ArgVT)) return false;
-
+
ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
Flags.setOrigAlign(OriginalAlignment);
-
+
Args.push_back(Op);
ArgRegs.push_back(Arg);
ArgVTs.push_back(ArgVT);
ArgFlags.push_back(Flags);
}
-
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, false, TM, ArgLocs, F->getContext());
- CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));
-
- // Process the args.
+
+ // Handle the arguments now that we've gotten them.
SmallVector<unsigned, 4> RegArgs;
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
- unsigned Arg = ArgRegs[VA.getValNo()];
- EVT ArgVT = ArgVTs[VA.getValNo()];
-
- // Should we ever have to promote?
- switch (VA.getLocInfo()) {
- case CCValAssign::Full: break;
- default:
- assert(false && "Handle arg promotion for libcalls?");
- return false;
- }
-
- // Now copy/store arg to correct locations.
- if (VA.isRegLoc()) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- VA.getLocReg()).addReg(Arg);
- RegArgs.push_back(VA.getLocReg());
- } else {
- // Need to store
- return false;
- }
- }
-
- // Issue the call, BLr9 for darwin, BL otherwise.
+ unsigned NumBytes;
+ if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
+ return false;
+
+  // Issue the call: tBLXi (tBLXi_r9 on darwin) for Thumb, BL (BLr9 on darwin)
+  // for ARM. The Thumb forms need v5t ops.
+ // TODO: Turn this into the table of arm call ops.
MachineInstrBuilder MIB;
unsigned CallOpc;
if(isThumb)
- CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLr9 : ARM::tBL;
+ CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
else
CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
- .addGlobalAddress(F, 0, 0);
-
+ .addExternalSymbol(TLI.getLibcallName(Call));
+
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
MIB.addReg(RegArgs[i]);
-
- // Now the return value.
+
+ // Finish off the call including any return values.
SmallVector<unsigned, 4> UsedRegs;
- if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CC, false, TM, RVLocs, F->getContext());
- CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));
+ if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
- // Copy all of the result registers out of their specified physreg.
- assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
- EVT CopyVT = RVLocs[0].getValVT();
- TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
-
- unsigned ResultReg = createResultReg(DstRC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(RVLocs[0].getLocReg());
- UsedRegs.push_back(RVLocs[0].getLocReg());
-
- // Finally update the result.
- UpdateValueMap(I, ResultReg);
- }
-
// Set all unused physreg defs as dead.
static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
return true;
}
-bool ARMFastISel::SelectSDiv(const Instruction *I) {
- EVT VT;
- const Type *Ty = I->getType();
- if (!isTypeLegal(Ty, VT))
+bool ARMFastISel::SelectCall(const Instruction *I) {
+ const CallInst *CI = cast<CallInst>(I);
+ const Value *Callee = CI->getCalledValue();
+
+ // Can't handle inline asm or worry about intrinsics yet.
+ if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;
+
+  // Only handle callees that are global values and resolve to direct calls.
+ const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
+ if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
return false;
-
- // If we have integer div support we should have gotten already, emit a
- // libcall.
- RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
- if (VT == MVT::i16)
- LC = RTLIB::SDIV_I16;
- else if (VT == MVT::i32)
- LC = RTLIB::SDIV_I32;
- else if (VT == MVT::i64)
- LC = RTLIB::SDIV_I64;
- else if (VT == MVT::i128)
- LC = RTLIB::SDIV_I128;
- assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
-
- // Binary operand with all the same type.
- std::vector<const Type*> ArgTys;
- ArgTys.push_back(Ty);
- ArgTys.push_back(Ty);
- const FunctionType *FTy = FunctionType::get(Ty, ArgTys, false);
- Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage,
- TLI.getLibcallName(LC));
- if (Subtarget->isAAPCS_ABI())
- F->setCallingConv(CallingConv::ARM_AAPCS);
+
+ // Check the calling convention.
+ ImmutableCallSite CS(CI);
+ CallingConv::ID CC = CS.getCallingConv();
+ // TODO: Avoid some calling conventions?
+ if (CC != CallingConv::C) {
+ // errs() << "Can't handle calling convention: " << CC << "\n";
+ return false;
+ }
+
+ // Let SDISel handle vararg functions.
+ const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ if (FTy->isVarArg())
+ return false;
+
+ // Handle *simple* calls for now.
+ const Type *RetTy = I->getType();
+ EVT RetVT;
+ if (RetTy->isVoidTy())
+ RetVT = MVT::isVoid;
+ else if (!isTypeLegal(RetTy, RetVT))
+ return false;
+
+ // For now we're using BLX etc on the assumption that we have v5t ops.
+  // TODO: Support pre-v5t call sequences?
+ if (!Subtarget->hasV5TOps()) return false;
+
+ // Set up the argument vectors.
+ SmallVector<Value*, 8> Args;
+ SmallVector<unsigned, 8> ArgRegs;
+ SmallVector<EVT, 8> ArgVTs;
+ SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
+ Args.reserve(CS.arg_size());
+ ArgRegs.reserve(CS.arg_size());
+ ArgVTs.reserve(CS.arg_size());
+ ArgFlags.reserve(CS.arg_size());
+ for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
+ i != e; ++i) {
+ unsigned Arg = getRegForValue(*i);
+
+ if (Arg == 0)
+ return false;
+ ISD::ArgFlagsTy Flags;
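+    // Attribute indices are 1-based; index 0 is the return value.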
+ unsigned AttrInd = i - CS.arg_begin() + 1;
+ if (CS.paramHasAttr(AttrInd, Attribute::SExt))
+ Flags.setSExt();
+ if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
+ Flags.setZExt();
+
+ // FIXME: Only handle *easy* calls for now.
+ if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
+ CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
+ CS.paramHasAttr(AttrInd, Attribute::Nest) ||
+ CS.paramHasAttr(AttrInd, Attribute::ByVal))
+ return false;
+
+ const Type *ArgTy = (*i)->getType();
+ EVT ArgVT;
+ if (!isTypeLegal(ArgTy, ArgVT))
+ return false;
+ unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ Flags.setOrigAlign(OriginalAlignment);
+
+ Args.push_back(*i);
+ ArgRegs.push_back(Arg);
+ ArgVTs.push_back(ArgVT);
+ ArgFlags.push_back(Flags);
+ }
+
+ // Handle the arguments now that we've gotten them.
+ SmallVector<unsigned, 4> RegArgs;
+ unsigned NumBytes;
+ if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
+ return false;
+
+  // Issue the call: tBLXi (tBLXi_r9 on darwin) for Thumb, BL (BLr9 on darwin)
+  // for ARM. The Thumb forms need v5t ops.
+ // TODO: Turn this into the table of arm call ops.
+ MachineInstrBuilder MIB;
+ unsigned CallOpc;
+ if(isThumb)
+ CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
else
- F->setCallingConv(I->getParent()->getParent()->getCallingConv());
-
- return ARMEmitLibcall(I, F);
+ CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
+ .addGlobalAddress(GV, 0, 0);
+
+ // Add implicit physical register uses to the call.
+ for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
+ MIB.addReg(RegArgs[i]);
+
+ // Finish off the call including any return values.
+ SmallVector<unsigned, 4> UsedRegs;
+ if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
+
+ // Set all unused physreg defs as dead.
+ static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
+
+ return true;
}
// TODO: SoftFP support.
return SelectBinaryOp(I, ISD::FMUL);
case Instruction::SDiv:
return SelectSDiv(I);
+ case Instruction::SRem:
+ return SelectSRem(I);
+ case Instruction::Call:
+ return SelectCall(I);
+ case Instruction::Select:
+ return SelectSelect(I);
default: break;
}
return false;
namespace llvm {
llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
- if (EnableARMFastISel) return new ARMFastISel(funcInfo);
+ // Completely untested on non-darwin.
+ const TargetMachine &TM = funcInfo.MF->getTarget();
+ const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
+ if (Subtarget->isTargetDarwin() && EnableARMFastISel)
+ return new ARMFastISel(funcInfo);
return 0;
}
}