namespace {
-class AArch64FastISel : public FastISel {
+class AArch64FastISel final : public FastISel {
class Address {
public:
typedef enum {
const AArch64Subtarget *Subtarget;
LLVMContext *Context;
- bool FastLowerArguments() override;
- bool FastLowerCall(CallLoweringInfo &CLI) override;
- bool FastLowerIntrinsicCall(const IntrinsicInst *II) override;
+ bool fastLowerArguments() override;
+ bool fastLowerCall(CallLoweringInfo &CLI) override;
+ bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
private:
// Selection routines.
- bool SelectLoad(const Instruction *I);
- bool SelectStore(const Instruction *I);
- bool SelectBranch(const Instruction *I);
- bool SelectIndirectBr(const Instruction *I);
- bool SelectCmp(const Instruction *I);
- bool SelectSelect(const Instruction *I);
- bool SelectFPExt(const Instruction *I);
- bool SelectFPTrunc(const Instruction *I);
- bool SelectFPToInt(const Instruction *I, bool Signed);
- bool SelectIntToFP(const Instruction *I, bool Signed);
- bool SelectRem(const Instruction *I, unsigned ISDOpcode);
- bool SelectRet(const Instruction *I);
- bool SelectTrunc(const Instruction *I);
- bool SelectIntExt(const Instruction *I);
- bool SelectMul(const Instruction *I);
- bool SelectShift(const Instruction *I);
- bool SelectBitCast(const Instruction *I);
+ bool selectAddSub(const Instruction *I);
+ bool selectLogicalOp(const Instruction *I);
+ bool selectLoad(const Instruction *I);
+ bool selectStore(const Instruction *I);
+ bool selectBranch(const Instruction *I);
+ bool selectIndirectBr(const Instruction *I);
+ bool selectCmp(const Instruction *I);
+ bool selectSelect(const Instruction *I);
+ bool selectFPExt(const Instruction *I);
+ bool selectFPTrunc(const Instruction *I);
+ bool selectFPToInt(const Instruction *I, bool Signed);
+ bool selectIntToFP(const Instruction *I, bool Signed);
+ bool selectRem(const Instruction *I, unsigned ISDOpcode);
+ bool selectRet(const Instruction *I);
+ bool selectTrunc(const Instruction *I);
+ bool selectIntExt(const Instruction *I);
+ bool selectMul(const Instruction *I);
+ bool selectShift(const Instruction *I);
+ bool selectBitCast(const Instruction *I);
+ bool selectFRem(const Instruction *I);
// Utility helper routines.
bool isTypeLegal(Type *Ty, MVT &VT);
- bool isLoadStoreTypeLegal(Type *Ty, MVT &VT);
+ bool isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed = false);
bool isValueAvailable(const Value *V) const;
- bool ComputeAddress(const Value *Obj, Address &Addr, Type *Ty = nullptr);
- bool ComputeCallAddress(const Value *V, Address &Addr);
- bool SimplifyAddress(Address &Addr, MVT VT);
- void AddLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB,
+ bool computeAddress(const Value *Obj, Address &Addr, Type *Ty = nullptr);
+ bool computeCallAddress(const Value *V, Address &Addr);
+ bool simplifyAddress(Address &Addr, MVT VT);
+ void addLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB,
unsigned Flags, unsigned ScaleFactor,
MachineMemOperand *MMO);
- bool IsMemCpySmall(uint64_t Len, unsigned Alignment);
- bool TryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
+ bool isMemCpySmall(uint64_t Len, unsigned Alignment);
+ bool tryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
unsigned Alignment);
bool foldXALUIntrinsic(AArch64CC::CondCode &CC, const Instruction *I,
const Value *Cond);
// Emit helper routines.
- unsigned emitAddsSubs(bool UseAdds, MVT RetVT, const Value *LHS,
- const Value *RHS, bool IsZExt = false,
- bool WantResult = true);
- unsigned emitAddsSubs_rr(bool UseAdds, MVT RetVT, unsigned LHSReg,
- bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
- bool WantResult = true);
- unsigned emitAddsSubs_ri(bool UseAdds, MVT RetVT, unsigned LHSReg,
- bool LHSIsKill, uint64_t Imm,
- bool WantResult = true);
+ unsigned emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
+ const Value *RHS, bool SetFlags = false,
+ bool WantResult = true, bool IsZExt = false);
+ unsigned emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
+ bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
+ bool SetFlags = false, bool WantResult = true);
+ unsigned emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
+ bool LHSIsKill, uint64_t Imm, bool SetFlags = false,
+ bool WantResult = true);
unsigned emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
AArch64_AM::ShiftExtendType ShiftType,
- uint64_t ShiftImm, bool WantResult = true);
- unsigned emitAddsSubs_rs(bool UseAdds, MVT RetVT, unsigned LHSReg,
- bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
- AArch64_AM::ShiftExtendType ShiftType,
- uint64_t ShiftImm, bool WantResult = true);
+ uint64_t ShiftImm, bool SetFlags = false,
+ bool WantResult = true);
unsigned emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
AArch64_AM::ShiftExtendType ExtType,
- uint64_t ShiftImm, bool WantResult = true);
-
- unsigned emitAddsSubs_rx(bool UseAdds, MVT RetVT, unsigned LHSReg,
- bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
- AArch64_AM::ShiftExtendType ExtType,
- uint64_t ShiftImm, bool WantResult = true);
+ uint64_t ShiftImm, bool SetFlags = false,
+ bool WantResult = true);
// Emit functions.
bool emitCmp(const Value *LHS, const Value *RHS, bool IsZExt);
bool emitICmp(MVT RetVT, const Value *LHS, const Value *RHS, bool IsZExt);
bool emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
bool emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS);
- bool EmitLoad(MVT VT, unsigned &ResultReg, Address Addr,
+ bool emitLoad(MVT VT, unsigned &ResultReg, Address Addr,
MachineMemOperand *MMO = nullptr);
- bool EmitStore(MVT VT, unsigned SrcReg, Address Addr,
+ bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
MachineMemOperand *MMO = nullptr);
- unsigned EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
- unsigned Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
- unsigned emitAdds(MVT RetVT, const Value *LHS, const Value *RHS,
- bool IsZExt = false, bool WantResult = true);
- unsigned emitSubs(MVT RetVT, const Value *LHS, const Value *RHS,
- bool IsZExt = false, bool WantResult = true);
+ unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
+ unsigned emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
+ unsigned emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
+ bool SetFlags = false, bool WantResult = true,
+ bool IsZExt = false);
+ unsigned emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
+ bool SetFlags = false, bool WantResult = true,
+ bool IsZExt = false);
unsigned emitSubs_rr(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
unsigned RHSReg, bool RHSIsKill, bool WantResult = true);
unsigned emitSubs_rs(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
unsigned RHSReg, bool RHSIsKill,
AArch64_AM::ShiftExtendType ShiftType, uint64_t ShiftImm,
bool WantResult = true);
- unsigned emitAND_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
- unsigned Emit_MUL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
- unsigned Op1, bool Op1IsKill);
- unsigned Emit_SMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
- unsigned Op1, bool Op1IsKill);
- unsigned Emit_UMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
- unsigned Op1, bool Op1IsKill);
+ unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
+ const Value *RHS);
+ unsigned emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
+ bool LHSIsKill, uint64_t Imm);
+ unsigned emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
+ bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
+ uint64_t ShiftImm);
+ unsigned emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
+ unsigned emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill);
+ unsigned emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill);
+ unsigned emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill);
unsigned emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
unsigned Op1Reg, bool Op1IsKill);
unsigned emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
unsigned emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
uint64_t Imm, bool IsZExt = false);
- unsigned AArch64MaterializeInt(const ConstantInt *CI, MVT VT);
- unsigned AArch64MaterializeFP(const ConstantFP *CFP, MVT VT);
- unsigned AArch64MaterializeGV(const GlobalValue *GV);
+ unsigned materializeInt(const ConstantInt *CI, MVT VT);
+ unsigned materializeFP(const ConstantFP *CFP, MVT VT);
+ unsigned materializeGV(const GlobalValue *GV);
// Call handling routines.
private:
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
- bool ProcessCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
+ bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
unsigned &NumBytes);
- bool FinishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
+ bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
public:
// Backend specific FastISel code.
- unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
- unsigned TargetMaterializeConstant(const Constant *C) override;
- unsigned TargetMaterializeFloatZero(const ConstantFP* CF) override;
+ unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
+ unsigned fastMaterializeConstant(const Constant *C) override;
+ unsigned fastMaterializeFloatZero(const ConstantFP* CF) override;
- explicit AArch64FastISel(FunctionLoweringInfo &funcInfo,
- const TargetLibraryInfo *libInfo)
- : FastISel(funcInfo, libInfo) {
+ explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo)
+ : FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
Subtarget = &TM.getSubtarget<AArch64Subtarget>();
- Context = &funcInfo.Fn->getContext();
+ Context = &FuncInfo.Fn->getContext();
}
- bool TargetSelectInstruction(const Instruction *I) override;
+ bool fastSelectInstruction(const Instruction *I) override;
#include "AArch64GenFastISel.inc"
};
return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
}
-unsigned AArch64FastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
+unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
assert(TLI.getValueType(AI->getType(), true) == MVT::i64 &&
"Alloca should always return a pointer.");
return 0;
}
-unsigned AArch64FastISel::AArch64MaterializeInt(const ConstantInt *CI, MVT VT) {
+unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {
if (VT > MVT::i64)
return 0;
if (!CI->isZero())
- return FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+ return fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
// Create a copy from the zero register to materialize a "0" value.
const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
return ResultReg;
}
-unsigned AArch64FastISel::AArch64MaterializeFP(const ConstantFP *CFP, MVT VT) {
+unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
// Positive zero (+0.0) has to be materialized with a fmov from the zero
// register, because the immediate version of fmov cannot encode zero.
if (CFP->isNullValue())
- return TargetMaterializeFloatZero(CFP);
+ return fastMaterializeFloatZero(CFP);
if (VT != MVT::f32 && VT != MVT::f64)
return 0;
Is64Bit ? AArch64_AM::getFP64Imm(Val) : AArch64_AM::getFP32Imm(Val);
assert((Imm != -1) && "Cannot encode floating-point constant.");
unsigned Opc = Is64Bit ? AArch64::FMOVDi : AArch64::FMOVSi;
- return FastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
+ return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}
// Materialize via constant pool. MachineConstantPool wants an explicit
return ResultReg;
}
-unsigned AArch64FastISel::AArch64MaterializeGV(const GlobalValue *GV) {
+unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
// We can't handle thread-local variables quickly yet.
if (GV->isThreadLocal())
return 0;
.addReg(ADRPReg)
.addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
AArch64II::MO_NC);
+ } else if (OpFlags & AArch64II::MO_CONSTPOOL) {
+ // We can't handle addresses loaded from a constant pool quickly yet.
+ return 0;
} else {
// ADRP + ADDX
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
return ResultReg;
}
-unsigned AArch64FastISel::TargetMaterializeConstant(const Constant *C) {
+unsigned AArch64FastISel::fastMaterializeConstant(const Constant *C) {
EVT CEVT = TLI.getValueType(C->getType(), true);
// Only handle simple types.
MVT VT = CEVT.getSimpleVT();
if (const auto *CI = dyn_cast<ConstantInt>(C))
- return AArch64MaterializeInt(CI, VT);
+ return materializeInt(CI, VT);
else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
- return AArch64MaterializeFP(CFP, VT);
+ return materializeFP(CFP, VT);
else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
- return AArch64MaterializeGV(GV);
+ return materializeGV(GV);
return 0;
}
-unsigned AArch64FastISel::TargetMaterializeFloatZero(const ConstantFP* CFP) {
+unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) {
assert(CFP->isNullValue() &&
"Floating-point constant is not a positive zero.");
MVT VT;
bool Is64Bit = (VT == MVT::f64);
unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
- return FastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
+ return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
}
// Computes the address to get to an object.
-bool AArch64FastISel::ComputeAddress(const Value *Obj, Address &Addr, Type *Ty)
+bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
{
const User *U = nullptr;
unsigned Opcode = Instruction::UserOp1;
break;
case Instruction::BitCast: {
// Look through bitcasts.
- return ComputeAddress(U->getOperand(0), Addr, Ty);
+ return computeAddress(U->getOperand(0), Addr, Ty);
}
case Instruction::IntToPtr: {
// Look past no-op inttoptrs.
if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
- return ComputeAddress(U->getOperand(0), Addr, Ty);
+ return computeAddress(U->getOperand(0), Addr, Ty);
break;
}
case Instruction::PtrToInt: {
// Look past no-op ptrtoints.
if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
- return ComputeAddress(U->getOperand(0), Addr, Ty);
+ return computeAddress(U->getOperand(0), Addr, Ty);
break;
}
case Instruction::GetElementPtr: {
// Try to grab the base operand now.
Addr.setOffset(TmpOffset);
- if (ComputeAddress(U->getOperand(0), Addr, Ty))
+ if (computeAddress(U->getOperand(0), Addr, Ty))
return true;
// We failed, restore everything and try the other options.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
Addr.setOffset(Addr.getOffset() + (uint64_t)CI->getSExtValue());
- return ComputeAddress(LHS, Addr, Ty);
+ return computeAddress(LHS, Addr, Ty);
}
Address Backup = Addr;
- if (ComputeAddress(LHS, Addr, Ty) && ComputeAddress(RHS, Addr, Ty))
+ if (computeAddress(LHS, Addr, Ty) && computeAddress(RHS, Addr, Ty))
return true;
Addr = Backup;
return true;
}
-bool AArch64FastISel::ComputeCallAddress(const Value *V, Address &Addr) {
+bool AArch64FastISel::computeCallAddress(const Value *V, Address &Addr) {
const User *U = nullptr;
unsigned Opcode = Instruction::UserOp1;
bool InMBB = true;
case Instruction::BitCast:
// Look past a bitcast if its operand is in the same BB.
if (InMBB)
- return ComputeCallAddress(U->getOperand(0), Addr);
+ return computeCallAddress(U->getOperand(0), Addr);
break;
case Instruction::IntToPtr:
// Look past a no-op inttoptr if its operand is in the same BB.
if (InMBB &&
TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
- return ComputeCallAddress(U->getOperand(0), Addr);
+ return computeCallAddress(U->getOperand(0), Addr);
break;
case Instruction::PtrToInt:
// Look past a no-op ptrtoint if its operand is in the same BB.
if (InMBB &&
TLI.getValueType(U->getType()) == TLI.getPointerTy())
- return ComputeCallAddress(U->getOperand(0), Addr);
+ return computeCallAddress(U->getOperand(0), Addr);
break;
}
return TLI.isTypeLegal(VT);
}
-bool AArch64FastISel::isLoadStoreTypeLegal(Type *Ty, MVT &VT) {
+/// \brief Determine if the value type is supported by FastISel.
+///
+/// FastISel for AArch64 can handle more value types than are legal. This adds
+/// simple value types such as i1, i8, and i16.
+bool AArch64FastISel::isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed) {
+ if (Ty->isVectorTy() && !IsVectorAllowed)
+ return false;
+
if (isTypeLegal(Ty, VT))
return true;
// If this is a type that can be sign or zero-extended to a basic operation
- // go ahead and accept it now. For stores, this reflects truncation.
+ // go ahead and accept it now.
if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
return true;
return false;
}
-bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT) {
+bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
unsigned ScaleFactor;
switch (VT.SimpleTy) {
default: return false;
if (ImmediateOffsetNeedsLowering) {
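// Fold the unencodable offset into a scratch register: add it to the base
// register when one exists, otherwise materialize the absolute address.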
unsigned ResultReg = 0;
if (Addr.getReg())
- ResultReg = FastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(),
+ ResultReg = fastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(),
/*IsKill=*/false, Offset, MVT::i64);
else
- ResultReg = FastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
+ ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
if (!ResultReg)
return false;
return true;
}
-void AArch64FastISel::AddLoadStoreOperands(Address &Addr,
+void AArch64FastISel::addLoadStoreOperands(Address &Addr,
const MachineInstrBuilder &MIB,
unsigned Flags,
unsigned ScaleFactor,
MIB.addMemOperand(MMO);
}
-unsigned AArch64FastISel::emitAddsSubs(bool UseAdds, MVT RetVT,
- const Value *LHS, const Value *RHS,
- bool IsZExt, bool WantResult) {
+unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
+ const Value *RHS, bool SetFlags,
+ bool WantResult, bool IsZExt) {
AArch64_AM::ShiftExtendType ExtendType = AArch64_AM::InvalidShiftExtend;
bool NeedExtend = false;
switch (RetVT.SimpleTy) {
RetVT.SimpleTy = std::max(RetVT.SimpleTy, MVT::i32);
// Canonicalize immediates to the RHS first.
- if (UseAdds && isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
+ if (UseAdd && isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
std::swap(LHS, RHS);
// Canonicalize shift immediate to the RHS.
- if (UseAdds && isValueAvailable(LHS))
+ if (UseAdd && isValueAvailable(LHS))
if (const auto *SI = dyn_cast<BinaryOperator>(LHS))
if (isa<ConstantInt>(SI->getOperand(1)))
if (SI->getOpcode() == Instruction::Shl ||
bool LHSIsKill = hasTrivialKill(LHS);
if (NeedExtend)
- LHSReg = EmitIntExt(SrcVT, LHSReg, RetVT, IsZExt);
+ LHSReg = emitIntExt(SrcVT, LHSReg, RetVT, IsZExt);
unsigned ResultReg = 0;
if (const auto *C = dyn_cast<ConstantInt>(RHS)) {
uint64_t Imm = IsZExt ? C->getZExtValue() : C->getSExtValue();
if (C->isNegative())
- ResultReg =
- emitAddsSubs_ri(!UseAdds, RetVT, LHSReg, LHSIsKill, -Imm, WantResult);
+ ResultReg = emitAddSub_ri(!UseAdd, RetVT, LHSReg, LHSIsKill, -Imm,
+ SetFlags, WantResult);
else
- ResultReg =
- emitAddsSubs_ri(UseAdds, RetVT, LHSReg, LHSIsKill, Imm, WantResult);
+ ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, LHSIsKill, Imm, SetFlags,
+ WantResult);
}
if (ResultReg)
return ResultReg;
if (!RHSReg)
return 0;
bool RHSIsKill = hasTrivialKill(SI->getOperand(0));
- return emitAddsSubs_rx(UseAdds, RetVT, LHSReg, LHSIsKill, RHSReg,
- RHSIsKill, ExtendType, C->getZExtValue(),
- WantResult);
+ return emitAddSub_rx(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg,
+ RHSIsKill, ExtendType, C->getZExtValue(),
+ SetFlags, WantResult);
}
unsigned RHSReg = getRegForValue(RHS);
if (!RHSReg)
return 0;
bool RHSIsKill = hasTrivialKill(RHS);
- return emitAddsSubs_rx(UseAdds, RetVT, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
- ExtendType, 0, WantResult);
+ return emitAddSub_rx(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
+ ExtendType, 0, SetFlags, WantResult);
}
// Check if the shift can be folded into the instruction.
if (!RHSReg)
return 0;
bool RHSIsKill = hasTrivialKill(SI->getOperand(0));
- return emitAddsSubs_rs(UseAdds, RetVT, LHSReg, LHSIsKill, RHSReg,
- RHSIsKill, ShiftType, ShiftVal, WantResult);
+ return emitAddSub_rs(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg,
+ RHSIsKill, ShiftType, ShiftVal, SetFlags,
+ WantResult);
}
}
}
bool RHSIsKill = hasTrivialKill(RHS);
if (NeedExtend)
- RHSReg = EmitIntExt(SrcVT, RHSReg, RetVT, IsZExt);
+ RHSReg = emitIntExt(SrcVT, RHSReg, RetVT, IsZExt);
- return emitAddsSubs_rr(UseAdds, RetVT, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
- WantResult);
+ return emitAddSub_rr(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
+ SetFlags, WantResult);
}
-unsigned AArch64FastISel::emitAddsSubs_rr(bool UseAdds, MVT RetVT,
- unsigned LHSReg, bool LHSIsKill,
- unsigned RHSReg, bool RHSIsKill,
- bool WantResult) {
+unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
+ bool LHSIsKill, unsigned RHSReg,
+ bool RHSIsKill, bool SetFlags,
+ bool WantResult) {
assert(LHSReg && RHSReg && "Invalid register number.");
if (RetVT != MVT::i32 && RetVT != MVT::i64)
return 0;
- static const unsigned OpcTable[2][2] = {
- { AArch64::ADDSWrr, AArch64::ADDSXrr },
- { AArch64::SUBSWrr, AArch64::SUBSXrr }
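+  // Indexed by {SetFlags, UseAdd, Is64Bit}; for example, OpcTable[1][0][1]
+  // yields the flag-setting 64-bit subtract SUBSXrr.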
+ static const unsigned OpcTable[2][2][2] = {
+ { { AArch64::SUBWrr, AArch64::SUBXrr },
+ { AArch64::ADDWrr, AArch64::ADDXrr } },
+ { { AArch64::SUBSWrr, AArch64::SUBSXrr },
+ { AArch64::ADDSWrr, AArch64::ADDSXrr } }
};
- unsigned Opc = OpcTable[!UseAdds][(RetVT == MVT::i64)];
+ bool Is64Bit = RetVT == MVT::i64;
+ unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
+ const TargetRegisterClass *RC =
+ Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
unsigned ResultReg;
- if (WantResult) {
- const TargetRegisterClass *RC =
- (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ if (WantResult)
ResultReg = createResultReg(RC);
- } else
- ResultReg = (RetVT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
+ else
+ ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
const MCInstrDesc &II = TII.get(Opc);
LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill))
.addReg(RHSReg, getKillRegState(RHSIsKill));
-
return ResultReg;
}
-unsigned AArch64FastISel::emitAddsSubs_ri(bool UseAdds, MVT RetVT,
- unsigned LHSReg, bool LHSIsKill,
- uint64_t Imm, bool WantResult) {
+unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
+ bool LHSIsKill, uint64_t Imm,
+ bool SetFlags, bool WantResult) {
assert(LHSReg && "Invalid register number.");
if (RetVT != MVT::i32 && RetVT != MVT::i64)
} else
return 0;
- static const unsigned OpcTable[2][2] = {
- { AArch64::ADDSWri, AArch64::ADDSXri },
- { AArch64::SUBSWri, AArch64::SUBSXri }
+ static const unsigned OpcTable[2][2][2] = {
+ { { AArch64::SUBWri, AArch64::SUBXri },
+ { AArch64::ADDWri, AArch64::ADDXri } },
+ { { AArch64::SUBSWri, AArch64::SUBSXri },
+ { AArch64::ADDSWri, AArch64::ADDSXri } }
};
- unsigned Opc = OpcTable[!UseAdds][(RetVT == MVT::i64)];
+ bool Is64Bit = RetVT == MVT::i64;
+ unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
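+  // ADD/SUB with an immediate may read and write SP, so the non-flag-setting
+  // forms need the GPR*sp register classes; ADDS/SUBS write a plain GPR.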
+ const TargetRegisterClass *RC;
+ if (SetFlags)
+ RC = Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ else
+ RC = Is64Bit ? &AArch64::GPR64spRegClass : &AArch64::GPR32spRegClass;
unsigned ResultReg;
- if (WantResult) {
- const TargetRegisterClass *RC =
- (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ if (WantResult)
ResultReg = createResultReg(RC);
- } else
- ResultReg = (RetVT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
+ else
+ ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
const MCInstrDesc &II = TII.get(Opc);
LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
.addReg(LHSReg, getKillRegState(LHSIsKill))
.addImm(Imm)
.addImm(getShifterImm(AArch64_AM::LSL, ShiftImm));
-
return ResultReg;
}
-unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT,
- unsigned LHSReg, bool LHSIsKill,
- unsigned RHSReg, bool RHSIsKill,
+unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
+ bool LHSIsKill, unsigned RHSReg,
+ bool RHSIsKill,
AArch64_AM::ShiftExtendType ShiftType,
- uint64_t ShiftImm, bool WantResult) {
+ uint64_t ShiftImm, bool SetFlags,
+ bool WantResult) {
assert(LHSReg && RHSReg && "Invalid register number.");
if (RetVT != MVT::i32 && RetVT != MVT::i64)
return 0;
- static const unsigned OpcTable[2][2] = {
- { AArch64::ADDWrs, AArch64::ADDXrs },
- { AArch64::SUBWrs, AArch64::SUBXrs }
+ static const unsigned OpcTable[2][2][2] = {
+ { { AArch64::SUBWrs, AArch64::SUBXrs },
+ { AArch64::ADDWrs, AArch64::ADDXrs } },
+ { { AArch64::SUBSWrs, AArch64::SUBSXrs },
+ { AArch64::ADDSWrs, AArch64::ADDSXrs } }
};
- unsigned Opc = OpcTable[!UseAdd][(RetVT == MVT::i64)];
- unsigned ResultReg;
- if (WantResult) {
- const TargetRegisterClass *RC =
- (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- ResultReg = createResultReg(RC);
- } else
- ResultReg = (RetVT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
-
- const MCInstrDesc &II = TII.get(Opc);
- LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
- RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
- .addReg(LHSReg, getKillRegState(LHSIsKill))
- .addReg(RHSReg, getKillRegState(RHSIsKill))
- .addImm(getShifterImm(ShiftType, ShiftImm));
-
- return ResultReg;
-}
-
-unsigned AArch64FastISel::emitAddsSubs_rs(bool UseAdds, MVT RetVT,
- unsigned LHSReg, bool LHSIsKill,
- unsigned RHSReg, bool RHSIsKill,
- AArch64_AM::ShiftExtendType ShiftType,
- uint64_t ShiftImm, bool WantResult) {
- assert(LHSReg && RHSReg && "Invalid register number.");
-
- if (RetVT != MVT::i32 && RetVT != MVT::i64)
- return 0;
-
- static const unsigned OpcTable[2][2] = {
- { AArch64::ADDSWrs, AArch64::ADDSXrs },
- { AArch64::SUBSWrs, AArch64::SUBSXrs }
- };
- unsigned Opc = OpcTable[!UseAdds][(RetVT == MVT::i64)];
+ bool Is64Bit = RetVT == MVT::i64;
+ unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
+ const TargetRegisterClass *RC =
+ Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
unsigned ResultReg;
- if (WantResult) {
- const TargetRegisterClass *RC =
- (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ if (WantResult)
ResultReg = createResultReg(RC);
- } else
- ResultReg = (RetVT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
+ else
+ ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
const MCInstrDesc &II = TII.get(Opc);
LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
.addReg(LHSReg, getKillRegState(LHSIsKill))
.addReg(RHSReg, getKillRegState(RHSIsKill))
.addImm(getShifterImm(ShiftType, ShiftImm));
-
return ResultReg;
}
-unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT,
- unsigned LHSReg, bool LHSIsKill,
- unsigned RHSReg, bool RHSIsKill,
+unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
+ bool LHSIsKill, unsigned RHSReg,
+ bool RHSIsKill,
AArch64_AM::ShiftExtendType ExtType,
- uint64_t ShiftImm, bool WantResult) {
+ uint64_t ShiftImm, bool SetFlags,
+ bool WantResult) {
assert(LHSReg && RHSReg && "Invalid register number.");
if (RetVT != MVT::i32 && RetVT != MVT::i64)
return 0;
- static const unsigned OpcTable[2][2] = {
- { AArch64::ADDWrx, AArch64::ADDXrx },
- { AArch64::SUBWrx, AArch64::SUBXrx }
+ static const unsigned OpcTable[2][2][2] = {
+ { { AArch64::SUBWrx, AArch64::SUBXrx },
+ { AArch64::ADDWrx, AArch64::ADDXrx } },
+ { { AArch64::SUBSWrx, AArch64::SUBSXrx },
+ { AArch64::ADDSWrx, AArch64::ADDSXrx } }
};
- unsigned Opc = OpcTable[!UseAdd][(RetVT == MVT::i64)];
- unsigned ResultReg;
- if (WantResult) {
- const TargetRegisterClass *RC =
- (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- ResultReg = createResultReg(RC);
- } else
- ResultReg = (RetVT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
-
- const MCInstrDesc &II = TII.get(Opc);
- LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
- RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
- .addReg(LHSReg, getKillRegState(LHSIsKill))
- .addReg(RHSReg, getKillRegState(RHSIsKill))
- .addImm(getArithExtendImm(ExtType, ShiftImm));
-
- return ResultReg;
-}
-
-unsigned AArch64FastISel::emitAddsSubs_rx(bool UseAdds, MVT RetVT,
- unsigned LHSReg, bool LHSIsKill,
- unsigned RHSReg, bool RHSIsKill,
- AArch64_AM::ShiftExtendType ExtType,
- uint64_t ShiftImm, bool WantResult) {
- assert(LHSReg && RHSReg && "Invalid register number.");
-
- if (RetVT != MVT::i32 && RetVT != MVT::i64)
- return 0;
-
- static const unsigned OpcTable[2][2] = {
- { AArch64::ADDSWrx, AArch64::ADDSXrx },
- { AArch64::SUBSWrx, AArch64::SUBSXrx }
- };
- unsigned Opc = OpcTable[!UseAdds][(RetVT == MVT::i64)];
+ bool Is64Bit = RetVT == MVT::i64;
+ unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
+ const TargetRegisterClass *RC = nullptr;
+ if (SetFlags)
+ RC = Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ else
+ RC = Is64Bit ? &AArch64::GPR64spRegClass : &AArch64::GPR32spRegClass;
unsigned ResultReg;
- if (WantResult) {
- const TargetRegisterClass *RC =
- (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ if (WantResult)
ResultReg = createResultReg(RC);
- } else
- ResultReg = (RetVT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
+ else
+ ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
const MCInstrDesc &II = TII.get(Opc);
LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
.addReg(LHSReg, getKillRegState(LHSIsKill))
.addReg(RHSReg, getKillRegState(RHSIsKill))
.addImm(getArithExtendImm(ExtType, ShiftImm));
-
return ResultReg;
}
bool AArch64FastISel::emitICmp(MVT RetVT, const Value *LHS, const Value *RHS,
bool IsZExt) {
- return emitSubs(RetVT, LHS, RHS, IsZExt, /*WantResult=*/false) != 0;
+ return emitSub(RetVT, LHS, RHS, /*SetFlags=*/true, /*WantResult=*/false,
+ IsZExt) != 0;
}
bool AArch64FastISel::emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
uint64_t Imm) {
- return emitAddsSubs_ri(false, RetVT, LHSReg, LHSIsKill, Imm,
- /*WantResult=*/false) != 0;
+ return emitAddSub_ri(/*UseAdd=*/false, RetVT, LHSReg, LHSIsKill, Imm,
+ /*SetFlags=*/true, /*WantResult=*/false) != 0;
}
bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
return true;
}
-unsigned AArch64FastISel::emitAdds(MVT RetVT, const Value *LHS,
- const Value *RHS, bool IsZExt,
- bool WantResult) {
- return emitAddsSubs(true, RetVT, LHS, RHS, IsZExt, WantResult);
+unsigned AArch64FastISel::emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
+ bool SetFlags, bool WantResult, bool IsZExt) {
+ return emitAddSub(/*UseAdd=*/true, RetVT, LHS, RHS, SetFlags, WantResult,
+ IsZExt);
}
-unsigned AArch64FastISel::emitSubs(MVT RetVT, const Value *LHS,
- const Value *RHS, bool IsZExt,
- bool WantResult) {
- return emitAddsSubs(false, RetVT, LHS, RHS, IsZExt, WantResult);
+unsigned AArch64FastISel::emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
+ bool SetFlags, bool WantResult, bool IsZExt) {
+ return emitAddSub(/*UseAdd=*/false, RetVT, LHS, RHS, SetFlags, WantResult,
+ IsZExt);
}
unsigned AArch64FastISel::emitSubs_rr(MVT RetVT, unsigned LHSReg,
bool LHSIsKill, unsigned RHSReg,
bool RHSIsKill, bool WantResult) {
- return emitAddsSubs_rr(false, RetVT, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
- WantResult);
+ return emitAddSub_rr(/*UseAdd=*/false, RetVT, LHSReg, LHSIsKill, RHSReg,
+ RHSIsKill, /*SetFlags=*/true, WantResult);
}
unsigned AArch64FastISel::emitSubs_rs(MVT RetVT, unsigned LHSReg,
bool RHSIsKill,
AArch64_AM::ShiftExtendType ShiftType,
uint64_t ShiftImm, bool WantResult) {
- return emitAddsSubs_rs(false, RetVT, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
- ShiftType, ShiftImm, WantResult);
+ return emitAddSub_rs(/*UseAdd=*/false, RetVT, LHSReg, LHSIsKill, RHSReg,
+ RHSIsKill, ShiftType, ShiftImm, /*SetFlags=*/true,
+ WantResult);
}
-// FIXME: This should be eventually generated automatically by tblgen.
-unsigned AArch64FastISel::emitAND_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
- uint64_t Imm) {
- const TargetRegisterClass *RC = nullptr;
- unsigned Opc = 0;
- unsigned RegSize = 0;
+unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
+ const Value *LHS, const Value *RHS) {
+ // Canonicalize immediates to the RHS first.
+ if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
+ std::swap(LHS, RHS);
+
+  // Canonicalize a shift by immediate to the RHS: these operations are
+  // commutative and only the RHS of the shifted-register form takes a shift.
+ if (isValueAvailable(LHS))
+ if (const auto *SI = dyn_cast<BinaryOperator>(LHS))
+ if (isa<ConstantInt>(SI->getOperand(1)))
+ if (SI->getOpcode() == Instruction::Shl)
+ std::swap(LHS, RHS);
+
+ unsigned LHSReg = getRegForValue(LHS);
+ if (!LHSReg)
+ return 0;
+ bool LHSIsKill = hasTrivialKill(LHS);
+
+ unsigned ResultReg = 0;
+ if (const auto *C = dyn_cast<ConstantInt>(RHS)) {
+ uint64_t Imm = C->getZExtValue();
+ ResultReg = emitLogicalOp_ri(ISDOpc, RetVT, LHSReg, LHSIsKill, Imm);
+ }
+ if (ResultReg)
+ return ResultReg;
+
+ // Check if the shift can be folded into the instruction.
+ if (isValueAvailable(RHS))
+ if (const auto *SI = dyn_cast<BinaryOperator>(RHS))
+ if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1)))
+ if (SI->getOpcode() == Instruction::Shl) {
+ uint64_t ShiftVal = C->getZExtValue();
+ unsigned RHSReg = getRegForValue(SI->getOperand(0));
+ if (!RHSReg)
+ return 0;
+ bool RHSIsKill = hasTrivialKill(SI->getOperand(0));
+ return emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, LHSIsKill, RHSReg,
+ RHSIsKill, ShiftVal);
+ }
+
+ unsigned RHSReg = getRegForValue(RHS);
+ if (!RHSReg)
+ return 0;
+ bool RHSIsKill = hasTrivialKill(RHS);
+
+ MVT VT = std::max(MVT::i32, RetVT.SimpleTy);
+ ResultReg = fastEmit_rr(VT, VT, ISDOpc, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
+ if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
+ uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
+ ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
+ }
+ return ResultReg;
+}
+
+unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT,
+ unsigned LHSReg, bool LHSIsKill,
+ uint64_t Imm) {
+ assert((ISD::AND + 1 == ISD::OR) && (ISD::AND + 2 == ISD::XOR) &&
+ "ISD nodes are not consecutive!");
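+  // The assert above lets ISDOpc - ISD::AND index the table rows directly:
+  // AND -> 0, OR -> 1, XOR -> 2.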
+ static const unsigned OpcTable[3][2] = {
+ { AArch64::ANDWri, AArch64::ANDXri },
+ { AArch64::ORRWri, AArch64::ORRXri },
+ { AArch64::EORWri, AArch64::EORXri }
+ };
+ const TargetRegisterClass *RC;
+ unsigned Opc;
+ unsigned RegSize;
switch (RetVT.SimpleTy) {
default:
return 0;
- case MVT::i32:
- Opc = AArch64::ANDWri;
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32: {
+ unsigned Idx = ISDOpc - ISD::AND;
+ Opc = OpcTable[Idx][0];
RC = &AArch64::GPR32spRegClass;
RegSize = 32;
break;
+ }
case MVT::i64:
- Opc = AArch64::ANDXri;
+ Opc = OpcTable[ISDOpc - ISD::AND][1];
RC = &AArch64::GPR64spRegClass;
RegSize = 64;
break;
if (!AArch64_AM::isLogicalImmediate(Imm, RegSize))
return 0;
- return FastEmitInst_ri(Opc, RC, LHSReg, LHSIsKill,
- AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
+ unsigned ResultReg =
+ fastEmitInst_ri(Opc, RC, LHSReg, LHSIsKill,
+ AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
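+  // The zero-extended immediate has no bits above bit 7/15, so AND already
+  // clears the upper bits; ORR and EOR pass the (possibly dirty) upper bits
+  // of the LHS through and need an explicit mask.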
+ if (RetVT >= MVT::i8 && RetVT <= MVT::i16 && ISDOpc != ISD::AND) {
+ uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
+ ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
+ }
+ return ResultReg;
+}
+
+unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT,
+ unsigned LHSReg, bool LHSIsKill,
+ unsigned RHSReg, bool RHSIsKill,
+ uint64_t ShiftImm) {
+ assert((ISD::AND + 1 == ISD::OR) && (ISD::AND + 2 == ISD::XOR) &&
+ "ISD nodes are not consecutive!");
+ static const unsigned OpcTable[3][2] = {
+ { AArch64::ANDWrs, AArch64::ANDXrs },
+ { AArch64::ORRWrs, AArch64::ORRXrs },
+ { AArch64::EORWrs, AArch64::EORXrs }
+ };
+ const TargetRegisterClass *RC;
+ unsigned Opc;
+ switch (RetVT.SimpleTy) {
+ default:
+ return 0;
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = OpcTable[ISDOpc - ISD::AND][0];
+ RC = &AArch64::GPR32RegClass;
+ break;
+ case MVT::i64:
+ Opc = OpcTable[ISDOpc - ISD::AND][1];
+ RC = &AArch64::GPR64RegClass;
+ break;
+ }
+ unsigned ResultReg =
+ fastEmitInst_rri(Opc, RC, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
+ AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftImm));
+ if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
+ uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
+ ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
+ }
+ return ResultReg;
+}
+
+unsigned AArch64FastISel::emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
+ uint64_t Imm) {
+ return emitLogicalOp_ri(ISD::AND, RetVT, LHSReg, LHSIsKill, Imm);
}
-bool AArch64FastISel::EmitLoad(MVT VT, unsigned &ResultReg, Address Addr,
+bool AArch64FastISel::emitLoad(MVT VT, unsigned &ResultReg, Address Addr,
MachineMemOperand *MMO) {
// Simplify this down to something we can handle.
- if (!SimplifyAddress(Addr, VT))
+ if (!simplifyAddress(Addr, VT))
return false;
unsigned ScaleFactor;
ResultReg = createResultReg(RC);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg);
- AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, ScaleFactor, MMO);
+ addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, ScaleFactor, MMO);
// Loading an i1 requires special handling.
if (VTIsi1) {
- unsigned ANDReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, 1);
+ unsigned ANDReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, 1);
assert(ANDReg && "Unexpected AND instruction emission failure.");
ResultReg = ANDReg;
}
return true;
}
-bool AArch64FastISel::SelectLoad(const Instruction *I) {
+bool AArch64FastISel::selectAddSub(const Instruction *I) {
+ MVT VT;
+ if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
+ return false;
+
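+  // Vector add/sub is left to the generic operator path; because this
+  // FastISel skips target-independent selection, call selectOperator by hand.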
+ if (VT.isVector())
+ return selectOperator(I, I->getOpcode());
+
+ unsigned ResultReg;
+ switch (I->getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected instruction.");
+ case Instruction::Add:
+ ResultReg = emitAdd(VT, I->getOperand(0), I->getOperand(1));
+ break;
+ case Instruction::Sub:
+ ResultReg = emitSub(VT, I->getOperand(0), I->getOperand(1));
+ break;
+ }
+ if (!ResultReg)
+ return false;
+
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::selectLogicalOp(const Instruction *I) {
+ MVT VT;
+ if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
+ return false;
+
+ if (VT.isVector())
+ return selectOperator(I, I->getOpcode());
+
+ unsigned ResultReg;
+ switch (I->getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected instruction.");
+ case Instruction::And:
+ ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
+ break;
+ case Instruction::Or:
+ ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
+ break;
+ case Instruction::Xor:
+ ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
+ break;
+ }
+ if (!ResultReg)
+ return false;
+
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::selectLoad(const Instruction *I) {
MVT VT;
// Verify we have a legal type before going any further. Currently, we handle
// simple types that will directly fit in a register (i32/f32/i64/f64) or
// those that can be sign or zero-extended to a basic operation (i1/i8/i16).
- if (!isLoadStoreTypeLegal(I->getType(), VT) || cast<LoadInst>(I)->isAtomic())
+ if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true) ||
+ cast<LoadInst>(I)->isAtomic())
return false;
// See if we can handle this address.
Address Addr;
- if (!ComputeAddress(I->getOperand(0), Addr, I->getType()))
+ if (!computeAddress(I->getOperand(0), Addr, I->getType()))
return false;
unsigned ResultReg;
- if (!EmitLoad(VT, ResultReg, Addr, createMachineMemOperandFor(I)))
+ if (!emitLoad(VT, ResultReg, Addr, createMachineMemOperandFor(I)))
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::EmitStore(MVT VT, unsigned SrcReg, Address Addr,
+bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
MachineMemOperand *MMO) {
// Simplify this down to something we can handle.
- if (!SimplifyAddress(Addr, VT))
+ if (!simplifyAddress(Addr, VT))
return false;
unsigned ScaleFactor;
// Storing an i1 requires special handling.
if (VTIsi1 && SrcReg != AArch64::WZR) {
- unsigned ANDReg = emitAND_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
+ unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
assert(ANDReg && "Unexpected AND instruction emission failure.");
SrcReg = ANDReg;
}
SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(SrcReg);
- AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, ScaleFactor, MMO);
+ addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, ScaleFactor, MMO);
return true;
}
-bool AArch64FastISel::SelectStore(const Instruction *I) {
+bool AArch64FastISel::selectStore(const Instruction *I) {
MVT VT;
const Value *Op0 = I->getOperand(0);
// Verify we have a legal type before going any further. Currently, we handle
// simple types that will directly fit in a register (i32/f32/i64/f64) or
// those that can be sign or zero-extended to a basic operation (i1/i8/i16).
- if (!isLoadStoreTypeLegal(Op0->getType(), VT) ||
+ if (!isTypeSupported(Op0->getType(), VT, /*IsVectorAllowed=*/true) ||
cast<StoreInst>(I)->isAtomic())
return false;
// See if we can handle this address.
Address Addr;
- if (!ComputeAddress(I->getOperand(1), Addr, I->getOperand(0)->getType()))
+ if (!computeAddress(I->getOperand(1), Addr, I->getOperand(0)->getType()))
return false;
- if (!EmitStore(VT, SrcReg, Addr, createMachineMemOperandFor(I)))
+ if (!emitStore(VT, SrcReg, Addr, createMachineMemOperandFor(I)))
return false;
return true;
}
}
}
-bool AArch64FastISel::SelectBranch(const Instruction *I) {
+/// \brief Check if the comparison against zero and the following branch can be
+/// folded into a single instruction (CBZ or CBNZ).
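+/// For example, an "icmp ne i32 %x, 0" feeding the branch can be lowered to
+/// a single CBNZ instead of a SUBS followed by a conditional branch.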
+static bool canFoldZeroIntoBranch(const CmpInst *CI) {
+ CmpInst::Predicate Predicate = CI->getPredicate();
+ if ((Predicate != CmpInst::ICMP_EQ) && (Predicate != CmpInst::ICMP_NE))
+ return false;
+
+ Type *Ty = CI->getOperand(0)->getType();
+ if (!Ty->isIntegerTy())
+ return false;
+
+ unsigned BW = cast<IntegerType>(Ty)->getBitWidth();
+ if (BW != 1 && BW != 8 && BW != 16 && BW != 32 && BW != 64)
+ return false;
+
+ if (const auto *C = dyn_cast<ConstantInt>(CI->getOperand(0)))
+ if (C->isNullValue())
+ return true;
+
+ if (const auto *C = dyn_cast<ConstantInt>(CI->getOperand(1)))
+ if (C->isNullValue())
+ return true;
+
+ return false;
+}
+
+bool AArch64FastISel::selectBranch(const Instruction *I) {
const BranchInst *BI = cast<BranchInst>(I);
+ if (BI->isUnconditional()) {
+ MachineBasicBlock *MSucc = FuncInfo.MBBMap[BI->getSuccessor(0)];
+ fastEmitBranch(MSucc, BI->getDebugLoc());
+ return true;
+ }
+
MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
AArch64CC::CondCode CC = AArch64CC::NE;
if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
- if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
- // We may not handle every CC for now.
- CC = getCompareCC(CI->getPredicate());
- if (CC == AArch64CC::AL)
- return false;
+ if (CI->hasOneUse() && isValueAvailable(CI)) {
+ // Try to optimize or fold the cmp.
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+ switch (Predicate) {
+ default:
+ break;
+ case CmpInst::FCMP_FALSE:
+ fastEmitBranch(FBB, DbgLoc);
+ return true;
+ case CmpInst::FCMP_TRUE:
+ fastEmitBranch(TBB, DbgLoc);
+ return true;
+ }
+
+ // Try to take advantage of fallthrough opportunities.
+ if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
+ std::swap(TBB, FBB);
+ Predicate = CmpInst::getInversePredicate(Predicate);
+ }
+
+ // Try to optimize comparisons against zero.
+ if (canFoldZeroIntoBranch(CI)) {
+ const Value *LHS = CI->getOperand(0);
+ const Value *RHS = CI->getOperand(1);
+
+ // Canonicalize zero values to the RHS.
+ if (const auto *C = dyn_cast<ConstantInt>(LHS))
+ if (C->isNullValue())
+ std::swap(LHS, RHS);
+
+ static const unsigned OpcTable[2][2] = {
+        { AArch64::CBZW, AArch64::CBZX }, { AArch64::CBNZW, AArch64::CBNZX }
+ };
+ bool IsCmpNE = Predicate == CmpInst::ICMP_NE;
+ bool Is64Bit = LHS->getType()->isIntegerTy(64);
+ unsigned Opc = OpcTable[IsCmpNE][Is64Bit];
+
+ unsigned SrcReg = getRegForValue(LHS);
+ if (!SrcReg)
+ return false;
+ bool SrcIsKill = hasTrivialKill(LHS);
+
+ // Emit the combined compare and branch instruction.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+ .addReg(SrcReg, getKillRegState(SrcIsKill))
+ .addMBB(TBB);
+
+ // Obtain the branch weight and add the TrueBB to the successor list.
+ uint32_t BranchWeight = 0;
+ if (FuncInfo.BPI)
+ BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
+ TBB->getBasicBlock());
+ FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
+
+ fastEmitBranch(FBB, DbgLoc);
+ return true;
+ }
// Emit the cmp.
if (!emitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
return false;
+ // FCMP_UEQ and FCMP_ONE cannot be checked with a single branch
+ // instruction.
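+      // UEQ needs "equal or unordered" (EQ or VS) and ONE needs "less than
+      // or greater than" (MI or GT), so an extra Bcc covers the first test.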
+ CC = getCompareCC(Predicate);
+ AArch64CC::CondCode ExtraCC = AArch64CC::AL;
+ switch (Predicate) {
+ default:
+ break;
+ case CmpInst::FCMP_UEQ:
+ ExtraCC = AArch64CC::EQ;
+ CC = AArch64CC::VS;
+ break;
+ case CmpInst::FCMP_ONE:
+ ExtraCC = AArch64CC::MI;
+ CC = AArch64CC::GT;
+ break;
+ }
+ assert((CC != AArch64CC::AL) && "Unexpected condition code.");
+
+ // Emit the extra branch for FCMP_UEQ and FCMP_ONE.
+ if (ExtraCC != AArch64CC::AL) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
+ .addImm(ExtraCC)
+ .addMBB(TBB);
+ }
+
// Emit the branch.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
.addImm(CC)
TBB->getBasicBlock());
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
- FastEmitBranch(FBB, DbgLoc);
+ fastEmitBranch(FBB, DbgLoc);
return true;
}
} else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
MVT SrcVT;
- if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
- (isLoadStoreTypeLegal(TI->getOperand(0)->getType(), SrcVT))) {
+ if (TI->hasOneUse() && isValueAvailable(TI) &&
+ isTypeSupported(TI->getOperand(0)->getType(), SrcVT)) {
unsigned CondReg = getRegForValue(TI->getOperand(0));
if (!CondReg)
return false;
// Issue an extract_subreg to get the lower 32-bits.
if (SrcVT == MVT::i64) {
- CondReg = FastEmitInst_extractsubreg(MVT::i32, CondReg, CondIsKill,
+ CondReg = fastEmitInst_extractsubreg(MVT::i32, CondReg, CondIsKill,
AArch64::sub_32);
CondIsKill = true;
}
- unsigned ANDReg = emitAND_ri(MVT::i32, CondReg, CondIsKill, 1);
+ unsigned ANDReg = emitAnd_ri(MVT::i32, CondReg, CondIsKill, 1);
assert(ANDReg && "Unexpected AND instruction emission failure.");
emitICmp_ri(MVT::i32, ANDReg, /*IsKill=*/true, 0);
TBB->getBasicBlock());
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
- FastEmitBranch(FBB, DbgLoc);
+ fastEmitBranch(FBB, DbgLoc);
return true;
}
- } else if (const ConstantInt *CI =
- dyn_cast<ConstantInt>(BI->getCondition())) {
+ } else if (const auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
uint64_t Imm = CI->getZExtValue();
MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::B))
TBB->getBasicBlock());
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
- FastEmitBranch(FBB, DbgLoc);
+ fastEmitBranch(FBB, DbgLoc);
return true;
}
TBB->getBasicBlock());
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
- FastEmitBranch(FBB, DbgLoc);
+ fastEmitBranch(FBB, DbgLoc);
return true;
}
-bool AArch64FastISel::SelectIndirectBr(const Instruction *I) {
+bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
const IndirectBrInst *BI = cast<IndirectBrInst>(I);
unsigned AddrReg = getRegForValue(BI->getOperand(0));
if (AddrReg == 0)
return true;
}
-bool AArch64FastISel::SelectCmp(const Instruction *I) {
+bool AArch64FastISel::selectCmp(const Instruction *I) {
const CmpInst *CI = cast<CmpInst>(I);
- // We may not handle every CC for now.
- AArch64CC::CondCode CC = getCompareCC(CI->getPredicate());
- if (CC == AArch64CC::AL)
- return false;
+ // Try to optimize or fold the cmp.
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+ unsigned ResultReg = 0;
+ switch (Predicate) {
+ default:
+ break;
+ case CmpInst::FCMP_FALSE:
+ ResultReg = createResultReg(&AArch64::GPR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(AArch64::WZR, getKillRegState(true));
+ break;
+ case CmpInst::FCMP_TRUE:
+ ResultReg = fastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 1);
+ break;
+ }
+
+ if (ResultReg) {
+ updateValueMap(I, ResultReg);
+ return true;
+ }
// Emit the cmp.
if (!emitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
return false;
+ ResultReg = createResultReg(&AArch64::GPR32RegClass);
+
+  // FCMP_UEQ and FCMP_ONE cannot be checked with a single instruction. The
+  // condition codes are stored inverted, because they are consumed by CSINC.
+  static const unsigned CondCodeTable[2][2] = {
+    { AArch64CC::NE, AArch64CC::VC },
+    { AArch64CC::PL, AArch64CC::LE }
+  };
+  const unsigned *CondCodes = nullptr;
+ switch (Predicate) {
+ default:
+ break;
+ case CmpInst::FCMP_UEQ:
+ CondCodes = &CondCodeTable[0][0];
+ break;
+ case CmpInst::FCMP_ONE:
+ CondCodes = &CondCodeTable[1][0];
+ break;
+ }
+
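+  // CSINC Wd, WZR, WZR, cc materializes !cc (cc ? 0 : 0 + 1); feeding the
+  // first result through a second CSINC yields (!cc0 || !cc1), which is why
+  // the table stores the inverted condition codes.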
+ if (CondCodes) {
+ unsigned TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
+ TmpReg1)
+ .addReg(AArch64::WZR, getKillRegState(true))
+ .addReg(AArch64::WZR, getKillRegState(true))
+ .addImm(CondCodes[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
+ ResultReg)
+ .addReg(TmpReg1, getKillRegState(true))
+ .addReg(AArch64::WZR, getKillRegState(true))
+ .addImm(CondCodes[1]);
+
+ updateValueMap(I, ResultReg);
+ return true;
+ }
+
// Now set a register based on the comparison.
+ AArch64CC::CondCode CC = getCompareCC(Predicate);
+ assert((CC != AArch64CC::AL) && "Unexpected condition code.");
AArch64CC::CondCode invertedCC = getInvertedCondCode(CC);
- unsigned ResultReg = createResultReg(&AArch64::GPR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
ResultReg)
- .addReg(AArch64::WZR)
- .addReg(AArch64::WZR)
+ .addReg(AArch64::WZR, getKillRegState(true))
+ .addReg(AArch64::WZR, getKillRegState(true))
.addImm(invertedCC);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::SelectSelect(const Instruction *I) {
+bool AArch64FastISel::selectSelect(const Instruction *I) {
const SelectInst *SI = cast<SelectInst>(I);
EVT DestEVT = TLI.getValueType(SI->getType(), true);
bool CondIsKill = hasTrivialKill(Cond);
if (NeedTest) {
- unsigned ANDReg = emitAND_ri(MVT::i32, CondReg, CondIsKill, 1);
+ unsigned ANDReg = emitAnd_ri(MVT::i32, CondReg, CondIsKill, 1);
assert(ANDReg && "Unexpected AND instruction emission failure.");
emitICmp_ri(MVT::i32, ANDReg, /*IsKill=*/true, 0);
}
if (!TrueReg || !FalseReg)
return false;
- unsigned ResultReg = FastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
+ unsigned ResultReg = fastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
FalseReg, FalseIsKill, CC);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::SelectFPExt(const Instruction *I) {
+bool AArch64FastISel::selectFPExt(const Instruction *I) {
Value *V = I->getOperand(0);
if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())
return false;
unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
ResultReg).addReg(Op);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::SelectFPTrunc(const Instruction *I) {
+bool AArch64FastISel::selectFPTrunc(const Instruction *I) {
Value *V = I->getOperand(0);
if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())
return false;
unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
ResultReg).addReg(Op);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
// FPToUI and FPToSI
-bool AArch64FastISel::SelectFPToInt(const Instruction *I, bool Signed) {
+bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
MVT DestVT;
if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
return false;
DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(SrcReg);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) {
+bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
MVT DestVT;
if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
return false;
// Handle sign-extension.
if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {
SrcReg =
- EmitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);
+ emitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);
if (!SrcReg)
return false;
SrcIsKill = true;
Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
}
- unsigned ResultReg = FastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
+ unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
SrcIsKill);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::FastLowerArguments() {
+bool AArch64FastISel::fastLowerArguments() {
if (!FuncInfo.CanLowerReturn)
return false;
if (CC != CallingConv::C)
return false;
- // Only handle simple cases like i1/i8/i16/i32/i64/f32/f64 of up to 8 GPR and
- // FPR each.
+ // Only handle simple cases of up to 8 GPR and FPR each.
unsigned GPRCnt = 0;
unsigned FPRCnt = 0;
unsigned Idx = 0;
return false;
Type *ArgTy = Arg.getType();
- if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
+ if (ArgTy->isStructTy() || ArgTy->isArrayTy())
return false;
EVT ArgVT = TLI.getValueType(ArgTy);
- if (!ArgVT.isSimple()) return false;
- switch (ArgVT.getSimpleVT().SimpleTy) {
- default: return false;
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- case MVT::i64:
+ if (!ArgVT.isSimple())
+ return false;
+
+  MVT VT = ArgVT.getSimpleVT();
+ if (VT.isFloatingPoint() && !Subtarget->hasFPARMv8())
+ return false;
+
+ if (VT.isVector() &&
+ (!Subtarget->hasNEON() || !Subtarget->isLittleEndian()))
+ return false;
+
+ if (VT >= MVT::i1 && VT <= MVT::i64)
++GPRCnt;
- break;
- case MVT::f16:
- case MVT::f32:
- case MVT::f64:
+ else if ((VT >= MVT::f16 && VT <= MVT::f64) || VT.is64BitVector() ||
+ VT.is128BitVector())
++FPRCnt;
- break;
- }
+ else
+ return false;
if (GPRCnt > 8 || FPRCnt > 8)
return false;
}
- static const MCPhysReg Registers[5][8] = {
+ static const MCPhysReg Registers[6][8] = {
{ AArch64::W0, AArch64::W1, AArch64::W2, AArch64::W3, AArch64::W4,
AArch64::W5, AArch64::W6, AArch64::W7 },
{ AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, AArch64::X4,
{ AArch64::S0, AArch64::S1, AArch64::S2, AArch64::S3, AArch64::S4,
AArch64::S5, AArch64::S6, AArch64::S7 },
{ AArch64::D0, AArch64::D1, AArch64::D2, AArch64::D3, AArch64::D4,
- AArch64::D5, AArch64::D6, AArch64::D7 }
+ AArch64::D5, AArch64::D6, AArch64::D7 },
+ { AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,
+ AArch64::Q5, AArch64::Q6, AArch64::Q7 }
};
unsigned GPRIdx = 0;
for (auto const &Arg : F->args()) {
MVT VT = TLI.getSimpleValueType(Arg.getType());
unsigned SrcReg;
- const TargetRegisterClass *RC = nullptr;
- switch (VT.SimpleTy) {
- default: llvm_unreachable("Unexpected value type.");
- case MVT::i1:
- case MVT::i8:
- case MVT::i16: VT = MVT::i32; // fall-through
- case MVT::i32:
- SrcReg = Registers[0][GPRIdx++]; RC = &AArch64::GPR32RegClass; break;
- case MVT::i64:
- SrcReg = Registers[1][GPRIdx++]; RC = &AArch64::GPR64RegClass; break;
- case MVT::f16:
- SrcReg = Registers[2][FPRIdx++]; RC = &AArch64::FPR16RegClass; break;
- case MVT::f32:
- SrcReg = Registers[3][FPRIdx++]; RC = &AArch64::FPR32RegClass; break;
- case MVT::f64:
- SrcReg = Registers[4][FPRIdx++]; RC = &AArch64::FPR64RegClass; break;
- }
-
- // Skip unused arguments.
- if (Arg.use_empty()) {
- UpdateValueMap(&Arg, 0);
- continue;
- }
+ const TargetRegisterClass *RC;
+ if (VT >= MVT::i1 && VT <= MVT::i32) {
+ SrcReg = Registers[0][GPRIdx++];
+ RC = &AArch64::GPR32RegClass;
+ VT = MVT::i32;
+ } else if (VT == MVT::i64) {
+ SrcReg = Registers[1][GPRIdx++];
+ RC = &AArch64::GPR64RegClass;
+ } else if (VT == MVT::f16) {
+ SrcReg = Registers[2][FPRIdx++];
+ RC = &AArch64::FPR16RegClass;
+ } else if (VT == MVT::f32) {
+ SrcReg = Registers[3][FPRIdx++];
+ RC = &AArch64::FPR32RegClass;
+ } else if ((VT == MVT::f64) || VT.is64BitVector()) {
+ SrcReg = Registers[4][FPRIdx++];
+ RC = &AArch64::FPR64RegClass;
+ } else if (VT.is128BitVector()) {
+ SrcReg = Registers[5][FPRIdx++];
+ RC = &AArch64::FPR128RegClass;
+ } else
+ llvm_unreachable("Unexpected value type.");
unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(DstReg, getKillRegState(true));
- UpdateValueMap(&Arg, ResultReg);
+ updateValueMap(&Arg, ResultReg);
}
return true;
}
-bool AArch64FastISel::ProcessCallArgs(CallLoweringInfo &CLI,
+bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
SmallVectorImpl<MVT> &OutVTs,
unsigned &NumBytes) {
CallingConv::ID CC = CLI.CallConv;
case CCValAssign::SExt: {
MVT DestVT = VA.getLocVT();
MVT SrcVT = ArgVT;
- ArgReg = EmitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
+ ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
if (!ArgReg)
return false;
break;
case CCValAssign::ZExt: {
MVT DestVT = VA.getLocVT();
MVT SrcVT = ArgVT;
- ArgReg = EmitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
+ ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
if (!ArgReg)
return false;
break;
MachinePointerInfo::getStack(Addr.getOffset()),
MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
- if (!EmitStore(ArgVT, ArgReg, Addr, MMO))
+ if (!emitStore(ArgVT, ArgReg, Addr, MMO))
return false;
}
}
return true;
}
-bool AArch64FastISel::FinishCall(CallLoweringInfo &CLI, MVT RetVT,
+bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
unsigned NumBytes) {
CallingConv::ID CC = CLI.CallConv;
return true;
}
-bool AArch64FastISel::FastLowerCall(CallLoweringInfo &CLI) {
+bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
CallingConv::ID CC = CLI.CallConv;
bool IsTailCall = CLI.IsTailCall;
bool IsVarArg = CLI.IsVarArg;
const Value *Callee = CLI.Callee;
const char *SymName = CLI.SymName;
+ if (!Callee && !SymName)
+ return false;
+
// Allow SelectionDAG isel to handle tail calls.
if (IsTailCall)
return false;
}
Address Addr;
- if (!ComputeCallAddress(Callee, Addr))
+ if (Callee && !computeCallAddress(Callee, Addr))
return false;
// Handle the arguments now that we've gotten them.
unsigned NumBytes;
- if (!ProcessCallArgs(CLI, OutVTs, NumBytes))
+ if (!processCallArgs(CLI, OutVTs, NumBytes))
return false;
// Issue the call.
.addReg(ADRPReg)
.addExternalSymbol(SymName, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
AArch64II::MO_NC);
- } else if (Addr.getGlobalValue()) {
- CallReg = AArch64MaterializeGV(Addr.getGlobalValue());
- } else if (Addr.getReg())
+ } else if (Addr.getGlobalValue())
+ CallReg = materializeGV(Addr.getGlobalValue());
+ else if (Addr.getReg())
CallReg = Addr.getReg();
if (!CallReg)
CLI.Call = MIB;
// Finish off the call including any return values.
- return FinishCall(CLI, RetVT, NumBytes);
+ return finishCall(CLI, RetVT, NumBytes);
}
-bool AArch64FastISel::IsMemCpySmall(uint64_t Len, unsigned Alignment) {
+bool AArch64FastISel::isMemCpySmall(uint64_t Len, unsigned Alignment) {
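+ // Heuristic: with a known alignment, assume the copy is emitted in
+ // alignment-sized chunks and allow at most four of them; with unknown
+ // alignment, only inline copies below 32 bytes.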
if (Alignment)
return Len / Alignment <= 4;
else
return Len < 32;
}
-bool AArch64FastISel::TryEmitSmallMemCpy(Address Dest, Address Src,
+bool AArch64FastISel::tryEmitSmallMemCpy(Address Dest, Address Src,
uint64_t Len, unsigned Alignment) {
// Make sure we don't bloat code by inlining very large memcpy's.
- if (!IsMemCpySmall(Len, Alignment))
+ if (!isMemCpySmall(Len, Alignment))
return false;
int64_t UnscaledOffset = 0;
bool RV;
unsigned ResultReg;
- RV = EmitLoad(VT, ResultReg, Src);
+ RV = emitLoad(VT, ResultReg, Src);
if (!RV)
return false;
- RV = EmitStore(VT, ResultReg, Dest);
+ RV = emitStore(VT, ResultReg, Dest);
if (!RV)
return false;
}
// Check if both instructions are in the same basic block.
- if (II->getParent() != I->getParent())
+ if (!isValueAvailable(II))
return false;
// Make sure nothing is in the way.
return true;
}
-bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
+bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// FIXME: Handle more intrinsics.
switch (II->getIntrinsicID()) {
default: return false;
unsigned DestReg;
unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
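+ // Walk the frame chain: each AAPCS64 frame record stores the caller's
+ // frame pointer at offset 0, so 'Depth' loads of [FP, #0] reach the
+ // requested ancestor frame.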
while (Depth--) {
- DestReg = FastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
+ DestReg = fastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
SrcReg, /*IsKill=*/true, 0);
assert(DestReg && "Unexpected LDR instruction emission failure.");
SrcReg = DestReg;
}
- UpdateValueMap(II, SrcReg);
+ updateValueMap(II, SrcReg);
return true;
}
case Intrinsic::memcpy:
// if possible.
uint64_t Len = cast<ConstantInt>(MTI->getLength())->getZExtValue();
unsigned Alignment = MTI->getAlignment();
- if (IsMemCpySmall(Len, Alignment)) {
+ if (isMemCpySmall(Len, Alignment)) {
Address Dest, Src;
- if (!ComputeAddress(MTI->getRawDest(), Dest) ||
- !ComputeAddress(MTI->getRawSource(), Src))
+ if (!computeAddress(MTI->getRawDest(), Dest) ||
+ !computeAddress(MTI->getRawSource(), Src))
return false;
- if (TryEmitSmallMemCpy(Dest, Src, Len, Alignment))
+ if (tryEmitSmallMemCpy(Dest, Src, Len, Alignment))
return true;
}
}
return false;
const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
- return LowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
+ return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
}
case Intrinsic::memset: {
const MemSetInst *MSI = cast<MemSetInst>(II);
// address spaces.
return false;
- return LowerCallTo(II, "memset", II->getNumArgOperands() - 2);
+ return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
+ }
+ case Intrinsic::sin:
+ case Intrinsic::cos:
+ case Intrinsic::pow: {
+ MVT RetVT;
+ if (!isTypeLegal(II->getType(), RetVT))
+ return false;
+
+ if (RetVT != MVT::f32 && RetVT != MVT::f64)
+ return false;
+
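+ // These map straight onto libm, e.g. @llvm.sin.f32 becomes a call to sinf
+ // and @llvm.sin.f64 a call to sin; RTLIB supplies the symbol name and
+ // calling convention.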
+ static const RTLIB::Libcall LibCallTable[3][2] = {
+ { RTLIB::SIN_F32, RTLIB::SIN_F64 },
+ { RTLIB::COS_F32, RTLIB::COS_F64 },
+ { RTLIB::POW_F32, RTLIB::POW_F64 }
+ };
+ RTLIB::Libcall LC;
+ bool Is64Bit = RetVT == MVT::f64;
+ switch (II->getIntrinsicID()) {
+ default:
+ llvm_unreachable("Unexpected intrinsic.");
+ case Intrinsic::sin:
+ LC = LibCallTable[0][Is64Bit];
+ break;
+ case Intrinsic::cos:
+ LC = LibCallTable[1][Is64Bit];
+ break;
+ case Intrinsic::pow:
+ LC = LibCallTable[2][Is64Bit];
+ break;
+ }
+
+ ArgListTy Args;
+ Args.reserve(II->getNumArgOperands());
+
+ // Populate the argument list.
+ for (auto &Arg : II->arg_operands()) {
+ ArgListEntry Entry;
+ Entry.Val = Arg;
+ Entry.Ty = Arg->getType();
+ Args.push_back(Entry);
+ }
+
+ CallLoweringInfo CLI;
+ CLI.setCallee(TLI.getLibcallCallingConv(LC), II->getType(),
+ TLI.getLibcallName(LC), std::move(Args));
+ if (!lowerCallTo(CLI))
+ return false;
+ updateValueMap(II, CLI.ResultReg);
+ return true;
}
case Intrinsic::trap: {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
return false;
bool Op0IsKill = hasTrivialKill(II->getOperand(0));
- unsigned ResultReg = FastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
+ unsigned ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
if (!ResultReg)
return false;
- UpdateValueMap(II, ResultReg);
+ updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::sadd_with_overflow:
switch (II->getIntrinsicID()) {
default: llvm_unreachable("Unexpected intrinsic!");
case Intrinsic::sadd_with_overflow:
- ResultReg1 = emitAdds(VT, LHS, RHS); CC = AArch64CC::VS; break;
+ ResultReg1 = emitAdd(VT, LHS, RHS, /*SetFlags=*/true);
+ CC = AArch64CC::VS;
+ break;
case Intrinsic::uadd_with_overflow:
- ResultReg1 = emitAdds(VT, LHS, RHS); CC = AArch64CC::HS; break;
+ ResultReg1 = emitAdd(VT, LHS, RHS, /*SetFlags=*/true);
+ CC = AArch64CC::HS;
+ break;
case Intrinsic::ssub_with_overflow:
- ResultReg1 = emitSubs(VT, LHS, RHS); CC = AArch64CC::VS; break;
+ ResultReg1 = emitSub(VT, LHS, RHS, /*SetFlags=*/true);
+ CC = AArch64CC::VS;
+ break;
case Intrinsic::usub_with_overflow:
- ResultReg1 = emitSubs(VT, LHS, RHS); CC = AArch64CC::LO; break;
+ ResultReg1 = emitSub(VT, LHS, RHS, /*SetFlags=*/true);
+ CC = AArch64CC::LO;
+ break;
case Intrinsic::smul_with_overflow: {
CC = AArch64CC::NE;
unsigned LHSReg = getRegForValue(LHS);
bool RHSIsKill = hasTrivialKill(RHS);
if (VT == MVT::i32) {
- MulReg = Emit_SMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
+ MulReg = emitSMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
unsigned ShiftReg = emitLSR_ri(MVT::i64, MVT::i64, MulReg,
/*IsKill=*/false, 32);
- MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
+ MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
AArch64::sub_32);
- ShiftReg = FastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
+ ShiftReg = fastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
AArch64::sub_32);
emitSubs_rs(VT, ShiftReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
AArch64_AM::ASR, 31, /*WantResult=*/false);
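+ // The i32 multiply is done as a 64-bit SMULL; it overflowed iff the high
+ // 32 bits differ from the sign-extension of the low 32 bits, which the
+ // SUBS above checks (CC = NE).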
} else {
assert(VT == MVT::i64 && "Unexpected value type.");
- MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
- unsigned SMULHReg = FastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
+ MulReg = emitMul_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
+ unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
emitSubs_rs(VT, SMULHReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
AArch64_AM::ASR, 63, /*WantResult=*/false);
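+ // Same idea for i64: the product overflowed iff SMULH(LHS, RHS) differs
+ // from the sign bits (Mul asr #63) of the low 64-bit result.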
bool RHSIsKill = hasTrivialKill(RHS);
if (VT == MVT::i32) {
- MulReg = Emit_UMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
+ MulReg = emitUMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
emitSubs_rs(MVT::i64, AArch64::XZR, /*IsKill=*/true, MulReg,
/*IsKill=*/false, AArch64_AM::LSR, 32,
/*WantResult=*/false);
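+ // The unsigned i32 multiply overflowed iff the high 32 bits of the 64-bit
+ // UMULL result are nonzero; the SUBS against XZR sets NE in that case.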
- MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
+ MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
AArch64::sub_32);
} else {
assert(VT == MVT::i64 && "Unexpected value type.");
- MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
- unsigned UMULHReg = FastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
+ MulReg = emitMul_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
+ unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
emitSubs_rr(VT, AArch64::XZR, /*IsKill=*/true, UMULHReg,
/*IsKill=*/false, /*WantResult=*/false);
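+ // For i64, any nonzero UMULH result means the unsigned product did not
+ // fit in 64 bits.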
TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);
}
- ResultReg2 = FastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
+ ResultReg2 = fastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
AArch64::WZR, /*IsKill=*/true, AArch64::WZR,
/*IsKill=*/true, getInvertedCondCode(CC));
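+ // CSINC WZR, WZR with the inverted condition is the 'cset' idiom: it
+ // materializes the overflow flag as 0 or 1 in the second result register.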
assert((ResultReg1 + 1) == ResultReg2 &&
"Nonconsecutive result registers.");
- UpdateValueMap(II, ResultReg1, 2);
+ updateValueMap(II, ResultReg1, 2);
return true;
}
}
return false;
}
-bool AArch64FastISel::SelectRet(const Instruction *I) {
+bool AArch64FastISel::selectRet(const Instruction *I) {
const ReturnInst *Ret = cast<ReturnInst>(I);
const Function &F = *I->getParent()->getParent();
const Value *RV = Ret->getOperand(0);
// Don't bother handling odd stuff for now.
- if (VA.getLocInfo() != CCValAssign::Full)
+ if ((VA.getLocInfo() != CCValAssign::Full) &&
+ (VA.getLocInfo() != CCValAssign::BCvt))
return false;
+
// Only handle register returns for now.
if (!VA.isRegLoc())
return false;
+
unsigned Reg = getRegForValue(RV);
if (Reg == 0)
return false;
return false;
// Vectors (of > 1 lane) in big-endian mode need tricky handling.
- if (RVEVT.isVector() && RVEVT.getVectorNumElements() > 1)
+ if (RVEVT.isVector() && RVEVT.getVectorNumElements() > 1 &&
+ !Subtarget->isLittleEndian())
return false;
MVT RVVT = RVEVT.getSimpleVT();
if (RVVT == MVT::f128)
return false;
+
MVT DestVT = VA.getValVT();
// Special handling for extended integers.
if (RVVT != DestVT) {
if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
return false;
- bool isZExt = Outs[0].Flags.isZExt();
- SrcReg = EmitIntExt(RVVT, SrcReg, DestVT, isZExt);
+ bool IsZExt = Outs[0].Flags.isZExt();
+ SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
if (SrcReg == 0)
return false;
}
return true;
}
-bool AArch64FastISel::SelectTrunc(const Instruction *I) {
+bool AArch64FastISel::selectTrunc(const Instruction *I) {
Type *DestTy = I->getType();
Value *Op = I->getOperand(0);
Type *SrcTy = Op->getType();
break;
}
// Issue an extract_subreg to get the lower 32-bits.
- unsigned Reg32 = FastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
+ unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
AArch64::sub_32);
// Create the AND instruction which performs the actual truncation.
- ResultReg = emitAND_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask);
+ ResultReg = emitAnd_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask);
assert(ResultReg && "Unexpected AND instruction emission failure.");
} else {
ResultReg = createResultReg(&AArch64::GPR32RegClass);
.addReg(SrcReg, getKillRegState(SrcIsKill));
}
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-unsigned AArch64FastISel::Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt) {
+unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) {
assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 ||
DestVT == MVT::i64) &&
"Unexpected value type.");
if (DestVT == MVT::i8 || DestVT == MVT::i16)
DestVT = MVT::i32;
- if (isZExt) {
- unsigned ResultReg = emitAND_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
+ if (IsZExt) {
+ unsigned ResultReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
assert(ResultReg && "Unexpected AND instruction emission failure.");
if (DestVT == MVT::i64) {
// We're ZExt i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the
// FIXME: We're SExt i1 to i64.
return 0;
}
- return FastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
+ return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
/*TODO:IsKill=*/false, 0, 0);
}
}
-unsigned AArch64FastISel::Emit_MUL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
+unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill) {
unsigned Opc, ZReg;
switch (RetVT.SimpleTy) {
const TargetRegisterClass *RC =
(RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
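+ // MUL is an alias of MADD with the zero register as the addend:
+ // mul x0, x1, x2 == madd x0, x1, x2, xzr.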
- return FastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
+ return fastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
ZReg, /*IsKill=*/true);
}
-unsigned AArch64FastISel::Emit_SMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
+unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill) {
if (RetVT != MVT::i64)
return 0;
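+ // SMULL is likewise an alias of SMADDL with XZR as the addend:
+ // smull x0, w1, w2 == smaddl x0, w1, w2, xzr. emitUMULL_rr below does the
+ // same with UMADDL.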
- return FastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
+ return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
Op0, Op0IsKill, Op1, Op1IsKill,
AArch64::XZR, /*IsKill=*/true);
}
-unsigned AArch64FastISel::Emit_UMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
+unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill) {
if (RetVT != MVT::i64)
return 0;
- return FastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
+ return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
Op0, Op0IsKill, Op1, Op1IsKill,
AArch64::XZR, /*IsKill=*/true);
}
const TargetRegisterClass *RC =
(RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
if (NeedTrunc) {
- Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
+ Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
Op1IsKill = true;
}
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
Op1IsKill);
if (NeedTrunc)
- ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
+ ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
return ResultReg;
}
Op0 = TmpReg;
Op0IsKill = true;
}
- return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+ return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
const TargetRegisterClass *RC =
(RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
if (NeedTrunc) {
- Op0Reg = emitAND_ri(MVT::i32, Op0Reg, Op0IsKill, Mask);
- Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
+ Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Op0IsKill, Mask);
+ Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
Op0IsKill = Op1IsKill = true;
}
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
Op1IsKill);
if (NeedTrunc)
- ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
+ ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
return ResultReg;
}
// 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
if (Shift >= SrcBits && IsZExt)
- return AArch64MaterializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)),
- RetVT);
+ return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);
// It is not possible to fold a sign-extend into the LShr instruction. In this
// case emit a sign-extend.
if (!IsZExt) {
- Op0 = EmitIntExt(SrcVT, Op0, RetVT, IsZExt);
+ Op0 = emitIntExt(SrcVT, Op0, RetVT, IsZExt);
if (!Op0)
return 0;
Op0IsKill = true;
Op0 = TmpReg;
Op0IsKill = true;
}
- return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+ return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
const TargetRegisterClass *RC =
(RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
if (NeedTrunc) {
- Op0Reg = EmitIntExt(RetVT, Op0Reg, MVT::i32, /*IsZExt=*/false);
- Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
+ Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*IsZExt=*/false);
+ Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
Op0IsKill = Op1IsKill = true;
}
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
Op1IsKill);
if (NeedTrunc)
- ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
+ ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
return ResultReg;
}
// 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
if (Shift >= SrcBits && IsZExt)
- return AArch64MaterializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)),
- RetVT);
+ return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);
unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
unsigned ImmS = SrcBits - 1;
Op0 = TmpReg;
Op0IsKill = true;
}
- return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+ return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
-unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
- bool isZExt) {
+unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
+ bool IsZExt) {
assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?");
// FastISel does not have plumbing to deal with extensions where the SrcVT or
default:
return 0;
case MVT::i1:
- return Emiti1Ext(SrcReg, DestVT, isZExt);
+ return emiti1Ext(SrcReg, DestVT, IsZExt);
case MVT::i8:
if (DestVT == MVT::i64)
- Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
+ Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
else
- Opc = isZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
+ Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
Imm = 7;
break;
case MVT::i16:
if (DestVT == MVT::i64)
- Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
+ Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
else
- Opc = isZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
+ Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
Imm = 15;
break;
case MVT::i32:
assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?");
- Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
+ Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
Imm = 31;
break;
}
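+ // With ImmR = 0, UBFM/SBFM with ImmS = 7/15/31 keep the low 8/16/32 bits
+ // and zero-/sign-extend them; these are the uxtb/uxth and sxtb/sxth/sxtw
+ // aliases.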
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- return FastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
+ return fastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
}
-bool AArch64FastISel::SelectIntExt(const Instruction *I) {
+bool AArch64FastISel::selectIntExt(const Instruction *I) {
// On AArch64, in general, integer casts don't involve legal types; this code
// handles promotable integers. The high bits for a type smaller than
// the register size are assumed to be undefined.
Value *Src = I->getOperand(0);
Type *SrcTy = Src->getType();
- bool isZExt = isa<ZExtInst>(I);
unsigned SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
MVT DestVT = DestEVT.getSimpleVT();
unsigned ResultReg = 0;
+ bool IsZExt = isa<ZExtInst>(I);
// Check if it is an argument and if it is already zero/sign-extended.
if (const auto *Arg = dyn_cast<Argument>(Src)) {
- if ((isZExt && Arg->hasZExtAttr()) || (!isZExt && Arg->hasSExtAttr())) {
+ if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
if (DestVT == MVT::i64) {
ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
}
if (!ResultReg)
- ResultReg = EmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
+ ResultReg = emitIntExt(SrcVT, SrcReg, DestVT, IsZExt);
if (!ResultReg)
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::SelectRem(const Instruction *I, unsigned ISDOpcode) {
+bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) {
EVT DestEVT = TLI.getValueType(I->getType(), true);
if (!DestEVT.isSimple())
return false;
return false;
unsigned DivOpc;
- bool is64bit = (DestVT == MVT::i64);
+ bool Is64Bit = (DestVT == MVT::i64);
switch (ISDOpcode) {
default:
return false;
case ISD::SREM:
- DivOpc = is64bit ? AArch64::SDIVXr : AArch64::SDIVWr;
+ DivOpc = Is64Bit ? AArch64::SDIVXr : AArch64::SDIVWr;
break;
case ISD::UREM:
- DivOpc = is64bit ? AArch64::UDIVXr : AArch64::UDIVWr;
+ DivOpc = Is64Bit ? AArch64::UDIVXr : AArch64::UDIVWr;
break;
}
- unsigned MSubOpc = is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
+ unsigned MSubOpc = Is64Bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
unsigned Src0Reg = getRegForValue(I->getOperand(0));
if (!Src0Reg)
return false;
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- unsigned QuotReg = FastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
+ unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
Src1Reg, /*IsKill=*/false);
assert(QuotReg && "Unexpected DIV instruction emission failure.");
// The remainder is computed as numerator - (quotient * denominator) using the
// MSUB instruction.
- unsigned ResultReg = FastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
+ unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
Src1Reg, Src1IsKill, Src0Reg,
Src0IsKill);
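+ // For example (illustrative registers), a 32-bit srem becomes:
+ //   sdiv w8, w0, w1
+ //   msub w0, w8, w1, w0
+ // since MSUB computes Wa - Wn * Wm, i.e. numerator - quotient*denominator.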
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::SelectMul(const Instruction *I) {
+bool AArch64FastISel::selectMul(const Instruction *I) {
EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType(), true);
if (!SrcEVT.isSimple())
return false;
bool Src1IsKill = hasTrivialKill(I->getOperand(1));
unsigned ResultReg =
- Emit_MUL_rr(SrcVT, Src0Reg, Src0IsKill, Src1Reg, Src1IsKill);
+ emitMul_rr(SrcVT, Src0Reg, Src0IsKill, Src1Reg, Src1IsKill);
if (!ResultReg)
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::SelectShift(const Instruction *I) {
+bool AArch64FastISel::selectShift(const Instruction *I) {
MVT RetVT;
- if (!isLoadStoreTypeLegal(I->getType(), RetVT))
+ if (!isTypeSupported(I->getType(), RetVT, /*IsVectorAllowed=*/true))
return false;
+ if (RetVT.isVector())
+ return selectOperator(I, I->getOpcode());
+
if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
unsigned ResultReg = 0;
uint64_t ShiftVal = C->getZExtValue();
const Value *Op0 = I->getOperand(0);
if (const auto *ZExt = dyn_cast<ZExtInst>(Op0)) {
MVT TmpVT;
- if (isValueAvailable(ZExt) &&
- isLoadStoreTypeLegal(ZExt->getSrcTy(), TmpVT)) {
+ if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), TmpVT)) {
SrcVT = TmpVT;
IsZExt = true;
Op0 = ZExt->getOperand(0);
}
} else if (const auto *SExt = dyn_cast<SExtInst>(Op0)) {
MVT TmpVT;
- if (isValueAvailable(SExt) &&
- isLoadStoreTypeLegal(SExt->getSrcTy(), TmpVT)) {
+ if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), TmpVT)) {
SrcVT = TmpVT;
IsZExt = false;
Op0 = SExt->getOperand(0);
if (!ResultReg)
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
if (!ResultReg)
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::SelectBitCast(const Instruction *I) {
+bool AArch64FastISel::selectBitCast(const Instruction *I) {
MVT RetVT, SrcVT;
if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT))
if (!Op0Reg)
return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
- unsigned ResultReg = FastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
+ unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
if (!ResultReg)
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::selectFRem(const Instruction *I) {
+ MVT RetVT;
+ if (!isTypeLegal(I->getType(), RetVT))
+ return false;
+
+ RTLIB::Libcall LC;
+ switch (RetVT.SimpleTy) {
+ default:
+ return false;
+ case MVT::f32:
+ LC = RTLIB::REM_F32;
+ break;
+ case MVT::f64:
+ LC = RTLIB::REM_F64;
+ break;
+ }
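+ // AArch64 has no frem instruction, so the operation is lowered as a call
+ // to fmodf/fmod using the libcall selected above.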
+
+ ArgListTy Args;
+ Args.reserve(I->getNumOperands());
+
+ // Populate the argument list.
+ for (auto &Arg : I->operands()) {
+ ArgListEntry Entry;
+ Entry.Val = Arg;
+ Entry.Ty = Arg->getType();
+ Args.push_back(Entry);
+ }
+
+ CallLoweringInfo CLI;
+ CLI.setCallee(TLI.getLibcallCallingConv(LC), I->getType(),
+ TLI.getLibcallName(LC), std::move(Args));
+ if (!lowerCallTo(CLI))
+ return false;
+ updateValueMap(I, CLI.ResultReg);
return true;
}
-bool AArch64FastISel::TargetSelectInstruction(const Instruction *I) {
+bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
switch (I->getOpcode()) {
default:
break;
- case Instruction::Load:
- return SelectLoad(I);
- case Instruction::Store:
- return SelectStore(I);
+ case Instruction::Add:
+ case Instruction::Sub:
+ return selectAddSub(I);
+ case Instruction::Mul:
+ if (!selectBinaryOp(I, ISD::MUL))
+ return selectMul(I);
+ return true;
+ case Instruction::SRem:
+ if (!selectBinaryOp(I, ISD::SREM))
+ return selectRem(I, ISD::SREM);
+ return true;
+ case Instruction::URem:
+ if (!selectBinaryOp(I, ISD::UREM))
+ return selectRem(I, ISD::UREM);
+ return true;
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ return selectShift(I);
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ return selectLogicalOp(I);
case Instruction::Br:
- return SelectBranch(I);
+ return selectBranch(I);
case Instruction::IndirectBr:
- return SelectIndirectBr(I);
- case Instruction::FCmp:
- case Instruction::ICmp:
- return SelectCmp(I);
- case Instruction::Select:
- return SelectSelect(I);
- case Instruction::FPExt:
- return SelectFPExt(I);
- case Instruction::FPTrunc:
- return SelectFPTrunc(I);
+ return selectIndirectBr(I);
+ case Instruction::BitCast:
+ if (!FastISel::selectBitCast(I))
+ return selectBitCast(I);
+ return true;
case Instruction::FPToSI:
- return SelectFPToInt(I, /*Signed=*/true);
+ if (!selectCast(I, ISD::FP_TO_SINT))
+ return selectFPToInt(I, /*Signed=*/true);
+ return true;
case Instruction::FPToUI:
- return SelectFPToInt(I, /*Signed=*/false);
+ return selectFPToInt(I, /*Signed=*/false);
+ case Instruction::ZExt:
+ if (!selectCast(I, ISD::ZERO_EXTEND))
+ return selectIntExt(I);
+ return true;
+ case Instruction::SExt:
+ if (!selectCast(I, ISD::SIGN_EXTEND))
+ return selectIntExt(I);
+ return true;
+ case Instruction::Trunc:
+ if (!selectCast(I, ISD::TRUNCATE))
+ return selectTrunc(I);
+ return true;
+ case Instruction::FPExt:
+ return selectFPExt(I);
+ case Instruction::FPTrunc:
+ return selectFPTrunc(I);
case Instruction::SIToFP:
- return SelectIntToFP(I, /*Signed=*/true);
+ if (!selectCast(I, ISD::SINT_TO_FP))
+ return selectIntToFP(I, /*Signed=*/true);
+ return true;
case Instruction::UIToFP:
- return SelectIntToFP(I, /*Signed=*/false);
- case Instruction::SRem:
- return SelectRem(I, ISD::SREM);
- case Instruction::URem:
- return SelectRem(I, ISD::UREM);
+ return selectIntToFP(I, /*Signed=*/false);
+ case Instruction::Load:
+ return selectLoad(I);
+ case Instruction::Store:
+ return selectStore(I);
+ case Instruction::FCmp:
+ case Instruction::ICmp:
+ return selectCmp(I);
+ case Instruction::Select:
+ return selectSelect(I);
case Instruction::Ret:
- return SelectRet(I);
- case Instruction::Trunc:
- return SelectTrunc(I);
- case Instruction::ZExt:
- case Instruction::SExt:
- return SelectIntExt(I);
-
- // FIXME: All of these should really be handled by the target-independent
- // selector -> improve FastISel tblgen.
- case Instruction::Mul:
- return SelectMul(I);
- case Instruction::Shl: // fall-through
- case Instruction::LShr: // fall-through
- case Instruction::AShr:
- return SelectShift(I);
- case Instruction::BitCast:
- return SelectBitCast(I);
+ return selectRet(I);
+ case Instruction::FRem:
+ return selectFRem(I);
}
- return false;
+
+ // Fall back to target-independent instruction selection.
+ return selectOperator(I, I->getOpcode());
// Silence warnings.
(void)&CC_AArch64_DarwinPCS_VarArg;
}
namespace llvm {
-llvm::FastISel *AArch64::createFastISel(FunctionLoweringInfo &funcInfo,
- const TargetLibraryInfo *libInfo) {
- return new AArch64FastISel(funcInfo, libInfo);
+llvm::FastISel *AArch64::createFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo) {
+ return new AArch64FastISel(FuncInfo, LibInfo);
}
}