diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index b074be9419c..d942ace427b 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -14,6 +14,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "AArch64.h"
+#include "AArch64CallingConvention.h"
 #include "AArch64Subtarget.h"
 #include "AArch64TargetMachine.h"
 #include "MCTargetDesc/AArch64AddressingModes.h"
@@ -78,11 +79,9 @@ class AArch64FastISel final : public FastISel {
       return Base.Reg;
     }
     void setOffsetReg(unsigned Reg) {
-      assert(isRegBase() && "Invalid offset register access!");
       OffsetReg = Reg;
     }
     unsigned getOffsetReg() const {
-      assert(isRegBase() && "Invalid offset register access!");
       return OffsetReg;
     }
     void setFI(unsigned FI) {
@@ -152,6 +151,8 @@ private:
   bool foldXALUIntrinsic(AArch64CC::CondCode &CC, const Instruction *I,
                          const Value *Cond);
   bool optimizeIntExtLoad(const Instruction *I, MVT RetVT, MVT SrcVT);
+  bool optimizeSelect(const SelectInst *SI);
+  std::pair<unsigned, bool> getRegForGEPIndex(const Value *Idx);
 
   // Emit helper routines.
   unsigned emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
@@ -189,6 +190,7 @@ private:
   unsigned emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
                    bool SetFlags = false, bool WantResult = true,
                    bool IsZExt = false);
+  unsigned emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill, int64_t Imm);
   unsigned emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
                    bool SetFlags = false, bool WantResult = true,
                    bool IsZExt = false);
@@ -809,22 +811,23 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
   }
   } // end switch
 
-  if (Addr.getReg()) {
-    if (!Addr.getOffsetReg()) {
-      unsigned Reg = getRegForValue(Obj);
-      if (!Reg)
-        return false;
-      Addr.setOffsetReg(Reg);
-      return true;
-    }
-    return false;
+  if (Addr.isRegBase() && !Addr.getReg()) {
+    unsigned Reg = getRegForValue(Obj);
+    if (!Reg)
+      return false;
+    Addr.setReg(Reg);
+    return true;
   }
 
-  unsigned Reg = getRegForValue(Obj);
-  if (!Reg)
-    return false;
-  Addr.setReg(Reg);
-  return true;
+  if (!Addr.getOffsetReg()) {
+    unsigned Reg = getRegForValue(Obj);
+    if (!Reg)
+      return false;
+    Addr.setOffsetReg(Reg);
+    return true;
+  }
+
+  return false;
 }
 
 bool AArch64FastISel::computeCallAddress(const Value *V, Address &Addr) {
@@ -941,8 +944,7 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
   // Cannot encode an offset register and an immediate offset in the same
   // instruction. Fold the immediate offset into the load/store instruction and
   // emit an additional add to take care of the offset register.
-  if (!ImmediateOffsetNeedsLowering && Addr.getOffset() && Addr.isRegBase() &&
-      Addr.getOffsetReg())
+  if (!ImmediateOffsetNeedsLowering && Addr.getOffset() && Addr.getOffsetReg())
     RegisterOffsetNeedsLowering = true;
 
   // Cannot encode zero register as base.
@@ -952,7 +954,8 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
   // If this is a stack pointer and the offset needs to be simplified then put
   // the alloca address into a register, set the base type back to register and
   // continue. This should almost never happen.
-  if (ImmediateOffsetNeedsLowering && Addr.isFIBase()) {
+  if ((ImmediateOffsetNeedsLowering || Addr.getOffsetReg()) && Addr.isFIBase())
+  {
     unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
             ResultReg)
@@ -1003,20 +1006,10 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
   // reg+offset into a register.
   if (ImmediateOffsetNeedsLowering) {
     unsigned ResultReg;
-    if (Addr.getReg()) {
+    if (Addr.getReg())
       // Try to fold the immediate into the add instruction.
-      if (Offset < 0)
-        ResultReg = emitAddSub_ri(/*UseAdd=*/false, MVT::i64, Addr.getReg(),
-                                  /*IsKill=*/false, -Offset);
-      else
-        ResultReg = emitAddSub_ri(/*UseAdd=*/true, MVT::i64, Addr.getReg(),
-                                  /*IsKill=*/false, Offset);
-      if (!ResultReg) {
-        unsigned ImmReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
-        ResultReg = emitAddSub_rr(/*UseAdd=*/true, MVT::i64, Addr.getReg(),
-                                  /*IsKill=*/false, ImmReg, /*IsKill=*/true);
-      }
-    } else
+      ResultReg = emitAdd_ri_(MVT::i64, Addr.getReg(), /*IsKill=*/false, Offset);
+    else
       ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
 
     if (!ResultReg)
@@ -1059,10 +1052,8 @@ void AArch64FastISel::addLoadStoreOperands(Address &Addr,
       MIB.addReg(Addr.getOffsetReg());
       MIB.addImm(IsSigned);
       MIB.addImm(Addr.getShift() != 0);
-    } else {
-      MIB.addReg(Addr.getReg());
-      MIB.addImm(Offset);
-    }
+    } else
+      MIB.addReg(Addr.getReg()).addImm(Offset);
   }
 
   if (MMO)
@@ -1096,7 +1087,7 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
     RetVT.SimpleTy = std::max(RetVT.SimpleTy, MVT::i32);
 
   // Canonicalize immediates to the RHS first.
-  if (UseAdd && isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
+  if (UseAdd && isa<Constant>(LHS) && !isa<Constant>(RHS))
    std::swap(LHS, RHS);
 
   // Canonicalize mul by power of 2 to the RHS.
@@ -1130,7 +1121,11 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
     else
       ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, LHSIsKill, Imm, SetFlags,
                                 WantResult);
-  }
+  } else if (const auto *C = dyn_cast<Constant>(RHS))
+    if (C->isNullValue())
+      ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, LHSIsKill, 0, SetFlags,
+                                WantResult);
+
   if (ResultReg)
     return ResultReg;
 
@@ -1443,6 +1438,30 @@ unsigned AArch64FastISel::emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
                     IsZExt);
 }
 
+/// \brief This method is a wrapper to simplify add emission.
+///
+/// First try to emit an add with an immediate operand using emitAddSub_ri. If
+/// that fails, then try to materialize the immediate into a register and use
+/// emitAddSub_rr instead.
+unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill,
+                                      int64_t Imm) {
+  unsigned ResultReg;
+  if (Imm < 0)
+    ResultReg = emitAddSub_ri(false, VT, Op0, Op0IsKill, -Imm);
+  else
+    ResultReg = emitAddSub_ri(true, VT, Op0, Op0IsKill, Imm);
+
+  if (ResultReg)
+    return ResultReg;
+
+  unsigned CReg = fastEmit_i(VT, VT, ISD::Constant, Imm);
+  if (!CReg)
+    return 0;
+
+  ResultReg = emitAddSub_rr(true, VT, Op0, Op0IsKill, CReg, true);
+  return ResultReg;
+}
+
 unsigned AArch64FastISel::emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
                                   bool SetFlags, bool WantResult, bool IsZExt) {
   return emitAddSub(/*UseAdd=*/false, RetVT, LHS, RHS, SetFlags, WantResult,
@@ -2075,12 +2094,12 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
   const Value *LHS = CI->getOperand(0);
   const Value *RHS = CI->getOperand(1);
 
-  Type *Ty = LHS->getType();
-  if (!Ty->isIntegerTy())
-    return false;
+  MVT VT;
+  if (!isTypeSupported(LHS->getType(), VT))
+    return false;
 
-  unsigned BW = cast<IntegerType>(Ty)->getBitWidth();
-  if (BW != 1 && BW != 8 && BW != 16 && BW != 32 && BW != 64)
+  unsigned BW = VT.getSizeInBits();
+  if (BW > 64)
     return false;
 
   MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
@@ -2094,19 +2113,19 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
 
   int TestBit = -1;
   bool IsCmpNE;
-  if ((Predicate == CmpInst::ICMP_EQ) || (Predicate == CmpInst::ICMP_NE)) {
-    if (const auto *C = dyn_cast<ConstantInt>(LHS))
-      if (C->isNullValue())
-        std::swap(LHS, RHS);
-
-    if (!isa<ConstantInt>(RHS))
-      return false;
+  switch (Predicate) {
+  default:
+    return false;
+  case CmpInst::ICMP_EQ:
+  case CmpInst::ICMP_NE:
+    if (isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isNullValue())
+      std::swap(LHS, RHS);
 
-    if (!cast<ConstantInt>(RHS)->isNullValue())
+    if (!isa<ConstantInt>(RHS) || !cast<ConstantInt>(RHS)->isNullValue())
       return false;
 
     if (const auto *AI = dyn_cast<BinaryOperator>(LHS))
-      if (AI->getOpcode() == Instruction::And) {
+      if (AI->getOpcode() == Instruction::And && isValueAvailable(AI)) {
         const Value *AndLHS = AI->getOperand(0);
         const Value *AndRHS = AI->getOperand(1);
 
@@ -2120,27 +2139,32 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
            LHS = AndLHS;
          }
      }
-    IsCmpNE = Predicate == CmpInst::ICMP_NE;
-  } else if (Predicate == CmpInst::ICMP_SLT) {
-    if (!isa<ConstantInt>(RHS))
-      return false;
-
-    if (!cast<ConstantInt>(RHS)->isNullValue())
+
+    if (VT == MVT::i1)
+      TestBit = 0;
+
+    IsCmpNE = Predicate == CmpInst::ICMP_NE;
+    break;
+  case CmpInst::ICMP_SLT:
+  case CmpInst::ICMP_SGE:
+    if (!isa<ConstantInt>(RHS) || !cast<ConstantInt>(RHS)->isNullValue())
       return false;
 
     TestBit = BW - 1;
-    IsCmpNE = true;
-  } else if (Predicate == CmpInst::ICMP_SGT) {
+    IsCmpNE = Predicate == CmpInst::ICMP_SLT;
+    break;
+  case CmpInst::ICMP_SGT:
+  case CmpInst::ICMP_SLE:
    if (!isa<ConstantInt>(RHS))
      return false;
 
-    if (cast<ConstantInt>(RHS)->getValue() != -1)
+    if (cast<ConstantInt>(RHS)->getValue() != APInt(BW, -1, true))
      return false;
 
    TestBit = BW - 1;
-    IsCmpNE = false;
-  } else
-    return false;
+    IsCmpNE = Predicate == CmpInst::ICMP_SLE;
+    break;
+  } // end switch
 
   static const unsigned OpcTable[2][2][2] = {
     { {AArch64::CBZW,  AArch64::CBZX },
@@ -2153,7 +2177,7 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
   bool Is64Bit = BW == 64;
   if (TestBit < 32 && TestBit >= 0)
     Is64Bit = false;
-  
+
   unsigned Opc = OpcTable[IsBitTest][IsCmpNE][Is64Bit];
   const MCInstrDesc &II = TII.get(Opc);
 
@@ -2166,6 +2190,9 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
     SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
                                         AArch64::sub_32);
 
+  if ((BW < 32) && !IsBitTest)
+    SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*IsZExt=*/true);
+
   // Emit the combined compare and branch instruction.
   SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
   MachineInstrBuilder MIB =
@@ -2473,60 +2500,186 @@ bool AArch64FastISel::selectCmp(const Instruction *I) {
   return true;
 }
 
-bool AArch64FastISel::selectSelect(const Instruction *I) {
-  const SelectInst *SI = cast<SelectInst>(I);
+/// \brief Optimize selects of i1 if one of the operands has a 'true' or 'false'
+/// value.
+bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
+  if (!SI->getType()->isIntegerTy(1))
+    return false;
 
-  EVT DestEVT = TLI.getValueType(SI->getType(), true);
-  if (!DestEVT.isSimple())
+  const Value *Src1Val, *Src2Val;
+  unsigned Opc = 0;
+  bool NeedExtraOp = false;
+  if (auto *CI = dyn_cast<ConstantInt>(SI->getTrueValue())) {
+    if (CI->isOne()) {
+      Src1Val = SI->getCondition();
+      Src2Val = SI->getFalseValue();
+      Opc = AArch64::ORRWrr;
+    } else {
+      assert(CI->isZero());
+      Src1Val = SI->getFalseValue();
+      Src2Val = SI->getCondition();
+      Opc = AArch64::BICWrr;
+    }
+  } else if (auto *CI = dyn_cast<ConstantInt>(SI->getFalseValue())) {
+    if (CI->isOne()) {
+      Src1Val = SI->getCondition();
+      Src2Val = SI->getTrueValue();
+      Opc = AArch64::ORRWrr;
+      NeedExtraOp = true;
+    } else {
+      assert(CI->isZero());
+      Src1Val = SI->getCondition();
+      Src2Val = SI->getTrueValue();
+      Opc = AArch64::ANDWrr;
+    }
+  }
+
+  if (!Opc)
     return false;
 
-  MVT DestVT = DestEVT.getSimpleVT();
-  if (DestVT != MVT::i32 && DestVT != MVT::i64 && DestVT != MVT::f32 &&
-      DestVT != MVT::f64)
+  unsigned Src1Reg = getRegForValue(Src1Val);
+  if (!Src1Reg)
     return false;
+  bool Src1IsKill = hasTrivialKill(Src1Val);
 
-  unsigned SelectOpc;
-  const TargetRegisterClass *RC = nullptr;
-  switch (DestVT.SimpleTy) {
-  default: return false;
+  unsigned Src2Reg = getRegForValue(Src2Val);
+  if (!Src2Reg)
+    return false;
+  bool Src2IsKill = hasTrivialKill(Src2Val);
+
+  if (NeedExtraOp) {
+    Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, Src1IsKill, 1);
+    Src1IsKill = true;
+  }
+  unsigned ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32spRegClass, Src1Reg,
+                                       Src1IsKill, Src2Reg, Src2IsKill);
+  updateValueMap(SI, ResultReg);
+  return true;
+}
+
+bool AArch64FastISel::selectSelect(const Instruction *I) {
+  assert(isa<SelectInst>(I) && "Expected a select instruction.");
+  MVT VT;
+  if (!isTypeSupported(I->getType(), VT))
+    return false;
+
+  unsigned Opc;
+  const TargetRegisterClass *RC;
+  switch (VT.SimpleTy) {
+  default:
+    return false;
+  case MVT::i1:
+  case MVT::i8:
+  case MVT::i16:
   case MVT::i32:
-    SelectOpc = AArch64::CSELWr;    RC = &AArch64::GPR32RegClass; break;
+    Opc = AArch64::CSELWr;
+    RC = &AArch64::GPR32RegClass;
+    break;
   case MVT::i64:
-    SelectOpc = AArch64::CSELXr;    RC = &AArch64::GPR64RegClass; break;
+    Opc = AArch64::CSELXr;
+    RC = &AArch64::GPR64RegClass;
+    break;
   case MVT::f32:
-    SelectOpc = AArch64::FCSELSrrr; RC = &AArch64::FPR32RegClass; break;
+    Opc = AArch64::FCSELSrrr;
+    RC = &AArch64::FPR32RegClass;
+    break;
   case MVT::f64:
-    SelectOpc = AArch64::FCSELDrrr; RC = &AArch64::FPR64RegClass; break;
+    Opc = AArch64::FCSELDrrr;
+    RC = &AArch64::FPR64RegClass;
+    break;
   }
 
+  const SelectInst *SI = cast<SelectInst>(I);
   const Value *Cond = SI->getCondition();
-  bool NeedTest = true;
   AArch64CC::CondCode CC = AArch64CC::NE;
-  if (foldXALUIntrinsic(CC, I, Cond))
-    NeedTest = false;
+  AArch64CC::CondCode ExtraCC = AArch64CC::AL;
 
-  unsigned CondReg = getRegForValue(Cond);
-  if (!CondReg)
-    return false;
-  bool CondIsKill = hasTrivialKill(Cond);
+  if (optimizeSelect(SI))
+    return true;
 
-  if (NeedTest) {
-    unsigned ANDReg = emitAnd_ri(MVT::i32, CondReg, CondIsKill, 1);
-    assert(ANDReg && "Unexpected AND instruction emission failure.");
-    emitICmp_ri(MVT::i32, ANDReg, /*IsKill=*/true, 0);
+  // Try to pickup the flags, so we don't have to emit another compare.
+  if (foldXALUIntrinsic(CC, I, Cond)) {
+    // Fake request the condition to force emission of the XALU intrinsic.
+    unsigned CondReg = getRegForValue(Cond);
+    if (!CondReg)
+      return false;
+  } else if (isa<CmpInst>(Cond) && cast<CmpInst>(Cond)->hasOneUse() &&
+             isValueAvailable(Cond)) {
+    const auto *Cmp = cast<CmpInst>(Cond);
+    // Try to optimize or fold the cmp.
+    CmpInst::Predicate Predicate = optimizeCmpPredicate(Cmp);
+    const Value *FoldSelect = nullptr;
+    switch (Predicate) {
+    default:
+      break;
+    case CmpInst::FCMP_FALSE:
+      FoldSelect = SI->getFalseValue();
+      break;
+    case CmpInst::FCMP_TRUE:
+      FoldSelect = SI->getTrueValue();
+      break;
+    }
+
+    if (FoldSelect) {
+      unsigned SrcReg = getRegForValue(FoldSelect);
+      if (!SrcReg)
+        return false;
+      unsigned UseReg = lookUpRegForValue(SI);
+      if (UseReg)
+        MRI.clearKillFlags(UseReg);
+
+      updateValueMap(I, SrcReg);
+      return true;
+    }
+
+    // Emit the cmp.
+    if (!emitCmp(Cmp->getOperand(0), Cmp->getOperand(1), Cmp->isUnsigned()))
+      return false;
+
+    // FCMP_UEQ and FCMP_ONE cannot be checked with a single select instruction.
+    CC = getCompareCC(Predicate);
+    switch (Predicate) {
+    default:
+      break;
+    case CmpInst::FCMP_UEQ:
+      ExtraCC = AArch64CC::EQ;
+      CC = AArch64CC::VS;
+      break;
+    case CmpInst::FCMP_ONE:
+      ExtraCC = AArch64CC::MI;
+      CC = AArch64CC::GT;
+      break;
+    }
+    assert((CC != AArch64CC::AL) && "Unexpected condition code.");
+  } else {
+    unsigned CondReg = getRegForValue(Cond);
+    if (!CondReg)
+      return false;
+    bool CondIsKill = hasTrivialKill(Cond);
+
+    // Emit a TST instruction (ANDS wzr, reg, #imm).
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDSWri),
+            AArch64::WZR)
+        .addReg(CondReg, getKillRegState(CondIsKill))
+        .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
   }
 
-  unsigned TrueReg = getRegForValue(SI->getTrueValue());
-  bool TrueIsKill = hasTrivialKill(SI->getTrueValue());
+  unsigned Src1Reg = getRegForValue(SI->getTrueValue());
+  bool Src1IsKill = hasTrivialKill(SI->getTrueValue());
 
-  unsigned FalseReg = getRegForValue(SI->getFalseValue());
-  bool FalseIsKill = hasTrivialKill(SI->getFalseValue());
+  unsigned Src2Reg = getRegForValue(SI->getFalseValue());
+  bool Src2IsKill = hasTrivialKill(SI->getFalseValue());
 
-  if (!TrueReg || !FalseReg)
+  if (!Src1Reg || !Src2Reg)
     return false;
 
-  unsigned ResultReg = fastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
-                                        FalseReg, FalseIsKill, CC);
+  if (ExtraCC != AArch64CC::AL) {
+    Src2Reg = fastEmitInst_rri(Opc, RC, Src1Reg, Src1IsKill, Src2Reg,
+                               Src2IsKill, ExtraCC);
+    Src2IsKill = true;
+  }
+  unsigned ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src1IsKill, Src2Reg,
+                                        Src2IsKill, CC);
   updateValueMap(I, ResultReg);
   return true;
 }
@@ -3276,6 +3429,32 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     updateValueMap(II, CLI.ResultReg);
     return true;
   }
+  case Intrinsic::fabs: {
+    MVT VT;
+    if (!isTypeLegal(II->getType(), VT))
+      return false;
+
+    unsigned Opc;
+    switch (VT.SimpleTy) {
+    default:
+      return false;
+    case MVT::f32:
+      Opc = AArch64::FABSSr;
+      break;
+    case MVT::f64:
+      Opc = AArch64::FABSDr;
+      break;
+    }
+    unsigned SrcReg = getRegForValue(II->getOperand(0));
+    if (!SrcReg)
+      return false;
+    bool SrcRegIsKill = hasTrivialKill(II->getOperand(0));
+    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+        .addReg(SrcReg, getKillRegState(SrcRegIsKill));
+    updateValueMap(II, ResultReg);
+    return true;
+  }
   case Intrinsic::trap: {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
         .addImm(1);
@@ -3709,7 +3888,7 @@ unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
 
 unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                      bool Op0IsKill, uint64_t Shift,
-                                     bool IsZext) {
+                                     bool IsZExt) {
   assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
          "Unexpected source/return type pair.");
   assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
@@ -3722,6 +3901,20 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   unsigned RegSize = Is64Bit ? 64 : 32;
   unsigned DstBits = RetVT.getSizeInBits();
   unsigned SrcBits = SrcVT.getSizeInBits();
+  const TargetRegisterClass *RC =
+      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+
+  // Just emit a copy for "zero" shifts.
+  if (Shift == 0) {
+    if (RetVT == SrcVT) {
+      unsigned ResultReg = createResultReg(RC);
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+              TII.get(TargetOpcode::COPY), ResultReg)
+          .addReg(Op0, getKillRegState(Op0IsKill));
+      return ResultReg;
+    } else
+      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
+  }
 
   // Don't deal with undefined shifts.
   if (Shift >= DstBits)
@@ -3759,9 +3952,7 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
     {AArch64::SBFMWri, AArch64::SBFMXri},
     {AArch64::UBFMWri, AArch64::UBFMXri}
   };
-  unsigned Opc = OpcTable[IsZext][Is64Bit];
-  const TargetRegisterClass *RC =
-      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+  unsigned Opc = OpcTable[IsZExt][Is64Bit];
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
     unsigned TmpReg = MRI.createVirtualRegister(RC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -3807,8 +3998,9 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                      bool IsZExt) {
   assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
          "Unexpected source/return type pair.");
-  assert((SrcVT == MVT::i8 || SrcVT == MVT::i16 || SrcVT == MVT::i32 ||
-          SrcVT == MVT::i64) && "Unexpected source value type.");
+  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
+          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
+         "Unexpected source value type.");
   assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
           RetVT == MVT::i64) && "Unexpected return value type.");
 
@@ -3816,6 +4008,20 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   unsigned RegSize = Is64Bit ? 64 : 32;
   unsigned DstBits = RetVT.getSizeInBits();
   unsigned SrcBits = SrcVT.getSizeInBits();
+  const TargetRegisterClass *RC =
+      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+
+  // Just emit a copy for "zero" shifts.
+  if (Shift == 0) {
+    if (RetVT == SrcVT) {
+      unsigned ResultReg = createResultReg(RC);
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+              TII.get(TargetOpcode::COPY), ResultReg)
+          .addReg(Op0, getKillRegState(Op0IsKill));
+      return ResultReg;
+    } else
+      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
+  }
 
   // Don't deal with undefined shifts.
   if (Shift >= DstBits)
@@ -3868,8 +4074,6 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
     {AArch64::UBFMWri, AArch64::UBFMXri}
   };
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
-  const TargetRegisterClass *RC =
-      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
     unsigned TmpReg = MRI.createVirtualRegister(RC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -3915,8 +4119,9 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                      bool IsZExt) {
   assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
          "Unexpected source/return type pair.");
-  assert((SrcVT == MVT::i8 || SrcVT == MVT::i16 || SrcVT == MVT::i32 ||
-          SrcVT == MVT::i64) && "Unexpected source value type.");
+  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
+          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
+         "Unexpected source value type.");
   assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
           RetVT == MVT::i64) && "Unexpected return value type.");
 
@@ -3924,6 +4129,20 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   unsigned RegSize = Is64Bit ? 64 : 32;
   unsigned DstBits = RetVT.getSizeInBits();
   unsigned SrcBits = SrcVT.getSizeInBits();
+  const TargetRegisterClass *RC =
+      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+
+  // Just emit a copy for "zero" shifts.
+  if (Shift == 0) {
+    if (RetVT == SrcVT) {
+      unsigned ResultReg = createResultReg(RC);
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+              TII.get(TargetOpcode::COPY), ResultReg)
+          .addReg(Op0, getKillRegState(Op0IsKill));
+      return ResultReg;
+    } else
+      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
+  }
 
   // Don't deal with undefined shifts.
   if (Shift >= DstBits)
@@ -3964,8 +4183,6 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
     {AArch64::UBFMWri, AArch64::UBFMXri}
   };
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
-  const TargetRegisterClass *RC =
-      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
     unsigned TmpReg = MRI.createVirtualRegister(RC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -4174,6 +4391,14 @@ bool AArch64FastISel::selectIntExt(const Instruction *I) {
         .addImm(AArch64::sub_32);
     SrcReg = ResultReg;
   }
+  // Conservatively clear all kill flags from all uses, because we are
+  // replacing a sign-/zero-extend instruction at IR level with a nop at MI
+  // level. The result of the instruction at IR level might have been
+  // trivially dead, which is no longer true.
+  unsigned UseReg = lookUpRegForValue(I);
+  if (UseReg)
+    MRI.clearKillFlags(UseReg);
+
   updateValueMap(I, SrcReg);
   return true;
 }
@@ -4500,9 +4725,8 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) {
     return true;
   }
 
-  unsigned Pow2MinusOne = (1 << Lg2) - 1;
-  unsigned AddReg = emitAddSub_ri(/*UseAdd=*/true, VT, Src0Reg,
-                                  /*IsKill=*/false, Pow2MinusOne);
+  int64_t Pow2MinusOne = (1ULL << Lg2) - 1;
+  unsigned AddReg = emitAdd_ri_(VT, Src0Reg, /*IsKill=*/false, Pow2MinusOne);
   if (!AddReg)
     return false;
 
@@ -4542,6 +4766,32 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) {
   return true;
 }
 
+/// This is mostly a copy of the existing FastISel getRegForGEPIndex code. We
+/// have to duplicate it for AArch64, because otherwise we would fail during the
+/// sign-extend emission.
+std::pair<unsigned, bool> AArch64FastISel::getRegForGEPIndex(const Value *Idx) {
+  unsigned IdxN = getRegForValue(Idx);
+  if (IdxN == 0)
+    // Unhandled operand. Halt "fast" selection and bail.
+    return std::pair<unsigned, bool>(0, false);
+
+  bool IdxNIsKill = hasTrivialKill(Idx);
+
+  // If the index is smaller or larger than intptr_t, truncate or extend it.
+  MVT PtrVT = TLI.getPointerTy();
+  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
+  if (IdxVT.bitsLT(PtrVT)) {
+    IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, /*IsZExt=*/false);
+    IdxNIsKill = true;
+  } else if (IdxVT.bitsGT(PtrVT))
+    llvm_unreachable("AArch64 FastISel doesn't support types larger than i64");
+  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
+}
+
+/// This is mostly a copy of the existing FastISel GEP code, but we have to
+/// duplicate it for AArch64, because otherwise we would bail out even for
+/// simple cases. This is because the standard fastEmit functions don't cover
+/// MUL at all and ADD is lowered very inefficiently.
 bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
   unsigned N = getRegForValue(I->getOperand(0));
   if (!N)
@@ -4573,15 +4823,9 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
         continue;
       }
       if (TotalOffs) {
-        N = emitAddSub_ri(/*UseAdd=*/true, VT, N, NIsKill, TotalOffs);
-        if (!N) {
-          unsigned C = fastEmit_i(VT, VT, ISD::Constant, TotalOffs);
-          if (!C)
-            return false;
-          N = emitAddSub_rr(/*UseAdd=*/true, VT, N, NIsKill, C, true);
-          if (!N)
-            return false;
-        }
+        N = emitAdd_ri_(VT, N, NIsKill, TotalOffs);
+        if (!N)
+          return false;
         NIsKill = true;
         TotalOffs = 0;
       }
@@ -4609,17 +4853,10 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
     }
   }
   if (TotalOffs) {
-    N = emitAddSub_ri(/*UseAdd=*/true, VT, N, NIsKill, TotalOffs);
-    if (!N) {
-      unsigned C = fastEmit_i(VT, VT, ISD::Constant, TotalOffs);
-      if (!C)
-        return false;
-      N = emitAddSub_rr(/*UseAdd=*/true, VT, N, NIsKill, C, true);
-      if (!N)
-        return false;
-    }
+    N = emitAdd_ri_(VT, N, NIsKill, TotalOffs);
+    if (!N)
+      return false;
   }
-
   updateValueMap(I, N);
   return true;
 }