return Base.Reg;
}
void setOffsetReg(unsigned Reg) {
- assert(isRegBase() && "Invalid offset register access!");
OffsetReg = Reg;
}
unsigned getOffsetReg() const {
- assert(isRegBase() && "Invalid offset register access!");
return OffsetReg;
}
void setFI(unsigned FI) {
bool selectBitCast(const Instruction *I);
bool selectFRem(const Instruction *I);
bool selectSDiv(const Instruction *I);
+ bool selectGetElementPtr(const Instruction *I);
// Utility helper routines.
bool isTypeLegal(Type *Ty, MVT &VT);
}
} // end switch
- if (Addr.getReg()) {
- if (!Addr.getOffsetReg()) {
- unsigned Reg = getRegForValue(Obj);
- if (!Reg)
- return false;
- Addr.setOffsetReg(Reg);
- return true;
- }
- return false;
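+ // If the address uses a register base and none has been assigned yet, take
+ // the base slot; otherwise fall back to the free offset-register slot below.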
+ if (Addr.isRegBase() && !Addr.getReg()) {
+ unsigned Reg = getRegForValue(Obj);
+ if (!Reg)
+ return false;
+ Addr.setReg(Reg);
+ return true;
}
- unsigned Reg = getRegForValue(Obj);
- if (!Reg)
- return false;
- Addr.setReg(Reg);
- return true;
+ if (!Addr.getOffsetReg()) {
+ unsigned Reg = getRegForValue(Obj);
+ if (!Reg)
+ return false;
+ Addr.setOffsetReg(Reg);
+ return true;
+ }
+
+ return false;
}
bool AArch64FastISel::computeCallAddress(const Value *V, Address &Addr) {
// Cannot encode an offset register and an immediate offset in the same
// instruction. Fold the immediate offset into the load/store instruction and
// emit an additional add to take care of the offset register.
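+ // For example (illustrative only), a register-offset access with a leftover
+ // immediate such as [x1, x2, lsl #3] + #8 becomes roughly:
+ //   add x8, x1, x2, lsl #3
+ //   ldr x0, [x8, #8]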
- if (!ImmediateOffsetNeedsLowering && Addr.getOffset() && Addr.isRegBase() &&
- Addr.getOffsetReg())
+ if (!ImmediateOffsetNeedsLowering && Addr.getOffset() && Addr.getOffsetReg())
RegisterOffsetNeedsLowering = true;
// Cannot encode zero register as base.
// If this is a stack pointer and the offset needs to be simplified, then put
// the alloca address into a register, set the base type back to register, and
// continue. This should almost never happen.
- if (ImmediateOffsetNeedsLowering && Addr.isFIBase()) {
+ if ((ImmediateOffsetNeedsLowering || Addr.getOffsetReg()) &&
+     Addr.isFIBase()) {
unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
ResultReg)
MIB.addReg(Addr.getOffsetReg());
MIB.addImm(IsSigned);
MIB.addImm(Addr.getShift() != 0);
- } else {
- MIB.addReg(Addr.getReg());
- MIB.addImm(Offset);
- }
+ } else
+ MIB.addReg(Addr.getReg()).addImm(Offset);
}
if (MMO)
const Value *LHS = CI->getOperand(0);
const Value *RHS = CI->getOperand(1);
- Type *Ty = LHS->getType();
- if (!Ty->isIntegerTy())
- return false;
+ MVT VT;
+ if (!isTypeSupported(LHS->getType(), VT))
+ return false;
- unsigned BW = cast<IntegerType>(Ty)->getBitWidth();
- if (BW != 1 && BW != 8 && BW != 16 && BW != 32 && BW != 64)
+ unsigned BW = VT.getSizeInBits();
+ if (BW > 64)
return false;
MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
int TestBit = -1;
bool IsCmpNE;
if ((Predicate == CmpInst::ICMP_EQ) || (Predicate == CmpInst::ICMP_NE)) {
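+ // Canonicalize a zero operand to the RHS so the checks below only have to
+ // consider one side.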
- if (const auto *C = dyn_cast<ConstantInt>(LHS))
+ if (const auto *C = dyn_cast<Constant>(LHS))
if (C->isNullValue())
std::swap(LHS, RHS);
- if (!isa<ConstantInt>(RHS))
+ if (!isa<Constant>(RHS))
return false;
- if (!cast<ConstantInt>(RHS)->isNullValue())
+ if (!cast<Constant>(RHS)->isNullValue())
return false;
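+ // If the compare input is an 'and' with a power-of-2 mask, the branch can
+ // often be selected as a test of that single bit (TBZ/TBNZ).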
if (const auto *AI = dyn_cast<BinaryOperator>(LHS))
- if (AI->getOpcode() == Instruction::And) {
+ if (AI->getOpcode() == Instruction::And && isValueAvailable(AI)) {
const Value *AndLHS = AI->getOperand(0);
const Value *AndRHS = AI->getOperand(1);
LHS = AndLHS;
}
}
+
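+ // For an i1 compare against zero only the low bit is meaningful, so test
+ // bit 0 explicitly.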
+ if (VT == MVT::i1)
+ TestBit = 0;
+
IsCmpNE = Predicate == CmpInst::ICMP_NE;
} else if (Predicate == CmpInst::ICMP_SLT) {
- if (!isa<ConstantInt>(RHS))
+ if (!isa<Constant>(RHS))
return false;
- if (!cast<ConstantInt>(RHS)->isNullValue())
+ if (!cast<Constant>(RHS)->isNullValue())
return false;
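+ // A signed compare against zero is just a test of the sign bit.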
TestBit = BW - 1;
bool Is64Bit = BW == 64;
if (TestBit < 32 && TestBit >= 0)
Is64Bit = false;
-
+
unsigned Opc = OpcTable[IsBitTest][IsCmpNE][Is64Bit];
const MCInstrDesc &II = TII.get(Opc);
SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
AArch64::sub_32);
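+ // CBZ/CBNZ compare the whole 32-bit register, so zero-extend sub-32-bit
+ // source values first.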
+ if ((BW < 32) && !IsBitTest)
+ SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*IsZExt=*/true);
+
// Emit the combined compare and branch instruction.
SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
MachineInstrBuilder MIB =
return true;
}
- unsigned Pow2MinusOne = (1 << Lg2) - 1;
- unsigned AddReg = emitAddSub_ri(/*UseAdd=*/true, VT, Src0Reg,
- /*IsKill=*/false, Pow2MinusOne);
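+ // E.g. for a divisor of 8 this adds 7 (2^3 - 1) so that negative dividends
+ // round toward zero after the arithmetic shift.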
+ int64_t Pow2MinusOne = (1ULL << Lg2) - 1;
+ unsigned AddReg = emitAdd_ri_(VT, Src0Reg, /*IsKill=*/false, Pow2MinusOne);
if (!AddReg)
return false;
return true;
}
+/// This is mostly a copy of the existing FastISel GEP code, but we have to
+/// duplicate it for AArch64, because otherwise we would bail out even for
+/// simple cases. This is because the standard fastEmit functions don't cover
+/// MUL at all and ADD is lowered very inefficiently.
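+///
+/// For example (illustrative only), a GEP into an i32 array is selected
+/// roughly as "mul IdxReg, Idx, #4" followed by "add N, N, IdxReg".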
+bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
+ unsigned N = getRegForValue(I->getOperand(0));
+ if (!N)
+ return false;
+ bool NIsKill = hasTrivialKill(I->getOperand(0));
+
+ // Keep a running tab of the total offset to coalesce multiple N = N + Offset
+ // into a single N = N + TotalOffset.
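+ // E.g. two constant struct-field offsets of 8 and 16 fold into one add of #24.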
+ uint64_t TotalOffs = 0;
+ Type *Ty = I->getOperand(0)->getType();
+ MVT VT = TLI.getPointerTy();
+ for (auto OI = std::next(I->op_begin()), E = I->op_end(); OI != E; ++OI) {
+ const Value *Idx = *OI;
+ if (auto *StTy = dyn_cast<StructType>(Ty)) {
+ unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
+ // N = N + Offset
+ if (Field)
+ TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
+ Ty = StTy->getElementType(Field);
+ } else {
+ Ty = cast<SequentialType>(Ty)->getElementType();
+ // If this is a constant subscript, handle it quickly.
+ if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
+ if (CI->isZero())
+ continue;
+ // N = N + Offset
+ TotalOffs += DL.getTypeAllocSize(Ty) * CI->getSExtValue();
+ continue;
+ }
+ if (TotalOffs) {
+ N = emitAdd_ri_(VT, N, NIsKill, TotalOffs);
+ if (!N)
+ return false;
+ NIsKill = true;
+ TotalOffs = 0;
+ }
+
+ // N = N + Idx * ElementSize;
+ uint64_t ElementSize = DL.getTypeAllocSize(Ty);
+ std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
+ unsigned IdxN = Pair.first;
+ bool IdxNIsKill = Pair.second;
+ if (!IdxN)
+ return false;
+
+ if (ElementSize != 1) {
+ unsigned C = fastEmit_i(VT, VT, ISD::Constant, ElementSize);
+ if (!C)
+ return false;
+ IdxN = emitMul_rr(VT, IdxN, IdxNIsKill, C, /*IsKill=*/true);
+ if (!IdxN)
+ return false;
+ IdxNIsKill = true;
+ }
+ N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
+ if (!N)
+ return false;
+ }
+ }
+ if (TotalOffs) {
+ N = emitAdd_ri_(VT, N, NIsKill, TotalOffs);
+ if (!N)
+ return false;
+ }
+ updateValueMap(I, N);
+ return true;
+}
+
bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
switch (I->getOpcode()) {
default:
return selectRet(I);
case Instruction::FRem:
return selectFRem(I);
+ case Instruction::GetElementPtr:
+ return selectGetElementPtr(I);
}
// Fall back to target-independent instruction selection.