//===----------------------------------------------------------------------===//
#include "AArch64.h"
+#include "AArch64CallingConvention.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;
explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
- const TargetLibraryInfo *LibInfo)
+ const TargetLibraryInfo *LibInfo)
: FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
- Subtarget = &TM.getSubtarget<AArch64Subtarget>();
+ Subtarget =
+ &static_cast<const AArch64Subtarget &>(FuncInfo.MF->getSubtarget());
Context = &FuncInfo.Fn->getContext();
}
CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const {
if (CC == CallingConv::WebKit_JS)
return CC_AArch64_WebKit_JS;
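+ // The GHC calling convention passes everything in registers.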
+ if (CC == CallingConv::GHC)
+ return CC_AArch64_GHC;
return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
}
return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}
+ // For the MachO large code model, materialize the FP constant in code.
+ if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
+ unsigned Opc1 = Is64Bit ? AArch64::MOVi64imm : AArch64::MOVi32imm;
+ const TargetRegisterClass *RC = Is64Bit ?
+ &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+
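+ // MOVi materializes the raw bit pattern in a GPR; the COPY below moves it
+ // into the FP register class (the copy lowers to an FMOV).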
+ unsigned TmpReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc1), TmpReg)
+ .addImm(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(TmpReg, getKillRegState(true));
+
+ return ResultReg;
+ }
+
// Materialize via constant pool. MachineConstantPool wants an explicit
// alignment.
unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
Addr.setExtendType(AArch64_AM::LSL);
const Value *Src = U->getOperand(0);
- if (const auto *I = dyn_cast<Instruction>(Src))
- if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB)
- Src = I;
-
- // Fold the zext or sext when it won't become a noop.
- if (const auto *ZE = dyn_cast<ZExtInst>(Src)) {
- if (!isIntExtFree(ZE) && ZE->getOperand(0)->getType()->isIntegerTy(32)) {
- Addr.setExtendType(AArch64_AM::UXTW);
- Src = ZE->getOperand(0);
- }
- } else if (const auto *SE = dyn_cast<SExtInst>(Src)) {
- if (!isIntExtFree(SE) && SE->getOperand(0)->getType()->isIntegerTy(32)) {
- Addr.setExtendType(AArch64_AM::SXTW);
- Src = SE->getOperand(0);
+ if (const auto *I = dyn_cast<Instruction>(Src)) {
+ if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ // Fold the zext or sext when it won't become a noop.
+ if (const auto *ZE = dyn_cast<ZExtInst>(I)) {
+ if (!isIntExtFree(ZE) &&
+ ZE->getOperand(0)->getType()->isIntegerTy(32)) {
+ Addr.setExtendType(AArch64_AM::UXTW);
+ Src = ZE->getOperand(0);
+ }
+ } else if (const auto *SE = dyn_cast<SExtInst>(I)) {
+ if (!isIntExtFree(SE) &&
+ SE->getOperand(0)->getType()->isIntegerTy(32)) {
+ Addr.setExtendType(AArch64_AM::SXTW);
+ Src = SE->getOperand(0);
+ }
+ }
}
}
Addr.setExtendType(AArch64_AM::LSL);
const Value *Src = LHS;
- if (const auto *I = dyn_cast<Instruction>(Src))
- if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB)
- Src = I;
-
-
- // Fold the zext or sext when it won't become a noop.
- if (const auto *ZE = dyn_cast<ZExtInst>(Src)) {
- if (!isIntExtFree(ZE) && ZE->getOperand(0)->getType()->isIntegerTy(32)) {
- Addr.setExtendType(AArch64_AM::UXTW);
- Src = ZE->getOperand(0);
- }
- } else if (const auto *SE = dyn_cast<SExtInst>(Src)) {
- if (!isIntExtFree(SE) && SE->getOperand(0)->getType()->isIntegerTy(32)) {
- Addr.setExtendType(AArch64_AM::SXTW);
- Src = SE->getOperand(0);
+ if (const auto *I = dyn_cast<Instruction>(Src)) {
+ if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ // Fold the zext or sext when it won't become a noop.
+ if (const auto *ZE = dyn_cast<ZExtInst>(I)) {
+ if (!isIntExtFree(ZE) &&
+ ZE->getOperand(0)->getType()->isIntegerTy(32)) {
+ Addr.setExtendType(AArch64_AM::UXTW);
+ Src = ZE->getOperand(0);
+ }
+ } else if (const auto *SE = dyn_cast<SExtInst>(I)) {
+ if (!isIntExtFree(SE) &&
+ SE->getOperand(0)->getType()->isIntegerTy(32)) {
+ Addr.setExtendType(AArch64_AM::SXTW);
+ Src = SE->getOperand(0);
+ }
+ }
}
}
if (Addr.getOffsetReg())
break;
- if (DL.getTypeSizeInBits(Ty) != 8)
+ if (!Ty || DL.getTypeSizeInBits(Ty) != 8)
break;
const Value *LHS = U->getOperand(0);
unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
bool WantZExt, MachineMemOperand *MMO) {
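+ // FastISel does not track the alignment of the access here, so bail out
+ // conservatively if the target disallows misaligned accesses of this type.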
+ if (!TLI.allowsMisalignedMemoryAccesses(VT))
+ return 0;
+
// Simplify this down to something we can handle.
if (!simplifyAddress(Addr, VT))
return 0;
// could select it. Emit a copy to subreg if necessary. FastISel will remove
// it when it selects the integer extend.
unsigned Reg = lookUpRegForValue(IntExtVal);
- if (!Reg) {
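+ // getUniqueVRegDef returns null when the extend has no register assigned
+ // yet or when the register lacks a single unique definition.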
+ auto *MI = MRI.getUniqueVRegDef(Reg);
+ if (!MI) {
if (RetVT == MVT::i64 && VT <= MVT::i32) {
if (WantZExt) {
// Delete the last emitted instruction from emitLoad (SUBREG_TO_REG).
// The integer extend has already been emitted - delete all the instructions
// that have been emitted by the integer extend lowering code and use the
// result from the load instruction directly.
- while (Reg) {
- auto *MI = MRI.getUniqueVRegDef(Reg);
- if (!MI)
- break;
+ while (MI) {
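+ // Grab the instruction's first register use, erase the instruction, and
+ // continue walking with that register's unique def.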
Reg = 0;
for (auto &Opnd : MI->uses()) {
if (Opnd.isReg()) {
Reg = Opnd.getReg();
break;
}
}
MI->eraseFromParent();
+ MI = nullptr;
+ if (Reg)
+ MI = MRI.getUniqueVRegDef(Reg);
}
updateValueMap(IntExtVal, ResultReg);
return true;
bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
MachineMemOperand *MMO) {
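+ // Same conservative misalignment check as in emitLoad.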
+ if (!TLI.allowsMisalignedMemoryAccesses(VT))
+ return false;
+
// Simplify this down to something we can handle.
if (!simplifyAddress(Addr, VT))
return false;
Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, Src1IsKill, 1);
Src1IsKill = true;
}
- unsigned ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32spRegClass, Src1Reg,
+ unsigned ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
Src1IsKill, Src2Reg, Src2IsKill);
updateValueMap(SI, ResultReg);
return true;
return false;
bool CondIsKill = hasTrivialKill(Cond);
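+ // Constrain CondReg to the register class required by ANDSWri's first
+ // source operand; constrainOperandRegClass inserts a COPY if needed.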
+ const MCInstrDesc &II = TII.get(AArch64::ANDSWri);
+ CondReg = constrainOperandRegClass(II, CondReg, 1);
+
// Emit a TST instruction (ANDS wzr, reg, #imm).
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDSWri),
- AArch64::WZR)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, AArch64::WZR)
.addReg(CondReg, getKillRegState(CondIsKill))
.addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
// Copy all of the result registers out of their specified physreg.
MVT CopyVT = RVLocs[0].getValVT();
+
+ // TODO: Handle big-endian results
+ if (CopyVT.isVector() && !Subtarget->isLittleEndian())
+ return false;
+
unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
bool IsTailCall = CLI.IsTailCall;
bool IsVarArg = CLI.IsVarArg;
const Value *Callee = CLI.Callee;
- const char *SymName = CLI.SymName;
+ MCSymbol *Symbol = CLI.Symbol;
- if (!Callee && !SymName)
+ if (!Callee && !Symbol)
return false;
// Allow SelectionDAG isel to handle tail calls.
if (CM == CodeModel::Small) {
const MCInstrDesc &II = TII.get(Addr.getReg() ? AArch64::BLR : AArch64::BL);
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II);
- if (SymName)
- MIB.addExternalSymbol(SymName, 0);
+ if (Symbol)
+ MIB.addSym(Symbol, 0);
else if (Addr.getGlobalValue())
MIB.addGlobalAddress(Addr.getGlobalValue(), 0, 0);
else if (Addr.getReg()) {
return false;
} else {
unsigned CallReg = 0;
- if (SymName) {
+ if (Symbol) {
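+ // Outside the small code model, compute the callee's address out of the
+ // GOT with an ADRP + LDRXui pair and call through the register.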
unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
ADRPReg)
- .addExternalSymbol(SymName, AArch64II::MO_GOT | AArch64II::MO_PAGE);
+ .addSym(Symbol, AArch64II::MO_GOT | AArch64II::MO_PAGE);
CallReg = createResultReg(&AArch64::GPR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::LDRXui),
- CallReg)
- .addReg(ADRPReg)
- .addExternalSymbol(SymName, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
- AArch64II::MO_NC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(AArch64::LDRXui), CallReg)
+ .addReg(ADRPReg)
+ .addSym(Symbol,
+ AArch64II::MO_GOT | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
} else if (Addr.getGlobalValue())
CallReg = materializeGV(Addr.getGlobalValue());
else if (Addr.getReg())
// Add a register mask with the call-preserved registers.
// Proper defs for return values will be added by setPhysRegsDeadExcept().
- MIB.addRegMask(TRI.getCallPreservedMask(CC));
+ MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
CLI.Call = MIB;
std::swap(LHS, RHS);
// Simplify multiplies.
- unsigned IID = II->getIntrinsicID();
+ Intrinsic::ID IID = II->getIntrinsicID();
switch (IID) {
default:
break;
MFI->setFrameAddressIsTaken(true);
const AArch64RegisterInfo *RegInfo =
- static_cast<const AArch64RegisterInfo *>(
- TM.getSubtargetImpl()->getRegisterInfo());
+ static_cast<const AArch64RegisterInfo *>(Subtarget->getRegisterInfo());
unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
unsigned SrcReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
}
CallLoweringInfo CLI;
- CLI.setCallee(TLI.getLibcallCallingConv(LC), II->getType(),
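+ // setCallee uses DL and Ctx to mangle the libcall name and intern it as
+ // an MCSymbol.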
+ MCContext &Ctx = MF->getContext();
+ CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), II->getType(),
TLI.getLibcallName(LC), std::move(Args));
if (!lowerCallTo(CLI))
return false;
std::swap(LHS, RHS);
// Simplify multiplies.
- unsigned IID = II->getIntrinsicID();
+ Intrinsic::ID IID = II->getIntrinsicID();
switch (IID) {
default:
break;
AArch64_AM::ASR, 31, /*WantResult=*/false);
} else {
assert(VT == MVT::i64 && "Unexpected value type.");
- MulReg = emitMul_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
+ // LHSReg and RHSReg cannot be killed by this Mul, since they are
+ // reused in the next instruction.
+ MulReg = emitMul_rr(VT, LHSReg, /*IsKill=*/false, RHSReg,
+ /*IsKill=*/false);
unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
emitSubs_rs(VT, SMULHReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
AArch64::sub_32);
} else {
assert(VT == MVT::i64 && "Unexpected value type.");
- MulReg = emitMul_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
+ // LHSReg and RHSReg cannot be killed by this Mul, since they are
+ // reused in the next instruction.
+ MulReg = emitMul_rr(VT, LHSReg, /*IsKill=*/false, RHSReg,
+ /*IsKill=*/false);
unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
emitSubs_rr(VT, AArch64::XZR, /*IsKill=*/true, UMULHReg,
if (Ret->getNumOperands() > 0) {
CallingConv::ID CC = F.getCallingConv();
SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+ GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;
unsigned ResultReg = 0;
uint64_t ShiftVal = C->getZExtValue();
MVT SrcVT = RetVT;
- bool IsZExt = (I->getOpcode() == Instruction::AShr) ? false : true;
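+ // AShr is the only shift here that requires sign extension; Shl and LShr
+ // use zero extension.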
+ bool IsZExt = I->getOpcode() != Instruction::AShr;
const Value *Op0 = I->getOperand(0);
if (const auto *ZExt = dyn_cast<ZExtInst>(Op0)) {
if (!isIntExtFree(ZExt)) {
}
CallLoweringInfo CLI;
- CLI.setCallee(TLI.getLibcallCallingConv(LC), I->getType(),
+ MCContext &Ctx = MF->getContext();
+ CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), I->getType(),
TLI.getLibcallName(LC), std::move(Args));
if (!lowerCallTo(CLI))
return false;