1 //===-- AArch64FastISel.cpp - AArch64 FastISel implementation ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the AArch64-specific support for the FastISel class. Some
11 // of the target-specific code is generated by tablegen in the file
12 // AArch64GenFastISel.inc, which is #included here.
14 //===----------------------------------------------------------------------===//
17 #include "AArch64CallingConvention.h"
18 #include "AArch64Subtarget.h"
19 #include "AArch64TargetMachine.h"
20 #include "MCTargetDesc/AArch64AddressingModes.h"
21 #include "llvm/Analysis/BranchProbabilityInfo.h"
22 #include "llvm/CodeGen/CallingConvLower.h"
23 #include "llvm/CodeGen/FastISel.h"
24 #include "llvm/CodeGen/FunctionLoweringInfo.h"
25 #include "llvm/CodeGen/MachineConstantPool.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/IR/CallingConv.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/GetElementPtrTypeIterator.h"
34 #include "llvm/IR/GlobalAlias.h"
35 #include "llvm/IR/GlobalVariable.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/MC/MCSymbol.h"
40 #include "llvm/Support/CommandLine.h"
45 class AArch64FastISel final : public FastISel {
55 AArch64_AM::ShiftExtendType ExtType;
63 const GlobalValue *GV;
66 Address() : Kind(RegBase), ExtType(AArch64_AM::InvalidShiftExtend),
67 OffsetReg(0), Shift(0), Offset(0), GV(nullptr) { Base.Reg = 0; }
68 void setKind(BaseKind K) { Kind = K; }
69 BaseKind getKind() const { return Kind; }
70 void setExtendType(AArch64_AM::ShiftExtendType E) { ExtType = E; }
71 AArch64_AM::ShiftExtendType getExtendType() const { return ExtType; }
72 bool isRegBase() const { return Kind == RegBase; }
73 bool isFIBase() const { return Kind == FrameIndexBase; }
74 void setReg(unsigned Reg) {
75 assert(isRegBase() && "Invalid base register access!");
78 unsigned getReg() const {
79 assert(isRegBase() && "Invalid base register access!");
82 void setOffsetReg(unsigned Reg) {
85 unsigned getOffsetReg() const {
88 void setFI(unsigned FI) {
89 assert(isFIBase() && "Invalid base frame index access!");
92 unsigned getFI() const {
93 assert(isFIBase() && "Invalid base frame index access!");
96 void setOffset(int64_t O) { Offset = O; }
97 int64_t getOffset() { return Offset; }
98 void setShift(unsigned S) { Shift = S; }
99 unsigned getShift() { return Shift; }
101 void setGlobalValue(const GlobalValue *G) { GV = G; }
102 const GlobalValue *getGlobalValue() { return GV; }
105 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
106 /// make the right decision when generating code for different targets.
107 const AArch64Subtarget *Subtarget;
108 LLVMContext *Context;
110 bool fastLowerArguments() override;
111 bool fastLowerCall(CallLoweringInfo &CLI) override;
112 bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
115 // Selection routines.
116 bool selectAddSub(const Instruction *I);
117 bool selectLogicalOp(const Instruction *I);
118 bool selectLoad(const Instruction *I);
119 bool selectStore(const Instruction *I);
120 bool selectBranch(const Instruction *I);
121 bool selectIndirectBr(const Instruction *I);
122 bool selectCmp(const Instruction *I);
123 bool selectSelect(const Instruction *I);
124 bool selectFPExt(const Instruction *I);
125 bool selectFPTrunc(const Instruction *I);
126 bool selectFPToInt(const Instruction *I, bool Signed);
127 bool selectIntToFP(const Instruction *I, bool Signed);
128 bool selectRem(const Instruction *I, unsigned ISDOpcode);
129 bool selectRet(const Instruction *I);
130 bool selectTrunc(const Instruction *I);
131 bool selectIntExt(const Instruction *I);
132 bool selectMul(const Instruction *I);
133 bool selectShift(const Instruction *I);
134 bool selectBitCast(const Instruction *I);
135 bool selectFRem(const Instruction *I);
136 bool selectSDiv(const Instruction *I);
137 bool selectGetElementPtr(const Instruction *I);
139 // Utility helper routines.
140 bool isTypeLegal(Type *Ty, MVT &VT);
141 bool isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed = false);
142 bool isValueAvailable(const Value *V) const;
143 bool computeAddress(const Value *Obj, Address &Addr, Type *Ty = nullptr);
144 bool computeCallAddress(const Value *V, Address &Addr);
145 bool simplifyAddress(Address &Addr, MVT VT);
146 void addLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB,
147 unsigned Flags, unsigned ScaleFactor,
148 MachineMemOperand *MMO);
149 bool isMemCpySmall(uint64_t Len, unsigned Alignment);
150 bool tryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
152 bool foldXALUIntrinsic(AArch64CC::CondCode &CC, const Instruction *I,
154 bool optimizeIntExtLoad(const Instruction *I, MVT RetVT, MVT SrcVT);
155 bool optimizeSelect(const SelectInst *SI);
156 std::pair<unsigned, bool> getRegForGEPIndex(const Value *Idx);
158 // Emit helper routines.
159 unsigned emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
160 const Value *RHS, bool SetFlags = false,
161 bool WantResult = true, bool IsZExt = false);
162 unsigned emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
163 bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
164 bool SetFlags = false, bool WantResult = true);
165 unsigned emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
166 bool LHSIsKill, uint64_t Imm, bool SetFlags = false,
167 bool WantResult = true);
168 unsigned emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
169 bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
170 AArch64_AM::ShiftExtendType ShiftType,
171 uint64_t ShiftImm, bool SetFlags = false,
172 bool WantResult = true);
173 unsigned emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
174 bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
175 AArch64_AM::ShiftExtendType ExtType,
176 uint64_t ShiftImm, bool SetFlags = false,
177 bool WantResult = true);
180 bool emitCompareAndBranch(const BranchInst *BI);
181 bool emitCmp(const Value *LHS, const Value *RHS, bool IsZExt);
182 bool emitICmp(MVT RetVT, const Value *LHS, const Value *RHS, bool IsZExt);
183 bool emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
184 bool emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS);
185 unsigned emitLoad(MVT VT, MVT ResultVT, Address Addr, bool WantZExt = true,
186 MachineMemOperand *MMO = nullptr);
187 bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
188 MachineMemOperand *MMO = nullptr);
189 unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
190 unsigned emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
191 unsigned emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
192 bool SetFlags = false, bool WantResult = true,
193 bool IsZExt = false);
194 unsigned emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill, int64_t Imm);
195 unsigned emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
196 bool SetFlags = false, bool WantResult = true,
197 bool IsZExt = false);
198 unsigned emitSubs_rr(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
199 unsigned RHSReg, bool RHSIsKill, bool WantResult = true);
200 unsigned emitSubs_rs(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
201 unsigned RHSReg, bool RHSIsKill,
202 AArch64_AM::ShiftExtendType ShiftType, uint64_t ShiftImm,
203 bool WantResult = true);
204 unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
206 unsigned emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
207 bool LHSIsKill, uint64_t Imm);
208 unsigned emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
209 bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
211 unsigned emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
212 unsigned emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
213 unsigned Op1, bool Op1IsKill);
214 unsigned emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
215 unsigned Op1, bool Op1IsKill);
216 unsigned emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
217 unsigned Op1, bool Op1IsKill);
218 unsigned emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
219 unsigned Op1Reg, bool Op1IsKill);
220 unsigned emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
221 uint64_t Imm, bool IsZExt = true);
222 unsigned emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
223 unsigned Op1Reg, bool Op1IsKill);
224 unsigned emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
225 uint64_t Imm, bool IsZExt = true);
226 unsigned emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
227 unsigned Op1Reg, bool Op1IsKill);
228 unsigned emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
229 uint64_t Imm, bool IsZExt = false);
231 unsigned materializeInt(const ConstantInt *CI, MVT VT);
232 unsigned materializeFP(const ConstantFP *CFP, MVT VT);
233 unsigned materializeGV(const GlobalValue *GV);
235 // Call handling routines.
237 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
238 bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
240 bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
243 // Backend specific FastISel code.
244 unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
245 unsigned fastMaterializeConstant(const Constant *C) override;
246 unsigned fastMaterializeFloatZero(const ConstantFP* CF) override;
248 explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
249 const TargetLibraryInfo *LibInfo)
250 : FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
252 &static_cast<const AArch64Subtarget &>(FuncInfo.MF->getSubtarget());
253 Context = &FuncInfo.Fn->getContext();
256 bool fastSelectInstruction(const Instruction *I) override;
258 #include "AArch64GenFastISel.inc"
261 } // end anonymous namespace
263 #include "AArch64GenCallingConv.inc"
265 /// \brief Check if the sign-/zero-extend will be a noop.
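/// For example, a zero-extend of an i32 load is typically free on AArch64
/// because the 32-bit load already clears the upper half of the 64-bit
/// register, and an argument marked zeroext/signext arrives pre-extended.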
266 static bool isIntExtFree(const Instruction *I) {
267 assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
268 "Unexpected integer extend instruction.");
269 assert(!I->getType()->isVectorTy() && I->getType()->isIntegerTy() &&
270 "Unexpected value type.");
271 bool IsZExt = isa<ZExtInst>(I);
273 if (const auto *LI = dyn_cast<LoadInst>(I->getOperand(0)))
277 if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0)))
278 if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr()))
284 /// \brief Determine the implicit scale factor that is applied by a memory
285 /// operation for a given value type.
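/// For example, an i32 load/store scales its unsigned immediate offset by 4
/// and an i64 access by 8, so 'ldr w0, [x1, #8]' encodes an offset field of 2.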
286 static unsigned getImplicitScaleFactor(MVT VT) {
287 switch (VT.SimpleTy) {
290 case MVT::i1: // fall-through
295 case MVT::i32: // fall-through
298 case MVT::i64: // fall-through
304 CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const {
305 if (CC == CallingConv::WebKit_JS)
306 return CC_AArch64_WebKit_JS;
307 if (CC == CallingConv::GHC)
308 return CC_AArch64_GHC;
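  // Darwin uses its own variant of the AAPCS; for example, it packs small
  // stack-passed arguments instead of rounding each slot up to 8 bytes.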
309 return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
312 unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
313 assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i64 &&
314 "Alloca should always return a pointer.");
316 // Don't handle dynamic allocas.
317 if (!FuncInfo.StaticAllocaMap.count(AI))
320 DenseMap<const AllocaInst *, int>::iterator SI =
321 FuncInfo.StaticAllocaMap.find(AI);
323 if (SI != FuncInfo.StaticAllocaMap.end()) {
324 unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
325 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
327 .addFrameIndex(SI->second)
336 unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {
341 return fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
343 // Create a copy from the zero register to materialize a "0" value.
344 const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
345 : &AArch64::GPR32RegClass;
346 unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
347 unsigned ResultReg = createResultReg(RC);
348 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
349 ResultReg).addReg(ZeroReg, getKillRegState(true));
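  // After register allocation the COPY typically becomes a plain
  // 'mov w0, wzr' (or 'mov x0, xzr' for i64).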
353 unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
354 // Positive zero (+0.0) has to be materialized with a fmov from the zero
355 // register, because the immediate version of fmov cannot encode zero.
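// For example, a double +0.0 is emitted as 'fmov d0, xzr' rather than as an
// FMOVDi with an immediate operand.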
356 if (CFP->isNullValue())
357 return fastMaterializeFloatZero(CFP);
359 if (VT != MVT::f32 && VT != MVT::f64)
362 const APFloat Val = CFP->getValueAPF();
363 bool Is64Bit = (VT == MVT::f64);
364 // This checks to see if we can use FMOV instructions to materialize
365 // a constant, otherwise we have to materialize via the constant pool.
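  // For example, 1.0, 2.0, and -0.5 fit the 8-bit floating-point immediate
  // encoding, while 0.1 does not and has to come from the constant pool.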
366 if (TLI.isFPImmLegal(Val, VT)) {
368 Is64Bit ? AArch64_AM::getFP64Imm(Val) : AArch64_AM::getFP32Imm(Val);
369 assert((Imm != -1) && "Cannot encode floating-point constant.");
370 unsigned Opc = Is64Bit ? AArch64::FMOVDi : AArch64::FMOVSi;
371 return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
374 // For the MachO large code model materialize the FP constant in code.
375 if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
376 unsigned Opc1 = Is64Bit ? AArch64::MOVi64imm : AArch64::MOVi32imm;
377 const TargetRegisterClass *RC = Is64Bit ?
378 &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
380 unsigned TmpReg = createResultReg(RC);
381 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc1), TmpReg)
382 .addImm(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
384 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
385 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
386 TII.get(TargetOpcode::COPY), ResultReg)
387 .addReg(TmpReg, getKillRegState(true));
392 // Materialize via constant pool. MachineConstantPool wants an explicit alignment.
394 unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
396 Align = DL.getTypeAllocSize(CFP->getType());
398 unsigned CPI = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
399 unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
400 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
401 ADRPReg).addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGE);
403 unsigned Opc = Is64Bit ? AArch64::LDRDui : AArch64::LDRSui;
404 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
405 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
407 .addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
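  // The emitted sequence is roughly:
  //   adrp xN, <constant-pool-entry>@PAGE
  //   ldr  d0, [xN, <constant-pool-entry>@PAGEOFF]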
411 unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
412 // We can't handle thread-local variables quickly yet.
413 if (GV->isThreadLocal())
416 // MachO still uses GOT for large code-model accesses, but ELF requires
417 // movz/movk sequences, which FastISel doesn't handle yet.
418 if (TM.getCodeModel() != CodeModel::Small && !Subtarget->isTargetMachO())
421 unsigned char OpFlags = Subtarget->ClassifyGlobalReference(GV, TM);
423 EVT DestEVT = TLI.getValueType(DL, GV->getType(), true);
424 if (!DestEVT.isSimple())
427 unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
430 if (OpFlags & AArch64II::MO_GOT) {
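    // GOT access: materialize the address with an ADRP of the GOT page
    // followed by a load of the GOT slot, roughly:
    //   adrp xN, :got:sym
    //   ldr  xM, [xN, :got_lo12:sym]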
432 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
434 .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGE);
436 ResultReg = createResultReg(&AArch64::GPR64RegClass);
437 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::LDRXui),
440 .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
442 } else if (OpFlags & AArch64II::MO_CONSTPOOL) {
443 // We can't handle addresses loaded from a constant pool quickly yet.
447 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
449 .addGlobalAddress(GV, 0, AArch64II::MO_PAGE);
451 ResultReg = createResultReg(&AArch64::GPR64spRegClass);
452 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
455 .addGlobalAddress(GV, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC)
461 unsigned AArch64FastISel::fastMaterializeConstant(const Constant *C) {
462 EVT CEVT = TLI.getValueType(DL, C->getType(), true);
464 // Only handle simple types.
465 if (!CEVT.isSimple())
467 MVT VT = CEVT.getSimpleVT();
469 if (const auto *CI = dyn_cast<ConstantInt>(C))
470 return materializeInt(CI, VT);
471 else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
472 return materializeFP(CFP, VT);
473 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
474 return materializeGV(GV);
479 unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) {
480 assert(CFP->isNullValue() &&
481 "Floating-point constant is not a positive zero.");
483 if (!isTypeLegal(CFP->getType(), VT))
486 if (VT != MVT::f32 && VT != MVT::f64)
489 bool Is64Bit = (VT == MVT::f64);
490 unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
491 unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
492 return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
495 /// \brief Check if the multiply is by a power-of-2 constant.
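/// For example, 'mul i64 %idx, 8' is a power-of-2 multiply that can later be
/// folded into an addressing mode or shifted-register operand as 'lsl #3'.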
496 static bool isMulPowOf2(const Value *I) {
497 if (const auto *MI = dyn_cast<MulOperator>(I)) {
498 if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(0)))
499 if (C->getValue().isPowerOf2())
501 if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(1)))
502 if (C->getValue().isPowerOf2())
508 // Computes the address to get to an object.
509 bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
511 const User *U = nullptr;
512 unsigned Opcode = Instruction::UserOp1;
513 if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
514 // Don't walk into other basic blocks unless the object is an alloca from
515 // another block, otherwise it may not have a virtual register assigned.
516 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
517 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
518 Opcode = I->getOpcode();
521 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
522 Opcode = C->getOpcode();
526 if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
527 if (Ty->getAddressSpace() > 255)
528 // Fast instruction selection doesn't support the special address spaces.
535 case Instruction::BitCast: {
536 // Look through bitcasts.
537 return computeAddress(U->getOperand(0), Addr, Ty);
539 case Instruction::IntToPtr: {
540 // Look past no-op inttoptrs.
541 if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
542 TLI.getPointerTy(DL))
543 return computeAddress(U->getOperand(0), Addr, Ty);
546 case Instruction::PtrToInt: {
547 // Look past no-op ptrtoints.
548 if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
549 return computeAddress(U->getOperand(0), Addr, Ty);
552 case Instruction::GetElementPtr: {
553 Address SavedAddr = Addr;
554 uint64_t TmpOffset = Addr.getOffset();
556 // Iterate through the GEP folding the constants into offsets where possible.
558 gep_type_iterator GTI = gep_type_begin(U);
559 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
561 const Value *Op = *i;
562 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
563 const StructLayout *SL = DL.getStructLayout(STy);
564 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
565 TmpOffset += SL->getElementOffset(Idx);
567 uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
569 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
570 // Constant-offset addressing.
571 TmpOffset += CI->getSExtValue() * S;
574 if (canFoldAddIntoGEP(U, Op)) {
575 // A compatible add with a constant operand. Fold the constant.
577 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
578 TmpOffset += CI->getSExtValue() * S;
579 // Iterate on the other operand.
580 Op = cast<AddOperator>(Op)->getOperand(0);
584 goto unsupported_gep;
589 // Try to grab the base operand now.
590 Addr.setOffset(TmpOffset);
591 if (computeAddress(U->getOperand(0), Addr, Ty))
594 // We failed, restore everything and try the other options.
600 case Instruction::Alloca: {
601 const AllocaInst *AI = cast<AllocaInst>(Obj);
602 DenseMap<const AllocaInst *, int>::iterator SI =
603 FuncInfo.StaticAllocaMap.find(AI);
604 if (SI != FuncInfo.StaticAllocaMap.end()) {
605 Addr.setKind(Address::FrameIndexBase);
606 Addr.setFI(SI->second);
611 case Instruction::Add: {
612 // Adds of constants are common and easy enough.
613 const Value *LHS = U->getOperand(0);
614 const Value *RHS = U->getOperand(1);
616 if (isa<ConstantInt>(LHS))
619 if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
620 Addr.setOffset(Addr.getOffset() + CI->getSExtValue());
621 return computeAddress(LHS, Addr, Ty);
624 Address Backup = Addr;
625 if (computeAddress(LHS, Addr, Ty) && computeAddress(RHS, Addr, Ty))
631 case Instruction::Sub: {
632 // Subs of constants are common and easy enough.
633 const Value *LHS = U->getOperand(0);
634 const Value *RHS = U->getOperand(1);
636 if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
637 Addr.setOffset(Addr.getOffset() - CI->getSExtValue());
638 return computeAddress(LHS, Addr, Ty);
642 case Instruction::Shl: {
643 if (Addr.getOffsetReg())
646 const auto *CI = dyn_cast<ConstantInt>(U->getOperand(1));
650 unsigned Val = CI->getZExtValue();
651 if (Val < 1 || Val > 3)
654 uint64_t NumBytes = 0;
655 if (Ty && Ty->isSized()) {
656 uint64_t NumBits = DL.getTypeSizeInBits(Ty);
657 NumBytes = NumBits / 8;
658 if (!isPowerOf2_64(NumBits))
662 if (NumBytes != (1ULL << Val))
666 Addr.setExtendType(AArch64_AM::LSL);
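    // Sets up a register-offset access such as 'ldr x0, [x1, x2, lsl #3]'
    // (or with uxtw/sxtw when the index is an extended 32-bit value below).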
668 const Value *Src = U->getOperand(0);
669 if (const auto *I = dyn_cast<Instruction>(Src)) {
670 if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
671 // Fold the zext or sext when it won't become a noop.
672 if (const auto *ZE = dyn_cast<ZExtInst>(I)) {
673 if (!isIntExtFree(ZE) &&
674 ZE->getOperand(0)->getType()->isIntegerTy(32)) {
675 Addr.setExtendType(AArch64_AM::UXTW);
676 Src = ZE->getOperand(0);
678 } else if (const auto *SE = dyn_cast<SExtInst>(I)) {
679 if (!isIntExtFree(SE) &&
680 SE->getOperand(0)->getType()->isIntegerTy(32)) {
681 Addr.setExtendType(AArch64_AM::SXTW);
682 Src = SE->getOperand(0);
688 if (const auto *AI = dyn_cast<BinaryOperator>(Src))
689 if (AI->getOpcode() == Instruction::And) {
690 const Value *LHS = AI->getOperand(0);
691 const Value *RHS = AI->getOperand(1);
693 if (const auto *C = dyn_cast<ConstantInt>(LHS))
694 if (C->getValue() == 0xffffffff)
697 if (const auto *C = dyn_cast<ConstantInt>(RHS))
698 if (C->getValue() == 0xffffffff) {
699 Addr.setExtendType(AArch64_AM::UXTW);
700 unsigned Reg = getRegForValue(LHS);
703 bool RegIsKill = hasTrivialKill(LHS);
704 Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, RegIsKill,
706 Addr.setOffsetReg(Reg);
711 unsigned Reg = getRegForValue(Src);
714 Addr.setOffsetReg(Reg);
717 case Instruction::Mul: {
718 if (Addr.getOffsetReg())
724 const Value *LHS = U->getOperand(0);
725 const Value *RHS = U->getOperand(1);
727 // Canonicalize power-of-2 value to the RHS.
728 if (const auto *C = dyn_cast<ConstantInt>(LHS))
729 if (C->getValue().isPowerOf2())
732 assert(isa<ConstantInt>(RHS) && "Expected a ConstantInt.");
733 const auto *C = cast<ConstantInt>(RHS);
734 unsigned Val = C->getValue().logBase2();
735 if (Val < 1 || Val > 3)
738 uint64_t NumBytes = 0;
739 if (Ty && Ty->isSized()) {
740 uint64_t NumBits = DL.getTypeSizeInBits(Ty);
741 NumBytes = NumBits / 8;
742 if (!isPowerOf2_64(NumBits))
746 if (NumBytes != (1ULL << Val))
750 Addr.setExtendType(AArch64_AM::LSL);
752 const Value *Src = LHS;
753 if (const auto *I = dyn_cast<Instruction>(Src)) {
754 if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
755 // Fold the zext or sext when it won't become a noop.
756 if (const auto *ZE = dyn_cast<ZExtInst>(I)) {
757 if (!isIntExtFree(ZE) &&
758 ZE->getOperand(0)->getType()->isIntegerTy(32)) {
759 Addr.setExtendType(AArch64_AM::UXTW);
760 Src = ZE->getOperand(0);
762 } else if (const auto *SE = dyn_cast<SExtInst>(I)) {
763 if (!isIntExtFree(SE) &&
764 SE->getOperand(0)->getType()->isIntegerTy(32)) {
765 Addr.setExtendType(AArch64_AM::SXTW);
766 Src = SE->getOperand(0);
772 unsigned Reg = getRegForValue(Src);
775 Addr.setOffsetReg(Reg);
778 case Instruction::And: {
779 if (Addr.getOffsetReg())
782 if (!Ty || DL.getTypeSizeInBits(Ty) != 8)
785 const Value *LHS = U->getOperand(0);
786 const Value *RHS = U->getOperand(1);
788 if (const auto *C = dyn_cast<ConstantInt>(LHS))
789 if (C->getValue() == 0xffffffff)
792 if (const auto *C = dyn_cast<ConstantInt>(RHS))
793 if (C->getValue() == 0xffffffff) {
795 Addr.setExtendType(AArch64_AM::LSL);
796 Addr.setExtendType(AArch64_AM::UXTW);
798 unsigned Reg = getRegForValue(LHS);
801 bool RegIsKill = hasTrivialKill(LHS);
802 Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, RegIsKill,
804 Addr.setOffsetReg(Reg);
809 case Instruction::SExt:
810 case Instruction::ZExt: {
811 if (!Addr.getReg() || Addr.getOffsetReg())
814 const Value *Src = nullptr;
815 // Fold the zext or sext when it won't become a noop.
816 if (const auto *ZE = dyn_cast<ZExtInst>(U)) {
817 if (!isIntExtFree(ZE) && ZE->getOperand(0)->getType()->isIntegerTy(32)) {
818 Addr.setExtendType(AArch64_AM::UXTW);
819 Src = ZE->getOperand(0);
821 } else if (const auto *SE = dyn_cast<SExtInst>(U)) {
822 if (!isIntExtFree(SE) && SE->getOperand(0)->getType()->isIntegerTy(32)) {
823 Addr.setExtendType(AArch64_AM::SXTW);
824 Src = SE->getOperand(0);
832 unsigned Reg = getRegForValue(Src);
835 Addr.setOffsetReg(Reg);
840 if (Addr.isRegBase() && !Addr.getReg()) {
841 unsigned Reg = getRegForValue(Obj);
848 if (!Addr.getOffsetReg()) {
849 unsigned Reg = getRegForValue(Obj);
852 Addr.setOffsetReg(Reg);
859 bool AArch64FastISel::computeCallAddress(const Value *V, Address &Addr) {
860 const User *U = nullptr;
861 unsigned Opcode = Instruction::UserOp1;
864 if (const auto *I = dyn_cast<Instruction>(V)) {
865 Opcode = I->getOpcode();
867 InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
868 } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
869 Opcode = C->getOpcode();
875 case Instruction::BitCast:
876 // Look past bitcasts if the operand is in the same BB.
878 return computeCallAddress(U->getOperand(0), Addr);
880 case Instruction::IntToPtr:
881 // Look past no-op inttoptrs if its operand is in the same BB.
883 TLI.getValueType(DL, U->getOperand(0)->getType()) ==
884 TLI.getPointerTy(DL))
885 return computeCallAddress(U->getOperand(0), Addr);
887 case Instruction::PtrToInt:
888 // Look past no-op ptrtoints if its operand is in the same BB.
889 if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
890 return computeCallAddress(U->getOperand(0), Addr);
894 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
895 Addr.setGlobalValue(GV);
899 // If all else fails, try to materialize the value in a register.
900 if (!Addr.getGlobalValue()) {
901 Addr.setReg(getRegForValue(V));
902 return Addr.getReg() != 0;
909 bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) {
910 EVT evt = TLI.getValueType(DL, Ty, true);
912 // Only handle simple types.
913 if (evt == MVT::Other || !evt.isSimple())
915 VT = evt.getSimpleVT();
917 // This is a legal type, but it's not something we handle in fast-isel.
921 // Handle all other legal types, i.e. a register that will directly hold this value.
923 return TLI.isTypeLegal(VT);
926 /// \brief Determine if the value type is supported by FastISel.
928 /// FastISel for AArch64 can handle more value types than are legal. This adds
929 /// simple value types such as i1, i8, and i16.
930 bool AArch64FastISel::isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed) {
931 if (Ty->isVectorTy() && !IsVectorAllowed)
934 if (isTypeLegal(Ty, VT))
937 // If this is a type that can be sign or zero-extended to a basic operation
938 // go ahead and accept it now.
939 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
945 bool AArch64FastISel::isValueAvailable(const Value *V) const {
946 if (!isa<Instruction>(V))
949 const auto *I = cast<Instruction>(V);
950 if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB)
956 bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
957 unsigned ScaleFactor = getImplicitScaleFactor(VT);
961 bool ImmediateOffsetNeedsLowering = false;
962 bool RegisterOffsetNeedsLowering = false;
963 int64_t Offset = Addr.getOffset();
964 if (((Offset < 0) || (Offset & (ScaleFactor - 1))) && !isInt<9>(Offset))
965 ImmediateOffsetNeedsLowering = true;
966 else if (Offset > 0 && !(Offset & (ScaleFactor - 1)) &&
967 !isUInt<12>(Offset / ScaleFactor))
968 ImmediateOffsetNeedsLowering = true;
970 // Cannot encode an offset register and an immediate offset in the same
971 // instruction. Fold the immediate offset into the load/store instruction and
972 // emit an additional add to take care of the offset register.
973 if (!ImmediateOffsetNeedsLowering && Addr.getOffset() && Addr.getOffsetReg())
974 RegisterOffsetNeedsLowering = true;
976 // Cannot encode zero register as base.
977 if (Addr.isRegBase() && Addr.getOffsetReg() && !Addr.getReg())
978 RegisterOffsetNeedsLowering = true;
980 // If this is a stack pointer and the offset needs to be simplified then put
981 // the alloca address into a register, set the base type back to register and
982 // continue. This should almost never happen.
983 if ((ImmediateOffsetNeedsLowering || Addr.getOffsetReg()) && Addr.isFIBase())
985 unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
986 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
988 .addFrameIndex(Addr.getFI())
991 Addr.setKind(Address::RegBase);
992 Addr.setReg(ResultReg);
995 if (RegisterOffsetNeedsLowering) {
996 unsigned ResultReg = 0;
998 if (Addr.getExtendType() == AArch64_AM::SXTW ||
999 Addr.getExtendType() == AArch64_AM::UXTW )
1000 ResultReg = emitAddSub_rx(/*UseAdd=*/true, MVT::i64, Addr.getReg(),
1001 /*TODO:IsKill=*/false, Addr.getOffsetReg(),
1002 /*TODO:IsKill=*/false, Addr.getExtendType(),
1005 ResultReg = emitAddSub_rs(/*UseAdd=*/true, MVT::i64, Addr.getReg(),
1006 /*TODO:IsKill=*/false, Addr.getOffsetReg(),
1007 /*TODO:IsKill=*/false, AArch64_AM::LSL,
1010 if (Addr.getExtendType() == AArch64_AM::UXTW)
1011 ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(),
1012 /*Op0IsKill=*/false, Addr.getShift(),
1014 else if (Addr.getExtendType() == AArch64_AM::SXTW)
1015 ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(),
1016 /*Op0IsKill=*/false, Addr.getShift(),
1019 ResultReg = emitLSL_ri(MVT::i64, MVT::i64, Addr.getOffsetReg(),
1020 /*Op0IsKill=*/false, Addr.getShift());
1025 Addr.setReg(ResultReg);
1026 Addr.setOffsetReg(0);
1028 Addr.setExtendType(AArch64_AM::InvalidShiftExtend);
1031 // Since the offset is too large for the load/store instruction get the
1032 // reg+offset into a register.
1033 if (ImmediateOffsetNeedsLowering) {
1036 // Try to fold the immediate into the add instruction.
1037 ResultReg = emitAdd_ri_(MVT::i64, Addr.getReg(), /*IsKill=*/false, Offset);
1039 ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
1043 Addr.setReg(ResultReg);
1049 void AArch64FastISel::addLoadStoreOperands(Address &Addr,
1050 const MachineInstrBuilder &MIB,
1052 unsigned ScaleFactor,
1053 MachineMemOperand *MMO) {
1054 int64_t Offset = Addr.getOffset() / ScaleFactor;
1055 // Frame base works a bit differently. Handle it separately.
1056 if (Addr.isFIBase()) {
1057 int FI = Addr.getFI();
1058 // FIXME: We shouldn't be using getObjectSize/getObjectAlignment. The size
1059 // and alignment should be based on the VT.
1060 MMO = FuncInfo.MF->getMachineMemOperand(
1061 MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags,
1062 MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
1063 // Now add the rest of the operands.
1064 MIB.addFrameIndex(FI).addImm(Offset);
1066 assert(Addr.isRegBase() && "Unexpected address kind.");
1067 const MCInstrDesc &II = MIB->getDesc();
1068 unsigned Idx = (Flags & MachineMemOperand::MOStore) ? 1 : 0;
1070 constrainOperandRegClass(II, Addr.getReg(), II.getNumDefs()+Idx));
1072 constrainOperandRegClass(II, Addr.getOffsetReg(), II.getNumDefs()+Idx+1));
1073 if (Addr.getOffsetReg()) {
1074 assert(Addr.getOffset() == 0 && "Unexpected offset");
1075 bool IsSigned = Addr.getExtendType() == AArch64_AM::SXTW ||
1076 Addr.getExtendType() == AArch64_AM::SXTX;
1077 MIB.addReg(Addr.getReg());
1078 MIB.addReg(Addr.getOffsetReg());
1079 MIB.addImm(IsSigned);
1080 MIB.addImm(Addr.getShift() != 0);
1082 MIB.addReg(Addr.getReg()).addImm(Offset);
1086 MIB.addMemOperand(MMO);
1089 unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
1090 const Value *RHS, bool SetFlags,
1091 bool WantResult, bool IsZExt) {
1092 AArch64_AM::ShiftExtendType ExtendType = AArch64_AM::InvalidShiftExtend;
1093 bool NeedExtend = false;
1094 switch (RetVT.SimpleTy) {
1102 ExtendType = IsZExt ? AArch64_AM::UXTB : AArch64_AM::SXTB;
1106 ExtendType = IsZExt ? AArch64_AM::UXTH : AArch64_AM::SXTH;
1108 case MVT::i32: // fall-through
1113 RetVT.SimpleTy = std::max(RetVT.SimpleTy, MVT::i32);
1115 // Canonicalize immediates to the RHS first.
1116 if (UseAdd && isa<Constant>(LHS) && !isa<Constant>(RHS))
1117 std::swap(LHS, RHS);
1119 // Canonicalize mul by power of 2 to the RHS.
1120 if (UseAdd && LHS->hasOneUse() && isValueAvailable(LHS))
1121 if (isMulPowOf2(LHS))
1122 std::swap(LHS, RHS);
1124 // Canonicalize shift immediate to the RHS.
1125 if (UseAdd && LHS->hasOneUse() && isValueAvailable(LHS))
1126 if (const auto *SI = dyn_cast<BinaryOperator>(LHS))
1127 if (isa<ConstantInt>(SI->getOperand(1)))
1128 if (SI->getOpcode() == Instruction::Shl ||
1129 SI->getOpcode() == Instruction::LShr ||
1130 SI->getOpcode() == Instruction::AShr )
1131 std::swap(LHS, RHS);
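  // Keeping the shifted or scaled operand on the RHS lets the code below fold
  // it into the shifted-register form of ADD/SUB, e.g. 'add x0, x1, x2, lsl #2'.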
1133 unsigned LHSReg = getRegForValue(LHS);
1136 bool LHSIsKill = hasTrivialKill(LHS);
1139 LHSReg = emitIntExt(SrcVT, LHSReg, RetVT, IsZExt);
1141 unsigned ResultReg = 0;
1142 if (const auto *C = dyn_cast<ConstantInt>(RHS)) {
1143 uint64_t Imm = IsZExt ? C->getZExtValue() : C->getSExtValue();
1144 if (C->isNegative())
1145 ResultReg = emitAddSub_ri(!UseAdd, RetVT, LHSReg, LHSIsKill, -Imm,
1146 SetFlags, WantResult);
1148 ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, LHSIsKill, Imm, SetFlags,
1150 } else if (const auto *C = dyn_cast<Constant>(RHS))
1151 if (C->isNullValue())
1152 ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, LHSIsKill, 0, SetFlags,
1158 // Only extend the RHS within the instruction if there is a valid extend type.
1159 if (ExtendType != AArch64_AM::InvalidShiftExtend && RHS->hasOneUse() &&
1160 isValueAvailable(RHS)) {
1161 if (const auto *SI = dyn_cast<BinaryOperator>(RHS))
1162 if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1)))
1163 if ((SI->getOpcode() == Instruction::Shl) && (C->getZExtValue() < 4)) {
1164 unsigned RHSReg = getRegForValue(SI->getOperand(0));
1167 bool RHSIsKill = hasTrivialKill(SI->getOperand(0));
1168 return emitAddSub_rx(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg,
1169 RHSIsKill, ExtendType, C->getZExtValue(),
1170 SetFlags, WantResult);
1172 unsigned RHSReg = getRegForValue(RHS);
1175 bool RHSIsKill = hasTrivialKill(RHS);
1176 return emitAddSub_rx(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
1177 ExtendType, 0, SetFlags, WantResult);
1180 // Check if the mul can be folded into the instruction.
1181 if (RHS->hasOneUse() && isValueAvailable(RHS)) {
1182 if (isMulPowOf2(RHS)) {
1183 const Value *MulLHS = cast<MulOperator>(RHS)->getOperand(0);
1184 const Value *MulRHS = cast<MulOperator>(RHS)->getOperand(1);
1186 if (const auto *C = dyn_cast<ConstantInt>(MulLHS))
1187 if (C->getValue().isPowerOf2())
1188 std::swap(MulLHS, MulRHS);
1190 assert(isa<ConstantInt>(MulRHS) && "Expected a ConstantInt.");
1191 uint64_t ShiftVal = cast<ConstantInt>(MulRHS)->getValue().logBase2();
1192 unsigned RHSReg = getRegForValue(MulLHS);
1195 bool RHSIsKill = hasTrivialKill(MulLHS);
1196 ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg,
1197 RHSIsKill, AArch64_AM::LSL, ShiftVal, SetFlags,
1204 // Check if the shift can be folded into the instruction.
1205 if (RHS->hasOneUse() && isValueAvailable(RHS)) {
1206 if (const auto *SI = dyn_cast<BinaryOperator>(RHS)) {
1207 if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1))) {
1208 AArch64_AM::ShiftExtendType ShiftType = AArch64_AM::InvalidShiftExtend;
1209 switch (SI->getOpcode()) {
1211 case Instruction::Shl: ShiftType = AArch64_AM::LSL; break;
1212 case Instruction::LShr: ShiftType = AArch64_AM::LSR; break;
1213 case Instruction::AShr: ShiftType = AArch64_AM::ASR; break;
1215 uint64_t ShiftVal = C->getZExtValue();
1216 if (ShiftType != AArch64_AM::InvalidShiftExtend) {
1217 unsigned RHSReg = getRegForValue(SI->getOperand(0));
1220 bool RHSIsKill = hasTrivialKill(SI->getOperand(0));
1221 ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg,
1222 RHSIsKill, ShiftType, ShiftVal, SetFlags,
1231 unsigned RHSReg = getRegForValue(RHS);
1234 bool RHSIsKill = hasTrivialKill(RHS);
1237 RHSReg = emitIntExt(SrcVT, RHSReg, RetVT, IsZExt);
1239 return emitAddSub_rr(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
1240 SetFlags, WantResult);
1243 unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
1244 bool LHSIsKill, unsigned RHSReg,
1245 bool RHSIsKill, bool SetFlags,
1247 assert(LHSReg && RHSReg && "Invalid register number.");
1249 if (RetVT != MVT::i32 && RetVT != MVT::i64)
1252 static const unsigned OpcTable[2][2][2] = {
1253 { { AArch64::SUBWrr, AArch64::SUBXrr },
1254 { AArch64::ADDWrr, AArch64::ADDXrr } },
1255 { { AArch64::SUBSWrr, AArch64::SUBSXrr },
1256 { AArch64::ADDSWrr, AArch64::ADDSXrr } }
1258 bool Is64Bit = RetVT == MVT::i64;
1259 unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
1260 const TargetRegisterClass *RC =
1261 Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
1264 ResultReg = createResultReg(RC);
1266 ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
1268 const MCInstrDesc &II = TII.get(Opc);
1269 LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
1270 RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
1271 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1272 .addReg(LHSReg, getKillRegState(LHSIsKill))
1273 .addReg(RHSReg, getKillRegState(RHSIsKill));
1277 unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
1278 bool LHSIsKill, uint64_t Imm,
1279 bool SetFlags, bool WantResult) {
1280 assert(LHSReg && "Invalid register number.");
1282 if (RetVT != MVT::i32 && RetVT != MVT::i64)
1286 if (isUInt<12>(Imm))
1288 else if ((Imm & 0xfff000) == Imm) {
1294 static const unsigned OpcTable[2][2][2] = {
1295 { { AArch64::SUBWri, AArch64::SUBXri },
1296 { AArch64::ADDWri, AArch64::ADDXri } },
1297 { { AArch64::SUBSWri, AArch64::SUBSXri },
1298 { AArch64::ADDSWri, AArch64::ADDSXri } }
1300 bool Is64Bit = RetVT == MVT::i64;
1301 unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
1302 const TargetRegisterClass *RC;
1304 RC = Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
1306 RC = Is64Bit ? &AArch64::GPR64spRegClass : &AArch64::GPR32spRegClass;
1309 ResultReg = createResultReg(RC);
1311 ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
1313 const MCInstrDesc &II = TII.get(Opc);
1314 LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
1315 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1316 .addReg(LHSReg, getKillRegState(LHSIsKill))
1318 .addImm(getShifterImm(AArch64_AM::LSL, ShiftImm));
1322 unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
1323 bool LHSIsKill, unsigned RHSReg,
1325 AArch64_AM::ShiftExtendType ShiftType,
1326 uint64_t ShiftImm, bool SetFlags,
1328 assert(LHSReg && RHSReg && "Invalid register number.");
1330 if (RetVT != MVT::i32 && RetVT != MVT::i64)
1333 // Don't deal with undefined shifts.
1334 if (ShiftImm >= RetVT.getSizeInBits())
1337 static const unsigned OpcTable[2][2][2] = {
1338 { { AArch64::SUBWrs, AArch64::SUBXrs },
1339 { AArch64::ADDWrs, AArch64::ADDXrs } },
1340 { { AArch64::SUBSWrs, AArch64::SUBSXrs },
1341 { AArch64::ADDSWrs, AArch64::ADDSXrs } }
1343 bool Is64Bit = RetVT == MVT::i64;
1344 unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
1345 const TargetRegisterClass *RC =
1346 Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
1349 ResultReg = createResultReg(RC);
1351 ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
1353 const MCInstrDesc &II = TII.get(Opc);
1354 LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
1355 RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
1356 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1357 .addReg(LHSReg, getKillRegState(LHSIsKill))
1358 .addReg(RHSReg, getKillRegState(RHSIsKill))
1359 .addImm(getShifterImm(ShiftType, ShiftImm));
1363 unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
1364 bool LHSIsKill, unsigned RHSReg,
1366 AArch64_AM::ShiftExtendType ExtType,
1367 uint64_t ShiftImm, bool SetFlags,
1369 assert(LHSReg && RHSReg && "Invalid register number.");
1371 if (RetVT != MVT::i32 && RetVT != MVT::i64)
1377 static const unsigned OpcTable[2][2][2] = {
1378 { { AArch64::SUBWrx, AArch64::SUBXrx },
1379 { AArch64::ADDWrx, AArch64::ADDXrx } },
1380 { { AArch64::SUBSWrx, AArch64::SUBSXrx },
1381 { AArch64::ADDSWrx, AArch64::ADDSXrx } }
1383 bool Is64Bit = RetVT == MVT::i64;
1384 unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
1385 const TargetRegisterClass *RC = nullptr;
1387 RC = Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
1389 RC = Is64Bit ? &AArch64::GPR64spRegClass : &AArch64::GPR32spRegClass;
1392 ResultReg = createResultReg(RC);
1394 ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
1396 const MCInstrDesc &II = TII.get(Opc);
1397 LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
1398 RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
1399 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1400 .addReg(LHSReg, getKillRegState(LHSIsKill))
1401 .addReg(RHSReg, getKillRegState(RHSIsKill))
1402 .addImm(getArithExtendImm(ExtType, ShiftImm));
1406 bool AArch64FastISel::emitCmp(const Value *LHS, const Value *RHS, bool IsZExt) {
1407 Type *Ty = LHS->getType();
1408 EVT EVT = TLI.getValueType(DL, Ty, true);
1409 if (!EVT.isSimple())
1411 MVT VT = EVT.getSimpleVT();
1413 switch (VT.SimpleTy) {
1421 return emitICmp(VT, LHS, RHS, IsZExt);
1424 return emitFCmp(VT, LHS, RHS);
1428 bool AArch64FastISel::emitICmp(MVT RetVT, const Value *LHS, const Value *RHS,
1430 return emitSub(RetVT, LHS, RHS, /*SetFlags=*/true, /*WantResult=*/false,
1434 bool AArch64FastISel::emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
1436 return emitAddSub_ri(/*UseAdd=*/false, RetVT, LHSReg, LHSIsKill, Imm,
1437 /*SetFlags=*/true, /*WantResult=*/false) != 0;
1440 bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
1441 if (RetVT != MVT::f32 && RetVT != MVT::f64)
1444 // Check to see if the 2nd operand is a constant that we can encode directly in the compare.
1446 bool UseImm = false;
1447 if (const auto *CFP = dyn_cast<ConstantFP>(RHS))
1448 if (CFP->isZero() && !CFP->isNegative())
1451 unsigned LHSReg = getRegForValue(LHS);
1454 bool LHSIsKill = hasTrivialKill(LHS);
1457 unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDri : AArch64::FCMPSri;
1458 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
1459 .addReg(LHSReg, getKillRegState(LHSIsKill));
1463 unsigned RHSReg = getRegForValue(RHS);
1466 bool RHSIsKill = hasTrivialKill(RHS);
1468 unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDrr : AArch64::FCMPSrr;
1469 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
1470 .addReg(LHSReg, getKillRegState(LHSIsKill))
1471 .addReg(RHSReg, getKillRegState(RHSIsKill));
1475 unsigned AArch64FastISel::emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
1476 bool SetFlags, bool WantResult, bool IsZExt) {
1477 return emitAddSub(/*UseAdd=*/true, RetVT, LHS, RHS, SetFlags, WantResult,
1481 /// \brief This method is a wrapper to simplify add emission.
1483 /// First try to emit an add with an immediate operand using emitAddSub_ri. If
1484 /// that fails, then try to materialize the immediate into a register and use
1485 /// emitAddSub_rr instead.
1486 unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill,
1490 ResultReg = emitAddSub_ri(false, VT, Op0, Op0IsKill, -Imm);
1492 ResultReg = emitAddSub_ri(true, VT, Op0, Op0IsKill, Imm);
1497 unsigned CReg = fastEmit_i(VT, VT, ISD::Constant, Imm);
1501 ResultReg = emitAddSub_rr(true, VT, Op0, Op0IsKill, CReg, true);
1505 unsigned AArch64FastISel::emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
1506 bool SetFlags, bool WantResult, bool IsZExt) {
1507 return emitAddSub(/*UseAdd=*/false, RetVT, LHS, RHS, SetFlags, WantResult,
1511 unsigned AArch64FastISel::emitSubs_rr(MVT RetVT, unsigned LHSReg,
1512 bool LHSIsKill, unsigned RHSReg,
1513 bool RHSIsKill, bool WantResult) {
1514 return emitAddSub_rr(/*UseAdd=*/false, RetVT, LHSReg, LHSIsKill, RHSReg,
1515 RHSIsKill, /*SetFlags=*/true, WantResult);
1518 unsigned AArch64FastISel::emitSubs_rs(MVT RetVT, unsigned LHSReg,
1519 bool LHSIsKill, unsigned RHSReg,
1521 AArch64_AM::ShiftExtendType ShiftType,
1522 uint64_t ShiftImm, bool WantResult) {
1523 return emitAddSub_rs(/*UseAdd=*/false, RetVT, LHSReg, LHSIsKill, RHSReg,
1524 RHSIsKill, ShiftType, ShiftImm, /*SetFlags=*/true,
1528 unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
1529 const Value *LHS, const Value *RHS) {
1530 // Canonicalize immediates to the RHS first.
1531 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
1532 std::swap(LHS, RHS);
1534 // Canonicalize mul by power-of-2 to the RHS.
1535 if (LHS->hasOneUse() && isValueAvailable(LHS))
1536 if (isMulPowOf2(LHS))
1537 std::swap(LHS, RHS);
1539 // Canonicalize shift immediate to the RHS.
1540 if (LHS->hasOneUse() && isValueAvailable(LHS))
1541 if (const auto *SI = dyn_cast<ShlOperator>(LHS))
1542 if (isa<ConstantInt>(SI->getOperand(1)))
1543 std::swap(LHS, RHS);
1545 unsigned LHSReg = getRegForValue(LHS);
1548 bool LHSIsKill = hasTrivialKill(LHS);
1550 unsigned ResultReg = 0;
1551 if (const auto *C = dyn_cast<ConstantInt>(RHS)) {
1552 uint64_t Imm = C->getZExtValue();
1553 ResultReg = emitLogicalOp_ri(ISDOpc, RetVT, LHSReg, LHSIsKill, Imm);
1558 // Check if the mul can be folded into the instruction.
1559 if (RHS->hasOneUse() && isValueAvailable(RHS)) {
1560 if (isMulPowOf2(RHS)) {
1561 const Value *MulLHS = cast<MulOperator>(RHS)->getOperand(0);
1562 const Value *MulRHS = cast<MulOperator>(RHS)->getOperand(1);
1564 if (const auto *C = dyn_cast<ConstantInt>(MulLHS))
1565 if (C->getValue().isPowerOf2())
1566 std::swap(MulLHS, MulRHS);
1568 assert(isa<ConstantInt>(MulRHS) && "Expected a ConstantInt.");
1569 uint64_t ShiftVal = cast<ConstantInt>(MulRHS)->getValue().logBase2();
1571 unsigned RHSReg = getRegForValue(MulLHS);
1574 bool RHSIsKill = hasTrivialKill(MulLHS);
1575 ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, LHSIsKill, RHSReg,
1576 RHSIsKill, ShiftVal);
1582 // Check if the shift can be folded into the instruction.
1583 if (RHS->hasOneUse() && isValueAvailable(RHS)) {
1584 if (const auto *SI = dyn_cast<ShlOperator>(RHS))
1585 if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1))) {
1586 uint64_t ShiftVal = C->getZExtValue();
1587 unsigned RHSReg = getRegForValue(SI->getOperand(0));
1590 bool RHSIsKill = hasTrivialKill(SI->getOperand(0));
1591 ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, LHSIsKill, RHSReg,
1592 RHSIsKill, ShiftVal);
1598 unsigned RHSReg = getRegForValue(RHS);
1601 bool RHSIsKill = hasTrivialKill(RHS);
1603 MVT VT = std::max(MVT::i32, RetVT.SimpleTy);
1604 ResultReg = fastEmit_rr(VT, VT, ISDOpc, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
1605 if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
1606 uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
1607 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
1612 unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT,
1613 unsigned LHSReg, bool LHSIsKill,
1615 assert((ISD::AND + 1 == ISD::OR) && (ISD::AND + 2 == ISD::XOR) &&
1616 "ISD nodes are not consecutive!");
1617 static const unsigned OpcTable[3][2] = {
1618 { AArch64::ANDWri, AArch64::ANDXri },
1619 { AArch64::ORRWri, AArch64::ORRXri },
1620 { AArch64::EORWri, AArch64::EORXri }
1622 const TargetRegisterClass *RC;
1625 switch (RetVT.SimpleTy) {
1632 unsigned Idx = ISDOpc - ISD::AND;
1633 Opc = OpcTable[Idx][0];
1634 RC = &AArch64::GPR32spRegClass;
1639 Opc = OpcTable[ISDOpc - ISD::AND][1];
1640 RC = &AArch64::GPR64spRegClass;
1645 if (!AArch64_AM::isLogicalImmediate(Imm, RegSize))
1648 unsigned ResultReg =
1649 fastEmitInst_ri(Opc, RC, LHSReg, LHSIsKill,
1650 AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
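  // Only bitmask immediates (replicated, rotated runs of ones such as
  // 0x00ff00ff or 0xfff0) can be encoded in the register-immediate forms;
  // the isLogicalImmediate check above rejects everything else.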
1651 if (RetVT >= MVT::i8 && RetVT <= MVT::i16 && ISDOpc != ISD::AND) {
1652 uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
1653 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
1658 unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT,
1659 unsigned LHSReg, bool LHSIsKill,
1660 unsigned RHSReg, bool RHSIsKill,
1661 uint64_t ShiftImm) {
1662 assert((ISD::AND + 1 == ISD::OR) && (ISD::AND + 2 == ISD::XOR) &&
1663 "ISD nodes are not consecutive!");
1664 static const unsigned OpcTable[3][2] = {
1665 { AArch64::ANDWrs, AArch64::ANDXrs },
1666 { AArch64::ORRWrs, AArch64::ORRXrs },
1667 { AArch64::EORWrs, AArch64::EORXrs }
1670 // Don't deal with undefined shifts.
1671 if (ShiftImm >= RetVT.getSizeInBits())
1674 const TargetRegisterClass *RC;
1676 switch (RetVT.SimpleTy) {
1683 Opc = OpcTable[ISDOpc - ISD::AND][0];
1684 RC = &AArch64::GPR32RegClass;
1687 Opc = OpcTable[ISDOpc - ISD::AND][1];
1688 RC = &AArch64::GPR64RegClass;
1691 unsigned ResultReg =
1692 fastEmitInst_rri(Opc, RC, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
1693 AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftImm));
1694 if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
1695 uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
1696 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
1701 unsigned AArch64FastISel::emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
1703 return emitLogicalOp_ri(ISD::AND, RetVT, LHSReg, LHSIsKill, Imm);
1706 unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
1707 bool WantZExt, MachineMemOperand *MMO) {
1708 if (!TLI.allowsMisalignedMemoryAccesses(VT))
1711 // Simplify this down to something we can handle.
1712 if (!simplifyAddress(Addr, VT))
1715 unsigned ScaleFactor = getImplicitScaleFactor(VT);
1717 llvm_unreachable("Unexpected value type.");
1719 // Negative offsets require unscaled, 9-bit, signed immediate offsets.
1720 // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
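  // e.g. 'ldur w0, [x1, #-4]' (unscaled, signed 9-bit) versus
  //      'ldr  w0, [x1, #4092]' (scaled, unsigned 12-bit).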
1721 bool UseScaled = true;
1722 if ((Addr.getOffset() < 0) || (Addr.getOffset() & (ScaleFactor - 1))) {
1727 static const unsigned GPOpcTable[2][8][4] = {
1729 { { AArch64::LDURSBWi, AArch64::LDURSHWi, AArch64::LDURWi,
1731 { AArch64::LDURSBXi, AArch64::LDURSHXi, AArch64::LDURSWi,
1733 { AArch64::LDRSBWui, AArch64::LDRSHWui, AArch64::LDRWui,
1735 { AArch64::LDRSBXui, AArch64::LDRSHXui, AArch64::LDRSWui,
1737 { AArch64::LDRSBWroX, AArch64::LDRSHWroX, AArch64::LDRWroX,
1739 { AArch64::LDRSBXroX, AArch64::LDRSHXroX, AArch64::LDRSWroX,
1741 { AArch64::LDRSBWroW, AArch64::LDRSHWroW, AArch64::LDRWroW,
1743 { AArch64::LDRSBXroW, AArch64::LDRSHXroW, AArch64::LDRSWroW,
1747 { { AArch64::LDURBBi, AArch64::LDURHHi, AArch64::LDURWi,
1749 { AArch64::LDURBBi, AArch64::LDURHHi, AArch64::LDURWi,
1751 { AArch64::LDRBBui, AArch64::LDRHHui, AArch64::LDRWui,
1753 { AArch64::LDRBBui, AArch64::LDRHHui, AArch64::LDRWui,
1755 { AArch64::LDRBBroX, AArch64::LDRHHroX, AArch64::LDRWroX,
1757 { AArch64::LDRBBroX, AArch64::LDRHHroX, AArch64::LDRWroX,
1759 { AArch64::LDRBBroW, AArch64::LDRHHroW, AArch64::LDRWroW,
1761 { AArch64::LDRBBroW, AArch64::LDRHHroW, AArch64::LDRWroW,
1766 static const unsigned FPOpcTable[4][2] = {
1767 { AArch64::LDURSi, AArch64::LDURDi },
1768 { AArch64::LDRSui, AArch64::LDRDui },
1769 { AArch64::LDRSroX, AArch64::LDRDroX },
1770 { AArch64::LDRSroW, AArch64::LDRDroW }
1774 const TargetRegisterClass *RC;
1775 bool UseRegOffset = Addr.isRegBase() && !Addr.getOffset() && Addr.getReg() &&
1776 Addr.getOffsetReg();
1777 unsigned Idx = UseRegOffset ? 2 : UseScaled ? 1 : 0;
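  // Idx selects the addressing form: 0 = unscaled (LDUR*), 1 = scaled
  // immediate (LDR*ui), 2 = X-register offset (LDR*roX); a UXTW/SXTW extended
  // offset switches to the W-register-offset variants below.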
1778 if (Addr.getExtendType() == AArch64_AM::UXTW ||
1779 Addr.getExtendType() == AArch64_AM::SXTW)
1782 bool IsRet64Bit = RetVT == MVT::i64;
1783 switch (VT.SimpleTy) {
1785 llvm_unreachable("Unexpected value type.");
1786 case MVT::i1: // Intentional fall-through.
1788 Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][0];
1789 RC = (IsRet64Bit && !WantZExt) ?
1790 &AArch64::GPR64RegClass: &AArch64::GPR32RegClass;
1793 Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][1];
1794 RC = (IsRet64Bit && !WantZExt) ?
1795 &AArch64::GPR64RegClass: &AArch64::GPR32RegClass;
1798 Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][2];
1799 RC = (IsRet64Bit && !WantZExt) ?
1800 &AArch64::GPR64RegClass: &AArch64::GPR32RegClass;
1803 Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][3];
1804 RC = &AArch64::GPR64RegClass;
1807 Opc = FPOpcTable[Idx][0];
1808 RC = &AArch64::FPR32RegClass;
1811 Opc = FPOpcTable[Idx][1];
1812 RC = &AArch64::FPR64RegClass;
1816 // Create the base instruction, then add the operands.
1817 unsigned ResultReg = createResultReg(RC);
1818 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1819 TII.get(Opc), ResultReg);
1820 addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, ScaleFactor, MMO);
1822 // Loading an i1 requires special handling.
1823 if (VT == MVT::i1) {
1824 unsigned ANDReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, 1);
1825 assert(ANDReg && "Unexpected AND instruction emission failure.");
1829 // For zero-extending loads to 64bit we emit a 32bit load and then convert
1830 // the 32bit reg to a 64bit reg.
1831 if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) {
1832 unsigned Reg64 = createResultReg(&AArch64::GPR64RegClass);
1833 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1834 TII.get(AArch64::SUBREG_TO_REG), Reg64)
1836 .addReg(ResultReg, getKillRegState(true))
1837 .addImm(AArch64::sub_32);
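    // SUBREG_TO_REG records that the 32-bit load already zeroed bits 63:32 of
    // the 64-bit register, so no extra zero-extension instruction is needed.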
1843 bool AArch64FastISel::selectAddSub(const Instruction *I) {
1845 if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
1849 return selectOperator(I, I->getOpcode());
1852 switch (I->getOpcode()) {
1854 llvm_unreachable("Unexpected instruction.");
1855 case Instruction::Add:
1856 ResultReg = emitAdd(VT, I->getOperand(0), I->getOperand(1));
1858 case Instruction::Sub:
1859 ResultReg = emitSub(VT, I->getOperand(0), I->getOperand(1));
1865 updateValueMap(I, ResultReg);
1869 bool AArch64FastISel::selectLogicalOp(const Instruction *I) {
1871 if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
1875 return selectOperator(I, I->getOpcode());
1878 switch (I->getOpcode()) {
1880 llvm_unreachable("Unexpected instruction.");
1881 case Instruction::And:
1882 ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
1884 case Instruction::Or:
1885 ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
1887 case Instruction::Xor:
1888 ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
1894 updateValueMap(I, ResultReg);
1898 bool AArch64FastISel::selectLoad(const Instruction *I) {
1900 // Verify we have a legal type before going any further. Currently, we handle
1901 // simple types that will directly fit in a register (i32/f32/i64/f64) or
1902 // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
1903 if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true) ||
1904 cast<LoadInst>(I)->isAtomic())
1907 // See if we can handle this address.
1909 if (!computeAddress(I->getOperand(0), Addr, I->getType()))
1912 // Fold the following sign-/zero-extend into the load instruction.
1913 bool WantZExt = true;
1915 const Value *IntExtVal = nullptr;
1916 if (I->hasOneUse()) {
1917 if (const auto *ZE = dyn_cast<ZExtInst>(I->use_begin()->getUser())) {
1918 if (isTypeSupported(ZE->getType(), RetVT))
1922 } else if (const auto *SE = dyn_cast<SExtInst>(I->use_begin()->getUser())) {
1923 if (isTypeSupported(SE->getType(), RetVT))
1931 unsigned ResultReg =
1932 emitLoad(VT, RetVT, Addr, WantZExt, createMachineMemOperandFor(I));
1936 // There are a few different cases we have to handle, because the load or the
1937 // sign-/zero-extend might not be selected by FastISel if we fall back to
1938 // SelectionDAG. There is also an ordering issue when both instructions are in
1939 // different basic blocks.
1940 // 1.) The load instruction is selected by FastISel, but the integer extend is
1941 //     not. This usually happens when the integer extend is in a different
1942 // basic block and SelectionDAG took over for that basic block.
1943 // 2.) The load instruction is selected before the integer extend. This only
1944 // happens when the integer extend is in a different basic block.
1945 // 3.) The load instruction is selected by SelectionDAG and the integer extend
1946 // by FastISel. This happens if there are instructions between the load
1947 // and the integer extend that couldn't be selected by FastISel.
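// A typical case (illustrative IR) is a load whose only use is an extend,
// possibly in another basic block:
//   %ld  = load i16, i16* %p
//   ...
//   %ext = zext i16 %ld to i64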
1949 // The integer extend hasn't been emitted yet. FastISel or SelectionDAG
1950 // could select it. Emit a copy to subreg if necessary. FastISel will remove
1951 // it when it selects the integer extend.
1952 unsigned Reg = lookUpRegForValue(IntExtVal);
1953 auto *MI = MRI.getUniqueVRegDef(Reg);
1955 if (RetVT == MVT::i64 && VT <= MVT::i32) {
1957 // Delete the last emitted instruction from emitLoad (SUBREG_TO_REG).
1958 std::prev(FuncInfo.InsertPt)->eraseFromParent();
1959 ResultReg = std::prev(FuncInfo.InsertPt)->getOperand(0).getReg();
1961 ResultReg = fastEmitInst_extractsubreg(MVT::i32, ResultReg,
1965 updateValueMap(I, ResultReg);
1969 // The integer extend has already been emitted - delete all the instructions
1970 // that have been emitted by the integer extend lowering code and use the
1971 // result from the load instruction directly.
1974 for (auto &Opnd : MI->uses()) {
1976 Reg = Opnd.getReg();
1980 MI->eraseFromParent();
1983 MI = MRI.getUniqueVRegDef(Reg);
1985 updateValueMap(IntExtVal, ResultReg);
1989 updateValueMap(I, ResultReg);
1993 bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
1994 MachineMemOperand *MMO) {
1995 if (!TLI.allowsMisalignedMemoryAccesses(VT))
1998 // Simplify this down to something we can handle.
1999 if (!simplifyAddress(Addr, VT))
2002 unsigned ScaleFactor = getImplicitScaleFactor(VT);
2004 llvm_unreachable("Unexpected value type.");
2006 // Negative offsets require unscaled, 9-bit, signed immediate offsets.
2007 // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
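// For example (registers illustrative): an i32 store at offset -4 must use the
// unscaled form "stur w0, [x1, #-4]", whereas offset 8 can use the scaled form
// "str w0, [x1, #8]".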
2008 bool UseScaled = true;
2009 if ((Addr.getOffset() < 0) || (Addr.getOffset() & (ScaleFactor - 1))) {
2014 static const unsigned OpcTable[4][6] = {
2015 { AArch64::STURBBi, AArch64::STURHHi, AArch64::STURWi, AArch64::STURXi,
2016 AArch64::STURSi, AArch64::STURDi },
2017 { AArch64::STRBBui, AArch64::STRHHui, AArch64::STRWui, AArch64::STRXui,
2018 AArch64::STRSui, AArch64::STRDui },
2019 { AArch64::STRBBroX, AArch64::STRHHroX, AArch64::STRWroX, AArch64::STRXroX,
2020 AArch64::STRSroX, AArch64::STRDroX },
2021 { AArch64::STRBBroW, AArch64::STRHHroW, AArch64::STRWroW, AArch64::STRXroW,
2022 AArch64::STRSroW, AArch64::STRDroW }
2026 bool VTIsi1 = false;
2027 bool UseRegOffset = Addr.isRegBase() && !Addr.getOffset() && Addr.getReg() &&
2028 Addr.getOffsetReg();
2029 unsigned Idx = UseRegOffset ? 2 : UseScaled ? 1 : 0;
2030 if (Addr.getExtendType() == AArch64_AM::UXTW ||
2031 Addr.getExtendType() == AArch64_AM::SXTW)
2034 switch (VT.SimpleTy) {
2035 default: llvm_unreachable("Unexpected value type.");
2036 case MVT::i1: VTIsi1 = true;
2037 case MVT::i8: Opc = OpcTable[Idx][0]; break;
2038 case MVT::i16: Opc = OpcTable[Idx][1]; break;
2039 case MVT::i32: Opc = OpcTable[Idx][2]; break;
2040 case MVT::i64: Opc = OpcTable[Idx][3]; break;
2041 case MVT::f32: Opc = OpcTable[Idx][4]; break;
2042 case MVT::f64: Opc = OpcTable[Idx][5]; break;
2045 // Storing an i1 requires special handling.
2046 if (VTIsi1 && SrcReg != AArch64::WZR) {
2047 unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
2048 assert(ANDReg && "Unexpected AND instruction emission failure.");
2051 // Create the base instruction, then add the operands.
2052 const MCInstrDesc &II = TII.get(Opc);
2053 SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
2054 MachineInstrBuilder MIB =
2055 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(SrcReg);
2056 addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, ScaleFactor, MMO);
2061 bool AArch64FastISel::selectStore(const Instruction *I) {
2063 const Value *Op0 = I->getOperand(0);
2064 // Verify we have a legal type before going any further. Currently, we handle
2065 // simple types that will directly fit in a register (i32/f32/i64/f64) or
2066 // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
2067 if (!isTypeSupported(Op0->getType(), VT, /*IsVectorAllowed=*/true) ||
2068 cast<StoreInst>(I)->isAtomic())
2071 // Get the value to be stored into a register. Use the zero register directly
2072 // when possible to avoid an unnecessary copy and a wasted register.
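// For example (registers illustrative), "store i32 0, i32* %p" becomes
// "str wzr, [x0]" and "store i64 0, i64* %p" becomes "str xzr, [x0]".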
2073 unsigned SrcReg = 0;
2074 if (const auto *CI = dyn_cast<ConstantInt>(Op0)) {
2076 SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
2077 } else if (const auto *CF = dyn_cast<ConstantFP>(Op0)) {
2078 if (CF->isZero() && !CF->isNegative()) {
2079 VT = MVT::getIntegerVT(VT.getSizeInBits());
2080 SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
2085 SrcReg = getRegForValue(Op0);
2090 // See if we can handle this address.
2092 if (!computeAddress(I->getOperand(1), Addr, I->getOperand(0)->getType()))
2095 if (!emitStore(VT, SrcReg, Addr, createMachineMemOperandFor(I)))
2100 static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) {
2102 case CmpInst::FCMP_ONE:
2103 case CmpInst::FCMP_UEQ:
2105 // AL is our "false" for now. The other two need more compares.
2106 return AArch64CC::AL;
2107 case CmpInst::ICMP_EQ:
2108 case CmpInst::FCMP_OEQ:
2109 return AArch64CC::EQ;
2110 case CmpInst::ICMP_SGT:
2111 case CmpInst::FCMP_OGT:
2112 return AArch64CC::GT;
2113 case CmpInst::ICMP_SGE:
2114 case CmpInst::FCMP_OGE:
2115 return AArch64CC::GE;
2116 case CmpInst::ICMP_UGT:
2117 case CmpInst::FCMP_UGT:
2118 return AArch64CC::HI;
2119 case CmpInst::FCMP_OLT:
2120 return AArch64CC::MI;
2121 case CmpInst::ICMP_ULE:
2122 case CmpInst::FCMP_OLE:
2123 return AArch64CC::LS;
2124 case CmpInst::FCMP_ORD:
2125 return AArch64CC::VC;
2126 case CmpInst::FCMP_UNO:
2127 return AArch64CC::VS;
2128 case CmpInst::FCMP_UGE:
2129 return AArch64CC::PL;
2130 case CmpInst::ICMP_SLT:
2131 case CmpInst::FCMP_ULT:
2132 return AArch64CC::LT;
2133 case CmpInst::ICMP_SLE:
2134 case CmpInst::FCMP_ULE:
2135 return AArch64CC::LE;
2136 case CmpInst::FCMP_UNE:
2137 case CmpInst::ICMP_NE:
2138 return AArch64CC::NE;
2139 case CmpInst::ICMP_UGE:
2140 return AArch64CC::HS;
2141 case CmpInst::ICMP_ULT:
2142 return AArch64CC::LO;
2146 /// \brief Try to emit a combined compare-and-branch instruction.
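/// For example (illustrative), "icmp eq i64 %x, 0" feeding a conditional branch
/// becomes "cbz x0, <TBB>", and "%a = and i32 %x, 4; icmp ne i32 %a, 0" feeding
/// a branch becomes "tbnz w0, #2, <TBB>".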
2147 bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
2148 assert(isa<CmpInst>(BI->getCondition()) && "Expected cmp instruction");
2149 const CmpInst *CI = cast<CmpInst>(BI->getCondition());
2150 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2152 const Value *LHS = CI->getOperand(0);
2153 const Value *RHS = CI->getOperand(1);
2156 if (!isTypeSupported(LHS->getType(), VT))
2159 unsigned BW = VT.getSizeInBits();
2163 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
2164 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
2166 // Try to take advantage of fallthrough opportunities.
2167 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
2168 std::swap(TBB, FBB);
2169 Predicate = CmpInst::getInversePredicate(Predicate);
2174 switch (Predicate) {
2177 case CmpInst::ICMP_EQ:
2178 case CmpInst::ICMP_NE:
2179 if (isa<Constant>(LHS) && cast<Constant>(LHS)->isNullValue())
2180 std::swap(LHS, RHS);
2182 if (!isa<Constant>(RHS) || !cast<Constant>(RHS)->isNullValue())
2185 if (const auto *AI = dyn_cast<BinaryOperator>(LHS))
2186 if (AI->getOpcode() == Instruction::And && isValueAvailable(AI)) {
2187 const Value *AndLHS = AI->getOperand(0);
2188 const Value *AndRHS = AI->getOperand(1);
2190 if (const auto *C = dyn_cast<ConstantInt>(AndLHS))
2191 if (C->getValue().isPowerOf2())
2192 std::swap(AndLHS, AndRHS);
2194 if (const auto *C = dyn_cast<ConstantInt>(AndRHS))
2195 if (C->getValue().isPowerOf2()) {
2196 TestBit = C->getValue().logBase2();
2204 IsCmpNE = Predicate == CmpInst::ICMP_NE;
2206 case CmpInst::ICMP_SLT:
2207 case CmpInst::ICMP_SGE:
2208 if (!isa<Constant>(RHS) || !cast<Constant>(RHS)->isNullValue())
2212 IsCmpNE = Predicate == CmpInst::ICMP_SLT;
2214 case CmpInst::ICMP_SGT:
2215 case CmpInst::ICMP_SLE:
2216 if (!isa<ConstantInt>(RHS))
2219 if (cast<ConstantInt>(RHS)->getValue() != APInt(BW, -1, true))
2223 IsCmpNE = Predicate == CmpInst::ICMP_SLE;
2227 static const unsigned OpcTable[2][2][2] = {
2228 { {AArch64::CBZW, AArch64::CBZX },
2229 {AArch64::CBNZW, AArch64::CBNZX} },
2230 { {AArch64::TBZW, AArch64::TBZX },
2231 {AArch64::TBNZW, AArch64::TBNZX} }
2234 bool IsBitTest = TestBit != -1;
2235 bool Is64Bit = BW == 64;
2236 if (TestBit < 32 && TestBit >= 0)
2237 Is64Bit = false;
2239 unsigned Opc = OpcTable[IsBitTest][IsCmpNE][Is64Bit];
2240 const MCInstrDesc &II = TII.get(Opc);
2242 unsigned SrcReg = getRegForValue(LHS);
2245 bool SrcIsKill = hasTrivialKill(LHS);
2247 if (BW == 64 && !Is64Bit)
2248 SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
2251 if ((BW < 32) && !IsBitTest)
2252 SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*IsZExt=*/true);
2254 // Emit the combined compare and branch instruction.
2255 SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
2256 MachineInstrBuilder MIB =
2257 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
2258 .addReg(SrcReg, getKillRegState(SrcIsKill));
2260 MIB.addImm(TestBit);
2263 finishCondBranch(BI->getParent(), TBB, FBB);
2267 bool AArch64FastISel::selectBranch(const Instruction *I) {
2268 const BranchInst *BI = cast<BranchInst>(I);
2269 if (BI->isUnconditional()) {
2270 MachineBasicBlock *MSucc = FuncInfo.MBBMap[BI->getSuccessor(0)];
2271 fastEmitBranch(MSucc, BI->getDebugLoc());
2275 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
2276 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
2278 AArch64CC::CondCode CC = AArch64CC::NE;
2279 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
2280 if (CI->hasOneUse() && isValueAvailable(CI)) {
2281 // Try to optimize or fold the cmp.
2282 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2283 switch (Predicate) {
2286 case CmpInst::FCMP_FALSE:
2287 fastEmitBranch(FBB, DbgLoc);
2289 case CmpInst::FCMP_TRUE:
2290 fastEmitBranch(TBB, DbgLoc);
2294 // Try to emit a combined compare-and-branch first.
2295 if (emitCompareAndBranch(BI))
2298 // Try to take advantage of fallthrough opportunities.
2299 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
2300 std::swap(TBB, FBB);
2301 Predicate = CmpInst::getInversePredicate(Predicate);
2305 if (!emitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
2308 // FCMP_UEQ and FCMP_ONE cannot be checked with a single branch instruction.
2310 CC = getCompareCC(Predicate);
2311 AArch64CC::CondCode ExtraCC = AArch64CC::AL;
2312 switch (Predicate) {
2315 case CmpInst::FCMP_UEQ:
2316 ExtraCC = AArch64CC::EQ;
2319 case CmpInst::FCMP_ONE:
2320 ExtraCC = AArch64CC::MI;
2324 assert((CC != AArch64CC::AL) && "Unexpected condition code.");
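// For example (registers and labels illustrative), FCMP_UEQ is lowered to roughly:
//   fcmp s0, s1
//   b.eq <TBB>   ; extra branch for the equal case
//   b.vs <TBB>   ; unordered case
//   b    <FBB>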
2326 // Emit the extra branch for FCMP_UEQ and FCMP_ONE.
2327 if (ExtraCC != AArch64CC::AL) {
2328 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
2334 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
2338 finishCondBranch(BI->getParent(), TBB, FBB);
2341 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
2343 if (TI->hasOneUse() && isValueAvailable(TI) &&
2344 isTypeSupported(TI->getOperand(0)->getType(), SrcVT)) {
2345 unsigned CondReg = getRegForValue(TI->getOperand(0));
2348 bool CondIsKill = hasTrivialKill(TI->getOperand(0));
2350 // Issue an extract_subreg to get the lower 32 bits.
2351 if (SrcVT == MVT::i64) {
2352 CondReg = fastEmitInst_extractsubreg(MVT::i32, CondReg, CondIsKill,
2357 unsigned ANDReg = emitAnd_ri(MVT::i32, CondReg, CondIsKill, 1);
2358 assert(ANDReg && "Unexpected AND instruction emission failure.");
2359 emitICmp_ri(MVT::i32, ANDReg, /*IsKill=*/true, 0);
2361 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
2362 std::swap(TBB, FBB);
2365 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
2369 finishCondBranch(BI->getParent(), TBB, FBB);
2372 } else if (const auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
2373 uint64_t Imm = CI->getZExtValue();
2374 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
2375 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::B))
2378 // Obtain the branch weight and add the target to the successor list.
2380 uint32_t BranchWeight =
2381 FuncInfo.BPI->getEdgeWeight(BI->getParent(), Target->getBasicBlock());
2382 FuncInfo.MBB->addSuccessor(Target, BranchWeight);
2384 FuncInfo.MBB->addSuccessorWithoutWeight(Target);
2386 } else if (foldXALUIntrinsic(CC, I, BI->getCondition())) {
2387 // Fake request the condition, otherwise the intrinsic might be completely optimized away.
2389 unsigned CondReg = getRegForValue(BI->getCondition());
2394 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
2398 finishCondBranch(BI->getParent(), TBB, FBB);
2402 unsigned CondReg = getRegForValue(BI->getCondition());
2405 bool CondRegIsKill = hasTrivialKill(BI->getCondition());
2407 // We've been divorced from our compare! Our block was split, and
2408 // now our compare lives in a predecessor block. We mustn't
2409 // re-compare here, as the children of the compare aren't guaranteed
2410 // live across the block boundary (we *could* check for this).
2411 // Regardless, the compare has been done in the predecessor block,
2412 // and it left a value for us in a virtual register. Ergo, we test
2413 // the one-bit value left in the virtual register.
2415 // FIXME: Optimize this with TBZW/TBNZW.
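// For now this emits roughly (registers and labels illustrative):
//   and  w8, w9, #0x1
//   cmp  w8, #0
//   b.ne <TBB>
//   b    <FBB>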
2416 unsigned ANDReg = emitAnd_ri(MVT::i32, CondReg, CondRegIsKill, 1);
2417 assert(ANDReg && "Unexpected AND instruction emission failure.");
2418 emitICmp_ri(MVT::i32, ANDReg, /*IsKill=*/true, 0);
2420 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
2421 std::swap(TBB, FBB);
2425 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
2429 finishCondBranch(BI->getParent(), TBB, FBB);
2433 bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
2434 const IndirectBrInst *BI = cast<IndirectBrInst>(I);
2435 unsigned AddrReg = getRegForValue(BI->getOperand(0));
2439 // Emit the indirect branch.
2440 const MCInstrDesc &II = TII.get(AArch64::BR);
2441 AddrReg = constrainOperandRegClass(II, AddrReg, II.getNumDefs());
2442 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(AddrReg);
2444 // Make sure the CFG is up-to-date.
2445 for (auto *Succ : BI->successors())
2446 FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[Succ]);
2451 bool AArch64FastISel::selectCmp(const Instruction *I) {
2452 const CmpInst *CI = cast<CmpInst>(I);
2454 // Vectors of i1 are weird: bail out.
2455 if (CI->getType()->isVectorTy())
2458 // Try to optimize or fold the cmp.
2459 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2460 unsigned ResultReg = 0;
2461 switch (Predicate) {
2464 case CmpInst::FCMP_FALSE:
2465 ResultReg = createResultReg(&AArch64::GPR32RegClass);
2466 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2467 TII.get(TargetOpcode::COPY), ResultReg)
2468 .addReg(AArch64::WZR, getKillRegState(true));
2470 case CmpInst::FCMP_TRUE:
2471 ResultReg = fastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 1);
2476 updateValueMap(I, ResultReg);
2481 if (!emitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
2484 ResultReg = createResultReg(&AArch64::GPR32RegClass);
2486 // FCMP_UEQ and FCMP_ONE cannot be checked with a single instruction. These
2487 // condition codes are inverted, because they are used by CSINC.
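// For example (registers illustrative), FCMP_UEQ materializes its result as:
//   fcmp  s0, s1
//   csinc w8, wzr, wzr, ne   ; w8 = 1 if EQ, else 0
//   csinc w9, w8,  wzr, vc   ; w9 = w8 if ordered, 1 if unordered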
2488 static unsigned CondCodeTable[2][2] = {
2489 { AArch64CC::NE, AArch64CC::VC },
2490 { AArch64CC::PL, AArch64CC::LE }
2492 unsigned *CondCodes = nullptr;
2493 switch (Predicate) {
2496 case CmpInst::FCMP_UEQ:
2497 CondCodes = &CondCodeTable[0][0];
2499 case CmpInst::FCMP_ONE:
2500 CondCodes = &CondCodeTable[1][0];
2505 unsigned TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
2506 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
2508 .addReg(AArch64::WZR, getKillRegState(true))
2509 .addReg(AArch64::WZR, getKillRegState(true))
2510 .addImm(CondCodes[0]);
2511 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
2513 .addReg(TmpReg1, getKillRegState(true))
2514 .addReg(AArch64::WZR, getKillRegState(true))
2515 .addImm(CondCodes[1]);
2517 updateValueMap(I, ResultReg);
2521 // Now set a register based on the comparison.
2522 AArch64CC::CondCode CC = getCompareCC(Predicate);
2523 assert((CC != AArch64CC::AL) && "Unexpected condition code.");
2524 AArch64CC::CondCode invertedCC = getInvertedCondCode(CC);
2525 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
2527 .addReg(AArch64::WZR, getKillRegState(true))
2528 .addReg(AArch64::WZR, getKillRegState(true))
2529 .addImm(invertedCC);
2531 updateValueMap(I, ResultReg);
2535 /// \brief Optimize selects of i1 if one of the operands has a 'true' or 'false' value.
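/// For example (sketch, registers illustrative), the cases handled below map to:
///   select i1 %c, i1 true,  i1 %f  ->  orr wd, wc, wf
///   select i1 %c, i1 false, i1 %f  ->  bic wd, wf, wc
///   select i1 %c, i1 %t, i1 false  ->  and wd, wc, wt
///   select i1 %c, i1 %t, i1 true   ->  eor wc, wc, #1; orr wd, wc, wt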
2537 bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
2538 if (!SI->getType()->isIntegerTy(1))
2541 const Value *Src1Val, *Src2Val;
2543 bool NeedExtraOp = false;
2544 if (auto *CI = dyn_cast<ConstantInt>(SI->getTrueValue())) {
2546 Src1Val = SI->getCondition();
2547 Src2Val = SI->getFalseValue();
2548 Opc = AArch64::ORRWrr;
2550 assert(CI->isZero());
2551 Src1Val = SI->getFalseValue();
2552 Src2Val = SI->getCondition();
2553 Opc = AArch64::BICWrr;
2555 } else if (auto *CI = dyn_cast<ConstantInt>(SI->getFalseValue())) {
2557 Src1Val = SI->getCondition();
2558 Src2Val = SI->getTrueValue();
2559 Opc = AArch64::ORRWrr;
2562 assert(CI->isZero());
2563 Src1Val = SI->getCondition();
2564 Src2Val = SI->getTrueValue();
2565 Opc = AArch64::ANDWrr;
2572 unsigned Src1Reg = getRegForValue(Src1Val);
2575 bool Src1IsKill = hasTrivialKill(Src1Val);
2577 unsigned Src2Reg = getRegForValue(Src2Val);
2580 bool Src2IsKill = hasTrivialKill(Src2Val);
2583 Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, Src1IsKill, 1);
2586 unsigned ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
2587 Src1IsKill, Src2Reg, Src2IsKill);
2588 updateValueMap(SI, ResultReg);
2592 bool AArch64FastISel::selectSelect(const Instruction *I) {
2593 assert(isa<SelectInst>(I) && "Expected a select instruction.");
2595 if (!isTypeSupported(I->getType(), VT))
2599 const TargetRegisterClass *RC;
2600 switch (VT.SimpleTy) {
2607 Opc = AArch64::CSELWr;
2608 RC = &AArch64::GPR32RegClass;
2611 Opc = AArch64::CSELXr;
2612 RC = &AArch64::GPR64RegClass;
2615 Opc = AArch64::FCSELSrrr;
2616 RC = &AArch64::FPR32RegClass;
2619 Opc = AArch64::FCSELDrrr;
2620 RC = &AArch64::FPR64RegClass;
2624 const SelectInst *SI = cast<SelectInst>(I);
2625 const Value *Cond = SI->getCondition();
2626 AArch64CC::CondCode CC = AArch64CC::NE;
2627 AArch64CC::CondCode ExtraCC = AArch64CC::AL;
2629 if (optimizeSelect(SI))
2632 // Try to pick up the flags, so we don't have to emit another compare.
2633 if (foldXALUIntrinsic(CC, I, Cond)) {
2634 // Fake request the condition to force emission of the XALU intrinsic.
2635 unsigned CondReg = getRegForValue(Cond);
2638 } else if (isa<CmpInst>(Cond) && cast<CmpInst>(Cond)->hasOneUse() &&
2639 isValueAvailable(Cond)) {
2640 const auto *Cmp = cast<CmpInst>(Cond);
2641 // Try to optimize or fold the cmp.
2642 CmpInst::Predicate Predicate = optimizeCmpPredicate(Cmp);
2643 const Value *FoldSelect = nullptr;
2644 switch (Predicate) {
2647 case CmpInst::FCMP_FALSE:
2648 FoldSelect = SI->getFalseValue();
2650 case CmpInst::FCMP_TRUE:
2651 FoldSelect = SI->getTrueValue();
2656 unsigned SrcReg = getRegForValue(FoldSelect);
2659 unsigned UseReg = lookUpRegForValue(SI);
2661 MRI.clearKillFlags(UseReg);
2663 updateValueMap(I, SrcReg);
2668 if (!emitCmp(Cmp->getOperand(0), Cmp->getOperand(1), Cmp->isUnsigned()))
2671 // FCMP_UEQ and FCMP_ONE cannot be checked with a single select instruction.
2672 CC = getCompareCC(Predicate);
2673 switch (Predicate) {
2676 case CmpInst::FCMP_UEQ:
2677 ExtraCC = AArch64CC::EQ;
2680 case CmpInst::FCMP_ONE:
2681 ExtraCC = AArch64CC::MI;
2685 assert((CC != AArch64CC::AL) && "Unexpected condition code.");
2687 unsigned CondReg = getRegForValue(Cond);
2690 bool CondIsKill = hasTrivialKill(Cond);
2692 const MCInstrDesc &II = TII.get(AArch64::ANDSWri);
2693 CondReg = constrainOperandRegClass(II, CondReg, 1);
2695 // Emit a TST instruction (ANDS wzr, reg, #imm).
2696 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
2698 .addReg(CondReg, getKillRegState(CondIsKill))
2699 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
2702 unsigned Src1Reg = getRegForValue(SI->getTrueValue());
2703 bool Src1IsKill = hasTrivialKill(SI->getTrueValue());
2705 unsigned Src2Reg = getRegForValue(SI->getFalseValue());
2706 bool Src2IsKill = hasTrivialKill(SI->getFalseValue());
2708 if (!Src1Reg || !Src2Reg)
2711 if (ExtraCC != AArch64CC::AL) {
2712 Src2Reg = fastEmitInst_rri(Opc, RC, Src1Reg, Src1IsKill, Src2Reg,
2713 Src2IsKill, ExtraCC);
2716 unsigned ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src1IsKill, Src2Reg,
2718 updateValueMap(I, ResultReg);
2722 bool AArch64FastISel::selectFPExt(const Instruction *I) {
2723 Value *V = I->getOperand(0);
2724 if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())
2727 unsigned Op = getRegForValue(V);
2731 unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
2732 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
2733 ResultReg).addReg(Op);
2734 updateValueMap(I, ResultReg);
2738 bool AArch64FastISel::selectFPTrunc(const Instruction *I) {
2739 Value *V = I->getOperand(0);
2740 if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())
2743 unsigned Op = getRegForValue(V);
2747 unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
2748 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
2749 ResultReg).addReg(Op);
2750 updateValueMap(I, ResultReg);
2754 // FPToUI and FPToSI
2755 bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
2757 if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
2760 unsigned SrcReg = getRegForValue(I->getOperand(0));
2764 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);
2765 if (SrcVT == MVT::f128)
2769 if (SrcVT == MVT::f64) {
2771 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr;
2773 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr;
2776 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr;
2778 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;
2780 unsigned ResultReg = createResultReg(
2781 DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
2782 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
2784 updateValueMap(I, ResultReg);
2788 bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
2790 if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
2792 assert ((DestVT == MVT::f32 || DestVT == MVT::f64) &&
2793 "Unexpected value type.");
2795 unsigned SrcReg = getRegForValue(I->getOperand(0));
2798 bool SrcIsKill = hasTrivialKill(I->getOperand(0));
2800 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);
2802 // Handle sign-extension.
2803 if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {
2805 SrcReg = emitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);
2812 if (SrcVT == MVT::i64) {
2814 Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri;
2816 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri;
2819 Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri;
2821 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
2824 unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
2826 updateValueMap(I, ResultReg);
2830 bool AArch64FastISel::fastLowerArguments() {
2831 if (!FuncInfo.CanLowerReturn)
2834 const Function *F = FuncInfo.Fn;
2838 CallingConv::ID CC = F->getCallingConv();
2839 if (CC != CallingConv::C)
2842 // Only handle simple cases of up to 8 GPR and FPR each.
2843 unsigned GPRCnt = 0;
2844 unsigned FPRCnt = 0;
2846 for (auto const &Arg : F->args()) {
2847 // The first argument is at index 1.
2849 if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
2850 F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
2851 F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
2852 F->getAttributes().hasAttribute(Idx, Attribute::Nest))
2855 Type *ArgTy = Arg.getType();
2856 if (ArgTy->isStructTy() || ArgTy->isArrayTy())
2859 EVT ArgVT = TLI.getValueType(DL, ArgTy);
2860 if (!ArgVT.isSimple())
2863 MVT VT = ArgVT.getSimpleVT().SimpleTy;
2864 if (VT.isFloatingPoint() && !Subtarget->hasFPARMv8())
2867 if (VT.isVector() &&
2868 (!Subtarget->hasNEON() || !Subtarget->isLittleEndian()))
2871 if (VT >= MVT::i1 && VT <= MVT::i64)
2873 else if ((VT >= MVT::f16 && VT <= MVT::f64) || VT.is64BitVector() ||
2874 VT.is128BitVector())
2879 if (GPRCnt > 8 || FPRCnt > 8)
2883 static const MCPhysReg Registers[6][8] = {
2884 { AArch64::W0, AArch64::W1, AArch64::W2, AArch64::W3, AArch64::W4,
2885 AArch64::W5, AArch64::W6, AArch64::W7 },
2886 { AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, AArch64::X4,
2887 AArch64::X5, AArch64::X6, AArch64::X7 },
2888 { AArch64::H0, AArch64::H1, AArch64::H2, AArch64::H3, AArch64::H4,
2889 AArch64::H5, AArch64::H6, AArch64::H7 },
2890 { AArch64::S0, AArch64::S1, AArch64::S2, AArch64::S3, AArch64::S4,
2891 AArch64::S5, AArch64::S6, AArch64::S7 },
2892 { AArch64::D0, AArch64::D1, AArch64::D2, AArch64::D3, AArch64::D4,
2893 AArch64::D5, AArch64::D6, AArch64::D7 },
2894 { AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,
2895 AArch64::Q5, AArch64::Q6, AArch64::Q7 }
2898 unsigned GPRIdx = 0;
2899 unsigned FPRIdx = 0;
2900 for (auto const &Arg : F->args()) {
2901 MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
2903 const TargetRegisterClass *RC;
2904 if (VT >= MVT::i1 && VT <= MVT::i32) {
2905 SrcReg = Registers[0][GPRIdx++];
2906 RC = &AArch64::GPR32RegClass;
2908 } else if (VT == MVT::i64) {
2909 SrcReg = Registers[1][GPRIdx++];
2910 RC = &AArch64::GPR64RegClass;
2911 } else if (VT == MVT::f16) {
2912 SrcReg = Registers[2][FPRIdx++];
2913 RC = &AArch64::FPR16RegClass;
2914 } else if (VT == MVT::f32) {
2915 SrcReg = Registers[3][FPRIdx++];
2916 RC = &AArch64::FPR32RegClass;
2917 } else if ((VT == MVT::f64) || VT.is64BitVector()) {
2918 SrcReg = Registers[4][FPRIdx++];
2919 RC = &AArch64::FPR64RegClass;
2920 } else if (VT.is128BitVector()) {
2921 SrcReg = Registers[5][FPRIdx++];
2922 RC = &AArch64::FPR128RegClass;
2924 llvm_unreachable("Unexpected value type.");
2926 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
2927 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
2928 // Without this, EmitLiveInCopies may eliminate the livein if its only
2929 // use is a bitcast (which isn't turned into an instruction).
2930 unsigned ResultReg = createResultReg(RC);
2931 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2932 TII.get(TargetOpcode::COPY), ResultReg)
2933 .addReg(DstReg, getKillRegState(true));
2934 updateValueMap(&Arg, ResultReg);
2939 bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
2940 SmallVectorImpl<MVT> &OutVTs,
2941 unsigned &NumBytes) {
2942 CallingConv::ID CC = CLI.CallConv;
2943 SmallVector<CCValAssign, 16> ArgLocs;
2944 CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
2945 CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
2947 // Get a count of how many bytes are to be pushed on the stack.
2948 NumBytes = CCInfo.getNextStackOffset();
2950 // Issue CALLSEQ_START
2951 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
2952 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
2955 // Process the args.
2956 for (CCValAssign &VA : ArgLocs) {
2957 const Value *ArgVal = CLI.OutVals[VA.getValNo()];
2958 MVT ArgVT = OutVTs[VA.getValNo()];
2960 unsigned ArgReg = getRegForValue(ArgVal);
2964 // Handle arg promotion: SExt, ZExt, AExt.
2965 switch (VA.getLocInfo()) {
2966 case CCValAssign::Full:
2968 case CCValAssign::SExt: {
2969 MVT DestVT = VA.getLocVT();
2971 ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
2976 case CCValAssign::AExt:
2977 // Intentional fall-through.
2978 case CCValAssign::ZExt: {
2979 MVT DestVT = VA.getLocVT();
2981 ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
2987 llvm_unreachable("Unknown arg promotion!");
2990 // Now copy/store arg to correct locations.
2991 if (VA.isRegLoc() && !VA.needsCustom()) {
2992 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2993 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
2994 CLI.OutRegs.push_back(VA.getLocReg());
2995 } else if (VA.needsCustom()) {
2996 // FIXME: Handle custom args.
2999 assert(VA.isMemLoc() && "Assuming store on stack.");
3001 // Don't emit stores for undef values.
3002 if (isa<UndefValue>(ArgVal))
3005 // Need to store on the stack.
3006 unsigned ArgSize = (ArgVT.getSizeInBits() + 7) / 8;
3008 unsigned BEAlign = 0;
3009 if (ArgSize < 8 && !Subtarget->isLittleEndian())
3010 BEAlign = 8 - ArgSize;
3013 Addr.setKind(Address::RegBase);
3014 Addr.setReg(AArch64::SP);
3015 Addr.setOffset(VA.getLocMemOffset() + BEAlign);
3017 unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
3018 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
3019 MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
3020 MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
3022 if (!emitStore(ArgVT, ArgReg, Addr, MMO))
3029 bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
3030 unsigned NumBytes) {
3031 CallingConv::ID CC = CLI.CallConv;
3033 // Issue CALLSEQ_END
3034 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
3035 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
3036 .addImm(NumBytes).addImm(0);
3038 // Now the return value.
3039 if (RetVT != MVT::isVoid) {
3040 SmallVector<CCValAssign, 16> RVLocs;
3041 CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
3042 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC));
3044 // Only handle a single return value.
3045 if (RVLocs.size() != 1)
3048 // Copy all of the result registers out of their specified physreg.
3049 MVT CopyVT = RVLocs[0].getValVT();
3051 // TODO: Handle big-endian results
3052 if (CopyVT.isVector() && !Subtarget->isLittleEndian())
3055 unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
3056 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3057 TII.get(TargetOpcode::COPY), ResultReg)
3058 .addReg(RVLocs[0].getLocReg());
3059 CLI.InRegs.push_back(RVLocs[0].getLocReg());
3061 CLI.ResultReg = ResultReg;
3062 CLI.NumResultRegs = 1;
3068 bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
3069 CallingConv::ID CC = CLI.CallConv;
3070 bool IsTailCall = CLI.IsTailCall;
3071 bool IsVarArg = CLI.IsVarArg;
3072 const Value *Callee = CLI.Callee;
3073 MCSymbol *Symbol = CLI.Symbol;
3075 if (!Callee && !Symbol)
3078 // Allow SelectionDAG isel to handle tail calls.
3082 CodeModel::Model CM = TM.getCodeModel();
3083 // Only support the small and large code model.
3084 if (CM != CodeModel::Small && CM != CodeModel::Large)
3087 // FIXME: Add large code model support for ELF.
3088 if (CM == CodeModel::Large && !Subtarget->isTargetMachO())
3091 // Let SDISel handle vararg functions.
3095 // FIXME: Only handle *simple* calls for now.
3097 if (CLI.RetTy->isVoidTy())
3098 RetVT = MVT::isVoid;
3099 else if (!isTypeLegal(CLI.RetTy, RetVT))
3102 for (auto Flag : CLI.OutFlags)
3103 if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
3106 // Set up the argument vectors.
3107 SmallVector<MVT, 16> OutVTs;
3108 OutVTs.reserve(CLI.OutVals.size());
3110 for (auto *Val : CLI.OutVals) {
3112 if (!isTypeLegal(Val->getType(), VT) &&
3113 !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
3116 // We don't handle vector parameters yet.
3117 if (VT.isVector() || VT.getSizeInBits() > 64)
3120 OutVTs.push_back(VT);
3124 if (Callee && !computeCallAddress(Callee, Addr))
3127 // Handle the arguments now that we've gotten them.
3129 if (!processCallArgs(CLI, OutVTs, NumBytes))
3133 MachineInstrBuilder MIB;
3134 if (CM == CodeModel::Small) {
3135 const MCInstrDesc &II = TII.get(Addr.getReg() ? AArch64::BLR : AArch64::BL);
3136 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II);
3138 MIB.addSym(Symbol, 0);
3139 else if (Addr.getGlobalValue())
3140 MIB.addGlobalAddress(Addr.getGlobalValue(), 0, 0);
3141 else if (Addr.getReg()) {
3142 unsigned Reg = constrainOperandRegClass(II, Addr.getReg(), 0);
3147 unsigned CallReg = 0;
3149 unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
3150 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
3152 .addSym(Symbol, AArch64II::MO_GOT | AArch64II::MO_PAGE);
3154 CallReg = createResultReg(&AArch64::GPR64RegClass);
3155 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3156 TII.get(AArch64::LDRXui), CallReg)
3159 AArch64II::MO_GOT | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
3160 } else if (Addr.getGlobalValue())
3161 CallReg = materializeGV(Addr.getGlobalValue());
3162 else if (Addr.getReg())
3163 CallReg = Addr.getReg();
3168 const MCInstrDesc &II = TII.get(AArch64::BLR);
3169 CallReg = constrainOperandRegClass(II, CallReg, 0);
3170 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(CallReg);
3173 // Add implicit physical register uses to the call.
3174 for (auto Reg : CLI.OutRegs)
3175 MIB.addReg(Reg, RegState::Implicit);
3177 // Add a register mask with the call-preserved registers.
3178 // Proper defs for return values will be added by setPhysRegsDeadExcept().
3179 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
3183 // Finish off the call including any return values.
3184 return finishCall(CLI, RetVT, NumBytes);
3187 bool AArch64FastISel::isMemCpySmall(uint64_t Len, unsigned Alignment) {
3189 return Alignment ? (Len / Alignment <= 4) : (Len < 32);
3194 bool AArch64FastISel::tryEmitSmallMemCpy(Address Dest, Address Src,
3195 uint64_t Len, unsigned Alignment) {
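// For example (registers illustrative), a 16-byte copy with 8-byte aligned
// operands is expanded to roughly:
//   ldr x8, [x1]     ; str x8, [x0]
//   ldr x9, [x1, #8] ; str x9, [x0, #8]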
3196 // Make sure we don't bloat code by inlining very large memcpy's.
3197 if (!isMemCpySmall(Len, Alignment))
3200 int64_t UnscaledOffset = 0;
3201 Address OrigDest = Dest;
3202 Address OrigSrc = Src;
3206 if (!Alignment || Alignment >= 8) {
3217 // Bound based on alignment.
3218 if (Len >= 4 && Alignment == 4)
3220 else if (Len >= 2 && Alignment == 2)
3227 unsigned ResultReg = emitLoad(VT, VT, Src);
3231 if (!emitStore(VT, ResultReg, Dest))
3234 int64_t Size = VT.getSizeInBits() / 8;
3236 UnscaledOffset += Size;
3238 // We need to recompute the unscaled offset for each iteration.
3239 Dest.setOffset(OrigDest.getOffset() + UnscaledOffset);
3240 Src.setOffset(OrigSrc.getOffset() + UnscaledOffset);
3246 /// \brief Check if it is possible to fold the condition from the XALU intrinsic
3247 /// into the user. The condition code will only be updated on success.
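/// A typical pattern this matches (illustrative IR):
///   %res  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
///   %val  = extractvalue { i32, i1 } %res, 0
///   %obit = extractvalue { i32, i1 } %res, 1
///   br i1 %obit, label %overflow, label %cont
/// Here the branch can reuse the flags set by the ADDS and branch on VS.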
3248 bool AArch64FastISel::foldXALUIntrinsic(AArch64CC::CondCode &CC,
3249 const Instruction *I,
3250 const Value *Cond) {
3251 if (!isa<ExtractValueInst>(Cond))
3254 const auto *EV = cast<ExtractValueInst>(Cond);
3255 if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
3258 const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
3260 const Function *Callee = II->getCalledFunction();
3262 cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
3263 if (!isTypeLegal(RetTy, RetVT))
3266 if (RetVT != MVT::i32 && RetVT != MVT::i64)
3269 const Value *LHS = II->getArgOperand(0);
3270 const Value *RHS = II->getArgOperand(1);
3272 // Canonicalize immediate to the RHS.
3273 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
3274 isCommutativeIntrinsic(II))
3275 std::swap(LHS, RHS);
3277 // Simplify multiplies.
3278 Intrinsic::ID IID = II->getIntrinsicID();
3282 case Intrinsic::smul_with_overflow:
3283 if (const auto *C = dyn_cast<ConstantInt>(RHS))
3284 if (C->getValue() == 2)
3285 IID = Intrinsic::sadd_with_overflow;
3287 case Intrinsic::umul_with_overflow:
3288 if (const auto *C = dyn_cast<ConstantInt>(RHS))
3289 if (C->getValue() == 2)
3290 IID = Intrinsic::uadd_with_overflow;
3294 AArch64CC::CondCode TmpCC;
3298 case Intrinsic::sadd_with_overflow:
3299 case Intrinsic::ssub_with_overflow:
3300 TmpCC = AArch64CC::VS;
3302 case Intrinsic::uadd_with_overflow:
3303 TmpCC = AArch64CC::HS;
3305 case Intrinsic::usub_with_overflow:
3306 TmpCC = AArch64CC::LO;
3308 case Intrinsic::smul_with_overflow:
3309 case Intrinsic::umul_with_overflow:
3310 TmpCC = AArch64CC::NE;
3314 // Check if both instructions are in the same basic block.
3315 if (!isValueAvailable(II))
3318 // Make sure nothing is in the way
3319 BasicBlock::const_iterator Start(I);
3320 BasicBlock::const_iterator End(II);
3321 for (auto Itr = std::prev(Start); Itr != End; --Itr) {
3322 // We only expect extractvalue instructions between the intrinsic and the
3323 // instruction to be selected.
3324 if (!isa<ExtractValueInst>(Itr))
3327 // Check that the extractvalue operand comes from the intrinsic.
3328 const auto *EVI = cast<ExtractValueInst>(Itr);
3329 if (EVI->getAggregateOperand() != II)
3337 bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
3338 // FIXME: Handle more intrinsics.
3339 switch (II->getIntrinsicID()) {
3340 default: return false;
3341 case Intrinsic::frameaddress: {
3342 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
3343 MFI->setFrameAddressIsTaken(true);
3345 const AArch64RegisterInfo *RegInfo =
3346 static_cast<const AArch64RegisterInfo *>(Subtarget->getRegisterInfo());
3347 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
3348 unsigned SrcReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
3349 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3350 TII.get(TargetOpcode::COPY), SrcReg).addReg(FramePtr);
3351 // Recursively load the frame address:
3352 //   ldr x0, [fp]; ldr x0, [x0]; ...
3356 unsigned DestReg;
3357 unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
3358 while (Depth--) {
3359 DestReg = fastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
3360 SrcReg, /*IsKill=*/true, 0);
3361 assert(DestReg && "Unexpected LDR instruction emission failure.");
3362 SrcReg = DestReg;
3363 }
3365 updateValueMap(II, SrcReg);
3368 case Intrinsic::memcpy:
3369 case Intrinsic::memmove: {
3370 const auto *MTI = cast<MemTransferInst>(II);
3371 // Don't handle volatile.
3372 if (MTI->isVolatile())
3375 // Disable inlining for memmove before calls to computeAddress. Otherwise,
3376 // we would emit dead code because we don't currently handle memmoves.
3377 bool IsMemCpy = (II->getIntrinsicID() == Intrinsic::memcpy);
3378 if (isa<ConstantInt>(MTI->getLength()) && IsMemCpy) {
3379 // Small memcpys are common enough that we want to do them without a call if possible.
3381 uint64_t Len = cast<ConstantInt>(MTI->getLength())->getZExtValue();
3382 unsigned Alignment = MTI->getAlignment();
3383 if (isMemCpySmall(Len, Alignment)) {
3385 if (!computeAddress(MTI->getRawDest(), Dest) ||
3386 !computeAddress(MTI->getRawSource(), Src))
3388 if (tryEmitSmallMemCpy(Dest, Src, Len, Alignment))
3393 if (!MTI->getLength()->getType()->isIntegerTy(64))
3396 if (MTI->getSourceAddressSpace() > 255 || MTI->getDestAddressSpace() > 255)
3397 // Fast instruction selection doesn't support the special address spaces.
3401 const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
3402 return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
3404 case Intrinsic::memset: {
3405 const MemSetInst *MSI = cast<MemSetInst>(II);
3406 // Don't handle volatile.
3407 if (MSI->isVolatile())
3410 if (!MSI->getLength()->getType()->isIntegerTy(64))
3413 if (MSI->getDestAddressSpace() > 255)
3414 // Fast instruction selection doesn't support the special address spaces.
3418 return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
3420 case Intrinsic::sin:
3421 case Intrinsic::cos:
3422 case Intrinsic::pow: {
3424 if (!isTypeLegal(II->getType(), RetVT))
3427 if (RetVT != MVT::f32 && RetVT != MVT::f64)
3430 static const RTLIB::Libcall LibCallTable[3][2] = {
3431 { RTLIB::SIN_F32, RTLIB::SIN_F64 },
3432 { RTLIB::COS_F32, RTLIB::COS_F64 },
3433 { RTLIB::POW_F32, RTLIB::POW_F64 }
3436 bool Is64Bit = RetVT == MVT::f64;
3437 switch (II->getIntrinsicID()) {
3439 llvm_unreachable("Unexpected intrinsic.");
3440 case Intrinsic::sin:
3441 LC = LibCallTable[0][Is64Bit];
3443 case Intrinsic::cos:
3444 LC = LibCallTable[1][Is64Bit];
3446 case Intrinsic::pow:
3447 LC = LibCallTable[2][Is64Bit];
3452 Args.reserve(II->getNumArgOperands());
3454 // Populate the argument list.
3455 for (auto &Arg : II->arg_operands()) {
3458 Entry.Ty = Arg->getType();
3459 Args.push_back(Entry);
3462 CallLoweringInfo CLI;
3463 MCContext &Ctx = MF->getContext();
3464 CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), II->getType(),
3465 TLI.getLibcallName(LC), std::move(Args));
3466 if (!lowerCallTo(CLI))
3468 updateValueMap(II, CLI.ResultReg);
3471 case Intrinsic::fabs: {
3473 if (!isTypeLegal(II->getType(), VT))
3477 switch (VT.SimpleTy) {
3481 Opc = AArch64::FABSSr;
3484 Opc = AArch64::FABSDr;
3487 unsigned SrcReg = getRegForValue(II->getOperand(0));
3490 bool SrcRegIsKill = hasTrivialKill(II->getOperand(0));
3491 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
3492 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
3493 .addReg(SrcReg, getKillRegState(SrcRegIsKill));
3494 updateValueMap(II, ResultReg);
3497 case Intrinsic::trap: {
3498 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
3502 case Intrinsic::sqrt: {
3503 Type *RetTy = II->getCalledFunction()->getReturnType();
3506 if (!isTypeLegal(RetTy, VT))
3509 unsigned Op0Reg = getRegForValue(II->getOperand(0));
3512 bool Op0IsKill = hasTrivialKill(II->getOperand(0));
3514 unsigned ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
3518 updateValueMap(II, ResultReg);
3521 case Intrinsic::sadd_with_overflow:
3522 case Intrinsic::uadd_with_overflow:
3523 case Intrinsic::ssub_with_overflow:
3524 case Intrinsic::usub_with_overflow:
3525 case Intrinsic::smul_with_overflow:
3526 case Intrinsic::umul_with_overflow: {
3527 // This implements the basic lowering of the xalu with overflow intrinsics.
3528 const Function *Callee = II->getCalledFunction();
3529 auto *Ty = cast<StructType>(Callee->getReturnType());
3530 Type *RetTy = Ty->getTypeAtIndex(0U);
3533 if (!isTypeLegal(RetTy, VT))
3536 if (VT != MVT::i32 && VT != MVT::i64)
3539 const Value *LHS = II->getArgOperand(0);
3540 const Value *RHS = II->getArgOperand(1);
3541 // Canonicalize immediate to the RHS.
3542 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
3543 isCommutativeIntrinsic(II))
3544 std::swap(LHS, RHS);
3546 // Simplify multiplies.
3547 Intrinsic::ID IID = II->getIntrinsicID();
3551 case Intrinsic::smul_with_overflow:
3552 if (const auto *C = dyn_cast<ConstantInt>(RHS))
3553 if (C->getValue() == 2) {
3554 IID = Intrinsic::sadd_with_overflow;
3558 case Intrinsic::umul_with_overflow:
3559 if (const auto *C = dyn_cast<ConstantInt>(RHS))
3560 if (C->getValue() == 2) {
3561 IID = Intrinsic::uadd_with_overflow;
3567 unsigned ResultReg1 = 0, ResultReg2 = 0, MulReg = 0;
3568 AArch64CC::CondCode CC = AArch64CC::Invalid;
3570 default: llvm_unreachable("Unexpected intrinsic!");
3571 case Intrinsic::sadd_with_overflow:
3572 ResultReg1 = emitAdd(VT, LHS, RHS, /*SetFlags=*/true);
3575 case Intrinsic::uadd_with_overflow:
3576 ResultReg1 = emitAdd(VT, LHS, RHS, /*SetFlags=*/true);
3579 case Intrinsic::ssub_with_overflow:
3580 ResultReg1 = emitSub(VT, LHS, RHS, /*SetFlags=*/true);
3583 case Intrinsic::usub_with_overflow:
3584 ResultReg1 = emitSub(VT, LHS, RHS, /*SetFlags=*/true);
3587 case Intrinsic::smul_with_overflow: {
3589 unsigned LHSReg = getRegForValue(LHS);
3592 bool LHSIsKill = hasTrivialKill(LHS);
3594 unsigned RHSReg = getRegForValue(RHS);
3597 bool RHSIsKill = hasTrivialKill(RHS);
3599 if (VT == MVT::i32) {
3600 MulReg = emitSMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
3601 unsigned ShiftReg = emitLSR_ri(MVT::i64, MVT::i64, MulReg,
3602 /*IsKill=*/false, 32);
3603 MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
3605 ShiftReg = fastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
3607 emitSubs_rs(VT, ShiftReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
3608 AArch64_AM::ASR, 31, /*WantResult=*/false);
3610 assert(VT == MVT::i64 && "Unexpected value type.");
3611 // LHSReg and RHSReg cannot be killed by this Mul, since they are
3612 // reused in the next instruction.
3613 MulReg = emitMul_rr(VT, LHSReg, /*IsKill=*/false, RHSReg,
3615 unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
3617 emitSubs_rs(VT, SMULHReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
3618 AArch64_AM::ASR, 63, /*WantResult=*/false);
3622 case Intrinsic::umul_with_overflow: {
3624 unsigned LHSReg = getRegForValue(LHS);
3627 bool LHSIsKill = hasTrivialKill(LHS);
3629 unsigned RHSReg = getRegForValue(RHS);
3632 bool RHSIsKill = hasTrivialKill(RHS);
3634 if (VT == MVT::i32) {
3635 MulReg = emitUMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
3636 emitSubs_rs(MVT::i64, AArch64::XZR, /*IsKill=*/true, MulReg,
3637 /*IsKill=*/false, AArch64_AM::LSR, 32,
3638 /*WantResult=*/false);
3639 MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
3642 assert(VT == MVT::i64 && "Unexpected value type.");
3643 // LHSReg and RHSReg cannot be killed by this Mul, since they are
3644 // reused in the next instruction.
3645 MulReg = emitMul_rr(VT, LHSReg, /*IsKill=*/false, RHSReg,
3647 unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
3649 emitSubs_rr(VT, AArch64::XZR, /*IsKill=*/true, UMULHReg,
3650 /*IsKill=*/false, /*WantResult=*/false);
3657 ResultReg1 = createResultReg(TLI.getRegClassFor(VT));
3658 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3659 TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);
3662 ResultReg2 = fastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
3663 AArch64::WZR, /*IsKill=*/true, AArch64::WZR,
3664 /*IsKill=*/true, getInvertedCondCode(CC));
3666 assert((ResultReg1 + 1) == ResultReg2 &&
3667 "Nonconsecutive result registers.");
3668 updateValueMap(II, ResultReg1, 2);
3675 bool AArch64FastISel::selectRet(const Instruction *I) {
3676 const ReturnInst *Ret = cast<ReturnInst>(I);
3677 const Function &F = *I->getParent()->getParent();
3679 if (!FuncInfo.CanLowerReturn)
3685 // Build a list of return value registers.
3686 SmallVector<unsigned, 4> RetRegs;
3688 if (Ret->getNumOperands() > 0) {
3689 CallingConv::ID CC = F.getCallingConv();
3690 SmallVector<ISD::OutputArg, 4> Outs;
3691 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
3693 // Analyze operands of the call, assigning locations to each operand.
3694 SmallVector<CCValAssign, 16> ValLocs;
3695 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
3696 CCAssignFn *RetCC = CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
3697 : RetCC_AArch64_AAPCS;
3698 CCInfo.AnalyzeReturn(Outs, RetCC);
3700 // Only handle a single return value for now.
3701 if (ValLocs.size() != 1)
3704 CCValAssign &VA = ValLocs[0];
3705 const Value *RV = Ret->getOperand(0);
3707 // Don't bother handling odd stuff for now.
3708 if ((VA.getLocInfo() != CCValAssign::Full) &&
3709 (VA.getLocInfo() != CCValAssign::BCvt))
3712 // Only handle register returns for now.
3716 unsigned Reg = getRegForValue(RV);
3720 unsigned SrcReg = Reg + VA.getValNo();
3721 unsigned DestReg = VA.getLocReg();
3722 // Avoid a cross-class copy. This is very unlikely.
3723 if (!MRI.getRegClass(SrcReg)->contains(DestReg))
3726 EVT RVEVT = TLI.getValueType(DL, RV->getType());
3727 if (!RVEVT.isSimple())
3730 // Vectors (of > 1 lane) in big endian need tricky handling.
3731 if (RVEVT.isVector() && RVEVT.getVectorNumElements() > 1 &&
3732 !Subtarget->isLittleEndian())
3735 MVT RVVT = RVEVT.getSimpleVT();
3736 if (RVVT == MVT::f128)
3739 MVT DestVT = VA.getValVT();
3740 // Special handling for extended integers.
3741 if (RVVT != DestVT) {
3742 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
3745 if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
3748 bool IsZExt = Outs[0].Flags.isZExt();
3749 SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
3755 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3756 TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
3758 // Add register to return instruction.
3759 RetRegs.push_back(VA.getLocReg());
3762 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3763 TII.get(AArch64::RET_ReallyLR));
3764 for (unsigned RetReg : RetRegs)
3765 MIB.addReg(RetReg, RegState::Implicit);
3769 bool AArch64FastISel::selectTrunc(const Instruction *I) {
3770 Type *DestTy = I->getType();
3771 Value *Op = I->getOperand(0);
3772 Type *SrcTy = Op->getType();
3774 EVT SrcEVT = TLI.getValueType(DL, SrcTy, true);
3775 EVT DestEVT = TLI.getValueType(DL, DestTy, true);
3776 if (!SrcEVT.isSimple())
3778 if (!DestEVT.isSimple())
3781 MVT SrcVT = SrcEVT.getSimpleVT();
3782 MVT DestVT = DestEVT.getSimpleVT();
3784 if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
3787 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 &&
3791 unsigned SrcReg = getRegForValue(Op);
3794 bool SrcIsKill = hasTrivialKill(Op);
3796 // If we're truncating from i64 to a smaller non-legal type then generate an
3797 // AND. Otherwise, we know the high bits are undefined and a truncate only
3798 // generates a COPY. We cannot also mark the source register as the result
3799 // register, because this can incorrectly transfer the kill flag onto the source register.
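// For example (registers illustrative), "trunc i64 %x to i8" becomes roughly:
//   %w8 = EXTRACT_SUBREG %x0, sub_32
//   and  w8, w8, #0xff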
3802 if (SrcVT == MVT::i64) {
3804 switch (DestVT.SimpleTy) {
3806 // Trunc i64 to i32 is handled by the target-independent fast-isel.
3818 // Issue an extract_subreg to get the lower 32 bits.
3819 unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
3821 // Create the AND instruction which performs the actual truncation.
3822 ResultReg = emitAnd_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask);
3823 assert(ResultReg && "Unexpected AND instruction emission failure.");
3825 ResultReg = createResultReg(&AArch64::GPR32RegClass);
3826 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3827 TII.get(TargetOpcode::COPY), ResultReg)
3828 .addReg(SrcReg, getKillRegState(SrcIsKill));
3831 updateValueMap(I, ResultReg);
3835 unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) {
3836 assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 ||
3837 DestVT == MVT::i64) &&
3838 "Unexpected value type.");
3839 // Handle i8 and i16 as i32.
3840 if (DestVT == MVT::i8 || DestVT == MVT::i16)
3844 unsigned ResultReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
3845 assert(ResultReg && "Unexpected AND instruction emission failure.");
3846 if (DestVT == MVT::i64) {
3847 // We're zero-extending an i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the
3848 // upper 32 bits. Emit a SUBREG_TO_REG to extend from Wd to Xd.
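// For example (registers illustrative):
//   and  w8, w8, #0x1
//   %x9 = SUBREG_TO_REG 0, %w8, sub_32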
3849 unsigned Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
3850 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3851 TII.get(AArch64::SUBREG_TO_REG), Reg64)
3854 .addImm(AArch64::sub_32);
3859 if (DestVT == MVT::i64) {
3860 // FIXME: We're SExt i1 to i64.
3863 return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
3864 /*TODO:IsKill=*/false, 0, 0);
3868 unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
3869 unsigned Op1, bool Op1IsKill) {
3871 switch (RetVT.SimpleTy) {
3877 Opc = AArch64::MADDWrrr; ZReg = AArch64::WZR; break;
3879 Opc = AArch64::MADDXrrr; ZReg = AArch64::XZR; break;
3882 const TargetRegisterClass *RC =
3883 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
3884 return fastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
3885 ZReg, /*IsKill=*/true);
3888 unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
3889 unsigned Op1, bool Op1IsKill) {
3890 if (RetVT != MVT::i64)
3893 return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
3894 Op0, Op0IsKill, Op1, Op1IsKill,
3895 AArch64::XZR, /*IsKill=*/true);
3898 unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
3899 unsigned Op1, bool Op1IsKill) {
3900 if (RetVT != MVT::i64)
3903 return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
3904 Op0, Op0IsKill, Op1, Op1IsKill,
3905 AArch64::XZR, /*IsKill=*/true);
3908 unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
3909 unsigned Op1Reg, bool Op1IsKill) {
3911 bool NeedTrunc = false;
3913 switch (RetVT.SimpleTy) {
3915 case MVT::i8: Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xff; break;
3916 case MVT::i16: Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xffff; break;
3917 case MVT::i32: Opc = AArch64::LSLVWr; break;
3918 case MVT::i64: Opc = AArch64::LSLVXr; break;
3921 const TargetRegisterClass *RC =
3922 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
3924 Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
3927 unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
3930 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
3934 unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
3935 bool Op0IsKill, uint64_t Shift,
3937 assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
3938 "Unexpected source/return type pair.");
3939 assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
3940 SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
3941 "Unexpected source value type.");
3942 assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
3943 RetVT == MVT::i64) && "Unexpected return value type.");
3945 bool Is64Bit = (RetVT == MVT::i64);
3946 unsigned RegSize = Is64Bit ? 64 : 32;
3947 unsigned DstBits = RetVT.getSizeInBits();
3948 unsigned SrcBits = SrcVT.getSizeInBits();
3949 const TargetRegisterClass *RC =
3950 Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
3952 // Just emit a copy for "zero" shifts.
3954 if (RetVT == SrcVT) {
3955 unsigned ResultReg = createResultReg(RC);
3956 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3957 TII.get(TargetOpcode::COPY), ResultReg)
3958 .addReg(Op0, getKillRegState(Op0IsKill));
3961 return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
3964 // Don't deal with undefined shifts.
3965 if (Shift >= DstBits)
3968 // For immediate shifts we can fold the zero-/sign-extension into the shift.
3969 // {S|U}BFM Wd, Wn, #r, #s
3970 // Wd<32+s-r,32-r> = Wn<s:0> when r > s
3972 // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
3973 // %2 = shl i16 %1, 4
3974 // Wd<32+7-28,32-28> = Wn<7:0> <- clamp s to 7
3975 // 0b1111_1111_1111_1111__1111_1010_1010_0000 sext
3976 // 0b0000_0000_0000_0000__0000_0101_0101_0000 sext | zext
3977 // 0b0000_0000_0000_0000__0000_1010_1010_0000 zext
3979 // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
3980 // %2 = shl i16 %1, 8
3981 // Wd<32+7-24,32-24> = Wn<7:0>
3982 // 0b1111_1111_1111_1111__1010_1010_0000_0000 sext
3983 // 0b0000_0000_0000_0000__0101_0101_0000_0000 sext | zext
3984 // 0b0000_0000_0000_0000__1010_1010_0000_0000 zext
3986 // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
3987 // %2 = shl i16 %1, 12
3988 // Wd<32+3-20,32-20> = Wn<3:0>
3989 // 0b1111_1111_1111_1111__1010_0000_0000_0000 sext
3990 // 0b0000_0000_0000_0000__0101_0000_0000_0000 sext | zext
3991 // 0b0000_0000_0000_0000__1010_0000_0000_0000 zext
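// For example (registers illustrative), "%1 = zext i8 %a to i32; %2 = shl i32 %1, 4"
// folds into a single "ubfm w8, w9, #28, #7", i.e. "ubfiz w8, w9, #4, #8".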
3993 unsigned ImmR = RegSize - Shift;
3994 // Limit the width to the length of the source type.
3995 unsigned ImmS = std::min<unsigned>(SrcBits - 1, DstBits - 1 - Shift);
3996 static const unsigned OpcTable[2][2] = {
3997 {AArch64::SBFMWri, AArch64::SBFMXri},
3998 {AArch64::UBFMWri, AArch64::UBFMXri}
4000 unsigned Opc = OpcTable[IsZExt][Is64Bit];
4001 if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
4002 unsigned TmpReg = MRI.createVirtualRegister(RC);
4003 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
4004 TII.get(AArch64::SUBREG_TO_REG), TmpReg)
4006 .addReg(Op0, getKillRegState(Op0IsKill))
4007 .addImm(AArch64::sub_32);
4011 return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
4014 unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
4015 unsigned Op1Reg, bool Op1IsKill) {
4017 bool NeedTrunc = false;
4019 switch (RetVT.SimpleTy) {
4021 case MVT::i8: Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xff; break;
4022 case MVT::i16: Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xffff; break;
4023 case MVT::i32: Opc = AArch64::LSRVWr; break;
4024 case MVT::i64: Opc = AArch64::LSRVXr; break;
4027 const TargetRegisterClass *RC =
4028 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
4030 Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Op0IsKill, Mask);
4031 Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
4032 Op0IsKill = Op1IsKill = true;
4034 unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
4037 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
4041 unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
4042 bool Op0IsKill, uint64_t Shift,
4044 assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
4045 "Unexpected source/return type pair.");
4046 assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
4047 SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
4048 "Unexpected source value type.");
4049 assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
4050 RetVT == MVT::i64) && "Unexpected return value type.");
4052 bool Is64Bit = (RetVT == MVT::i64);
4053 unsigned RegSize = Is64Bit ? 64 : 32;
4054 unsigned DstBits = RetVT.getSizeInBits();
4055 unsigned SrcBits = SrcVT.getSizeInBits();
4056 const TargetRegisterClass *RC =
4057 Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
4059 // Just emit a copy for "zero" shifts.
4061 if (RetVT == SrcVT) {
4062 unsigned ResultReg = createResultReg(RC);
4063 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
4064 TII.get(TargetOpcode::COPY), ResultReg)
4065 .addReg(Op0, getKillRegState(Op0IsKill));
4068 return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
4071 // Don't deal with undefined shifts.
4072 if (Shift >= DstBits)
4075 // For immediate shifts we can fold the zero-/sign-extension into the shift.
4076 // {S|U}BFM Wd, Wn, #r, #s
4077 // Wd<s-r:0> = Wn<s:r> when r <= s
4079 // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
4080 // %2 = lshr i16 %1, 4
4081 // Wd<7-4:0> = Wn<7:4>
4082 // 0b0000_0000_0000_0000__0000_1111_1111_1010 sext
4083 // 0b0000_0000_0000_0000__0000_0000_0000_0101 sext | zext
4084 // 0b0000_0000_0000_0000__0000_0000_0000_1010 zext
4086 // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
4087 // %2 = lshr i16 %1, 8
4088 // Wd<7-7:0> = Wn<7:7>
4089 // 0b0000_0000_0000_0000__0000_0000_1111_1111 sext
4090 // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
4091 // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
4093 // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
4094 // %2 = lshr i16 %1, 12
4095 // Wd<7-7:0> = Wn<7:7> <- clamp r to 7
4096 // 0b0000_0000_0000_0000__0000_0000_0000_1111 sext
4097 // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
4098 // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
4100 if (Shift >= SrcBits && IsZExt)
4101 return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);
4103 // It is not possible to fold a sign-extend into the LShr instruction. In this
4104 // case emit a sign-extend.
4106 Op0 = emitIntExt(SrcVT, Op0, RetVT, IsZExt);
4111 SrcBits = SrcVT.getSizeInBits();
4115 unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
4116 unsigned ImmS = SrcBits - 1;
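// For example, in the first case above (zext i8 source, lshr i16 by 4):
// ImmR = min(8 - 1, 4) = 4 and ImmS = 7, so UBFM Wd, Wn, #4, #7 extracts
// Wn<7:4> into the low bits of Wd, matching the bit patterns shown above.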
4117 static const unsigned OpcTable[2][2] = {
4118 {AArch64::SBFMWri, AArch64::SBFMXri},
4119 {AArch64::UBFMWri, AArch64::UBFMXri}
4121 unsigned Opc = OpcTable[IsZExt][Is64Bit];
4122 if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
4123 unsigned TmpReg = MRI.createVirtualRegister(RC);
4124 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
4125 TII.get(AArch64::SUBREG_TO_REG), TmpReg)
4127 .addReg(Op0, getKillRegState(Op0IsKill))
4128 .addImm(AArch64::sub_32);
4132 return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
4135 unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
4136 unsigned Op1Reg, bool Op1IsKill) {
4138 bool NeedTrunc = false;
4140 switch (RetVT.SimpleTy) {
4142 case MVT::i8: Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xff; break;
4143 case MVT::i16: Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xffff; break;
4144 case MVT::i32: Opc = AArch64::ASRVWr; break;
4145 case MVT::i64: Opc = AArch64::ASRVXr; break;
4148 const TargetRegisterClass *RC =
4149 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
4151 Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*IsZExt=*/false);
4152 Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
4153 Op0IsKill = Op1IsKill = true;
4155 unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
4158 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
4162 unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
4163 bool Op0IsKill, uint64_t Shift,
4165 assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
4166 "Unexpected source/return type pair.");
4167 assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
4168 SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
4169 "Unexpected source value type.");
4170 assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
4171 RetVT == MVT::i64) && "Unexpected return value type.");
4173 bool Is64Bit = (RetVT == MVT::i64);
4174 unsigned RegSize = Is64Bit ? 64 : 32;
4175 unsigned DstBits = RetVT.getSizeInBits();
4176 unsigned SrcBits = SrcVT.getSizeInBits();
4177 const TargetRegisterClass *RC =
4178 Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
4180 // Just emit a copy for "zero" shifts.
4182 if (RetVT == SrcVT) {
4183 unsigned ResultReg = createResultReg(RC);
4184 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
4185 TII.get(TargetOpcode::COPY), ResultReg)
4186 .addReg(Op0, getKillRegState(Op0IsKill));
4189 return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
4192 // Don't deal with undefined shifts.
4193 if (Shift >= DstBits)
4196 // For immediate shifts we can fold the zero-/sign-extension into the shift.
4197 // {S|U}BFM Wd, Wn, #r, #s
4198 // Wd<s-r:0> = Wn<s:r> when r <= s
4200 // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
4201 // %2 = ashr i16 %1, 4
4202 // Wd<7-4:0> = Wn<7:4>
4203 // 0b1111_1111_1111_1111__1111_1111_1111_1010 sext
4204 // 0b0000_0000_0000_0000__0000_0000_0000_0101 sext | zext
4205 // 0b0000_0000_0000_0000__0000_0000_0000_1010 zext
4207 // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
4208 // %2 = ashr i16 %1, 8
4209 // Wd<7-7:0> = Wn<7:7>
4210 // 0b1111_1111_1111_1111__1111_1111_1111_1111 sext
4211 // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
4212 // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
4214 // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
4215 // %2 = ashr i16 %1, 12
4216 // Wd<7-7:0> = Wn<7:7> <- clamp r to 7
4217 // 0b1111_1111_1111_1111__1111_1111_1111_1111 sext
4218 // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
4219 // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
4221 if (Shift >= SrcBits && IsZExt)
4222 return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);
4224 unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
4225 unsigned ImmS = SrcBits - 1;
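// For example, in the last case above (sext i8 source, ashr i16 by 12): ImmR is
// clamped to min(8 - 1, 12) = 7 and ImmS = 7, so SBFM Wd, Wn, #7, #7 simply
// replicates the sign bit Wn<7> across Wd.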
4226 static const unsigned OpcTable[2][2] = {
4227 {AArch64::SBFMWri, AArch64::SBFMXri},
4228 {AArch64::UBFMWri, AArch64::UBFMXri}
4230 unsigned Opc = OpcTable[IsZExt][Is64Bit];
4231 if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
4232 unsigned TmpReg = MRI.createVirtualRegister(RC);
4233 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
4234 TII.get(AArch64::SUBREG_TO_REG), TmpReg)
4236 .addReg(Op0, getKillRegState(Op0IsKill))
4237 .addImm(AArch64::sub_32);
4241 return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
4244 unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
4246 assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?");
4248 // FastISel does not have plumbing to deal with extensions where the SrcVT or
4249 // DestVT are odd things, so test to make sure that they are both types we can
4250 // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
4251 // bail out to SelectionDAG.
4252 if (((DestVT != MVT::i8) && (DestVT != MVT::i16) &&
4253 (DestVT != MVT::i32) && (DestVT != MVT::i64)) ||
4254 ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) &&
4255 (SrcVT != MVT::i16) && (SrcVT != MVT::i32)))
4261 switch (SrcVT.SimpleTy) {
4265 return emiti1Ext(SrcReg, DestVT, IsZExt);
4267 if (DestVT == MVT::i64)
4268 Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
4270 Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
4274 if (DestVT == MVT::i64)
4275 Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
4277 Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
4281 assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?");
4282 Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
4287 // Handle i8 and i16 as i32.
4288 if (DestVT == MVT::i8 || DestVT == MVT::i16)
4290 else if (DestVT == MVT::i64) {
4291 unsigned Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
4292 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
4293 TII.get(AArch64::SUBREG_TO_REG), Src64)
4296 .addImm(AArch64::sub_32);
4300 const TargetRegisterClass *RC =
4301 (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
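// The extension is a bitfield move with ImmR = 0 and ImmS = Imm (source width
// minus one); e.g. a zero-extend from i8 to i32 becomes UBFMWri Wd, Wn, #0, #7,
// i.e. the UXTB alias.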
4302 return fastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
4305 static bool isZExtLoad(const MachineInstr *LI) {
4306 switch (LI->getOpcode()) {
4309 case AArch64::LDURBBi:
4310 case AArch64::LDURHHi:
4311 case AArch64::LDURWi:
4312 case AArch64::LDRBBui:
4313 case AArch64::LDRHHui:
4314 case AArch64::LDRWui:
4315 case AArch64::LDRBBroX:
4316 case AArch64::LDRHHroX:
4317 case AArch64::LDRWroX:
4318 case AArch64::LDRBBroW:
4319 case AArch64::LDRHHroW:
4320 case AArch64::LDRWroW:
4325 static bool isSExtLoad(const MachineInstr *LI) {
4326 switch (LI->getOpcode()) {
4329 case AArch64::LDURSBWi:
4330 case AArch64::LDURSHWi:
4331 case AArch64::LDURSBXi:
4332 case AArch64::LDURSHXi:
4333 case AArch64::LDURSWi:
4334 case AArch64::LDRSBWui:
4335 case AArch64::LDRSHWui:
4336 case AArch64::LDRSBXui:
4337 case AArch64::LDRSHXui:
4338 case AArch64::LDRSWui:
4339 case AArch64::LDRSBWroX:
4340 case AArch64::LDRSHWroX:
4341 case AArch64::LDRSBXroX:
4342 case AArch64::LDRSHXroX:
4343 case AArch64::LDRSWroX:
4344 case AArch64::LDRSBWroW:
4345 case AArch64::LDRSHWroW:
4346 case AArch64::LDRSBXroW:
4347 case AArch64::LDRSHXroW:
4348 case AArch64::LDRSWroW:
4353 bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
4355 const auto *LI = dyn_cast<LoadInst>(I->getOperand(0));
4356 if (!LI || !LI->hasOneUse())
4359 // Check if the load instruction has already been selected.
4360 unsigned Reg = lookUpRegForValue(LI);
4364 MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
4368 // Check if the correct load instruction has been emitted - SelectionDAG might
4369 // have emitted a zero-extending load, but we need a sign-extending load.
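// For example, for
//   %1 = load i8, i8* %p
//   %2 = zext i8 %1 to i32
// the load has typically already been selected to LDRBBui, which zero-extends
// into a 32-bit register, so the explicit extend can simply reuse that result.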
4370 bool IsZExt = isa<ZExtInst>(I);
4371 const auto *LoadMI = MI;
4372 if (LoadMI->getOpcode() == TargetOpcode::COPY &&
4373 LoadMI->getOperand(1).getSubReg() == AArch64::sub_32) {
4374 unsigned LoadReg = MI->getOperand(1).getReg();
4375 LoadMI = MRI.getUniqueVRegDef(LoadReg);
4376 assert(LoadMI && "Expected valid instruction");
4378 if (!(IsZExt && isZExtLoad(LoadMI)) && !(!IsZExt && isSExtLoad(LoadMI)))
4381 // Nothing to be done.
4382 if (RetVT != MVT::i64 || SrcVT > MVT::i32) {
4383 updateValueMap(I, Reg);
4388 unsigned Reg64 = createResultReg(&AArch64::GPR64RegClass);
4389 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
4390 TII.get(AArch64::SUBREG_TO_REG), Reg64)
4392 .addReg(Reg, getKillRegState(true))
4393 .addImm(AArch64::sub_32);
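// The SUBREG_TO_REG above needs no additional instruction: a write to a
// W register implicitly zeroes the upper 32 bits of the corresponding
// X register, so the value is already zero-extended to 64 bits.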
4396 assert((MI->getOpcode() == TargetOpcode::COPY &&
4397 MI->getOperand(1).getSubReg() == AArch64::sub_32) &&
4398 "Expected copy instruction");
4399 Reg = MI->getOperand(1).getReg();
4400 MI->eraseFromParent();
4402 updateValueMap(I, Reg);
4406 bool AArch64FastISel::selectIntExt(const Instruction *I) {
4407 assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
4408 "Unexpected integer extend instruction.");
4411 if (!isTypeSupported(I->getType(), RetVT))
4414 if (!isTypeSupported(I->getOperand(0)->getType(), SrcVT))
4417 // Try to optimize already sign-/zero-extended values from load instructions.
4418 if (optimizeIntExtLoad(I, RetVT, SrcVT))
4421 unsigned SrcReg = getRegForValue(I->getOperand(0));
4424 bool SrcIsKill = hasTrivialKill(I->getOperand(0));
4426 // Try to optimize already sign-/zero-extended values from function arguments.
4427 bool IsZExt = isa<ZExtInst>(I);
4428 if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0))) {
4429 if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
4430 if (RetVT == MVT::i64 && SrcVT != MVT::i64) {
4431 unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
4432 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
4433 TII.get(AArch64::SUBREG_TO_REG), ResultReg)
4435 .addReg(SrcReg, getKillRegState(SrcIsKill))
4436 .addImm(AArch64::sub_32);
4439 // Conservatively clear all kill flags from all uses, because we are
4440 // replacing a sign-/zero-extend instruction at IR level with a nop at MI
4441 // level. The result of the instruction at IR level might have been
4442 // trivially dead, which is now no longer true.
4443 unsigned UseReg = lookUpRegForValue(I);
4445 MRI.clearKillFlags(UseReg);
4447 updateValueMap(I, SrcReg);
4452 unsigned ResultReg = emitIntExt(SrcVT, SrcReg, RetVT, IsZExt);
4456 updateValueMap(I, ResultReg);
4460 bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) {
4461 EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
4462 if (!DestEVT.isSimple())
4465 MVT DestVT = DestEVT.getSimpleVT();
4466 if (DestVT != MVT::i64 && DestVT != MVT::i32)
4470 bool Is64bit = (DestVT == MVT::i64);
4471 switch (ISDOpcode) {
4475 DivOpc = Is64bit ? AArch64::SDIVXr : AArch64::SDIVWr;
4478 DivOpc = Is64bit ? AArch64::UDIVXr : AArch64::UDIVWr;
4481 unsigned MSubOpc = Is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
4482 unsigned Src0Reg = getRegForValue(I->getOperand(0));
4485 bool Src0IsKill = hasTrivialKill(I->getOperand(0));
4487 unsigned Src1Reg = getRegForValue(I->getOperand(1));
4490 bool Src1IsKill = hasTrivialKill(I->getOperand(1));
4492 const TargetRegisterClass *RC =
4493 (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
4494 unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
4495 Src1Reg, /*IsKill=*/false);
4496 assert(QuotReg && "Unexpected DIV instruction emission failure.");
4497 // The remainder is computed as numerator - (quotient * denominator) using the
4498 // MSUB instruction.
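// Roughly, srem i32 %a, %b becomes (register numbers illustrative):
//   sdiv w8, w0, w1
//   msub w0, w8, w1, w0   ; w0 = w0 - w8 * w1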
4499 unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
4500 Src1Reg, Src1IsKill, Src0Reg,
4502 updateValueMap(I, ResultReg);
4506 bool AArch64FastISel::selectMul(const Instruction *I) {
4508 if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
4512 return selectBinaryOp(I, ISD::MUL);
4514 const Value *Src0 = I->getOperand(0);
4515 const Value *Src1 = I->getOperand(1);
4516 if (const auto *C = dyn_cast<ConstantInt>(Src0))
4517 if (C->getValue().isPowerOf2())
4518 std::swap(Src0, Src1);
4520 // Try to simplify to a shift instruction.
4521 if (const auto *C = dyn_cast<ConstantInt>(Src1))
4522 if (C->getValue().isPowerOf2()) {
4523 uint64_t ShiftVal = C->getValue().logBase2();
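// For example, mul i32 %x, 8 is lowered as a left shift by 3 via emitLSL_ri
// (a single LSL/UBFM) instead of an actual multiply.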
4526 if (const auto *ZExt = dyn_cast<ZExtInst>(Src0)) {
4527 if (!isIntExtFree(ZExt)) {
4529 if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), VT)) {
4532 Src0 = ZExt->getOperand(0);
4535 } else if (const auto *SExt = dyn_cast<SExtInst>(Src0)) {
4536 if (!isIntExtFree(SExt)) {
4538 if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), VT)) {
4541 Src0 = SExt->getOperand(0);
4546 unsigned Src0Reg = getRegForValue(Src0);
4549 bool Src0IsKill = hasTrivialKill(Src0);
4551 unsigned ResultReg =
4552 emitLSL_ri(VT, SrcVT, Src0Reg, Src0IsKill, ShiftVal, IsZExt);
4555 updateValueMap(I, ResultReg);
4560 unsigned Src0Reg = getRegForValue(I->getOperand(0));
4563 bool Src0IsKill = hasTrivialKill(I->getOperand(0));
4565 unsigned Src1Reg = getRegForValue(I->getOperand(1));
4568 bool Src1IsKill = hasTrivialKill(I->getOperand(1));
4570 unsigned ResultReg = emitMul_rr(VT, Src0Reg, Src0IsKill, Src1Reg, Src1IsKill);
4575 updateValueMap(I, ResultReg);
4579 bool AArch64FastISel::selectShift(const Instruction *I) {
4581 if (!isTypeSupported(I->getType(), RetVT, /*IsVectorAllowed=*/true))
4584 if (RetVT.isVector())
4585 return selectOperator(I, I->getOpcode());
4587 if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
4588 unsigned ResultReg = 0;
4589 uint64_t ShiftVal = C->getZExtValue();
4591 bool IsZExt = I->getOpcode() != Instruction::AShr;
4592 const Value *Op0 = I->getOperand(0);
4593 if (const auto *ZExt = dyn_cast<ZExtInst>(Op0)) {
4594 if (!isIntExtFree(ZExt)) {
4596 if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), TmpVT)) {
4599 Op0 = ZExt->getOperand(0);
4602 } else if (const auto *SExt = dyn_cast<SExtInst>(Op0)) {
4603 if (!isIntExtFree(SExt)) {
4605 if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), TmpVT)) {
4608 Op0 = SExt->getOperand(0);
4613 unsigned Op0Reg = getRegForValue(Op0);
4616 bool Op0IsKill = hasTrivialKill(Op0);
4618 switch (I->getOpcode()) {
4619 default: llvm_unreachable("Unexpected instruction.");
4620 case Instruction::Shl:
4621 ResultReg = emitLSL_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt);
4623 case Instruction::AShr:
4624 ResultReg = emitASR_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt);
4626 case Instruction::LShr:
4627 ResultReg = emitLSR_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt);
4633 updateValueMap(I, ResultReg);
4637 unsigned Op0Reg = getRegForValue(I->getOperand(0));
4640 bool Op0IsKill = hasTrivialKill(I->getOperand(0));
4642 unsigned Op1Reg = getRegForValue(I->getOperand(1));
4645 bool Op1IsKill = hasTrivialKill(I->getOperand(1));
4647 unsigned ResultReg = 0;
4648 switch (I->getOpcode()) {
4649 default: llvm_unreachable("Unexpected instruction.");
4650 case Instruction::Shl:
4651 ResultReg = emitLSL_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill);
4653 case Instruction::AShr:
4654 ResultReg = emitASR_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill);
4656 case Instruction::LShr:
4657 ResultReg = emitLSR_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill);
4664 updateValueMap(I, ResultReg);
4668 bool AArch64FastISel::selectBitCast(const Instruction *I) {
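// Only same-width GPR<->FPR bitcasts are handled here; e.g. a bitcast from i32
// to float becomes a single FMOV (FMOVWSr). Anything else is rejected.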
4671 if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT))
4673 if (!isTypeLegal(I->getType(), RetVT))
4677 if (RetVT == MVT::f32 && SrcVT == MVT::i32)
4678 Opc = AArch64::FMOVWSr;
4679 else if (RetVT == MVT::f64 && SrcVT == MVT::i64)
4680 Opc = AArch64::FMOVXDr;
4681 else if (RetVT == MVT::i32 && SrcVT == MVT::f32)
4682 Opc = AArch64::FMOVSWr;
4683 else if (RetVT == MVT::i64 && SrcVT == MVT::f64)
4684 Opc = AArch64::FMOVDXr;
4688 const TargetRegisterClass *RC = nullptr;
4689 switch (RetVT.SimpleTy) {
4690 default: llvm_unreachable("Unexpected value type.");
4691 case MVT::i32: RC = &AArch64::GPR32RegClass; break;
4692 case MVT::i64: RC = &AArch64::GPR64RegClass; break;
4693 case MVT::f32: RC = &AArch64::FPR32RegClass; break;
4694 case MVT::f64: RC = &AArch64::FPR64RegClass; break;
4696 unsigned Op0Reg = getRegForValue(I->getOperand(0));
4699 bool Op0IsKill = hasTrivialKill(I->getOperand(0));
4700 unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
4705 updateValueMap(I, ResultReg);
4709 bool AArch64FastISel::selectFRem(const Instruction *I) {
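// There is no AArch64 instruction for frem; lower it to a runtime call to
// fmodf/fmod via the RTLIB REM_F32/REM_F64 libcalls.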
4711 if (!isTypeLegal(I->getType(), RetVT))
4715 switch (RetVT.SimpleTy) {
4719 LC = RTLIB::REM_F32;
4722 LC = RTLIB::REM_F64;
4727 Args.reserve(I->getNumOperands());
4729 // Populate the argument list.
4730 for (auto &Arg : I->operands()) {
4733 Entry.Ty = Arg->getType();
4734 Args.push_back(Entry);
4737 CallLoweringInfo CLI;
4738 MCContext &Ctx = MF->getContext();
4739 CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), I->getType(),
4740 TLI.getLibcallName(LC), std::move(Args));
4741 if (!lowerCallTo(CLI))
4743 updateValueMap(I, CLI.ResultReg);
4747 bool AArch64FastISel::selectSDiv(const Instruction *I) {
4749 if (!isTypeLegal(I->getType(), VT))
4752 if (!isa<ConstantInt>(I->getOperand(1)))
4753 return selectBinaryOp(I, ISD::SDIV);
4755 const APInt &C = cast<ConstantInt>(I->getOperand(1))->getValue();
4756 if ((VT != MVT::i32 && VT != MVT::i64) || !C ||
4757 !(C.isPowerOf2() || (-C).isPowerOf2()))
4758 return selectBinaryOp(I, ISD::SDIV);
4760 unsigned Lg2 = C.countTrailingZeros();
4761 unsigned Src0Reg = getRegForValue(I->getOperand(0));
4764 bool Src0IsKill = hasTrivialKill(I->getOperand(0));
4766 if (cast<BinaryOperator>(I)->isExact()) {
4767 unsigned ResultReg = emitASR_ri(VT, VT, Src0Reg, Src0IsKill, Lg2);
4770 updateValueMap(I, ResultReg);
4774 int64_t Pow2MinusOne = (1ULL << Lg2) - 1;
4775 unsigned AddReg = emitAdd_ri_(VT, Src0Reg, /*IsKill=*/false, Pow2MinusOne);
4779 // (Src0 < 0) ? Pow2 - 1 : 0;
4780 if (!emitICmp_ri(VT, Src0Reg, /*IsKill=*/false, 0))
4784 const TargetRegisterClass *RC;
4785 if (VT == MVT::i64) {
4786 SelectOpc = AArch64::CSELXr;
4787 RC = &AArch64::GPR64RegClass;
4789 SelectOpc = AArch64::CSELWr;
4790 RC = &AArch64::GPR32RegClass;
4792 unsigned SelectReg =
4793 fastEmitInst_rri(SelectOpc, RC, AddReg, /*IsKill=*/true, Src0Reg,
4794 Src0IsKill, AArch64CC::LT);
4798 // Divide by Pow2 --> ashr. If we're dividing by a negative value we must also
4799 // negate the result.
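// Altogether, sdiv i32 %a, 8 is emitted roughly as (register numbers
// illustrative):
//   add  w8, w0, #7
//   cmp  w0, #0
//   csel w8, w8, w0, lt
//   asr  w0, w8, #3
// with a "neg ..., asr #3" form instead of the plain asr when the divisor is
// negative.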
4800 unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
4803 ResultReg = emitAddSub_rs(/*UseAdd=*/false, VT, ZeroReg, /*IsKill=*/true,
4804 SelectReg, /*IsKill=*/true, AArch64_AM::ASR, Lg2);
4806 ResultReg = emitASR_ri(VT, VT, SelectReg, /*IsKill=*/true, Lg2);
4811 updateValueMap(I, ResultReg);
4815 /// This is mostly a copy of the existing FastISel getRegForGEPIndex code. We
4816 /// have to duplicate it for AArch64, because otherwise we would fail during the
4817 /// sign-extend emission.
4818 std::pair<unsigned, bool> AArch64FastISel::getRegForGEPIndex(const Value *Idx) {
4819 unsigned IdxN = getRegForValue(Idx);
4821 // Unhandled operand. Halt "fast" selection and bail.
4822 return std::pair<unsigned, bool>(0, false);
4824 bool IdxNIsKill = hasTrivialKill(Idx);
4826 // If the index is smaller or larger than intptr_t, truncate or extend it.
4827 MVT PtrVT = TLI.getPointerTy(DL);
4828 EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
4829 if (IdxVT.bitsLT(PtrVT)) {
4830 IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, /*IsZExt=*/false);
4832 } else if (IdxVT.bitsGT(PtrVT))
4833 llvm_unreachable("AArch64 FastISel doesn't support types larger than i64");
4834 return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
4837 /// This is mostly a copy of the existing FastISel GEP code, but we have to
4838 /// duplicate it for AArch64, because otherwise we would bail out even for
4839 /// simple cases. This is because the standard fastEmit functions don't cover
4840 /// MUL at all and ADD is lowered very inefficiently.
4841 bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
4842 unsigned N = getRegForValue(I->getOperand(0));
4845 bool NIsKill = hasTrivialKill(I->getOperand(0));
4847 // Keep a running tab of the total offset to coalesce multiple N = N + Offset
4848 // into a single N = N + TotalOffset.
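// For example, a GEP with several constant struct-field and array indices folds
// all of them into TotalOffs and emits a single ADD at the end.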
4849 uint64_t TotalOffs = 0;
4850 Type *Ty = I->getOperand(0)->getType();
4851 MVT VT = TLI.getPointerTy(DL);
4852 for (auto OI = std::next(I->op_begin()), E = I->op_end(); OI != E; ++OI) {
4853 const Value *Idx = *OI;
4854 if (auto *StTy = dyn_cast<StructType>(Ty)) {
4855 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
4858 TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
4859 Ty = StTy->getElementType(Field);
4861 Ty = cast<SequentialType>(Ty)->getElementType();
4862 // If this is a constant subscript, handle it quickly.
4863 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
4868 DL.getTypeAllocSize(Ty) * cast<ConstantInt>(CI)->getSExtValue();
4872 N = emitAdd_ri_(VT, N, NIsKill, TotalOffs);
4879 // N = N + Idx * ElementSize;
4880 uint64_t ElementSize = DL.getTypeAllocSize(Ty);
4881 std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
4882 unsigned IdxN = Pair.first;
4883 bool IdxNIsKill = Pair.second;
4887 if (ElementSize != 1) {
4888 unsigned C = fastEmit_i(VT, VT, ISD::Constant, ElementSize);
4891 IdxN = emitMul_rr(VT, IdxN, IdxNIsKill, C, true);
4896 N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
4902 N = emitAdd_ri_(VT, N, NIsKill, TotalOffs);
4906 updateValueMap(I, N);
4910 bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
4911 switch (I->getOpcode()) {
4914 case Instruction::Add:
4915 case Instruction::Sub:
4916 return selectAddSub(I);
4917 case Instruction::Mul:
4918 return selectMul(I);
4919 case Instruction::SDiv:
4920 return selectSDiv(I);
4921 case Instruction::SRem:
4922 if (!selectBinaryOp(I, ISD::SREM))
4923 return selectRem(I, ISD::SREM);
4925 case Instruction::URem:
4926 if (!selectBinaryOp(I, ISD::UREM))
4927 return selectRem(I, ISD::UREM);
4929 case Instruction::Shl:
4930 case Instruction::LShr:
4931 case Instruction::AShr:
4932 return selectShift(I);
4933 case Instruction::And:
4934 case Instruction::Or:
4935 case Instruction::Xor:
4936 return selectLogicalOp(I);
4937 case Instruction::Br:
4938 return selectBranch(I);
4939 case Instruction::IndirectBr:
4940 return selectIndirectBr(I);
4941 case Instruction::BitCast:
4942 if (!FastISel::selectBitCast(I))
4943 return selectBitCast(I);
4945 case Instruction::FPToSI:
4946 if (!selectCast(I, ISD::FP_TO_SINT))
4947 return selectFPToInt(I, /*Signed=*/true);
4949 case Instruction::FPToUI:
4950 return selectFPToInt(I, /*Signed=*/false);
4951 case Instruction::ZExt:
4952 case Instruction::SExt:
4953 return selectIntExt(I);
4954 case Instruction::Trunc:
4955 if (!selectCast(I, ISD::TRUNCATE))
4956 return selectTrunc(I);
4958 case Instruction::FPExt:
4959 return selectFPExt(I);
4960 case Instruction::FPTrunc:
4961 return selectFPTrunc(I);
4962 case Instruction::SIToFP:
4963 if (!selectCast(I, ISD::SINT_TO_FP))
4964 return selectIntToFP(I, /*Signed=*/true);
4966 case Instruction::UIToFP:
4967 return selectIntToFP(I, /*Signed=*/false);
4968 case Instruction::Load:
4969 return selectLoad(I);
4970 case Instruction::Store:
4971 return selectStore(I);
4972 case Instruction::FCmp:
4973 case Instruction::ICmp:
4974 return selectCmp(I);
4975 case Instruction::Select:
4976 return selectSelect(I);
4977 case Instruction::Ret:
4978 return selectRet(I);
4979 case Instruction::FRem:
4980 return selectFRem(I);
4981 case Instruction::GetElementPtr:
4982 return selectGetElementPtr(I);
4985 // Fall back to target-independent instruction selection.
4986 return selectOperator(I, I->getOpcode());
4987 // Silence warnings.
4988 (void)&CC_AArch64_DarwinPCS_VarArg;
4992 llvm::FastISel *AArch64::createFastISel(FunctionLoweringInfo &FuncInfo,
4993 const TargetLibraryInfo *LibInfo) {
4994 return new AArch64FastISel(FuncInfo, LibInfo);