//===-- AArch64FastISel.cpp - AArch64 FastISel implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the AArch64-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// AArch64GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
17 #include "AArch64Subtarget.h"
18 #include "AArch64TargetMachine.h"
19 #include "MCTargetDesc/AArch64AddressingModes.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/FastISel.h"
22 #include "llvm/CodeGen/FunctionLoweringInfo.h"
23 #include "llvm/CodeGen/MachineConstantPool.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/IR/CallingConv.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/GetElementPtrTypeIterator.h"
32 #include "llvm/IR/GlobalAlias.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/Operator.h"
37 #include "llvm/Support/CommandLine.h"
class AArch64FastISel : public FastISel {

  class Address {
  public:
    typedef enum {
      RegBase,
      FrameIndexBase
    } BaseKind;

  private:
    BaseKind Kind;
    union {
      unsigned Reg;
      int FI;
    } Base;
    int64_t Offset;

  public:
    Address() : Kind(RegBase), Offset(0) { Base.Reg = 0; }
    void setKind(BaseKind K) { Kind = K; }
    BaseKind getKind() const { return Kind; }
    bool isRegBase() const { return Kind == RegBase; }
    bool isFIBase() const { return Kind == FrameIndexBase; }
    void setReg(unsigned Reg) {
      assert(isRegBase() && "Invalid base register access!");
      Base.Reg = Reg;
    }
    unsigned getReg() const {
      assert(isRegBase() && "Invalid base register access!");
      return Base.Reg;
    }
    void setFI(unsigned FI) {
      assert(isFIBase() && "Invalid base frame index access!");
      Base.FI = FI;
    }
    unsigned getFI() const {
      assert(isFIBase() && "Invalid base frame index access!");
      return Base.FI;
    }
    void setOffset(int64_t O) { Offset = O; }
    int64_t getOffset() { return Offset; }

    bool isValid() { return isFIBase() || (isRegBase() && getReg() != 0); }
  };

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;
  LLVMContext *Context;

  bool FastLowerCall(CallLoweringInfo &CLI) override;
  bool FastLowerIntrinsicCall(const IntrinsicInst *II) override;

private:
  // Selection routines.
  bool SelectLoad(const Instruction *I);
  bool SelectStore(const Instruction *I);
  bool SelectBranch(const Instruction *I);
  bool SelectIndirectBr(const Instruction *I);
  bool SelectCmp(const Instruction *I);
  bool SelectSelect(const Instruction *I);
  bool SelectFPExt(const Instruction *I);
  bool SelectFPTrunc(const Instruction *I);
  bool SelectFPToInt(const Instruction *I, bool Signed);
  bool SelectIntToFP(const Instruction *I, bool Signed);
  bool SelectRem(const Instruction *I, unsigned ISDOpcode);
  bool SelectRet(const Instruction *I);
  bool SelectTrunc(const Instruction *I);
  bool SelectIntExt(const Instruction *I);
  bool SelectMul(const Instruction *I);
  bool SelectShift(const Instruction *I, bool IsLeftShift, bool IsArithmetic);

  // Utility helper routines.
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isLoadStoreTypeLegal(Type *Ty, MVT &VT);
  bool ComputeAddress(const Value *Obj, Address &Addr);
  bool SimplifyAddress(Address &Addr, MVT VT, int64_t ScaleFactor,
                       bool UseUnscaled);
  void AddLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB,
                            unsigned Flags, bool UseUnscaled);
  bool IsMemCpySmall(uint64_t Len, unsigned Alignment);
  bool TryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                          unsigned Alignment);
  bool foldXALUIntrinsic(AArch64CC::CondCode &CC, const Instruction *I,
                         const Value *Cond);

  // Emit functions.
  bool EmitCmp(Value *Src1Value, Value *Src2Value, bool isZExt);
  bool EmitLoad(MVT VT, unsigned &ResultReg, Address Addr,
                bool UseUnscaled = false);
  bool EmitStore(MVT VT, unsigned SrcReg, Address Addr,
                 bool UseUnscaled = false);
  unsigned EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned Emit_MUL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                       unsigned Op1, bool Op1IsKill);
  unsigned Emit_SMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                         unsigned Op1, bool Op1IsKill);
  unsigned Emit_UMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                         unsigned Op1, bool Op1IsKill);
  unsigned Emit_LSL_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t Imm);
  unsigned Emit_LSR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t Imm);
  unsigned Emit_ASR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t Imm);

  unsigned AArch64MaterializeFP(const ConstantFP *CFP, MVT VT);
  unsigned AArch64MaterializeGV(const GlobalValue *GV);

  // Call handling routines.
private:
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
  bool ProcessCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
                       unsigned &NumBytes);
  bool FinishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);

public:
  // Backend specific FastISel code.
  unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
  unsigned TargetMaterializeConstant(const Constant *C) override;

  explicit AArch64FastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo) {
    Subtarget = &TM.getSubtarget<AArch64Subtarget>();
    Context = &funcInfo.Fn->getContext();
  }

  bool TargetSelectInstruction(const Instruction *I) override;

#include "AArch64GenFastISel.inc"
};

} // end anonymous namespace

#include "AArch64GenCallingConv.inc"

CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const {
  if (CC == CallingConv::WebKit_JS)
    return CC_AArch64_WebKit_JS;
  return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
}

unsigned AArch64FastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  assert(TLI.getValueType(AI->getType(), true) == MVT::i64 &&
         "Alloca should always return a pointer.");

  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI))
    return 0;

  DenseMap<const AllocaInst *, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
            ResultReg)
        .addFrameIndex(SI->second)
        .addImm(0)
        .addImm(0);
    return ResultReg;
  }

  return 0;
}

unsigned AArch64FastISel::AArch64MaterializeFP(const ConstantFP *CFP, MVT VT) {
  if (VT != MVT::f32 && VT != MVT::f64)
    return 0;

  const APFloat Val = CFP->getValueAPF();
  bool is64bit = (VT == MVT::f64);

  // This checks to see if we can use FMOV instructions to materialize
  // a constant, otherwise we have to materialize via the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = AArch64_AM::getFP64Imm(Val);
      Opc = AArch64::FMOVDi;
    } else {
      Imm = AArch64_AM::getFP32Imm(Val);
      Opc = AArch64::FMOVSi;
    }
    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
        .addImm(Imm);
    return ResultReg;
  }

  // Materialize via constant pool. MachineConstantPool wants an explicit
  // alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0)
    Align = DL.getTypeAllocSize(CFP->getType());

  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
          ADRPReg).addConstantPoolIndex(Idx, 0, AArch64II::MO_PAGE);

  unsigned Opc = is64bit ? AArch64::LDRDui : AArch64::LDRSui;
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(ADRPReg)
      .addConstantPoolIndex(Idx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  return ResultReg;
}

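// For the constant-pool path above, the emitted sequence is roughly:
//   adrp x8, .LCPI0_0             ; page containing the pool entry
//   ldr  d0, [x8, :lo12:.LCPI0_0] ; load the constant from the pool
// with MO_PAGE/MO_PAGEOFF selecting the matching relocations.
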
unsigned AArch64FastISel::AArch64MaterializeGV(const GlobalValue *GV) {
  // We can't handle thread-local variables quickly yet.
  if (GV->isThreadLocal())
    return 0;

  // MachO still uses GOT for large code-model accesses, but ELF requires
  // movz/movk sequences, which FastISel doesn't handle yet.
  if (TM.getCodeModel() != CodeModel::Small && !Subtarget->isTargetMachO())
    return 0;

  unsigned char OpFlags = Subtarget->ClassifyGlobalReference(GV, TM);

  EVT DestEVT = TLI.getValueType(GV->getType(), true);
  if (!DestEVT.isSimple())
    return 0;

  unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
  unsigned ResultReg;

  if (OpFlags & AArch64II::MO_GOT) {
    // ADRP + LDRX
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
            ADRPReg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGE);

    ResultReg = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::LDRXui),
            ResultReg)
        .addReg(ADRPReg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
                          AArch64II::MO_NC);
  } else {
    // ADRP + ADDX
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
            ADRPReg).addGlobalAddress(GV, 0, AArch64II::MO_PAGE);

    ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
            ResultReg)
        .addReg(ADRPReg)
        .addGlobalAddress(GV, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC)
        .addImm(0);
  }
  return ResultReg;
}

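// Depending on ClassifyGlobalReference, this emits either a GOT access:
//   adrp x8, :got:var ; ldr x8, [x8, :got_lo12:var]
// or a direct page-relative address:
//   adrp x8, var      ; add x8, x8, :lo12:var
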
unsigned AArch64FastISel::TargetMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple())
    return 0;
  MVT VT = CEVT.getSimpleVT();

  // FIXME: Handle ConstantInt.
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return AArch64MaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return AArch64MaterializeGV(GV);

  return 0;
}

// Computes the address to get to an object.
bool AArch64FastISel::ComputeAddress(const Value *Obj, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return ComputeAddress(U->getOperand(0), Addr);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    uint64_t TmpOffset = Addr.getOffset();

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
         ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (canFoldAddIntoGEP(U, Op)) {
            // A compatible add with a constant operand. Fold the constant.
            ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported.
          goto unsupported_gep;
        }
      }
    }

    // Try to grab the base operand now.
    Addr.setOffset(TmpOffset);
    if (ComputeAddress(U->getOperand(0), Addr))
      return true;

    // We failed, restore everything and try the other options.
    Addr = SavedAddr;

  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.setKind(Address::FrameIndexBase);
      Addr.setFI(SI->second);
      return true;
    }
    break;
  }
  }

  // Try to get this in a register if nothing else has worked.
  if (!Addr.isValid())
    Addr.setReg(getRegForValue(Obj));
  return Addr.isValid();
}

bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple())
    return false;
  VT = evt.getSimpleVT();

  // This is a legal type, but it's not something we handle in fast-isel.
  if (VT == MVT::f128)
    return false;

  // Handle all other legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool AArch64FastISel::isLoadStoreTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT))
    return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now. For stores, this reflects truncation.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT,
                                      int64_t ScaleFactor, bool UseUnscaled) {
  bool needsLowering = false;
  int64_t Offset = Addr.getOffset();
  switch (VT.SimpleTy) {
  default:
    return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f32:
  case MVT::f64:
    if (!UseUnscaled)
      // Using scaled, 12-bit, unsigned immediate offsets.
      needsLowering = ((Offset & 0xfff) != Offset);
    else
      // Using unscaled, 9-bit, signed immediate offsets.
      needsLowering = (Offset > 256 || Offset < -256);
    break;
  }

  // If this is a stack pointer and the offset needs to be simplified then put
  // the alloca address into a register, set the base type back to register and
  // continue. This should almost never happen.
  if (needsLowering && Addr.getKind() == Address::FrameIndexBase) {
    unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
            ResultReg)
        .addFrameIndex(Addr.getFI())
        .addImm(0)
        .addImm(0);
    Addr.setKind(Address::RegBase);
    Addr.setReg(ResultReg);
  }

  // Since the offset is too large for the load/store instruction get the
  // reg+offset into a register.
  if (needsLowering) {
    uint64_t UnscaledOffset = Addr.getOffset() * ScaleFactor;
    unsigned ResultReg = FastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(), false,
                                      UnscaledOffset, MVT::i64);
    if (ResultReg == 0)
      return false;
    Addr.setReg(ResultReg);
    Addr.setOffset(0);
  }
  return true;
}

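// For reference: the scaled forms encode Offset/ScaleFactor in a 12-bit
// unsigned field (so, e.g., ldr w0, [x1, #16380] is still encodable), while
// the unscaled LDUR/STUR forms take a 9-bit signed byte offset of roughly
// -256..255.
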
void AArch64FastISel::AddLoadStoreOperands(Address &Addr,
                                           const MachineInstrBuilder &MIB,
                                           unsigned Flags, bool UseUnscaled) {
  int64_t Offset = Addr.getOffset();
  // Frame base works a bit differently. Handle it separately.
  if (Addr.getKind() == Address::FrameIndexBase) {
    int FI = Addr.getFI();
    // FIXME: We shouldn't be using getObjectSize/getObjectAlignment. The size
    // and alignment should be based on the VT.
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset), Flags,
        MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.getReg());
    MIB.addImm(Offset);
  }
}

bool AArch64FastISel::EmitLoad(MVT VT, unsigned &ResultReg, Address Addr,
                               bool UseUnscaled) {
  // Negative offsets require unscaled, 9-bit, signed immediate offsets.
  // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
  if (!UseUnscaled && Addr.getOffset() < 0)
    UseUnscaled = true;

  unsigned Opc;
  const TargetRegisterClass *RC;
  bool VTIsi1 = false;
  int64_t ScaleFactor = 0;
  switch (VT.SimpleTy) {
  default:
    return false;
  case MVT::i1:
    VTIsi1 = true;
  // Intentional fall-through.
  case MVT::i8:
    Opc = UseUnscaled ? AArch64::LDURBBi : AArch64::LDRBBui;
    RC = &AArch64::GPR32RegClass;
    ScaleFactor = 1;
    break;
  case MVT::i16:
    Opc = UseUnscaled ? AArch64::LDURHHi : AArch64::LDRHHui;
    RC = &AArch64::GPR32RegClass;
    ScaleFactor = 2;
    break;
  case MVT::i32:
    Opc = UseUnscaled ? AArch64::LDURWi : AArch64::LDRWui;
    RC = &AArch64::GPR32RegClass;
    ScaleFactor = 4;
    break;
  case MVT::i64:
    Opc = UseUnscaled ? AArch64::LDURXi : AArch64::LDRXui;
    RC = &AArch64::GPR64RegClass;
    ScaleFactor = 8;
    break;
  case MVT::f32:
    Opc = UseUnscaled ? AArch64::LDURSi : AArch64::LDRSui;
    RC = TLI.getRegClassFor(VT);
    ScaleFactor = 4;
    break;
  case MVT::f64:
    Opc = UseUnscaled ? AArch64::LDURDi : AArch64::LDRDui;
    RC = TLI.getRegClassFor(VT);
    ScaleFactor = 8;
    break;
  }
  // Scale the offset.
  if (!UseUnscaled) {
    int64_t Offset = Addr.getOffset();
    if (Offset & (ScaleFactor - 1))
      // Retry using an unscaled, 9-bit, signed immediate offset.
      return EmitLoad(VT, ResultReg, Addr, /*UseUnscaled*/ true);

    Addr.setOffset(Offset / ScaleFactor);
  }

  // Simplify this down to something we can handle.
  if (!SimplifyAddress(Addr, VT, UseUnscaled ? 1 : ScaleFactor, UseUnscaled))
    return false;

  // Create the base instruction, then add the operands.
  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, UseUnscaled);

  // Loading an i1 requires special handling.
  if (VTIsi1) {
    MRI.constrainRegClass(ResultReg, &AArch64::GPR32RegClass);
    unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
            ANDReg)
        .addReg(ResultReg)
        .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
    ResultReg = ANDReg;
  }
  return true;
}

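// E.g. an i32 load at byte offset 3 fails the (Offset & (ScaleFactor - 1))
// check above and is retried in unscaled form as "ldur w0, [xBase, #3]".
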
bool AArch64FastISel::SelectLoad(const Instruction *I) {
  MVT VT;
  // Verify we have a legal type before going any further. Currently, we handle
  // simple types that will directly fit in a register (i32/f32/i64/f64) or
  // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
  if (!isLoadStoreTypeLegal(I->getType(), VT) || cast<LoadInst>(I)->isAtomic())
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ComputeAddress(I->getOperand(0), Addr))
    return false;

  unsigned ResultReg;
  if (!EmitLoad(VT, ResultReg, Addr))
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::EmitStore(MVT VT, unsigned SrcReg, Address Addr,
                                bool UseUnscaled) {
  // Negative offsets require unscaled, 9-bit, signed immediate offsets.
  // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
  if (!UseUnscaled && Addr.getOffset() < 0)
    UseUnscaled = true;

  unsigned StrOpc;
  bool VTIsi1 = false;
  int64_t ScaleFactor = 0;
  // Using scaled, 12-bit, unsigned immediate offsets.
  switch (VT.SimpleTy) {
  default:
    return false;
  case MVT::i1:
    VTIsi1 = true;
  case MVT::i8:
    StrOpc = UseUnscaled ? AArch64::STURBBi : AArch64::STRBBui;
    ScaleFactor = 1;
    break;
  case MVT::i16:
    StrOpc = UseUnscaled ? AArch64::STURHHi : AArch64::STRHHui;
    ScaleFactor = 2;
    break;
  case MVT::i32:
    StrOpc = UseUnscaled ? AArch64::STURWi : AArch64::STRWui;
    ScaleFactor = 4;
    break;
  case MVT::i64:
    StrOpc = UseUnscaled ? AArch64::STURXi : AArch64::STRXui;
    ScaleFactor = 8;
    break;
  case MVT::f32:
    StrOpc = UseUnscaled ? AArch64::STURSi : AArch64::STRSui;
    ScaleFactor = 4;
    break;
  case MVT::f64:
    StrOpc = UseUnscaled ? AArch64::STURDi : AArch64::STRDui;
    ScaleFactor = 8;
    break;
  }
  // Scale the offset.
  if (!UseUnscaled) {
    int64_t Offset = Addr.getOffset();
    if (Offset & (ScaleFactor - 1))
      // Retry using an unscaled, 9-bit, signed immediate offset.
      return EmitStore(VT, SrcReg, Addr, /*UseUnscaled*/ true);

    Addr.setOffset(Offset / ScaleFactor);
  }

  // Simplify this down to something we can handle.
  if (!SimplifyAddress(Addr, VT, UseUnscaled ? 1 : ScaleFactor, UseUnscaled))
    return false;

  // Storing an i1 requires special handling.
  if (VTIsi1) {
    MRI.constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
    unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
            ANDReg)
        .addReg(SrcReg)
        .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
    SrcReg = ANDReg;
  }
  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(StrOpc)).addReg(SrcReg);
  AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, UseUnscaled);
  return true;
}

bool AArch64FastISel::SelectStore(const Instruction *I) {
  MVT VT;
  Value *Op0 = I->getOperand(0);
  // Verify we have a legal type before going any further. Currently, we handle
  // simple types that will directly fit in a register (i32/f32/i64/f64) or
  // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
  if (!isLoadStoreTypeLegal(Op0->getType(), VT) ||
      cast<StoreInst>(I)->isAtomic())
    return false;

  // Get the value to be stored into a register.
  unsigned SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!EmitStore(VT, SrcReg, Addr))
    return false;
  return true;
}

static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) {
  switch (Pred) {
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    // AL is our "false" for now. The other two need more compares.
    return AArch64CC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return AArch64CC::HI;
  case CmpInst::FCMP_OLT:
    return AArch64CC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return AArch64CC::LS;
  case CmpInst::FCMP_ORD:
    return AArch64CC::VC;
  case CmpInst::FCMP_UNO:
    return AArch64CC::VS;
  case CmpInst::FCMP_UGE:
    return AArch64CC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return AArch64CC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  }
}

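// FCMP_ONE and FCMP_UEQ have no single AArch64 condition code: "one" is
// (lt || gt) and "ueq" is (eq || unordered), so each would need a second
// compare or branch; returning AL above makes the callers bail out.
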
bool AArch64FastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  AArch64CC::CondCode CC = AArch64CC::NE;
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
      // We may not handle every CC for now.
      CC = getCompareCC(CI->getPredicate());
      if (CC == AArch64CC::AL)
        return false;

      // Emit the cmp.
      if (!EmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      // Emit the branch.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
          .addImm(CC)
          .addMBB(TBB);
      FuncInfo.MBB->addSuccessor(TBB);

      FastEmitBranch(FBB, DbgLoc);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SrcVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadStoreTypeLegal(TI->getOperand(0)->getType(), SrcVT))) {
      unsigned CondReg = getRegForValue(TI->getOperand(0));
      if (CondReg == 0)
        return false;

      // Issue an extract_subreg to get the lower 32-bits.
      if (SrcVT == MVT::i64)
        CondReg = FastEmitInst_extractsubreg(MVT::i32, CondReg, /*Kill=*/true,
                                             AArch64::sub_32);

      MRI.constrainRegClass(CondReg, &AArch64::GPR32RegClass);
      unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(AArch64::ANDWri), ANDReg)
          .addReg(CondReg)
          .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(AArch64::SUBSWri))
          .addReg(AArch64::WZR, RegState::Define)
          .addReg(ANDReg)
          .addImm(0)
          .addImm(0);

      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CC = AArch64CC::EQ;
      }
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
          .addImm(CC)
          .addMBB(TBB);
      FuncInfo.MBB->addSuccessor(TBB);
      FastEmitBranch(FBB, DbgLoc);
      return true;
    }
  } else if (const ConstantInt *CI =
                 dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::B))
        .addMBB(Target);
    FuncInfo.MBB->addSuccessor(Target);
    return true;
  } else if (foldXALUIntrinsic(CC, I, BI->getCondition())) {
    // Fake request the condition, otherwise the intrinsic might be completely
    // optimized away.
    unsigned CondReg = getRegForValue(BI->getCondition());
    if (!CondReg)
      return false;

    // Emit the branch.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
        .addImm(CC)
        .addMBB(TBB);
    FuncInfo.MBB->addSuccessor(TBB);

    FastEmitBranch(FBB, DbgLoc);
    return true;
  }

  unsigned CondReg = getRegForValue(BI->getCondition());
  if (CondReg == 0)
    return false;

  // We've been divorced from our compare! Our block was split, and
  // now our compare lives in a predecessor block. We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register. Ergo, we test
  // the one-bit value left in the virtual register.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBSWri),
          AArch64::WZR)
      .addReg(CondReg)
      .addImm(0)
      .addImm(0);

  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CC = AArch64CC::EQ;
  }

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
      .addImm(CC)
      .addMBB(TBB);
  FuncInfo.MBB->addSuccessor(TBB);
  FastEmitBranch(FBB, DbgLoc);
  return true;
}

bool AArch64FastISel::SelectIndirectBr(const Instruction *I) {
  const IndirectBrInst *BI = cast<IndirectBrInst>(I);
  unsigned AddrReg = getRegForValue(BI->getOperand(0));
  if (AddrReg == 0)
    return false;

  // Emit the indirect branch.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BR))
      .addReg(AddrReg);

  // Make sure the CFG is up-to-date.
  for (unsigned i = 0, e = BI->getNumSuccessors(); i != e; ++i)
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[BI->getSuccessor(i)]);

  return true;
}

bool AArch64FastISel::EmitCmp(Value *Src1Value, Value *Src2Value, bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(Ty, true);
  if (!SrcEVT.isSimple())
    return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  uint64_t Imm;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i64 || SrcVT == MVT::i32 || SrcVT == MVT::i16 ||
        SrcVT == MVT::i8 || SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();

      Imm = (isZExt) ? CIVal.getZExtValue() : CIVal.getSExtValue();
      if (CIVal.isNegative()) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      // FIXME: We can handle more immediates using shifts.
      UseImm = ((Imm & 0xfff) == Imm);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned ZReg;
  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.SimpleTy) {
  default:
    return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    needsExt = true;
  // Intentional fall-through.
  case MVT::i32:
    ZReg = AArch64::WZR;
    if (UseImm)
      CmpOpc = isNegativeImm ? AArch64::ADDSWri : AArch64::SUBSWri;
    else
      CmpOpc = AArch64::SUBSWrr;
    break;
  case MVT::i64:
    ZReg = AArch64::XZR;
    if (UseImm)
      CmpOpc = isNegativeImm ? AArch64::ADDSXri : AArch64::SUBSXri;
    else
      CmpOpc = AArch64::SUBSXrr;
    break;
  case MVT::f32:
    isICmp = false;
    CmpOpc = UseImm ? AArch64::FCMPSri : AArch64::FCMPSrr;
    break;
  case MVT::f64:
    isICmp = false;
    CmpOpc = UseImm ? AArch64::FCMPDri : AArch64::FCMPDrr;
    break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0)
    return false;

  unsigned SrcReg2;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0)
      return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = EmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0)
      return false;
    if (!UseImm) {
      SrcReg2 = EmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0)
        return false;
    }
  }

  if (isICmp) {
    if (UseImm)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
          .addReg(ZReg, RegState::Define)
          .addReg(SrcReg1)
          .addImm(Imm)
          .addImm(0);
    else
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
          .addReg(ZReg, RegState::Define)
          .addReg(SrcReg1)
          .addReg(SrcReg2);
  } else {
    if (UseImm)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
          .addReg(SrcReg1);
    else
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
          .addReg(SrcReg1)
          .addReg(SrcReg2);
  }

  return true;
}

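// Note that the integer compares above are SUBS instructions targeting the
// zero register, i.e. the "cmp" alias ("subs wzr, w0, w1" == "cmp w0, w1");
// negative immediates are folded by switching to ADDS with the negated value.
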
bool AArch64FastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // We may not handle every CC for now.
  AArch64CC::CondCode CC = getCompareCC(CI->getPredicate());
  if (CC == AArch64CC::AL)
    return false;

  // Emit the cmp.
  if (!EmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison.
  AArch64CC::CondCode invertedCC = getInvertedCondCode(CC);
  unsigned ResultReg = createResultReg(&AArch64::GPR32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
          ResultReg)
      .addReg(AArch64::WZR)
      .addReg(AArch64::WZR)
      .addImm(invertedCC);

  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::SelectSelect(const Instruction *I) {
  const SelectInst *SI = cast<SelectInst>(I);

  EVT DestEVT = TLI.getValueType(SI->getType(), true);
  if (!DestEVT.isSimple())
    return false;

  MVT DestVT = DestEVT.getSimpleVT();
  if (DestVT != MVT::i32 && DestVT != MVT::i64 && DestVT != MVT::f32 &&
      DestVT != MVT::f64)
    return false;

  unsigned SelectOpc;
  switch (DestVT.SimpleTy) {
  default: return false;
  case MVT::i32: SelectOpc = AArch64::CSELWr;    break;
  case MVT::i64: SelectOpc = AArch64::CSELXr;    break;
  case MVT::f32: SelectOpc = AArch64::FCSELSrrr; break;
  case MVT::f64: SelectOpc = AArch64::FCSELDrrr; break;
  }

  const Value *Cond = SI->getCondition();
  bool NeedTest = true;
  AArch64CC::CondCode CC = AArch64CC::NE;
  if (foldXALUIntrinsic(CC, I, Cond))
    NeedTest = false;

  unsigned CondReg = getRegForValue(Cond);
  if (!CondReg)
    return false;
  bool CondIsKill = hasTrivialKill(Cond);

  if (NeedTest) {
    MRI.constrainRegClass(CondReg, &AArch64::GPR32RegClass);
    unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
            ANDReg)
        .addReg(CondReg, getKillRegState(CondIsKill))
        .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBSWri))
        .addReg(AArch64::WZR, RegState::Define)
        .addReg(ANDReg)
        .addImm(0)
        .addImm(0);
  }

  unsigned TrueReg = getRegForValue(SI->getTrueValue());
  bool TrueIsKill = hasTrivialKill(SI->getTrueValue());

  unsigned FalseReg = getRegForValue(SI->getFalseValue());
  bool FalseIsKill = hasTrivialKill(SI->getFalseValue());

  if (!TrueReg || !FalseReg)
    return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SelectOpc),
          ResultReg)
      .addReg(TrueReg, getKillRegState(TrueIsKill))
      .addReg(FalseReg, getKillRegState(FalseIsKill))
      .addImm(CC);

  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::SelectFPExt(const Instruction *I) {
  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())
    return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0)
    return false;

  unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
          ResultReg).addReg(Op);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::SelectFPTrunc(const Instruction *I) {
  Value *V = I->getOperand(0);
  if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())
    return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0)
    return false;

  unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
          ResultReg).addReg(Op);
  UpdateValueMap(I, ResultReg);
  return true;
}

// FPToUI and FPToSI
bool AArch64FastISel::SelectFPToInt(const Instruction *I, bool Signed) {
  MVT DestVT;
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
    return false;

  unsigned SrcReg = getRegForValue(I->getOperand(0));
  if (SrcReg == 0)
    return false;

  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType(), true);
  if (SrcVT == MVT::f128)
    return false;

  unsigned Opc;
  if (SrcVT == MVT::f64) {
    if (Signed)
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr;
    else
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr;
  } else {
    if (Signed)
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr;
    else
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;
  }
  unsigned ResultReg = createResultReg(
      DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(SrcReg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) {
  MVT DestVT;
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
    return false;
  assert((DestVT == MVT::f32 || DestVT == MVT::f64) &&
         "Unexpected value type.");

  unsigned SrcReg = getRegForValue(I->getOperand(0));
  if (SrcReg == 0)
    return false;

  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType(), true);

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {
    SrcReg =
        EmitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);
    if (SrcReg == 0)
      return false;
  }

  MRI.constrainRegClass(SrcReg, SrcVT == MVT::i64 ? &AArch64::GPR64RegClass
                                                  : &AArch64::GPR32RegClass);

  unsigned Opc;
  if (SrcVT == MVT::i64) {
    if (Signed)
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri;
    else
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri;
  } else {
    if (Signed)
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri;
    else
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(SrcReg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::ProcessCallArgs(CallLoweringInfo &CLI,
                                      SmallVectorImpl<MVT> &OutVTs,
                                      unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
      .addImm(NumBytes);

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    unsigned ArgReg = getRegForValue(ArgVal);
    if (!ArgReg)
      return false;

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = EmitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
      if (!ArgReg)
        return false;
      break;
    }
    case CCValAssign::AExt:
    // Intentional fall-through.
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = EmitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
      if (!ArgReg)
        return false;
      break;
    }
    default:
      llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // FIXME: Handle custom args.
      return false;
    } else {
      assert(VA.isMemLoc() && "Assuming store on stack.");

      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      // Need to store on the stack.
      unsigned ArgSize = (ArgVT.getSizeInBits() + 7) / 8;

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittleEndian())
        BEAlign = 8 - ArgSize;

      Address Addr;
      Addr.setKind(Address::RegBase);
      Addr.setReg(AArch64::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);

      if (!EmitStore(ArgVT, ArgReg, Addr))
        return false;
    }
  }
  return true;
}

bool AArch64FastISel::FinishCall(CallLoweringInfo &CLI, MVT RetVT,
                                 unsigned NumBytes) {
  CallingConv::ID CC = CLI.CallConv;

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(NumBytes).addImm(0);

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC));

    // Only handle a single return value.
    if (RVLocs.size() != 1)
      return false;

    // Copy all of the result registers out of their specified physreg.
    MVT CopyVT = RVLocs[0].getValVT();
    unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(RVLocs[0].getLocReg());
    CLI.InRegs.push_back(RVLocs[0].getLocReg());

    CLI.ResultReg = ResultReg;
    CLI.NumResultRegs = 1;
  }

  return true;
}

bool AArch64FastISel::FastLowerCall(CallLoweringInfo &CLI) {
  CallingConv::ID CC = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  const Value *Callee = CLI.Callee;
  const char *SymName = CLI.SymName;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Let SDISel handle vararg functions.
  if (IsVarArg)
    return false;

  // FIXME: Only handle *simple* calls for now.
  MVT RetVT;
  if (CLI.RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(CLI.RetTy, RetVT))
    return false;

  for (auto Flag : CLI.OutFlags)
    if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
      return false;

  // Set up the argument vectors.
  SmallVector<MVT, 16> OutVTs;
  OutVTs.reserve(CLI.OutVals.size());

  for (auto *Val : CLI.OutVals) {
    MVT VT;
    if (!isTypeLegal(Val->getType(), VT) &&
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
      return false;

    // We don't handle vector parameters yet.
    if (VT.isVector() || VT.getSizeInBits() > 64)
      return false;

    OutVTs.push_back(VT);
  }

  // Handle the arguments now that we've gotten them.
  unsigned NumBytes;
  if (!ProcessCallArgs(CLI, OutVTs, NumBytes))
    return false;

  // Issue the call.
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BL));
  if (!SymName)
    MIB.addGlobalAddress(GV, 0, 0);
  else
    MIB.addExternalSymbol(SymName, 0);

  // Add implicit physical register uses to the call.
  for (auto Reg : CLI.OutRegs)
    MIB.addReg(Reg, RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  return FinishCall(CLI, RetVT, NumBytes);
}

bool AArch64FastISel::IsMemCpySmall(uint64_t Len, unsigned Alignment) {
  if (Alignment)
    return Len / Alignment <= 4;
  else
    return Len < 32;
}

bool AArch64FastISel::TryEmitSmallMemCpy(Address Dest, Address Src,
                                         uint64_t Len, unsigned Alignment) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!IsMemCpySmall(Len, Alignment))
    return false;

  int64_t UnscaledOffset = 0;
  Address OrigDest = Dest;
  Address OrigSrc = Src;

  while (Len) {
    MVT VT;
    if (!Alignment || Alignment >= 8) {
      if (Len >= 8)
        VT = MVT::i64;
      else if (Len >= 4)
        VT = MVT::i32;
      else if (Len >= 2)
        VT = MVT::i16;
      else
        VT = MVT::i8;
    } else {
      // Bound based on alignment.
      if (Len >= 4 && Alignment == 4)
        VT = MVT::i32;
      else if (Len >= 2 && Alignment == 2)
        VT = MVT::i16;
      else
        VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = EmitLoad(VT, ResultReg, Src);
    if (!RV)
      return false;

    RV = EmitStore(VT, ResultReg, Dest);
    if (!RV)
      return false;

    int64_t Size = VT.getSizeInBits() / 8;
    Len -= Size;
    UnscaledOffset += Size;

    // We need to recompute the unscaled offset for each iteration.
    Dest.setOffset(OrigDest.getOffset() + UnscaledOffset);
    Src.setOffset(OrigSrc.getOffset() + UnscaledOffset);
  }

  return true;
}

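// E.g. a 16-byte memcpy with 8-byte (or unknown) alignment expands to two
// load/store pairs:
//   ldr x8, [xSrc]     ; str x8, [xDst]
//   ldr x8, [xSrc, #8] ; str x8, [xDst, #8]
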
/// \brief Check if it is possible to fold the condition from the XALU intrinsic
/// into the user. The condition code will only be updated on success.
bool AArch64FastISel::foldXALUIntrinsic(AArch64CC::CondCode &CC,
                                        const Instruction *I,
                                        const Value *Cond) {
  if (!isa<ExtractValueInst>(Cond))
    return false;

  const auto *EV = cast<ExtractValueInst>(Cond);
  if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
    return false;

  const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
  MVT RetVT;
  const Function *Callee = II->getCalledFunction();
  Type *RetTy =
      cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
  if (!isTypeLegal(RetTy, RetVT))
    return false;

  if (RetVT != MVT::i32 && RetVT != MVT::i64)
    return false;

  AArch64CC::CondCode TmpCC;
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow: TmpCC = AArch64CC::VS; break;
  case Intrinsic::uadd_with_overflow: TmpCC = AArch64CC::HS; break;
  case Intrinsic::usub_with_overflow: TmpCC = AArch64CC::LO; break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: TmpCC = AArch64CC::NE; break;
  }

  // Check if both instructions are in the same basic block.
  if (II->getParent() != I->getParent())
    return false;

  // Make sure nothing is in the way - check from the second instruction to the
  // first.
  BasicBlock::const_iterator Start = I;
  BasicBlock::const_iterator End = II;
  for (auto Itr = std::prev(Start); Itr != End; --Itr) {
    // We only expect extractvalue instructions between the intrinsic and the
    // instruction to be selected.
    if (!isa<ExtractValueInst>(Itr))
      return false;

    // Check that the extractvalue operand comes from the intrinsic.
    const auto *EVI = cast<ExtractValueInst>(Itr);
    if (EVI->getAggregateOperand() != II)
      return false;
  }

  CC = TmpCC;
  return true;
}

bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
  // FIXME: Handle more intrinsics.
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    const AArch64RegisterInfo *RegInfo =
        static_cast<const AArch64RegisterInfo *>(TM.getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load frame address
    // ldr x0, [fp]
    // ldr x0, [x0]
    // ldr x0, [x0]
    // ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(AArch64::LDRXui), DestReg)
          .addReg(SrcReg).addImm(0);
      SrcReg = DestReg;
    }

    UpdateValueMap(II, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const auto *MTI = cast<MemTransferInst>(II);
    // Don't handle volatile.
    if (MTI->isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool IsMemCpy = (II->getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI->getLength()) && IsMemCpy) {
      // Small memcpy's are common enough that we want to do them without a call
      // if possible.
      uint64_t Len = cast<ConstantInt>(MTI->getLength())->getZExtValue();
      unsigned Alignment = MTI->getAlignment();
      if (IsMemCpySmall(Len, Alignment)) {
        Address Dest, Src;
        if (!ComputeAddress(MTI->getRawDest(), Dest) ||
            !ComputeAddress(MTI->getRawSource(), Src))
          return false;
        if (TryEmitSmallMemCpy(Dest, Src, Len, Alignment))
          return true;
      }
    }

    if (!MTI->getLength()->getType()->isIntegerTy(64))
      return false;

    if (MTI->getSourceAddressSpace() > 255 || MTI->getDestAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

    const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
    return LowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
  }
  case Intrinsic::memset: {
    const MemSetInst *MSI = cast<MemSetInst>(II);
    // Don't handle volatile.
    if (MSI->isVolatile())
      return false;

    if (!MSI->getLength()->getType()->isIntegerTy(64))
      return false;

    if (MSI->getDestAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

    return LowerCallTo(II, "memset", II->getNumArgOperands() - 2);
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
        .addImm(1);
    return true;
  }
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {
    // This implements the basic lowering of the xalu with overflow intrinsics.
    const Function *Callee = II->getCalledFunction();
    auto *Ty = cast<StructType>(Callee->getReturnType());
    Type *RetTy = Ty->getTypeAtIndex(0U);
    Type *CondTy = Ty->getTypeAtIndex(1);

    MVT VT;
    if (!isTypeLegal(RetTy, VT))
      return false;

    if (VT != MVT::i32 && VT != MVT::i64)
      return false;

    const Value *LHS = II->getArgOperand(0);
    const Value *RHS = II->getArgOperand(1);
    // Canonicalize immediate to the RHS.
    if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
        isCommutativeIntrinsic(II))
      std::swap(LHS, RHS);

    unsigned LHSReg = getRegForValue(LHS);
    if (!LHSReg)
      return false;
    bool LHSIsKill = hasTrivialKill(LHS);

    unsigned RHSReg = 0;
    bool RHSIsKill = false;
    bool UseImm = true;
    if (!isa<ConstantInt>(RHS)) {
      RHSReg = getRegForValue(RHS);
      if (!RHSReg)
        return false;
      RHSIsKill = hasTrivialKill(RHS);
      UseImm = false;
    }

    unsigned Opc = 0;
    unsigned MulReg = 0;
    AArch64CC::CondCode CC = AArch64CC::Invalid;
    bool Is64Bit = VT == MVT::i64;
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::sadd_with_overflow:
      if (UseImm)
        Opc = Is64Bit ? AArch64::ADDSXri : AArch64::ADDSWri;
      else
        Opc = Is64Bit ? AArch64::ADDSXrr : AArch64::ADDSWrr;
      CC = AArch64CC::VS;
      break;
    case Intrinsic::uadd_with_overflow:
      if (UseImm)
        Opc = Is64Bit ? AArch64::ADDSXri : AArch64::ADDSWri;
      else
        Opc = Is64Bit ? AArch64::ADDSXrr : AArch64::ADDSWrr;
      CC = AArch64CC::HS;
      break;
    case Intrinsic::ssub_with_overflow:
      if (UseImm)
        Opc = Is64Bit ? AArch64::SUBSXri : AArch64::SUBSWri;
      else
        Opc = Is64Bit ? AArch64::SUBSXrr : AArch64::SUBSWrr;
      CC = AArch64CC::VS;
      break;
    case Intrinsic::usub_with_overflow:
      if (UseImm)
        Opc = Is64Bit ? AArch64::SUBSXri : AArch64::SUBSWri;
      else
        Opc = Is64Bit ? AArch64::SUBSXrr : AArch64::SUBSWrr;
      CC = AArch64CC::LO;
      break;
    case Intrinsic::smul_with_overflow: {
      CC = AArch64CC::NE;
      // Multiplies cannot take an immediate operand, so force the RHS into a
      // register.
      if (UseImm) {
        RHSReg = getRegForValue(RHS);
        if (!RHSReg)
          return false;
        RHSIsKill = hasTrivialKill(RHS);
      }
      if (VT == MVT::i32) {
        MulReg = Emit_SMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
        unsigned ShiftReg = Emit_LSR_ri(MVT::i64, MulReg, false, 32);
        MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
                                            AArch64::sub_32);
        ShiftReg = FastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
                                              AArch64::sub_32);
        unsigned CmpReg = createResultReg(TLI.getRegClassFor(VT));
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(AArch64::SUBSWrs), CmpReg)
            .addReg(ShiftReg, getKillRegState(true))
            .addReg(MulReg, getKillRegState(false))
            .addImm(159); // 159 <-> asr #31
      } else {
        assert(VT == MVT::i64 && "Unexpected value type.");
        MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
        unsigned SMULHReg = FastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
                                        RHSReg, RHSIsKill);
        unsigned CmpReg = createResultReg(TLI.getRegClassFor(VT));
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(AArch64::SUBSXrs), CmpReg)
            .addReg(SMULHReg, getKillRegState(true))
            .addReg(MulReg, getKillRegState(false))
            .addImm(191); // 191 <-> asr #63
      }
      break;
    }
    case Intrinsic::umul_with_overflow: {
      CC = AArch64CC::NE;
      // Multiplies cannot take an immediate operand, so force the RHS into a
      // register.
      if (UseImm) {
        RHSReg = getRegForValue(RHS);
        if (!RHSReg)
          return false;
        RHSIsKill = hasTrivialKill(RHS);
      }
      if (VT == MVT::i32) {
        MulReg = Emit_UMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
        unsigned CmpReg = createResultReg(TLI.getRegClassFor(MVT::i64));
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(AArch64::SUBSXrs), CmpReg)
            .addReg(AArch64::XZR, getKillRegState(true))
            .addReg(MulReg, getKillRegState(false))
            .addImm(96); // 96 <-> lsr #32
        MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
                                            AArch64::sub_32);
      } else {
        assert(VT == MVT::i64 && "Unexpected value type.");
        MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
        unsigned UMULHReg = FastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
                                        RHSReg, RHSIsKill);
        unsigned CmpReg = createResultReg(TLI.getRegClassFor(VT));
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(AArch64::SUBSXrr), CmpReg)
            .addReg(AArch64::XZR, getKillRegState(true))
            .addReg(UMULHReg, getKillRegState(false));
      }
      break;
    }
    }

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    if (Opc) {
      MachineInstrBuilder MIB;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                    ResultReg)
                .addReg(LHSReg, getKillRegState(LHSIsKill));
      if (UseImm)
        MIB.addImm(cast<ConstantInt>(RHS)->getZExtValue());
      else
        MIB.addReg(RHSReg, getKillRegState(RHSIsKill));
    }
    if (MulReg) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(MulReg);
    }

    unsigned ResultReg2 = FuncInfo.CreateRegs(CondTy);
    assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
            ResultReg2)
        .addReg(AArch64::WZR, getKillRegState(true))
        .addReg(AArch64::WZR, getKillRegState(true))
        .addImm(getInvertedCondCode(CC));

    UpdateValueMap(II, ResultReg, 2);
    return true;
  }
  }
  return false;
}

bool AArch64FastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    CallingConv::ID CC = F.getCallingConv();
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCAssignFn *RetCC = CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
                                                     : RetCC_AArch64_AAPCS;
    CCInfo.AnalyzeReturn(Outs, RetCC);

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];
    const Value *RV = Ret->getOperand(0);

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DestReg = VA.getLocReg();
    // Avoid a cross-class copy. This is very unlikely.
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
      return false;

    EVT RVEVT = TLI.getValueType(RV->getType());
    if (!RVEVT.isSimple())
      return false;

    // Vectors (of > 1 lane) in big endian need tricky handling.
    if (RVEVT.isVector() && RVEVT.getVectorNumElements() > 1)
      return false;

    MVT RVVT = RVEVT.getSimpleVT();
    if (RVVT == MVT::f128)
      return false;
    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      bool isZExt = Outs[0].Flags.isZExt();
      SrcReg = EmitIntExt(RVVT, SrcReg, DestVT, isZExt);
      if (SrcReg == 0)
        return false;
    }

    // Make the copy.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(AArch64::RET_ReallyLR));
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}

bool AArch64FastISel::SelectTrunc(const Instruction *I) {
  Type *DestTy = I->getType();
  Value *Op = I->getOperand(0);
  Type *SrcTy = Op->getType();

  EVT SrcEVT = TLI.getValueType(SrcTy, true);
  EVT DestEVT = TLI.getValueType(DestTy, true);
  if (!SrcEVT.isSimple())
    return false;
  if (!DestEVT.isSimple())
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();

  if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
      SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 &&
      DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg)
    return false;

  // If we're truncating from i64 to a smaller non-legal type then generate an
  // AND. Otherwise, we know the high bits are undefined and a truncate doesn't
  // generate any code.
  if (SrcVT == MVT::i64) {
    uint64_t Mask = 0;
    switch (DestVT.SimpleTy) {
    default:
      // Trunc i64 to i32 is handled by the target-independent fast-isel.
      return false;
    case MVT::i1:
      Mask = 0x1;
      break;
    case MVT::i8:
      Mask = 0xff;
      break;
    case MVT::i16:
      Mask = 0xffff;
      break;
    }
    // Issue an extract_subreg to get the lower 32-bits.
    unsigned Reg32 = FastEmitInst_extractsubreg(MVT::i32, SrcReg, /*Kill=*/true,
                                                AArch64::sub_32);
    MRI.constrainRegClass(Reg32, &AArch64::GPR32RegClass);
    // Create the AND instruction which performs the actual truncation.
    unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
            ANDReg)
        .addReg(Reg32)
        .addImm(AArch64_AM::encodeLogicalImmediate(Mask, 32));
    SrcReg = ANDReg;
  }

  UpdateValueMap(I, SrcReg);
  return true;
}

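// E.g. trunc i64 -> i8 becomes an extract of the low W subregister followed
// by "and w0, w0, #0xff"; trunc i64 -> i32 needs no AND and is left to the
// target-independent fast-isel.
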
unsigned AArch64FastISel::Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt) {
  assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 ||
          DestVT == MVT::i64) &&
         "Unexpected value type.");
  // Handle i8 and i16 as i32.
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
    DestVT = MVT::i32;

  if (isZExt) {
    MRI.constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
    unsigned ResultReg = createResultReg(&AArch64::GPR32spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
            ResultReg)
        .addReg(SrcReg)
        .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));

    if (DestVT == MVT::i64) {
      // We're ZExt i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the
      // upper 32 bits. Emit a SUBREG_TO_REG to extend from Wd to Xd.
      unsigned Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(AArch64::SUBREG_TO_REG), Reg64)
          .addImm(0)
          .addReg(ResultReg)
          .addImm(AArch64::sub_32);
      ResultReg = Reg64;
    }
    return ResultReg;
  } else {
    if (DestVT == MVT::i64) {
      // FIXME: We're SExt i1 to i64.
      return 0;
    }
    unsigned ResultReg = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SBFMWri),
            ResultReg)
        .addReg(SrcReg)
        .addImm(0)
        .addImm(0);
    return ResultReg;
  }
}

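// Note: "sbfm w0, w1, #0, #0" replicates bit 0 across the register, so the
// sign-extended i1 correctly yields 0 or -1.
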
unsigned AArch64FastISel::Emit_MUL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned Opc, ZReg;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    RetVT = MVT::i32;
    Opc = AArch64::MADDWrrr; ZReg = AArch64::WZR; break;
  case MVT::i64:
    Opc = AArch64::MADDXrrr; ZReg = AArch64::XZR; break;
  }

  // Create the base instruction, then add the operands.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(Op0, getKillRegState(Op0IsKill))
      .addReg(Op1, getKillRegState(Op1IsKill))
      .addReg(ZReg, getKillRegState(true));

  return ResultReg;
}

unsigned AArch64FastISel::Emit_SMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                        unsigned Op1, bool Op1IsKill) {
  if (RetVT != MVT::i64)
    return 0;

  // Create the base instruction, then add the operands.
  unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SMADDLrrr),
          ResultReg)
      .addReg(Op0, getKillRegState(Op0IsKill))
      .addReg(Op1, getKillRegState(Op1IsKill))
      .addReg(AArch64::XZR, getKillRegState(true));

  return ResultReg;
}

unsigned AArch64FastISel::Emit_UMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                        unsigned Op1, bool Op1IsKill) {
  if (RetVT != MVT::i64)
    return 0;

  // Create the base instruction, then add the operands.
  unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::UMADDLrrr),
          ResultReg)
      .addReg(Op0, getKillRegState(Op0IsKill))
      .addReg(Op1, getKillRegState(Op1IsKill))
      .addReg(AArch64::XZR, getKillRegState(true));

  return ResultReg;
}

unsigned AArch64FastISel::Emit_LSL_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                      uint64_t Shift) {
  unsigned Opc, ImmR, ImmS;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    RetVT = MVT::i32;
    Opc = AArch64::UBFMWri; ImmR = -Shift % 32; ImmS = 31 - Shift; break;
  case MVT::i64:
    Opc = AArch64::UBFMXri; ImmR = -Shift % 64; ImmS = 63 - Shift; break;
  }

  return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, ImmR,
                          ImmS);
}

unsigned AArch64FastISel::Emit_LSR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                      uint64_t Shift) {
  unsigned Opc, ImmS;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    RetVT = MVT::i32;
    Opc = AArch64::UBFMWri; ImmS = 31; break;
  case MVT::i64:
    Opc = AArch64::UBFMXri; ImmS = 63; break;
  }

  return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, Shift,
                          ImmS);
}

unsigned AArch64FastISel::Emit_ASR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                      uint64_t Shift) {
  unsigned Opc, ImmS;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    RetVT = MVT::i32;
    Opc = AArch64::SBFMWri; ImmS = 31; break;
  case MVT::i64:
    Opc = AArch64::SBFMXri; ImmS = 63; break;
  }

  return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, Shift,
                          ImmS);
}

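// These bitfield-move forms are the architectural shift aliases, e.g. for a
// 32-bit LSL by 4: "lsl w0, w1, #4" == "ubfm w0, w1, #28, #27" (ImmR =
// -Shift mod 32, ImmS = 31 - Shift), while LSR/ASR use UBFM/SBFM with
// ImmR = Shift and ImmS = 31 (or 63 for the X form).
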
unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                     bool isZExt) {
  assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?");

  // FastISel does not have plumbing to deal with extensions where the SrcVT or
  // DestVT are odd things, so test to make sure that they are both types we can
  // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
  // bail out to SelectionDAG.
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) &&
       (DestVT != MVT::i32) && (DestVT != MVT::i64)) ||
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) &&
       (SrcVT != MVT::i16) && (SrcVT != MVT::i32)))
    return 0;

  unsigned Opc;
  unsigned Imm = 0;

  switch (SrcVT.SimpleTy) {
  default:
    return 0;
  case MVT::i1:
    return Emiti1Ext(SrcReg, DestVT, isZExt);
  case MVT::i8:
    if (DestVT == MVT::i64)
      Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = isZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    Imm = 7;
    break;
  case MVT::i16:
    if (DestVT == MVT::i64)
      Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = isZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    Imm = 15;
    break;
  case MVT::i32:
    assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?");
    Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    Imm = 31;
    break;
  }

  // Handle i8 and i16 as i32.
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
    DestVT = MVT::i32;
  else if (DestVT == MVT::i64) {
    unsigned Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), Src64)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(AArch64::sub_32);
    SrcReg = Src64;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(SrcReg)
      .addImm(0)
      .addImm(Imm);

  return ResultReg;
}

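// With ImmR = 0 the UBFM/SBFM pair covers the usual extension aliases, e.g.
// zext i16 -> i32 is "ubfm w0, w1, #0, #15" (uxth) and sext i8 -> i32 is
// "sbfm w0, w1, #0, #7" (sxtb).
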
bool AArch64FastISel::SelectIntExt(const Instruction *I) {
  // On AArch64, in general, integer casts don't involve legal types; this code
  // handles promotable integers. The high bits for a type smaller than
  // the register size are assumed to be undefined.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg)
    return false;

  EVT SrcEVT = TLI.getValueType(SrcTy, true);
  EVT DestEVT = TLI.getValueType(DestTy, true);
  if (!SrcEVT.isSimple())
    return false;
  if (!DestEVT.isSimple())
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = EmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0)
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::SelectRem(const Instruction *I, unsigned ISDOpcode) {
  EVT DestEVT = TLI.getValueType(I->getType(), true);
  if (!DestEVT.isSimple())
    return false;

  MVT DestVT = DestEVT.getSimpleVT();
  if (DestVT != MVT::i64 && DestVT != MVT::i32)
    return false;

  unsigned DivOpc;
  bool is64bit = (DestVT == MVT::i64);
  switch (ISDOpcode) {
  default:
    return false;
  case ISD::SREM:
    DivOpc = is64bit ? AArch64::SDIVXr : AArch64::SDIVWr;
    break;
  case ISD::UREM:
    DivOpc = is64bit ? AArch64::UDIVXr : AArch64::UDIVWr;
    break;
  }
  unsigned MSubOpc = is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;

  unsigned Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src1Reg)
    return false;

  unsigned QuotReg = createResultReg(TLI.getRegClassFor(DestVT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(DivOpc), QuotReg)
      .addReg(Src0Reg)
      .addReg(Src1Reg);
  // The remainder is computed as numerator - (quotient * denominator) using the
  // MSUB instruction.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MSubOpc), ResultReg)
      .addReg(QuotReg)
      .addReg(Src1Reg)
      .addReg(Src0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

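// E.g. for a 32-bit srem the emitted sequence is:
//   sdiv w8, w0, w1     ; quotient
//   msub w0, w8, w1, w0 ; w0 - (w8 * w1), i.e. the remainder
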
bool AArch64FastISel::SelectMul(const Instruction *I) {
  EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType(), true);
  if (!SrcEVT.isSimple())
    return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  // Must be simple value type. Don't handle vectors.
  if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
      SrcVT != MVT::i8)
    return false;

  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;
  bool Src0IsKill = hasTrivialKill(I->getOperand(0));

  unsigned Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src1Reg)
    return false;
  bool Src1IsKill = hasTrivialKill(I->getOperand(1));

  unsigned ResultReg =
      Emit_MUL_rr(SrcVT, Src0Reg, Src0IsKill, Src1Reg, Src1IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::SelectShift(const Instruction *I, bool IsLeftShift,
                                  bool IsArithmetic) {
  EVT RetEVT = TLI.getValueType(I->getType(), true);
  if (!RetEVT.isSimple())
    return false;
  MVT RetVT = RetEVT.getSimpleVT();

  if (!isa<ConstantInt>(I->getOperand(1)))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (!Op0Reg)
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  uint64_t ShiftVal = cast<ConstantInt>(I->getOperand(1))->getZExtValue();

  unsigned ResultReg;
  if (IsLeftShift)
    ResultReg = Emit_LSL_ri(RetVT, Op0Reg, Op0IsKill, ShiftVal);
  else {
    if (IsArithmetic)
      ResultReg = Emit_ASR_ri(RetVT, Op0Reg, Op0IsKill, ShiftVal);
    else
      ResultReg = Emit_LSR_ri(RetVT, Op0Reg, Op0IsKill, ShiftVal);
  }

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
    return SelectLoad(I);
  case Instruction::Store:
    return SelectStore(I);
  case Instruction::Br:
    return SelectBranch(I);
  case Instruction::IndirectBr:
    return SelectIndirectBr(I);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return SelectCmp(I);
  case Instruction::Select:
    return SelectSelect(I);
  case Instruction::FPExt:
    return SelectFPExt(I);
  case Instruction::FPTrunc:
    return SelectFPTrunc(I);
  case Instruction::FPToSI:
    return SelectFPToInt(I, /*Signed=*/true);
  case Instruction::FPToUI:
    return SelectFPToInt(I, /*Signed=*/false);
  case Instruction::SIToFP:
    return SelectIntToFP(I, /*Signed=*/true);
  case Instruction::UIToFP:
    return SelectIntToFP(I, /*Signed=*/false);
  case Instruction::SRem:
    return SelectRem(I, ISD::SREM);
  case Instruction::URem:
    return SelectRem(I, ISD::UREM);
  case Instruction::Ret:
    return SelectRet(I);
  case Instruction::Trunc:
    return SelectTrunc(I);
  case Instruction::ZExt:
  case Instruction::SExt:
    return SelectIntExt(I);

  // FIXME: All of these should really be handled by the target-independent
  // selector -> improve FastISel tblgen.
  case Instruction::Mul:
    return SelectMul(I);
  case Instruction::Shl:
    return SelectShift(I, /*IsLeftShift=*/true, /*IsArithmetic=*/false);
  case Instruction::LShr:
    return SelectShift(I, /*IsLeftShift=*/false, /*IsArithmetic=*/false);
  case Instruction::AShr:
    return SelectShift(I, /*IsLeftShift=*/false, /*IsArithmetic=*/true);
  }
  // Silence warnings.
  (void)&CC_AArch64_DarwinPCS_VarArg;
  return false;
}

namespace llvm {
llvm::FastISel *AArch64::createFastISel(FunctionLoweringInfo &funcInfo,
                                        const TargetLibraryInfo *libInfo) {
  return new AArch64FastISel(funcInfo, libInfo);
}
}