1 //===-- AArch64FastISel.cpp - AArch64 FastISel implementation -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the AArch64-specific support for the FastISel class. Some
11 // of the target-specific code is generated by tablegen in the file
12 // AArch64GenFastISel.inc, which is #included here.
14 //===----------------------------------------------------------------------===//
17 #include "AArch64Subtarget.h"
18 #include "AArch64TargetMachine.h"
19 #include "MCTargetDesc/AArch64AddressingModes.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/FastISel.h"
22 #include "llvm/CodeGen/FunctionLoweringInfo.h"
23 #include "llvm/CodeGen/MachineConstantPool.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/IR/CallingConv.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/GetElementPtrTypeIterator.h"
32 #include "llvm/IR/GlobalAlias.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/Operator.h"
37 #include "llvm/Support/CommandLine.h"
42 class AArch64FastISel : public FastISel {
58 const GlobalValue *GV;
61 Address() : Kind(RegBase), Offset(0), GV(nullptr) { Base.Reg = 0; }
62 void setKind(BaseKind K) { Kind = K; }
63 BaseKind getKind() const { return Kind; }
64 bool isRegBase() const { return Kind == RegBase; }
65 bool isFIBase() const { return Kind == FrameIndexBase; }
66 void setReg(unsigned Reg) {
67 assert(isRegBase() && "Invalid base register access!");
70 unsigned getReg() const {
71 assert(isRegBase() && "Invalid base register access!");
74 void setFI(unsigned FI) {
75 assert(isFIBase() && "Invalid base frame index access!");
78 unsigned getFI() const {
79 assert(isFIBase() && "Invalid base frame index access!");
82 void setOffset(int64_t O) { Offset = O; }
83 int64_t getOffset() { return Offset; }
85 void setGlobalValue(const GlobalValue *G) { GV = G; }
86 const GlobalValue *getGlobalValue() { return GV; }
88 bool isValid() { return isFIBase() || (isRegBase() && getReg() != 0); }
91 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
92 /// make the right decision when generating code for different targets.
93 const AArch64Subtarget *Subtarget;
96 bool FastLowerCall(CallLoweringInfo &CLI) override;
97 bool FastLowerIntrinsicCall(const IntrinsicInst *II) override;
100 // Selection routines.
101 bool SelectLoad(const Instruction *I);
102 bool SelectStore(const Instruction *I);
103 bool SelectBranch(const Instruction *I);
104 bool SelectIndirectBr(const Instruction *I);
105 bool SelectCmp(const Instruction *I);
106 bool SelectSelect(const Instruction *I);
107 bool SelectFPExt(const Instruction *I);
108 bool SelectFPTrunc(const Instruction *I);
109 bool SelectFPToInt(const Instruction *I, bool Signed);
110 bool SelectIntToFP(const Instruction *I, bool Signed);
111 bool SelectRem(const Instruction *I, unsigned ISDOpcode);
112 bool SelectRet(const Instruction *I);
113 bool SelectTrunc(const Instruction *I);
114 bool SelectIntExt(const Instruction *I);
115 bool SelectMul(const Instruction *I);
116 bool SelectShift(const Instruction *I, bool IsLeftShift, bool IsArithmetic);
118 // Utility helper routines.
119 bool isTypeLegal(Type *Ty, MVT &VT);
120 bool isLoadStoreTypeLegal(Type *Ty, MVT &VT);
121 bool ComputeAddress(const Value *Obj, Address &Addr);
122 bool ComputeCallAddress(const Value *V, Address &Addr);
123 bool SimplifyAddress(Address &Addr, MVT VT, int64_t ScaleFactor,
125 void AddLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB,
126 unsigned Flags, bool UseUnscaled);
127 bool IsMemCpySmall(uint64_t Len, unsigned Alignment);
128 bool TryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
130 bool foldXALUIntrinsic(AArch64CC::CondCode &CC, const Instruction *I,
134 bool EmitCmp(Value *Src1Value, Value *Src2Value, bool isZExt);
135 bool EmitLoad(MVT VT, unsigned &ResultReg, Address Addr,
136 bool UseUnscaled = false);
137 bool EmitStore(MVT VT, unsigned SrcReg, Address Addr,
138 bool UseUnscaled = false);
139 unsigned EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
140 unsigned Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
141 unsigned Emit_MUL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
142 unsigned Op1, bool Op1IsKill);
143 unsigned Emit_SMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
144 unsigned Op1, bool Op1IsKill);
145 unsigned Emit_UMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
146 unsigned Op1, bool Op1IsKill);
147 unsigned Emit_LSL_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t Imm);
148 unsigned Emit_LSR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t Imm);
149 unsigned Emit_ASR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t Imm);
151 unsigned AArch64MaterializeFP(const ConstantFP *CFP, MVT VT);
152 unsigned AArch64MaterializeGV(const GlobalValue *GV);
154 // Call handling routines.
156 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
157 bool ProcessCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
159 bool FinishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
162 // Backend specific FastISel code.
163 unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
164 unsigned TargetMaterializeConstant(const Constant *C) override;
166 explicit AArch64FastISel(FunctionLoweringInfo &funcInfo,
167 const TargetLibraryInfo *libInfo)
168 : FastISel(funcInfo, libInfo) {
169 Subtarget = &TM.getSubtarget<AArch64Subtarget>();
170 Context = &funcInfo.Fn->getContext();
173 bool TargetSelectInstruction(const Instruction *I) override;
175 #include "AArch64GenFastISel.inc"
178 } // end anonymous namespace
180 #include "AArch64GenCallingConv.inc"
182 CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const {
183 if (CC == CallingConv::WebKit_JS)
184 return CC_AArch64_WebKit_JS;
185 return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
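// TargetMaterializeAlloca - materialize the address of a static alloca as an
// ADDXri off its frame index; dynamic allocas are not handled here.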
188 unsigned AArch64FastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
189 assert(TLI.getValueType(AI->getType(), true) == MVT::i64 &&
190 "Alloca should always return a pointer.");
192 // Don't handle dynamic allocas.
193 if (!FuncInfo.StaticAllocaMap.count(AI))
196 DenseMap<const AllocaInst *, int>::iterator SI =
197 FuncInfo.StaticAllocaMap.find(AI);
199 if (SI != FuncInfo.StaticAllocaMap.end()) {
200 unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
201 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
203 .addFrameIndex(SI->second)
212 unsigned AArch64FastISel::AArch64MaterializeFP(const ConstantFP *CFP, MVT VT) {
213 if (VT != MVT::f32 && VT != MVT::f64)
216 const APFloat Val = CFP->getValueAPF();
217 bool is64bit = (VT == MVT::f64);
219 // This checks to see if we can use FMOV instructions to materialize
220 // a constant; otherwise we have to materialize via the constant pool.
221 if (TLI.isFPImmLegal(Val, VT)) {
225 Imm = AArch64_AM::getFP64Imm(Val);
226 Opc = AArch64::FMOVDi;
228 Imm = AArch64_AM::getFP32Imm(Val);
229 Opc = AArch64::FMOVSi;
231 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
232 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
237 // Materialize via constant pool. MachineConstantPool wants an explicit
239 unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
241 Align = DL.getTypeAllocSize(CFP->getType());
243 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
244 unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
245 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
246 ADRPReg).addConstantPoolIndex(Idx, 0, AArch64II::MO_PAGE);
248 unsigned Opc = is64bit ? AArch64::LDRDui : AArch64::LDRSui;
249 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
250 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
252 .addConstantPoolIndex(Idx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
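// AArch64MaterializeGV - form a global address with ADRP + LDRXui (GOT-indirect)
// or ADRP + ADDXri (direct page + pageoff); thread-local globals and non-MachO
// large-code-model accesses are rejected.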
256 unsigned AArch64FastISel::AArch64MaterializeGV(const GlobalValue *GV) {
257 // We can't handle thread-local variables quickly yet.
258 if (GV->isThreadLocal())
261 // MachO still uses GOT for large code-model accesses, but ELF requires
262 // movz/movk sequences, which FastISel doesn't handle yet.
263 if (TM.getCodeModel() != CodeModel::Small && !Subtarget->isTargetMachO())
266 unsigned char OpFlags = Subtarget->ClassifyGlobalReference(GV, TM);
268 EVT DestEVT = TLI.getValueType(GV->getType(), true);
269 if (!DestEVT.isSimple())
272 unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
275 if (OpFlags & AArch64II::MO_GOT) {
277 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
279 .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGE);
281 ResultReg = createResultReg(&AArch64::GPR64RegClass);
282 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::LDRXui),
285 .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
289 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
290 ADRPReg).addGlobalAddress(GV, 0, AArch64II::MO_PAGE);
292 ResultReg = createResultReg(&AArch64::GPR64spRegClass);
293 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
296 .addGlobalAddress(GV, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC)
302 unsigned AArch64FastISel::TargetMaterializeConstant(const Constant *C) {
303 EVT CEVT = TLI.getValueType(C->getType(), true);
305 // Only handle simple types.
306 if (!CEVT.isSimple())
308 MVT VT = CEVT.getSimpleVT();
310 // FIXME: Handle ConstantInt.
311 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
312 return AArch64MaterializeFP(CFP, VT);
313 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
314 return AArch64MaterializeGV(GV);
319 // Computes the address to get to an object.
320 bool AArch64FastISel::ComputeAddress(const Value *Obj, Address &Addr) {
321 const User *U = nullptr;
322 unsigned Opcode = Instruction::UserOp1;
323 if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
324 // Don't walk into other basic blocks unless the object is an alloca from
325 // another block; otherwise it may not have a virtual register assigned.
326 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
327 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
328 Opcode = I->getOpcode();
331 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
332 Opcode = C->getOpcode();
336 if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
337 if (Ty->getAddressSpace() > 255)
338 // Fast instruction selection doesn't support the special
345 case Instruction::BitCast: {
346 // Look through bitcasts.
347 return ComputeAddress(U->getOperand(0), Addr);
349 case Instruction::IntToPtr: {
350 // Look past no-op inttoptrs.
351 if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
352 return ComputeAddress(U->getOperand(0), Addr);
355 case Instruction::PtrToInt: {
356 // Look past no-op ptrtoints.
357 if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
358 return ComputeAddress(U->getOperand(0), Addr);
361 case Instruction::GetElementPtr: {
362 Address SavedAddr = Addr;
363 uint64_t TmpOffset = Addr.getOffset();
365 // Iterate through the GEP folding the constants into offsets where
367 gep_type_iterator GTI = gep_type_begin(U);
368 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
370 const Value *Op = *i;
371 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
372 const StructLayout *SL = DL.getStructLayout(STy);
373 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
374 TmpOffset += SL->getElementOffset(Idx);
376 uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
378 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
379 // Constant-offset addressing.
380 TmpOffset += CI->getSExtValue() * S;
383 if (canFoldAddIntoGEP(U, Op)) {
384 // A compatible add with a constant operand. Fold the constant.
386 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
387 TmpOffset += CI->getSExtValue() * S;
388 // Iterate on the other operand.
389 Op = cast<AddOperator>(Op)->getOperand(0);
393 goto unsupported_gep;
398 // Try to grab the base operand now.
399 Addr.setOffset(TmpOffset);
400 if (ComputeAddress(U->getOperand(0), Addr))
403 // We failed, restore everything and try the other options.
409 case Instruction::Alloca: {
410 const AllocaInst *AI = cast<AllocaInst>(Obj);
411 DenseMap<const AllocaInst *, int>::iterator SI =
412 FuncInfo.StaticAllocaMap.find(AI);
413 if (SI != FuncInfo.StaticAllocaMap.end()) {
414 Addr.setKind(Address::FrameIndexBase);
415 Addr.setFI(SI->second);
422 // Try to get this in a register if nothing else has worked.
424 Addr.setReg(getRegForValue(Obj));
425 return Addr.isValid();
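// ComputeCallAddress - resolve a call target to a GlobalValue or, failing that,
// a register, looking through bitcasts and no-op int/ptr conversions that stay
// within the current block.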
428 bool AArch64FastISel::ComputeCallAddress(const Value *V, Address &Addr) {
429 const User *U = nullptr;
430 unsigned Opcode = Instruction::UserOp1;
433 if (const auto *I = dyn_cast<Instruction>(V)) {
434 Opcode = I->getOpcode();
436 InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
437 } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
438 Opcode = C->getOpcode();
444 case Instruction::BitCast:
445 // Look past the bitcast if its operand is in the same BB.
447 return ComputeCallAddress(U->getOperand(0), Addr);
449 case Instruction::IntToPtr:
450 // Look past a no-op inttoptr if its operand is in the same BB.
452 TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
453 return ComputeCallAddress(U->getOperand(0), Addr);
455 case Instruction::PtrToInt:
456 // Look past a no-op ptrtoint if its operand is in the same BB.
458 TLI.getValueType(U->getType()) == TLI.getPointerTy())
459 return ComputeCallAddress(U->getOperand(0), Addr);
463 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
464 Addr.setGlobalValue(GV);
468 // If all else fails, try to materialize the value in a register.
469 if (!Addr.getGlobalValue()) {
470 Addr.setReg(getRegForValue(V));
471 return Addr.getReg() != 0;
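// isTypeLegal - map Ty to a simple MVT and check that it is a type this
// fast-isel implementation can keep directly in a register.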
478 bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) {
479 EVT evt = TLI.getValueType(Ty, true);
481 // Only handle simple types.
482 if (evt == MVT::Other || !evt.isSimple())
484 VT = evt.getSimpleVT();
486 // This is a legal type, but it's not something we handle in fast-isel.
490 // Handle all other legal types, i.e. a register that will directly hold this
492 return TLI.isTypeLegal(VT);
495 bool AArch64FastISel::isLoadStoreTypeLegal(Type *Ty, MVT &VT) {
496 if (isTypeLegal(Ty, VT))
499 // If this is a type that can be sign- or zero-extended to a basic operation,
500 // go ahead and accept it now. For stores, this reflects truncation.
501 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
507 bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT,
508 int64_t ScaleFactor, bool UseUnscaled) {
509 bool needsLowering = false;
510 int64_t Offset = Addr.getOffset();
511 switch (VT.SimpleTy) {
522 // Using scaled, 12-bit, unsigned immediate offsets.
523 needsLowering = ((Offset & 0xfff) != Offset);
525 // Using unscaled, 9-bit, signed immediate offsets.
526 needsLowering = (Offset > 256 || Offset < -256);
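// For example, with a 4-byte access the caller has already divided the byte
// offset by 4, so the scaled check accepts 0..4095 (byte offsets up to 16380),
// while the unscaled form takes a signed 9-bit byte offset.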
530 // If this is a stack pointer and the offset needs to be simplified then put
531 // the alloca address into a register, set the base type back to register and
532 // continue. This should almost never happen.
533 if (needsLowering && Addr.getKind() == Address::FrameIndexBase) {
534 unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
535 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
537 .addFrameIndex(Addr.getFI())
540 Addr.setKind(Address::RegBase);
541 Addr.setReg(ResultReg);
544 // Since the offset is too large for the load/store instruction, get the
545 // reg+offset into a register.
547 uint64_t UnscaledOffset = Addr.getOffset() * ScaleFactor;
548 unsigned ResultReg = FastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(), false,
549 UnscaledOffset, MVT::i64);
552 Addr.setReg(ResultReg);
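// AddLoadStoreOperands - append the address operands to a partially built
// load/store: frame index + offset (with a fixed-stack MachineMemOperand) or
// base register + offset.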
558 void AArch64FastISel::AddLoadStoreOperands(Address &Addr,
559 const MachineInstrBuilder &MIB,
560 unsigned Flags, bool UseUnscaled) {
561 int64_t Offset = Addr.getOffset();
562 // Frame base works a bit differently. Handle it separately.
563 if (Addr.getKind() == Address::FrameIndexBase) {
564 int FI = Addr.getFI();
565 // FIXME: We shouldn't be using getObjectSize/getObjectAlignment. The size
566 // and alignment should be based on the VT.
567 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
568 MachinePointerInfo::getFixedStack(FI, Offset), Flags,
569 MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
570 // Now add the rest of the operands.
571 MIB.addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
573 // Now add the rest of the operands.
574 MIB.addReg(Addr.getReg());
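// EmitLoad - emit a load of VT from Addr into ResultReg, preferring the scaled
// LDR*ui forms and falling back to the unscaled LDUR*i forms for negative or
// unaligned offsets; an i1 load is masked down to bit 0.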
579 bool AArch64FastISel::EmitLoad(MVT VT, unsigned &ResultReg, Address Addr,
581 // Negative offsets require unscaled, 9-bit, signed immediate offsets.
582 // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
583 if (!UseUnscaled && Addr.getOffset() < 0)
587 const TargetRegisterClass *RC;
589 int64_t ScaleFactor = 0;
590 switch (VT.SimpleTy) {
595 // Intentional fall-through.
597 Opc = UseUnscaled ? AArch64::LDURBBi : AArch64::LDRBBui;
598 RC = &AArch64::GPR32RegClass;
602 Opc = UseUnscaled ? AArch64::LDURHHi : AArch64::LDRHHui;
603 RC = &AArch64::GPR32RegClass;
607 Opc = UseUnscaled ? AArch64::LDURWi : AArch64::LDRWui;
608 RC = &AArch64::GPR32RegClass;
612 Opc = UseUnscaled ? AArch64::LDURXi : AArch64::LDRXui;
613 RC = &AArch64::GPR64RegClass;
617 Opc = UseUnscaled ? AArch64::LDURSi : AArch64::LDRSui;
618 RC = TLI.getRegClassFor(VT);
622 Opc = UseUnscaled ? AArch64::LDURDi : AArch64::LDRDui;
623 RC = TLI.getRegClassFor(VT);
629 int64_t Offset = Addr.getOffset();
630 if (Offset & (ScaleFactor - 1))
631 // Retry using an unscaled, 9-bit, signed immediate offset.
632 return EmitLoad(VT, ResultReg, Addr, /*UseUnscaled*/ true);
634 Addr.setOffset(Offset / ScaleFactor);
637 // Simplify this down to something we can handle.
638 if (!SimplifyAddress(Addr, VT, UseUnscaled ? 1 : ScaleFactor, UseUnscaled))
641 // Create the base instruction, then add the operands.
642 ResultReg = createResultReg(RC);
643 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
644 TII.get(Opc), ResultReg);
645 AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, UseUnscaled);
647 // Loading an i1 requires special handling.
649 MRI.constrainRegClass(ResultReg, &AArch64::GPR32RegClass);
650 unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
651 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
654 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
660 bool AArch64FastISel::SelectLoad(const Instruction *I) {
662 // Verify we have a legal type before going any further. Currently, we handle
663 // simple types that will directly fit in a register (i32/f32/i64/f64) or
664 // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
665 if (!isLoadStoreTypeLegal(I->getType(), VT) || cast<LoadInst>(I)->isAtomic())
668 // See if we can handle this address.
670 if (!ComputeAddress(I->getOperand(0), Addr))
674 if (!EmitLoad(VT, ResultReg, Addr))
677 UpdateValueMap(I, ResultReg);
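// EmitStore - emit a store of SrcReg to Addr, choosing between the scaled
// STR*ui and unscaled STUR*i forms; an i1 value is masked to a single bit first.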
681 bool AArch64FastISel::EmitStore(MVT VT, unsigned SrcReg, Address Addr,
683 // Negative offsets require unscaled, 9-bit, signed immediate offsets.
684 // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
685 if (!UseUnscaled && Addr.getOffset() < 0)
690 int64_t ScaleFactor = 0;
691 // Using scaled, 12-bit, unsigned immediate offsets.
692 switch (VT.SimpleTy) {
698 StrOpc = UseUnscaled ? AArch64::STURBBi : AArch64::STRBBui;
702 StrOpc = UseUnscaled ? AArch64::STURHHi : AArch64::STRHHui;
706 StrOpc = UseUnscaled ? AArch64::STURWi : AArch64::STRWui;
710 StrOpc = UseUnscaled ? AArch64::STURXi : AArch64::STRXui;
714 StrOpc = UseUnscaled ? AArch64::STURSi : AArch64::STRSui;
718 StrOpc = UseUnscaled ? AArch64::STURDi : AArch64::STRDui;
724 int64_t Offset = Addr.getOffset();
725 if (Offset & (ScaleFactor - 1))
726 // Retry using an unscaled, 9-bit, signed immediate offset.
727 return EmitStore(VT, SrcReg, Addr, /*UseUnscaled*/ true);
729 Addr.setOffset(Offset / ScaleFactor);
732 // Simplify this down to something we can handle.
733 if (!SimplifyAddress(Addr, VT, UseUnscaled ? 1 : ScaleFactor, UseUnscaled))
736 // Storing an i1 requires special handling.
738 MRI.constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
739 unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
740 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
743 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
746 // Create the base instruction, then add the operands.
747 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
748 TII.get(StrOpc)).addReg(SrcReg);
749 AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, UseUnscaled);
753 bool AArch64FastISel::SelectStore(const Instruction *I) {
755 Value *Op0 = I->getOperand(0);
756 // Verify we have a legal type before going any further. Currently, we handle
757 // simple types that will directly fit in a register (i32/f32/i64/f64) or
758 // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
759 if (!isLoadStoreTypeLegal(Op0->getType(), VT) ||
760 cast<StoreInst>(I)->isAtomic())
763 // Get the value to be stored into a register.
764 unsigned SrcReg = getRegForValue(Op0);
768 // See if we can handle this address.
770 if (!ComputeAddress(I->getOperand(1), Addr))
773 if (!EmitStore(VT, SrcReg, Addr))
778 static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) {
780 case CmpInst::FCMP_ONE:
781 case CmpInst::FCMP_UEQ:
783 // AL is our "false" for now. The other two need more compares.
784 return AArch64CC::AL;
785 case CmpInst::ICMP_EQ:
786 case CmpInst::FCMP_OEQ:
787 return AArch64CC::EQ;
788 case CmpInst::ICMP_SGT:
789 case CmpInst::FCMP_OGT:
790 return AArch64CC::GT;
791 case CmpInst::ICMP_SGE:
792 case CmpInst::FCMP_OGE:
793 return AArch64CC::GE;
794 case CmpInst::ICMP_UGT:
795 case CmpInst::FCMP_UGT:
796 return AArch64CC::HI;
797 case CmpInst::FCMP_OLT:
798 return AArch64CC::MI;
799 case CmpInst::ICMP_ULE:
800 case CmpInst::FCMP_OLE:
801 return AArch64CC::LS;
802 case CmpInst::FCMP_ORD:
803 return AArch64CC::VC;
804 case CmpInst::FCMP_UNO:
805 return AArch64CC::VS;
806 case CmpInst::FCMP_UGE:
807 return AArch64CC::PL;
808 case CmpInst::ICMP_SLT:
809 case CmpInst::FCMP_ULT:
810 return AArch64CC::LT;
811 case CmpInst::ICMP_SLE:
812 case CmpInst::FCMP_ULE:
813 return AArch64CC::LE;
814 case CmpInst::FCMP_UNE:
815 case CmpInst::ICMP_NE:
816 return AArch64CC::NE;
817 case CmpInst::ICMP_UGE:
818 return AArch64CC::HS;
819 case CmpInst::ICMP_ULT:
820 return AArch64CC::LO;
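// SelectBranch - lower a conditional branch, handling in turn: a single-use
// compare in the same block, a truncated condition, a constant condition, a
// foldable overflow intrinsic, and finally a generic test of the i1 condition.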
824 bool AArch64FastISel::SelectBranch(const Instruction *I) {
825 const BranchInst *BI = cast<BranchInst>(I);
826 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
827 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
829 AArch64CC::CondCode CC = AArch64CC::NE;
830 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
831 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
832 // We may not handle every CC for now.
833 CC = getCompareCC(CI->getPredicate());
834 if (CC == AArch64CC::AL)
838 if (!EmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
842 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
845 FuncInfo.MBB->addSuccessor(TBB);
847 FastEmitBranch(FBB, DbgLoc);
850 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
852 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
853 (isLoadStoreTypeLegal(TI->getOperand(0)->getType(), SrcVT))) {
854 unsigned CondReg = getRegForValue(TI->getOperand(0));
858 // Issue an extract_subreg to get the lower 32-bits.
859 if (SrcVT == MVT::i64)
860 CondReg = FastEmitInst_extractsubreg(MVT::i32, CondReg, /*Kill=*/true,
863 MRI.constrainRegClass(CondReg, &AArch64::GPR32RegClass);
864 unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
865 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
866 TII.get(AArch64::ANDWri), ANDReg)
868 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
869 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
870 TII.get(AArch64::SUBSWri))
876 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
880 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
883 FuncInfo.MBB->addSuccessor(TBB);
884 FastEmitBranch(FBB, DbgLoc);
887 } else if (const ConstantInt *CI =
888 dyn_cast<ConstantInt>(BI->getCondition())) {
889 uint64_t Imm = CI->getZExtValue();
890 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
891 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::B))
893 FuncInfo.MBB->addSuccessor(Target);
895 } else if (foldXALUIntrinsic(CC, I, BI->getCondition())) {
896 // Fake request the condition, otherwise the intrinsic might be completely
898 unsigned CondReg = getRegForValue(BI->getCondition());
903 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
906 FuncInfo.MBB->addSuccessor(TBB);
908 FastEmitBranch(FBB, DbgLoc);
912 unsigned CondReg = getRegForValue(BI->getCondition());
916 // We've been divorced from our compare! Our block was split, and
917 // now our compare lives in a predecessor block. We mustn't
918 // re-compare here, as the children of the compare aren't guaranteed
919 // live across the block boundary (we *could* check for this).
920 // Regardless, the compare has been done in the predecessor block,
921 // and it left a value for us in a virtual register. Ergo, we test
922 // the one-bit value left in the virtual register.
923 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBSWri),
929 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
934 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
937 FuncInfo.MBB->addSuccessor(TBB);
938 FastEmitBranch(FBB, DbgLoc);
942 bool AArch64FastISel::SelectIndirectBr(const Instruction *I) {
943 const IndirectBrInst *BI = cast<IndirectBrInst>(I);
944 unsigned AddrReg = getRegForValue(BI->getOperand(0));
948 // Emit the indirect branch.
949 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BR))
952 // Make sure the CFG is up-to-date.
953 for (unsigned i = 0, e = BI->getNumSuccessors(); i != e; ++i)
954 FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[BI->getSuccessor(i)]);
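// EmitCmp - emit a flag-setting compare of the two source values: SUBS/ADDS
// with a 12-bit immediate or a register for integers (extending i1/i8/i16
// first), and FCMP (optionally against #0.0) for floating point.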
959 bool AArch64FastISel::EmitCmp(Value *Src1Value, Value *Src2Value, bool isZExt) {
960 Type *Ty = Src1Value->getType();
961 EVT SrcEVT = TLI.getValueType(Ty, true);
962 if (!SrcEVT.isSimple())
964 MVT SrcVT = SrcEVT.getSimpleVT();
966 // Check to see if the 2nd operand is a constant that we can encode directly
970 bool isNegativeImm = false;
971 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
972 if (SrcVT == MVT::i64 || SrcVT == MVT::i32 || SrcVT == MVT::i16 ||
973 SrcVT == MVT::i8 || SrcVT == MVT::i1) {
974 const APInt &CIVal = ConstInt->getValue();
976 Imm = (isZExt) ? CIVal.getZExtValue() : CIVal.getSExtValue();
977 if (CIVal.isNegative()) {
978 isNegativeImm = true;
981 // FIXME: We can handle more immediates using shifts.
982 UseImm = ((Imm & 0xfff) == Imm);
984 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
985 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
986 if (ConstFP->isZero() && !ConstFP->isNegative())
993 bool needsExt = false;
994 switch (SrcVT.SimpleTy) {
1001 // Intentional fall-through.
1003 ZReg = AArch64::WZR;
1005 CmpOpc = isNegativeImm ? AArch64::ADDSWri : AArch64::SUBSWri;
1007 CmpOpc = AArch64::SUBSWrr;
1010 ZReg = AArch64::XZR;
1012 CmpOpc = isNegativeImm ? AArch64::ADDSXri : AArch64::SUBSXri;
1014 CmpOpc = AArch64::SUBSXrr;
1018 CmpOpc = UseImm ? AArch64::FCMPSri : AArch64::FCMPSrr;
1022 CmpOpc = UseImm ? AArch64::FCMPDri : AArch64::FCMPDrr;
1026 unsigned SrcReg1 = getRegForValue(Src1Value);
1032 SrcReg2 = getRegForValue(Src2Value);
1037 // We have i1, i8, or i16; we need to either zero-extend or sign-extend.
1039 SrcReg1 = EmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1043 SrcReg2 = EmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1051 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
1057 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
1063 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
1066 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
1073 bool AArch64FastISel::SelectCmp(const Instruction *I) {
1074 const CmpInst *CI = cast<CmpInst>(I);
1076 // We may not handle every CC for now.
1077 AArch64CC::CondCode CC = getCompareCC(CI->getPredicate());
1078 if (CC == AArch64CC::AL)
1082 if (!EmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1085 // Now set a register based on the comparison.
1086 AArch64CC::CondCode invertedCC = getInvertedCondCode(CC);
1087 unsigned ResultReg = createResultReg(&AArch64::GPR32RegClass);
1088 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
1090 .addReg(AArch64::WZR)
1091 .addReg(AArch64::WZR)
1092 .addImm(invertedCC);
1094 UpdateValueMap(I, ResultReg);
1098 bool AArch64FastISel::SelectSelect(const Instruction *I) {
1099 const SelectInst *SI = cast<SelectInst>(I);
1101 EVT DestEVT = TLI.getValueType(SI->getType(), true);
1102 if (!DestEVT.isSimple())
1105 MVT DestVT = DestEVT.getSimpleVT();
1106 if (DestVT != MVT::i32 && DestVT != MVT::i64 && DestVT != MVT::f32 &&
1111 switch (DestVT.SimpleTy) {
1112 default: return false;
1113 case MVT::i32: SelectOpc = AArch64::CSELWr; break;
1114 case MVT::i64: SelectOpc = AArch64::CSELXr; break;
1115 case MVT::f32: SelectOpc = AArch64::FCSELSrrr; break;
1116 case MVT::f64: SelectOpc = AArch64::FCSELDrrr; break;
1119 const Value *Cond = SI->getCondition();
1120 bool NeedTest = true;
1121 AArch64CC::CondCode CC = AArch64CC::NE;
1122 if (foldXALUIntrinsic(CC, I, Cond))
1125 unsigned CondReg = getRegForValue(Cond);
1128 bool CondIsKill = hasTrivialKill(Cond);
1131 MRI.constrainRegClass(CondReg, &AArch64::GPR32RegClass);
1132 unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
1133 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
1135 .addReg(CondReg, getKillRegState(CondIsKill))
1136 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
1138 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBSWri))
1145 unsigned TrueReg = getRegForValue(SI->getTrueValue());
1146 bool TrueIsKill = hasTrivialKill(SI->getTrueValue());
1148 unsigned FalseReg = getRegForValue(SI->getFalseValue());
1149 bool FalseIsKill = hasTrivialKill(SI->getFalseValue());
1151 if (!TrueReg || !FalseReg)
1154 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
1155 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SelectOpc),
1157 .addReg(TrueReg, getKillRegState(TrueIsKill))
1158 .addReg(FalseReg, getKillRegState(FalseIsKill))
1161 UpdateValueMap(I, ResultReg);
1165 bool AArch64FastISel::SelectFPExt(const Instruction *I) {
1166 Value *V = I->getOperand(0);
1167 if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())
1170 unsigned Op = getRegForValue(V);
1174 unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
1175 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
1176 ResultReg).addReg(Op);
1177 UpdateValueMap(I, ResultReg);
1181 bool AArch64FastISel::SelectFPTrunc(const Instruction *I) {
1182 Value *V = I->getOperand(0);
1183 if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())
1186 unsigned Op = getRegForValue(V);
1190 unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
1191 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
1192 ResultReg).addReg(Op);
1193 UpdateValueMap(I, ResultReg);
1197 // FPToUI and FPToSI
1198 bool AArch64FastISel::SelectFPToInt(const Instruction *I, bool Signed) {
1200 if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
1203 unsigned SrcReg = getRegForValue(I->getOperand(0));
1207 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType(), true);
1208 if (SrcVT == MVT::f128)
1212 if (SrcVT == MVT::f64) {
1214 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr;
1216 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr;
1219 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr;
1221 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;
1223 unsigned ResultReg = createResultReg(
1224 DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
1225 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
1227 UpdateValueMap(I, ResultReg);
1231 bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) {
1233 if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
1235 assert ((DestVT == MVT::f32 || DestVT == MVT::f64) &&
1236 "Unexpected value type.");
1238 unsigned SrcReg = getRegForValue(I->getOperand(0));
1242 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType(), true);
1244 // Handle sign-extension.
1245 if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {
1247 EmitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);
1252 MRI.constrainRegClass(SrcReg, SrcVT == MVT::i64 ? &AArch64::GPR64RegClass
1253 : &AArch64::GPR32RegClass);
1256 if (SrcVT == MVT::i64) {
1258 Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri;
1260 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri;
1263 Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri;
1265 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
1268 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
1269 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
1271 UpdateValueMap(I, ResultReg);
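// ProcessCallArgs - assign the outgoing arguments with CCAssignFnForCall, emit
// CALLSEQ_START, promote each argument as its location requires, and either
// copy it into its register or store it to its stack slot (with a big-endian
// adjustment for small types).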
1275 bool AArch64FastISel::ProcessCallArgs(CallLoweringInfo &CLI,
1276 SmallVectorImpl<MVT> &OutVTs,
1277 unsigned &NumBytes) {
1278 CallingConv::ID CC = CLI.CallConv;
1279 SmallVector<CCValAssign, 16> ArgLocs;
1280 CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
1281 CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
1283 // Get a count of how many bytes are to be pushed on the stack.
1284 NumBytes = CCInfo.getNextStackOffset();
1286 // Issue CALLSEQ_START
1287 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
1288 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
1291 // Process the args.
1292 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1293 CCValAssign &VA = ArgLocs[i];
1294 const Value *ArgVal = CLI.OutVals[VA.getValNo()];
1295 MVT ArgVT = OutVTs[VA.getValNo()];
1297 unsigned ArgReg = getRegForValue(ArgVal);
1301 // Handle arg promotion: SExt, ZExt, AExt.
1302 switch (VA.getLocInfo()) {
1303 case CCValAssign::Full:
1305 case CCValAssign::SExt: {
1306 MVT DestVT = VA.getLocVT();
1308 ArgReg = EmitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
1313 case CCValAssign::AExt:
1314 // Intentional fall-through.
1315 case CCValAssign::ZExt: {
1316 MVT DestVT = VA.getLocVT();
1318 ArgReg = EmitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
1324 llvm_unreachable("Unknown arg promotion!");
1327 // Now copy/store arg to correct locations.
1328 if (VA.isRegLoc() && !VA.needsCustom()) {
1329 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1330 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
1331 CLI.OutRegs.push_back(VA.getLocReg());
1332 } else if (VA.needsCustom()) {
1333 // FIXME: Handle custom args.
1336 assert(VA.isMemLoc() && "Assuming store on stack.");
1338 // Don't emit stores for undef values.
1339 if (isa<UndefValue>(ArgVal))
1342 // Need to store on the stack.
1343 unsigned ArgSize = (ArgVT.getSizeInBits() + 7) / 8;
1345 unsigned BEAlign = 0;
1346 if (ArgSize < 8 && !Subtarget->isLittleEndian())
1347 BEAlign = 8 - ArgSize;
1350 Addr.setKind(Address::RegBase);
1351 Addr.setReg(AArch64::SP);
1352 Addr.setOffset(VA.getLocMemOffset() + BEAlign);
1354 if (!EmitStore(ArgVT, ArgReg, Addr))
1361 bool AArch64FastISel::FinishCall(CallLoweringInfo &CLI, MVT RetVT,
1362 unsigned NumBytes) {
1363 CallingConv::ID CC = CLI.CallConv;
1365 // Issue CALLSEQ_END
1366 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
1367 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
1368 .addImm(NumBytes).addImm(0);
1370 // Now the return value.
1371 if (RetVT != MVT::isVoid) {
1372 SmallVector<CCValAssign, 16> RVLocs;
1373 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
1374 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC));
1376 // Only handle a single return value.
1377 if (RVLocs.size() != 1)
1380 // Copy all of the result registers out of their specified physreg.
1381 MVT CopyVT = RVLocs[0].getValVT();
1382 unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
1383 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1384 TII.get(TargetOpcode::COPY), ResultReg)
1385 .addReg(RVLocs[0].getLocReg());
1386 CLI.InRegs.push_back(RVLocs[0].getLocReg());
1388 CLI.ResultReg = ResultReg;
1389 CLI.NumResultRegs = 1;
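// FastLowerCall - fast-path call lowering for simple calls: direct BL or
// indirect BLR under the small code model, and GOT/register-materialized
// targets under the Mach-O large code model.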
1395 bool AArch64FastISel::FastLowerCall(CallLoweringInfo &CLI) {
1396 CallingConv::ID CC = CLI.CallConv;
1397 bool IsVarArg = CLI.IsVarArg;
1398 const Value *Callee = CLI.Callee;
1399 const char *SymName = CLI.SymName;
1401 CodeModel::Model CM = TM.getCodeModel();
1402 // Only support the small and large code model.
1403 if (CM != CodeModel::Small && CM != CodeModel::Large)
1406 // FIXME: Add large code model support for ELF.
1407 if (CM == CodeModel::Large && !Subtarget->isTargetMachO())
1410 // Let SDISel handle vararg functions.
1414 // FIXME: Only handle *simple* calls for now.
1416 if (CLI.RetTy->isVoidTy())
1417 RetVT = MVT::isVoid;
1418 else if (!isTypeLegal(CLI.RetTy, RetVT))
1421 for (auto Flag : CLI.OutFlags)
1422 if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
1425 // Set up the argument vectors.
1426 SmallVector<MVT, 16> OutVTs;
1427 OutVTs.reserve(CLI.OutVals.size());
1429 for (auto *Val : CLI.OutVals) {
1431 if (!isTypeLegal(Val->getType(), VT) &&
1432 !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
1435 // We don't handle vector parameters yet.
1436 if (VT.isVector() || VT.getSizeInBits() > 64)
1439 OutVTs.push_back(VT);
1443 if (!ComputeCallAddress(Callee, Addr))
1446 // Handle the arguments now that we've gotten them.
1448 if (!ProcessCallArgs(CLI, OutVTs, NumBytes))
1452 MachineInstrBuilder MIB;
1453 if (CM == CodeModel::Small) {
1454 unsigned CallOpc = Addr.getReg() ? AArch64::BLR : AArch64::BL;
1455 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
1457 MIB.addExternalSymbol(SymName, 0);
1458 else if (Addr.getGlobalValue())
1459 MIB.addGlobalAddress(Addr.getGlobalValue(), 0, 0);
1460 else if (Addr.getReg())
1461 MIB.addReg(Addr.getReg());
1465 unsigned CallReg = 0;
1467 unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
1468 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
1470 .addExternalSymbol(SymName, AArch64II::MO_GOT | AArch64II::MO_PAGE);
1472 CallReg = createResultReg(&AArch64::GPR64RegClass);
1473 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::LDRXui),
1476 .addExternalSymbol(SymName, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
1478 } else if (Addr.getGlobalValue()) {
1479 CallReg = AArch64MaterializeGV(Addr.getGlobalValue());
1480 } else if (Addr.getReg())
1481 CallReg = Addr.getReg();
1486 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1487 TII.get(AArch64::BLR)).addReg(CallReg);
1490 // Add implicit physical register uses to the call.
1491 for (auto Reg : CLI.OutRegs)
1492 MIB.addReg(Reg, RegState::Implicit);
1494 // Add a register mask with the call-preserved registers.
1495 // Proper defs for return values will be added by setPhysRegsDeadExcept().
1496 MIB.addRegMask(TRI.getCallPreservedMask(CC));
1500 // Finish off the call including any return values.
1501 return FinishCall(CLI, RetVT, NumBytes);
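// IsMemCpySmall - heuristic: the copy is small enough to inline if it can be
// done in at most four aligned memory operations.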
1504 bool AArch64FastISel::IsMemCpySmall(uint64_t Len, unsigned Alignment) {
1506 return Len / Alignment <= 4;
1511 bool AArch64FastISel::TryEmitSmallMemCpy(Address Dest, Address Src,
1512 uint64_t Len, unsigned Alignment) {
1513 // Make sure we don't bloat code by inlining very large memcpy's.
1514 if (!IsMemCpySmall(Len, Alignment))
1517 int64_t UnscaledOffset = 0;
1518 Address OrigDest = Dest;
1519 Address OrigSrc = Src;
1523 if (!Alignment || Alignment >= 8) {
1534 // Bound based on alignment.
1535 if (Len >= 4 && Alignment == 4)
1537 else if (Len >= 2 && Alignment == 2)
1546 RV = EmitLoad(VT, ResultReg, Src);
1550 RV = EmitStore(VT, ResultReg, Dest);
1554 int64_t Size = VT.getSizeInBits() / 8;
1556 UnscaledOffset += Size;
1558 // We need to recompute the unscaled offset for each iteration.
1559 Dest.setOffset(OrigDest.getOffset() + UnscaledOffset);
1560 Src.setOffset(OrigSrc.getOffset() + UnscaledOffset);
1566 /// \brief Check if it is possible to fold the condition from the XALU intrinsic
1567 /// into the user. The condition code will only be updated on success.
1568 bool AArch64FastISel::foldXALUIntrinsic(AArch64CC::CondCode &CC,
1569 const Instruction *I,
1570 const Value *Cond) {
1571 if (!isa<ExtractValueInst>(Cond))
1574 const auto *EV = cast<ExtractValueInst>(Cond);
1575 if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
1578 const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
1580 const Function *Callee = II->getCalledFunction();
1582 cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
1583 if (!isTypeLegal(RetTy, RetVT))
1586 if (RetVT != MVT::i32 && RetVT != MVT::i64)
1589 AArch64CC::CondCode TmpCC;
1590 switch (II->getIntrinsicID()) {
1591 default: return false;
1592 case Intrinsic::sadd_with_overflow:
1593 case Intrinsic::ssub_with_overflow: TmpCC = AArch64CC::VS; break;
1594 case Intrinsic::uadd_with_overflow: TmpCC = AArch64CC::HS; break;
1595 case Intrinsic::usub_with_overflow: TmpCC = AArch64CC::LO; break;
1596 case Intrinsic::smul_with_overflow:
1597 case Intrinsic::umul_with_overflow: TmpCC = AArch64CC::NE; break;
1600 // Check if both instructions are in the same basic block.
1601 if (II->getParent() != I->getParent())
1604 // Make sure nothing is in the way
1605 BasicBlock::const_iterator Start = I;
1606 BasicBlock::const_iterator End = II;
1607 for (auto Itr = std::prev(Start); Itr != End; --Itr) {
1608 // We only expect extractvalue instructions between the intrinsic and the
1609 // instruction to be selected.
1610 if (!isa<ExtractValueInst>(Itr))
1613 // Check that the extractvalue operand comes from the intrinsic.
1614 const auto *EVI = cast<ExtractValueInst>(Itr);
1615 if (EVI->getAggregateOperand() != II)
1623 bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
1624 // FIXME: Handle more intrinsics.
1625 switch (II->getIntrinsicID()) {
1626 default: return false;
1627 case Intrinsic::frameaddress: {
1628 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
1629 MFI->setFrameAddressIsTaken(true);
1631 const AArch64RegisterInfo *RegInfo =
1632 static_cast<const AArch64RegisterInfo *>(TM.getRegisterInfo());
1633 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
1634 unsigned SrcReg = FramePtr;
1636 // Recursively load frame address
1642 unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
1644 DestReg = createResultReg(&AArch64::GPR64RegClass);
1645 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1646 TII.get(AArch64::LDRXui), DestReg)
1647 .addReg(SrcReg).addImm(0);
1651 UpdateValueMap(II, SrcReg);
1654 case Intrinsic::memcpy:
1655 case Intrinsic::memmove: {
1656 const auto *MTI = cast<MemTransferInst>(II);
1657 // Don't handle volatile.
1658 if (MTI->isVolatile())
1661 // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
1662 // we would emit dead code because we don't currently handle memmoves.
1663 bool IsMemCpy = (II->getIntrinsicID() == Intrinsic::memcpy);
1664 if (isa<ConstantInt>(MTI->getLength()) && IsMemCpy) {
1665 // Small memcpy's are common enough that we want to do them without a call
1667 uint64_t Len = cast<ConstantInt>(MTI->getLength())->getZExtValue();
1668 unsigned Alignment = MTI->getAlignment();
1669 if (IsMemCpySmall(Len, Alignment)) {
1671 if (!ComputeAddress(MTI->getRawDest(), Dest) ||
1672 !ComputeAddress(MTI->getRawSource(), Src))
1674 if (TryEmitSmallMemCpy(Dest, Src, Len, Alignment))
1679 if (!MTI->getLength()->getType()->isIntegerTy(64))
1682 if (MTI->getSourceAddressSpace() > 255 || MTI->getDestAddressSpace() > 255)
1683 // Fast instruction selection doesn't support the special
1687 const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
1688 return LowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
1690 case Intrinsic::memset: {
1691 const MemSetInst *MSI = cast<MemSetInst>(II);
1692 // Don't handle volatile.
1693 if (MSI->isVolatile())
1696 if (!MSI->getLength()->getType()->isIntegerTy(64))
1699 if (MSI->getDestAddressSpace() > 255)
1700 // Fast instruction selection doesn't support the special
1704 return LowerCallTo(II, "memset", II->getNumArgOperands() - 2);
1706 case Intrinsic::trap: {
1707 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
1711 case Intrinsic::sqrt: {
1712 Type *RetTy = II->getCalledFunction()->getReturnType();
1715 if (!isTypeLegal(RetTy, VT))
1718 unsigned Op0Reg = getRegForValue(II->getOperand(0));
1721 bool Op0IsKill = hasTrivialKill(II->getOperand(0));
1723 unsigned ResultReg = FastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
1727 UpdateValueMap(II, ResultReg);
1730 case Intrinsic::sadd_with_overflow:
1731 case Intrinsic::uadd_with_overflow:
1732 case Intrinsic::ssub_with_overflow:
1733 case Intrinsic::usub_with_overflow:
1734 case Intrinsic::smul_with_overflow:
1735 case Intrinsic::umul_with_overflow: {
1736 // This implements the basic lowering of the xalu with overflow intrinsics.
1737 const Function *Callee = II->getCalledFunction();
1738 auto *Ty = cast<StructType>(Callee->getReturnType());
1739 Type *RetTy = Ty->getTypeAtIndex(0U);
1740 Type *CondTy = Ty->getTypeAtIndex(1);
1743 if (!isTypeLegal(RetTy, VT))
1746 if (VT != MVT::i32 && VT != MVT::i64)
1749 const Value *LHS = II->getArgOperand(0);
1750 const Value *RHS = II->getArgOperand(1);
1751 // Canonicalize immediate to the RHS.
1752 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
1753 isCommutativeIntrinsic(II))
1754 std::swap(LHS, RHS);
1756 unsigned LHSReg = getRegForValue(LHS);
1759 bool LHSIsKill = hasTrivialKill(LHS);
1761 unsigned RHSReg = 0;
1762 bool RHSIsKill = false;
1764 if (!isa<ConstantInt>(RHS)) {
1765 RHSReg = getRegForValue(RHS);
1768 RHSIsKill = hasTrivialKill(RHS);
1773 unsigned MulReg = 0;
1774 AArch64CC::CondCode CC = AArch64CC::Invalid;
1775 bool Is64Bit = VT == MVT::i64;
1776 switch (II->getIntrinsicID()) {
1777 default: llvm_unreachable("Unexpected intrinsic!");
1778 case Intrinsic::sadd_with_overflow:
1780 Opc = Is64Bit ? AArch64::ADDSXri : AArch64::ADDSWri;
1782 Opc = Is64Bit ? AArch64::ADDSXrr : AArch64::ADDSWrr;
1785 case Intrinsic::uadd_with_overflow:
1787 Opc = Is64Bit ? AArch64::ADDSXri : AArch64::ADDSWri;
1789 Opc = Is64Bit ? AArch64::ADDSXrr : AArch64::ADDSWrr;
1792 case Intrinsic::ssub_with_overflow:
1794 Opc = Is64Bit ? AArch64::SUBSXri : AArch64::SUBSWri;
1796 Opc = Is64Bit ? AArch64::SUBSXrr : AArch64::SUBSWrr;
1799 case Intrinsic::usub_with_overflow:
1801 Opc = Is64Bit ? AArch64::SUBSXri : AArch64::SUBSWri;
1803 Opc = Is64Bit ? AArch64::SUBSXrr : AArch64::SUBSWrr;
1806 case Intrinsic::smul_with_overflow: {
1809 RHSReg = getRegForValue(RHS);
1812 RHSIsKill = hasTrivialKill(RHS);
1814 if (VT == MVT::i32) {
1815 MulReg = Emit_SMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
1816 unsigned ShiftReg = Emit_LSR_ri(MVT::i64, MulReg, false, 32);
1817 MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
1819 ShiftReg = FastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
1821 unsigned CmpReg = createResultReg(TLI.getRegClassFor(VT));
1822 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1823 TII.get(AArch64::SUBSWrs), CmpReg)
1824 .addReg(ShiftReg, getKillRegState(true))
1825 .addReg(MulReg, getKillRegState(false))
1826 .addImm(159); // 159 <-> asr #31
1828 assert(VT == MVT::i64 && "Unexpected value type.");
1829 MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
1830 unsigned SMULHReg = FastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
1832 unsigned CmpReg = createResultReg(TLI.getRegClassFor(VT));
1833 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1834 TII.get(AArch64::SUBSXrs), CmpReg)
1835 .addReg(SMULHReg, getKillRegState(true))
1836 .addReg(MulReg, getKillRegState(false))
1837 .addImm(191); // 191 <-> asr #63
1841 case Intrinsic::umul_with_overflow: {
1844 RHSReg = getRegForValue(RHS);
1847 RHSIsKill = hasTrivialKill(RHS);
1849 if (VT == MVT::i32) {
1850 MulReg = Emit_UMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
1851 unsigned CmpReg = createResultReg(TLI.getRegClassFor(MVT::i64));
1852 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1853 TII.get(AArch64::SUBSXrs), CmpReg)
1854 .addReg(AArch64::XZR, getKillRegState(true))
1855 .addReg(MulReg, getKillRegState(false))
1856 .addImm(96); // 96 <-> lsr #32
1857 MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
1860 assert(VT == MVT::i64 && "Unexpected value type.");
1861 MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
1862 unsigned UMULHReg = FastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
1864 unsigned CmpReg = createResultReg(TLI.getRegClassFor(VT));
1865 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1866 TII.get(AArch64::SUBSXrr), CmpReg)
1867 .addReg(AArch64::XZR, getKillRegState(true))
1868 .addReg(UMULHReg, getKillRegState(false));
1874 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
1876 MachineInstrBuilder MIB;
1877 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
1879 .addReg(LHSReg, getKillRegState(LHSIsKill));
1881 MIB.addImm(cast<ConstantInt>(RHS)->getZExtValue());
1883 MIB.addReg(RHSReg, getKillRegState(RHSIsKill));
1886 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1887 TII.get(TargetOpcode::COPY), ResultReg)
1890 unsigned ResultReg2 = FuncInfo.CreateRegs(CondTy);
1891 assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
1892 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
1894 .addReg(AArch64::WZR, getKillRegState(true))
1895 .addReg(AArch64::WZR, getKillRegState(true))
1896 .addImm(getInvertedCondCode(CC));
1898 UpdateValueMap(II, ResultReg, 2);
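// SelectRet - lower a return with at most one value: copy it into its ABI
// register (extending i1/i8/i16 as the return attributes require) and emit
// RET_ReallyLR with the return register as an implicit use.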
1905 bool AArch64FastISel::SelectRet(const Instruction *I) {
1906 const ReturnInst *Ret = cast<ReturnInst>(I);
1907 const Function &F = *I->getParent()->getParent();
1909 if (!FuncInfo.CanLowerReturn)
1915 // Build a list of return value registers.
1916 SmallVector<unsigned, 4> RetRegs;
1918 if (Ret->getNumOperands() > 0) {
1919 CallingConv::ID CC = F.getCallingConv();
1920 SmallVector<ISD::OutputArg, 4> Outs;
1921 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
1923 // Analyze operands of the call, assigning locations to each operand.
1924 SmallVector<CCValAssign, 16> ValLocs;
1925 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
1927 CCAssignFn *RetCC = CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
1928 : RetCC_AArch64_AAPCS;
1929 CCInfo.AnalyzeReturn(Outs, RetCC);
1931 // Only handle a single return value for now.
1932 if (ValLocs.size() != 1)
1935 CCValAssign &VA = ValLocs[0];
1936 const Value *RV = Ret->getOperand(0);
1938 // Don't bother handling odd stuff for now.
1939 if (VA.getLocInfo() != CCValAssign::Full)
1941 // Only handle register returns for now.
1944 unsigned Reg = getRegForValue(RV);
1948 unsigned SrcReg = Reg + VA.getValNo();
1949 unsigned DestReg = VA.getLocReg();
1950 // Avoid a cross-class copy. This is very unlikely.
1951 if (!MRI.getRegClass(SrcReg)->contains(DestReg))
1954 EVT RVEVT = TLI.getValueType(RV->getType());
1955 if (!RVEVT.isSimple())
1958 // Vectors (of > 1 lane) in big endian need tricky handling.
1959 if (RVEVT.isVector() && RVEVT.getVectorNumElements() > 1)
1962 MVT RVVT = RVEVT.getSimpleVT();
1963 if (RVVT == MVT::f128)
1965 MVT DestVT = VA.getValVT();
1966 // Special handling for extended integers.
1967 if (RVVT != DestVT) {
1968 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
1971 if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
1974 bool isZExt = Outs[0].Flags.isZExt();
1975 SrcReg = EmitIntExt(RVVT, SrcReg, DestVT, isZExt);
1981 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1982 TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
1984 // Add register to return instruction.
1985 RetRegs.push_back(VA.getLocReg());
1988 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1989 TII.get(AArch64::RET_ReallyLR));
1990 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
1991 MIB.addReg(RetRegs[i], RegState::Implicit);
1995 bool AArch64FastISel::SelectTrunc(const Instruction *I) {
1996 Type *DestTy = I->getType();
1997 Value *Op = I->getOperand(0);
1998 Type *SrcTy = Op->getType();
2000 EVT SrcEVT = TLI.getValueType(SrcTy, true);
2001 EVT DestEVT = TLI.getValueType(DestTy, true);
2002 if (!SrcEVT.isSimple())
2004 if (!DestEVT.isSimple())
2007 MVT SrcVT = SrcEVT.getSimpleVT();
2008 MVT DestVT = DestEVT.getSimpleVT();
2010 if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
2013 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 &&
2017 unsigned SrcReg = getRegForValue(Op);
2021 // If we're truncating from i64 to a smaller non-legal type then generate an
2022 // AND. Otherwise, we know the high bits are undefined and a truncate doesn't
2023 // generate any code.
2024 if (SrcVT == MVT::i64) {
2026 switch (DestVT.SimpleTy) {
2028 // Trunc i64 to i32 is handled by the target-independent fast-isel.
2040 // Issue an extract_subreg to get the lower 32-bits.
2041 unsigned Reg32 = FastEmitInst_extractsubreg(MVT::i32, SrcReg, /*Kill=*/true,
2043 MRI.constrainRegClass(Reg32, &AArch64::GPR32RegClass);
2044 // Create the AND instruction which performs the actual truncation.
2045 unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
2046 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
2049 .addImm(AArch64_AM::encodeLogicalImmediate(Mask, 32));
2053 UpdateValueMap(I, SrcReg);
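// Emiti1Ext - extend an i1 value: zero-extension is an ANDWri with #1 (plus a
// SUBREG_TO_REG for a 64-bit result); sign-extension to 32 bits uses SBFMWri
// (the i64 case is not handled yet).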
2057 unsigned AArch64FastISel::Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt) {
2058 assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 ||
2059 DestVT == MVT::i64) &&
2060 "Unexpected value type.");
2061 // Handle i8 and i16 as i32.
2062 if (DestVT == MVT::i8 || DestVT == MVT::i16)
2066 MRI.constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
2067 unsigned ResultReg = createResultReg(&AArch64::GPR32spRegClass);
2068 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
2071 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
2073 if (DestVT == MVT::i64) {
2074 // We're ZExt i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the
2075 // upper 32 bits. Emit a SUBREG_TO_REG to extend from Wd to Xd.
2076 unsigned Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
2077 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2078 TII.get(AArch64::SUBREG_TO_REG), Reg64)
2081 .addImm(AArch64::sub_32);
2086 if (DestVT == MVT::i64) {
2087 // FIXME: We're SExt i1 to i64.
2090 unsigned ResultReg = createResultReg(&AArch64::GPR32RegClass);
2091 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SBFMWri),
2100 unsigned AArch64FastISel::Emit_MUL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
2101 unsigned Op1, bool Op1IsKill) {
2103 switch (RetVT.SimpleTy) {
2109 Opc = AArch64::MADDWrrr; ZReg = AArch64::WZR; break;
2111 Opc = AArch64::MADDXrrr; ZReg = AArch64::XZR; break;
2114 // Create the base instruction, then add the operands.
2115 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2116 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
2117 .addReg(Op0, getKillRegState(Op0IsKill))
2118 .addReg(Op1, getKillRegState(Op1IsKill))
2119 .addReg(ZReg, getKillRegState(true));
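// Emit_SMULL_rr - 32x32->64-bit signed multiply, emitted as SMADDL with XZR as
// the accumulator.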
2124 unsigned AArch64FastISel::Emit_SMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
2125 unsigned Op1, bool Op1IsKill) {
2126 if (RetVT != MVT::i64)
  // Create the base instruction, then add the operands.
  unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SMADDLrrr),
          ResultReg)
      .addReg(Op0, getKillRegState(Op0IsKill))
      .addReg(Op1, getKillRegState(Op1IsKill))
      .addReg(AArch64::XZR, getKillRegState(true));

  return ResultReg;
}

unsigned AArch64FastISel::Emit_UMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                        unsigned Op1, bool Op1IsKill) {
  if (RetVT != MVT::i64)
    return 0;

  // Create the base instruction, then add the operands.
  unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::UMADDLrrr),
          ResultReg)
      .addReg(Op0, getKillRegState(Op0IsKill))
      .addReg(Op1, getKillRegState(Op1IsKill))
      .addReg(AArch64::XZR, getKillRegState(true));

  return ResultReg;
}

unsigned AArch64FastISel::Emit_LSL_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                      uint64_t Shift) {
  unsigned Opc, ImmR, ImmS;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    RetVT = MVT::i32;
    Opc = AArch64::UBFMWri; ImmR = -Shift % 32; ImmS = 31 - Shift; break;
  case MVT::i64:
    Opc = AArch64::UBFMXri; ImmR = -Shift % 64; ImmS = 63 - Shift; break;
  }

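  // LSL by an immediate is an alias of UBFM:
  //   "lsl Wd, Wn, #s" == "ubfm Wd, Wn, #((32 - s) % 32), #(31 - s)"
  // e.g. a left shift by 4 of a 32-bit value is "ubfm wd, wn, #28, #27".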
  return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, ImmR,
                          ImmS);
}

unsigned AArch64FastISel::Emit_LSR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                      uint64_t Shift) {
  unsigned Opc, ImmS;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    RetVT = MVT::i32;
    Opc = AArch64::UBFMWri; ImmS = 31; break;
  case MVT::i64:
    Opc = AArch64::UBFMXri; ImmS = 63; break;
  }

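  // LSR by an immediate also maps onto UBFM: "lsr Wd, Wn, #s" ==
  // "ubfm Wd, Wn, #s, #31" (#63 for the 64-bit form), so the shift amount is
  // passed directly as ImmR below.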
  return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, Shift,
                          ImmS);
}

unsigned AArch64FastISel::Emit_ASR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                      uint64_t Shift) {
  unsigned Opc, ImmS;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    RetVT = MVT::i32;
    Opc = AArch64::SBFMWri; ImmS = 31; break;
  case MVT::i64:
    Opc = AArch64::SBFMXri; ImmS = 63; break;
  }

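  // ASR is the same pattern with the signed form: "asr Wd, Wn, #s" ==
  // "sbfm Wd, Wn, #s, #31", which shifts copies of the sign bit into the
  // vacated high bits.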
  return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, Shift,
                          ImmS);
}

unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                     bool isZExt) {
  assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?");

  // FastISel does not have plumbing to deal with extensions where the SrcVT or
  // DestVT are odd things, so test to make sure that they are both types we can
  // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
  // bail out to SelectionDAG.
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) &&
       (DestVT != MVT::i32) && (DestVT != MVT::i64)) ||
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) &&
       (SrcVT != MVT::i16) && (SrcVT != MVT::i32)))
    return 0;

  unsigned Opc;
  unsigned Imm = 0;

  switch (SrcVT.SimpleTy) {
  default:
    return 0;
  case MVT::i1:
    return Emiti1Ext(SrcReg, DestVT, isZExt);
  case MVT::i8:
    if (DestVT == MVT::i64)
      Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = isZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    Imm = 7;
    break;
  case MVT::i16:
    if (DestVT == MVT::i64)
      Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = isZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    Imm = 15;
    break;
  case MVT::i32:
    assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?");
    Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    Imm = 31;
    break;
  }

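  // With ImmR = 0 and ImmS = Imm, UBFM/SBFM extracts bits [Imm:0] and zero- or
  // sign-extends the result; e.g. a zext from i8 to i32 becomes
  // "ubfm wd, wn, #0, #7", i.e. "uxtb wd, wn".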
  // Handle i8 and i16 as i32.
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
    DestVT = MVT::i32;
  else if (DestVT == MVT::i64) {
    unsigned Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), Src64)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(AArch64::sub_32);
    SrcReg = Src64;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(SrcReg)
      .addImm(0)
      .addImm(Imm);

  return ResultReg;
}

bool AArch64FastISel::SelectIntExt(const Instruction *I) {
  // On AArch64, in general, integer casts don't involve legal types; this code
  // handles promotable integers. The high bits for a type smaller than
  // the register size are assumed to be undefined.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg)
    return false;

  EVT SrcEVT = TLI.getValueType(SrcTy, true);
  EVT DestEVT = TLI.getValueType(DestTy, true);
  if (!SrcEVT.isSimple())
    return false;
  if (!DestEVT.isSimple())
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = EmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0)
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::SelectRem(const Instruction *I, unsigned ISDOpcode) {
  EVT DestEVT = TLI.getValueType(I->getType(), true);
  if (!DestEVT.isSimple())
    return false;

  MVT DestVT = DestEVT.getSimpleVT();
  if (DestVT != MVT::i64 && DestVT != MVT::i32)
    return false;

  unsigned DivOpc;
  bool is64bit = (DestVT == MVT::i64);
  switch (ISDOpcode) {
  default:
    return false;
  case ISD::SREM:
    DivOpc = is64bit ? AArch64::SDIVXr : AArch64::SDIVWr;
    break;
  case ISD::UREM:
    DivOpc = is64bit ? AArch64::UDIVXr : AArch64::UDIVWr;
    break;
  }
  unsigned MSubOpc = is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;

  unsigned Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src1Reg)
    return false;

  unsigned QuotReg = createResultReg(TLI.getRegClassFor(DestVT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(DivOpc), QuotReg)
      .addReg(Src0Reg)
      .addReg(Src1Reg);
  // The remainder is computed as numerator - (quotient * denominator) using
  // the MSUB instruction.
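  // MSUB Rd, Rn, Rm, Ra computes Ra - Rn * Rm, so the operand order
  // (QuotReg, Src1Reg, Src0Reg) yields Src0 - (Src0 / Src1) * Src1;
  // e.g. for 7 srem 3 the quotient is 2 and the result is 7 - 2 * 3 = 1.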
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MSubOpc), ResultReg)
      .addReg(QuotReg)
      .addReg(Src1Reg)
      .addReg(Src0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::SelectMul(const Instruction *I) {
  EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType(), true);
  if (!SrcEVT.isSimple())
    return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  // Must be a simple value type. Don't handle vectors.
  if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
      SrcVT != MVT::i8)
    return false;

  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;
  bool Src0IsKill = hasTrivialKill(I->getOperand(0));

  unsigned Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src1Reg)
    return false;
  bool Src1IsKill = hasTrivialKill(I->getOperand(1));

  unsigned ResultReg =
      Emit_MUL_rr(SrcVT, Src0Reg, Src0IsKill, Src1Reg, Src1IsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::SelectShift(const Instruction *I, bool IsLeftShift,
                                  bool IsArithmetic) {
  EVT RetEVT = TLI.getValueType(I->getType(), true);
  if (!RetEVT.isSimple())
    return false;
  MVT RetVT = RetEVT.getSimpleVT();

  // Only constant shift amounts are handled here; variable shifts are left to
  // SelectionDAG.
  if (!isa<ConstantInt>(I->getOperand(1)))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (!Op0Reg)
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  uint64_t ShiftVal = cast<ConstantInt>(I->getOperand(1))->getZExtValue();

  unsigned ResultReg;
  if (IsLeftShift)
    ResultReg = Emit_LSL_ri(RetVT, Op0Reg, Op0IsKill, ShiftVal);
  else if (IsArithmetic)
    ResultReg = Emit_ASR_ri(RetVT, Op0Reg, Op0IsKill, ShiftVal);
  else
    ResultReg = Emit_LSR_ri(RetVT, Op0Reg, Op0IsKill, ShiftVal);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
    return SelectLoad(I);
  case Instruction::Store:
    return SelectStore(I);
  case Instruction::Br:
    return SelectBranch(I);
  case Instruction::IndirectBr:
    return SelectIndirectBr(I);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return SelectCmp(I);
  case Instruction::Select:
    return SelectSelect(I);
  case Instruction::FPExt:
    return SelectFPExt(I);
  case Instruction::FPTrunc:
    return SelectFPTrunc(I);
  case Instruction::FPToSI:
    return SelectFPToInt(I, /*Signed=*/true);
  case Instruction::FPToUI:
    return SelectFPToInt(I, /*Signed=*/false);
  case Instruction::SIToFP:
    return SelectIntToFP(I, /*Signed=*/true);
  case Instruction::UIToFP:
    return SelectIntToFP(I, /*Signed=*/false);
  case Instruction::SRem:
    return SelectRem(I, ISD::SREM);
  case Instruction::URem:
    return SelectRem(I, ISD::UREM);
  case Instruction::Ret:
    return SelectRet(I);
  case Instruction::Trunc:
    return SelectTrunc(I);
  case Instruction::ZExt:
  case Instruction::SExt:
    return SelectIntExt(I);

  // FIXME: All of these should really be handled by the target-independent
  // selector -> improve FastISel tblgen.
  case Instruction::Mul:
    return SelectMul(I);
  case Instruction::Shl:
    return SelectShift(I, /*IsLeftShift=*/true, /*IsArithmetic=*/false);
  case Instruction::LShr:
    return SelectShift(I, /*IsLeftShift=*/false, /*IsArithmetic=*/false);
  case Instruction::AShr:
    return SelectShift(I, /*IsLeftShift=*/false, /*IsArithmetic=*/true);
  }
  return false;
  // Silence warnings.
  (void)&CC_AArch64_DarwinPCS_VarArg;
}

llvm::FastISel *AArch64::createFastISel(FunctionLoweringInfo &funcInfo,
                                        const TargetLibraryInfo *libInfo) {
  return new AArch64FastISel(funcInfo, libInfo);
}