//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include <algorithm> // for std::min in estimateRSStackLimit

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"
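// (GET_INSTRINFO_CTOR_DTOR makes the .inc above also emit the definitions of
// the TableGen'erated AArch64GenInstrInfo constructor and destructor.)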

using namespace llvm;

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
  : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
    Subtarget(STI) {}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  unsigned Opc = 0;
  unsigned ZeroReg = 0;
  if (DestReg == AArch64::XSP || SrcReg == AArch64::XSP) {
    // E.g. ADD xDst, xsp, #0 (, lsl #0)
    BuildMI(MBB, I, DL, get(AArch64::ADDxxi_lsl0_s), DestReg)
      .addReg(SrcReg)
      .addImm(0);
    return;
  } else if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
    // E.g. ADD wDST, wsp, #0 (, lsl #0)
    BuildMI(MBB, I, DL, get(AArch64::ADDwwi_lsl0_s), DestReg)
      .addReg(SrcReg)
      .addImm(0);
    return;
  } else if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg));
    // E.g. MSR NZCV, xSrc
    BuildMI(MBB, I, DL, get(AArch64::MSRix))
      .addImm(A64SysReg::NZCV)
      .addReg(SrcReg);
  } else if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg));
    // E.g. MRS xDST, NZCV
    BuildMI(MBB, I, DL, get(AArch64::MRSxi), DestReg)
      .addImm(A64SysReg::NZCV);
  } else if (AArch64::GPR64RegClass.contains(DestReg)) {
    if (AArch64::GPR64RegClass.contains(SrcReg)) {
      Opc = AArch64::ORRxxx_lsl;
      ZeroReg = AArch64::XZR;
    } else {
      assert(AArch64::FPR64RegClass.contains(SrcReg));
      BuildMI(MBB, I, DL, get(AArch64::FMOVxd), DestReg)
        .addReg(SrcReg);
      return;
    }
  } else if (AArch64::GPR32RegClass.contains(DestReg)) {
    if (AArch64::GPR32RegClass.contains(SrcReg)) {
      Opc = AArch64::ORRwww_lsl;
      ZeroReg = AArch64::WZR;
    } else {
      assert(AArch64::FPR32RegClass.contains(SrcReg));
      BuildMI(MBB, I, DL, get(AArch64::FMOVws), DestReg)
        .addReg(SrcReg);
      return;
    }
  } else if (AArch64::FPR32RegClass.contains(DestReg)) {
    if (AArch64::FPR32RegClass.contains(SrcReg)) {
      BuildMI(MBB, I, DL, get(AArch64::FMOVss), DestReg)
        .addReg(SrcReg);
      return;
    } else {
      assert(AArch64::GPR32RegClass.contains(SrcReg));
      BuildMI(MBB, I, DL, get(AArch64::FMOVsw), DestReg)
        .addReg(SrcReg);
      return;
    }
  } else if (AArch64::FPR64RegClass.contains(DestReg)) {
    if (AArch64::FPR64RegClass.contains(SrcReg)) {
      BuildMI(MBB, I, DL, get(AArch64::FMOVdd), DestReg)
        .addReg(SrcReg);
      return;
    } else {
      assert(AArch64::GPR64RegClass.contains(SrcReg));
      BuildMI(MBB, I, DL, get(AArch64::FMOVdx), DestReg)
        .addReg(SrcReg);
      return;
    }
  } else if (AArch64::FPR128RegClass.contains(DestReg)) {
    assert(AArch64::FPR128RegClass.contains(SrcReg));

    // If NEON is enabled, we use ORR to implement this copy.
    // If NEON isn't available, emit STR and LDR to handle this.
    if (getSubTarget().hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRvvv_16B), DestReg)
        .addReg(SrcReg)
        .addReg(SrcReg);
      return;
    } else {
      BuildMI(MBB, I, DL, get(AArch64::LSFP128_PreInd_STR), AArch64::XSP)
        .addReg(SrcReg)
        .addReg(AArch64::XSP)
        .addImm(0x1ff & -16);
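      // (The pre-index immediate is a signed 9-bit field, so "0x1ff & -16"
      // is simply the two's-complement encoding of #-16 within that field.)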

      BuildMI(MBB, I, DL, get(AArch64::LSFP128_PostInd_LDR), DestReg)
        .addReg(AArch64::XSP, RegState::Define)
        .addReg(AArch64::XSP)
        .addImm(16);
      return;
    }
  } else {
    CopyPhysRegTuple(MBB, I, DL, DestReg, SrcReg);
    return;
  }

  // E.g. ORR xDst, xzr, xSrc, lsl #0
  BuildMI(MBB, I, DL, get(Opc), DestReg)
    .addReg(ZeroReg)
    .addReg(SrcReg)
    .addImm(0);
}

void AArch64InstrInfo::CopyPhysRegTuple(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        DebugLoc DL, unsigned DestReg,
                                        unsigned SrcReg) const {
  unsigned SubRegs;
  bool IsQRegs;
  if (AArch64::DPairRegClass.contains(DestReg, SrcReg)) {
    SubRegs = 2;
    IsQRegs = false;
  } else if (AArch64::DTripleRegClass.contains(DestReg, SrcReg)) {
    SubRegs = 3;
    IsQRegs = false;
  } else if (AArch64::DQuadRegClass.contains(DestReg, SrcReg)) {
    SubRegs = 4;
    IsQRegs = false;
  } else if (AArch64::QPairRegClass.contains(DestReg, SrcReg)) {
    SubRegs = 2;
    IsQRegs = true;
  } else if (AArch64::QTripleRegClass.contains(DestReg, SrcReg)) {
    SubRegs = 3;
    IsQRegs = true;
  } else if (AArch64::QQuadRegClass.contains(DestReg, SrcReg)) {
    SubRegs = 4;
    IsQRegs = true;
  } else
    llvm_unreachable("Unknown register class");

  unsigned BeginIdx = IsQRegs ? AArch64::qsub_0 : AArch64::dsub_0;
  int Spacing = 1;
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  // Copy register tuples backward when the first Dest reg overlaps
  // with SrcReg.
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + (SubRegs - 1);
    Spacing = -Spacing;
  }
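  // (For instance, copying D0_D1 into D1_D2 forward would overwrite D1 before
  // its old value has been read into D2; walking backward from the last
  // sub-register avoids the clobber.)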

  unsigned Opc = IsQRegs ? AArch64::ORRvvv_16B : AArch64::ORRvvv_8B;
  for (unsigned i = 0; i != SubRegs; ++i) {
    unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
    unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
    assert(Dst && Src && "Bad sub-register");
    BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst)
      .addReg(Src)
      .addReg(Src);
  }
}

/// Does the Opcode represent a conditional branch that we can remove and
/// re-add at the end of a basic block?
static bool isCondBranch(unsigned Opc) {
  return Opc == AArch64::Bcc || Opc == AArch64::CBZw || Opc == AArch64::CBZx ||
         Opc == AArch64::CBNZw || Opc == AArch64::CBNZx ||
         Opc == AArch64::TBZwii || Opc == AArch64::TBZxii ||
         Opc == AArch64::TBNZwii || Opc == AArch64::TBNZxii;
}

/// Takes apart a given conditional branch MachineInstr (see isCondBranch),
/// setting TBB to the destination basic block and populating the Cond vector
/// with data necessary to recreate the conditional branch at a later
/// date. The first element will be the opcode, and subsequent ones define the
/// conditions being branched on in an instruction-specific manner.
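/// For example, "cbz x0, lbl" yields {CBZx, x0} and "tbnz w3, #4, lbl"
/// yields {TBNZwii, w3, 4}.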
static void classifyCondBranch(MachineInstr *I, MachineBasicBlock *&TBB,
                               SmallVectorImpl<MachineOperand> &Cond) {
  switch (I->getOpcode()) {
  case AArch64::Bcc:
  case AArch64::CBZw:
  case AArch64::CBZx:
  case AArch64::CBNZw:
  case AArch64::CBNZx:
    // These instructions just have one predicate operand in position 0 (either
    // a condition code or a register being compared).
    Cond.push_back(MachineOperand::CreateImm(I->getOpcode()));
    Cond.push_back(I->getOperand(0));
    TBB = I->getOperand(1).getMBB();
    return;
  case AArch64::TBZwii:
  case AArch64::TBZxii:
  case AArch64::TBNZwii:
  case AArch64::TBNZxii:
    // These have two predicate operands: a register and a bit position.
    Cond.push_back(MachineOperand::CreateImm(I->getOpcode()));
    Cond.push_back(I->getOperand(0));
    Cond.push_back(I->getOperand(1));
    TBB = I->getOperand(2).getMBB();
    return;
  default:
    llvm_unreachable("Unknown conditional branch to classify");
  }
}

bool
AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastOpc == AArch64::Bimm) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranch(LastOpc)) {
      classifyCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && LastOpc == AArch64::Bimm) {
    while (SecondLastOpc == AArch64::Bimm) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only remaining terminator is an unconditional
        // branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (LastOpc == AArch64::Bimm) {
    if (SecondLastOpc == AArch64::Bcc) {
      TBB = SecondLastInst->getOperand(1).getMBB();
      Cond.push_back(MachineOperand::CreateImm(AArch64::Bcc));
      Cond.push_back(SecondLastInst->getOperand(0));
      FBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (isCondBranch(SecondLastOpc)) {
      classifyCondBranch(SecondLastInst, TBB, Cond);
      FBB = LastInst->getOperand(0).getMBB();
      return false;
    }
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (SecondLastOpc == AArch64::Bimm && LastOpc == AArch64::Bimm) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  switch (Cond[0].getImm()) {
  case AArch64::Bcc: {
    A64CC::CondCodes CC = static_cast<A64CC::CondCodes>(Cond[1].getImm());
    CC = A64InvertCondCode(CC);
    Cond[1].setImm(CC);
    break;
  }
  case AArch64::CBZw:
    Cond[0].setImm(AArch64::CBNZw);
    break;
  case AArch64::CBZx:
    Cond[0].setImm(AArch64::CBNZx);
    break;
  case AArch64::CBNZw:
    Cond[0].setImm(AArch64::CBZw);
    break;
  case AArch64::CBNZx:
    Cond[0].setImm(AArch64::CBZx);
    break;
  case AArch64::TBZwii:
    Cond[0].setImm(AArch64::TBNZwii);
    break;
  case AArch64::TBZxii:
    Cond[0].setImm(AArch64::TBNZxii);
    break;
  case AArch64::TBNZwii:
    Cond[0].setImm(AArch64::TBZwii);
    break;
  case AArch64::TBNZxii:
    Cond[0].setImm(AArch64::TBZxii);
    break;
  default:
    llvm_unreachable("Unknown branch type");
  }

  return false;
}

unsigned
AArch64InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  if (FBB == 0 && Cond.empty()) {
    BuildMI(&MBB, DL, get(AArch64::Bimm)).addMBB(TBB);
    return 1;
  } else if (FBB == 0) {
    MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
    for (int i = 1, e = Cond.size(); i != e; ++i)
      MIB.addOperand(Cond[i]);
    MIB.addMBB(TBB);
    return 1;
  }

  MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
  for (int i = 1, e = Cond.size(); i != e; ++i)
    MIB.addOperand(Cond[i]);
  MIB.addMBB(TBB);

  BuildMI(&MBB, DL, get(AArch64::Bimm)).addMBB(FBB);
  return 2;
}

unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (I->getOpcode() != AArch64::Bimm && !isCondBranch(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranch(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock &MBB = *MI.getParent();

  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  case AArch64::TLSDESC_BLRx: {
    MachineInstr *NewMI =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), get(AArch64::TLSDESCCALL))
        .addOperand(MI.getOperand(1));
    MI.setDesc(get(AArch64::BLRx));

    llvm::finalizeBundle(MBB, NewMI, *++MBBI);
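    // The pseudo is now a bundle: the TLSDESCCALL marker (which keeps the TLS
    // symbol operand around for relocation purposes) followed by the real
    // BLRx call.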
    return true;
  }
  default:
    break;
  }

  return false;
}

void
AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  MachineMemOperand *MMO
    = MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOStore,
                              MFI.getObjectSize(FrameIdx),
                              Align);

  unsigned StoreOp = 0;
  if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
    switch (RC->getSize()) {
    case 4: StoreOp = AArch64::LS32_STR; break;
    case 8: StoreOp = AArch64::LS64_STR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else if (RC->hasType(MVT::f32) || RC->hasType(MVT::f64) ||
             RC->hasType(MVT::f128)) {
    switch (RC->getSize()) {
    case 4: StoreOp = AArch64::LSFP32_STR; break;
    case 8: StoreOp = AArch64::LSFP64_STR; break;
    case 16: StoreOp = AArch64::LSFP128_STR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else { // This is a super-register class with more than one sub-register.
    if (AArch64::DPairRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x2_8B;
    else if (AArch64::DTripleRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x3_8B;
    else if (AArch64::DQuadRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x4_8B;
    else if (AArch64::QPairRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x2_16B;
    else if (AArch64::QTripleRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x3_16B;
    else if (AArch64::QQuadRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x4_16B;
    else
      llvm_unreachable("Unknown reg class");

    MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp));
    // Vector store has different operands from other store instructions.
    NewMI.addFrameIndex(FrameIdx)
         .addReg(SrcReg, getKillRegState(isKill))
         .addMemOperand(MMO);
    return;
  }

  MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp));
  NewMI.addReg(SrcReg, getKillRegState(isKill))
       .addFrameIndex(FrameIdx)
       .addMemOperand(MMO);
}

void
AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned DestReg, int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  MachineMemOperand *MMO
    = MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOLoad,
                              MFI.getObjectSize(FrameIdx),
                              Align);

  unsigned LoadOp = 0;
  if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
    switch (RC->getSize()) {
    case 4: LoadOp = AArch64::LS32_LDR; break;
    case 8: LoadOp = AArch64::LS64_LDR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else if (RC->hasType(MVT::f32) || RC->hasType(MVT::f64) ||
             RC->hasType(MVT::f128)) {
    switch (RC->getSize()) {
    case 4: LoadOp = AArch64::LSFP32_LDR; break;
    case 8: LoadOp = AArch64::LSFP64_LDR; break;
    case 16: LoadOp = AArch64::LSFP128_LDR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else { // This is a super-register class with more than one sub-register.
    if (AArch64::DPairRegClass.hasSubClassEq(RC))
      LoadOp = AArch64::LD1x2_8B;
    else if (AArch64::DTripleRegClass.hasSubClassEq(RC))
      LoadOp = AArch64::LD1x3_8B;
    else if (AArch64::DQuadRegClass.hasSubClassEq(RC))
      LoadOp = AArch64::LD1x4_8B;
    else if (AArch64::QPairRegClass.hasSubClassEq(RC))
      LoadOp = AArch64::LD1x2_16B;
    else if (AArch64::QTripleRegClass.hasSubClassEq(RC))
      LoadOp = AArch64::LD1x3_16B;
    else if (AArch64::QQuadRegClass.hasSubClassEq(RC))
      LoadOp = AArch64::LD1x4_16B;
    else
      llvm_unreachable("Unknown reg class");

    MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(LoadOp), DestReg);
    // Vector load has different operands from other load instructions.
    NewMI.addFrameIndex(FrameIdx)
         .addMemOperand(MMO);
    return;
  }

  MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(LoadOp), DestReg);
  NewMI.addFrameIndex(FrameIdx)
       .addMemOperand(MMO);
}

unsigned AArch64InstrInfo::estimateRSStackLimit(MachineFunction &MF) const {
  unsigned Limit = (1 << 16) - 1;
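  // (Start from an effectively unlimited offset; each frame-index user found
  // below can only tighten this bound.)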
  for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (!I->getOperand(i).isFI()) continue;

        // When using ADDxxi_lsl0_s to get the address of a stack object, 0xfff
        // is the largest offset guaranteed to fit in the immediate offset.
        if (I->getOpcode() == AArch64::ADDxxi_lsl0_s) {
          Limit = std::min(Limit, 0xfffu);
          break;
        }

        int AccessScale, MinOffset, MaxOffset;
        getAddressConstraints(*I, AccessScale, MinOffset, MaxOffset);
        Limit = std::min(Limit, static_cast<unsigned>(MaxOffset));

        break; // At most one FI per instruction
      }
    }
  }

  return Limit;
}

void AArch64InstrInfo::getAddressConstraints(const MachineInstr &MI,
                                             int &AccessScale, int &MinOffset,
                                             int &MaxOffset) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unknown load/store kind");
  case TargetOpcode::DBG_VALUE:
    AccessScale = 1;
    MinOffset = INT_MIN;
    MaxOffset = INT_MAX;
    return;
  case AArch64::LS8_LDR: case AArch64::LS8_STR:
  case AArch64::LSFP8_LDR: case AArch64::LSFP8_STR:
  case AArch64::LDRSBw:
  case AArch64::LDRSBx:
    AccessScale = 1;
    MinOffset = 0;
    MaxOffset = 0xfff;
    return;
  case AArch64::LS16_LDR: case AArch64::LS16_STR:
  case AArch64::LSFP16_LDR: case AArch64::LSFP16_STR:
  case AArch64::LDRSHw:
  case AArch64::LDRSHx:
    AccessScale = 2;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LS32_LDR: case AArch64::LS32_STR:
  case AArch64::LSFP32_LDR: case AArch64::LSFP32_STR:
  case AArch64::LDRSWx:
  case AArch64::LDPSWx:
    AccessScale = 4;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LS64_LDR: case AArch64::LS64_STR:
  case AArch64::LSFP64_LDR: case AArch64::LSFP64_STR:
    AccessScale = 8;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LSFP128_LDR: case AArch64::LSFP128_STR:
    AccessScale = 16;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LSPair32_LDR: case AArch64::LSPair32_STR:
  case AArch64::LSFPPair32_LDR: case AArch64::LSFPPair32_STR:
    AccessScale = 4;
    MinOffset = -0x40 * AccessScale;
    MaxOffset = 0x3f * AccessScale;
    return;
  case AArch64::LSPair64_LDR: case AArch64::LSPair64_STR:
  case AArch64::LSFPPair64_LDR: case AArch64::LSFPPair64_STR:
    AccessScale = 8;
    MinOffset = -0x40 * AccessScale;
    MaxOffset = 0x3f * AccessScale;
    return;
  case AArch64::LSFPPair128_LDR: case AArch64::LSFPPair128_STR:
    AccessScale = 16;
    MinOffset = -0x40 * AccessScale;
    MaxOffset = 0x3f * AccessScale;
    return;
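  // (The pair loads/stores above use a signed 7-bit scaled immediate, hence
  // the [-0x40, 0x3f] * AccessScale bounds.)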
  case AArch64::LD1x2_8B: case AArch64::ST1x2_8B:
    AccessScale = 16;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LD1x3_8B: case AArch64::ST1x3_8B:
    AccessScale = 24;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LD1x4_8B: case AArch64::ST1x4_8B:
  case AArch64::LD1x2_16B: case AArch64::ST1x2_16B:
    AccessScale = 32;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LD1x3_16B: case AArch64::ST1x3_16B:
    AccessScale = 48;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LD1x4_16B: case AArch64::ST1x4_16B:
    AccessScale = 64;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  }
}

unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MCInstrDesc &MCID = MI.getDesc();
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction &MF = *MBB.getParent();
  const MCAsmInfo &MAI = *MF.getTarget().getMCAsmInfo();

  if (MCID.getSize())
    return MCID.getSize();

  if (MI.getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(), MAI);

  switch (MI.getOpcode()) {
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::PROLOG_LABEL:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  case AArch64::TLSDESCCALL:
    // Just a marker; it produces no machine code.
    return 0;
  default:
    llvm_unreachable("Unknown instruction class");
  }
}

unsigned AArch64InstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI;
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

bool llvm::rewriteA64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const AArch64InstrInfo &TII) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();

  MFI.getObjectOffset(FrameRegIdx);
  llvm_unreachable("Unimplemented rewriteFrameIndex");
}

void llvm::emitRegUpdate(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI,
                         DebugLoc dl, const TargetInstrInfo &TII,
                         unsigned DstReg, unsigned SrcReg, unsigned ScratchReg,
                         int64_t NumBytes, MachineInstr::MIFlag MIFlags) {
  if (NumBytes == 0 && DstReg == SrcReg)
    return;
  else if (abs64(NumBytes) & ~0xffffff) {
    // Generically, we have to materialize the offset into a temporary register
    // and add or subtract it. There are a couple of ways this could be done,
    // for now we'll use a movz/movk or movn/movk sequence.
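    // E.g. NumBytes == 0x12345678 becomes:
    //   movz xTMP, #0x5678
    //   movk xTMP, #0x1234, lsl #16
    //   add xDst, xSrc, xTMP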
    uint64_t Bits = static_cast<uint64_t>(abs64(NumBytes));
    BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVZxii), ScratchReg)
      .addImm(0xffff & Bits).addImm(0)
      .setMIFlags(MIFlags);

    Bits >>= 16;
    if (Bits & 0xffff) {
      BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVKxii), ScratchReg)
        .addReg(ScratchReg)
        .addImm(0xffff & Bits).addImm(1)
        .setMIFlags(MIFlags);
    }

    Bits >>= 16;
    if (Bits & 0xffff) {
      BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVKxii), ScratchReg)
        .addReg(ScratchReg)
        .addImm(0xffff & Bits).addImm(2)
        .setMIFlags(MIFlags);
    }

    Bits >>= 16;
    if (Bits & 0xffff) {
      BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVKxii), ScratchReg)
        .addReg(ScratchReg)
        .addImm(0xffff & Bits).addImm(3)
        .setMIFlags(MIFlags);
    }

    // ADD DST, SRC, xTMP (, lsl #0)
    unsigned AddOp = NumBytes > 0 ? AArch64::ADDxxx_uxtx : AArch64::SUBxxx_uxtx;
    BuildMI(MBB, MBBI, dl, TII.get(AddOp), DstReg)
      .addReg(SrcReg, RegState::Kill)
      .addReg(ScratchReg, RegState::Kill)
      .addImm(0)
      .setMIFlags(MIFlags);
    return;
  }

  // Now we know that the adjustment can be done in at most two add/sub
  // (immediate) instructions, which is always more efficient than a
  // literal-pool load, or even a hypothetical movz/movk/add sequence.
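  // E.g. NumBytes == 0x123456 becomes:
  //   add xDst, xSrc, #0x456
  //   add xDst, xDst, #0x123, lsl #12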

  // Decide whether we're doing addition or subtraction
  unsigned LowOp, HighOp;
  if (NumBytes >= 0) {
    LowOp = AArch64::ADDxxi_lsl0_s;
    HighOp = AArch64::ADDxxi_lsl12_s;
  } else {
    LowOp = AArch64::SUBxxi_lsl0_s;
    HighOp = AArch64::SUBxxi_lsl12_s;
    NumBytes = abs64(NumBytes);
  }

  // If we're here, at the very least a move needs to be produced, which just
  // happens to be materializable by an ADD.
  if ((NumBytes & 0xfff) || NumBytes == 0) {
    BuildMI(MBB, MBBI, dl, TII.get(LowOp), DstReg)
      .addReg(SrcReg, RegState::Kill)
      .addImm(NumBytes & 0xfff)
      .setMIFlags(MIFlags);

    // Next update should use the register we've just defined.
    SrcReg = DstReg;
  }

  if (NumBytes & 0xfff000) {
    BuildMI(MBB, MBBI, dl, TII.get(HighOp), DstReg)
      .addReg(SrcReg, RegState::Kill)
      .addImm(NumBytes >> 12)
      .setMIFlags(MIFlags);
  }
}

void llvm::emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                        DebugLoc dl, const TargetInstrInfo &TII,
                        unsigned ScratchReg, int64_t NumBytes,
                        MachineInstr::MIFlag MIFlags) {
  emitRegUpdate(MBB, MI, dl, TII, AArch64::XSP, AArch64::XSP, AArch64::X16,
                NumBytes, MIFlags);
}

namespace {
struct LDTLSCleanup : public MachineFunctionPass {
  static char ID;
  LDTLSCleanup() : MachineFunctionPass(ID) {}

  virtual bool runOnMachineFunction(MachineFunction &MF) {
    AArch64MachineFunctionInfo *MFI
      = MF.getInfo<AArch64MachineFunctionInfo>();
    if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
      // No point folding accesses if there aren't at least two.
      return false;
    }

    MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
    return VisitNode(DT->getRootNode(), 0);
  }

  // Visit the dominator subtree rooted at Node in pre-order.
  // If TLSBaseAddrReg is non-null, then use that to replace any
  // TLS_base_addr instructions. Otherwise, create the register
  // when the first such instruction is seen, and then use it
  // as we encounter more instructions.
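  // (A "TLS_base_addr instruction" here is a TLSDESC_BLRx call on
  // _TLS_MODULE_BASE_, whose result lands in X0.)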
  bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
    MachineBasicBlock *BB = Node->getBlock();
    bool Changed = false;

    // Traverse the current block.
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
         ++I) {
      switch (I->getOpcode()) {
      case AArch64::TLSDESC_BLRx:
        // Make sure it's a local dynamic access.
        if (!I->getOperand(1).isSymbol() ||
            strcmp(I->getOperand(1).getSymbolName(), "_TLS_MODULE_BASE_"))
          break;

        if (TLSBaseAddrReg)
          I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
        else
          I = SetRegister(I, &TLSBaseAddrReg);
        Changed = true;
        break;
      default:
        break;
      }
    }

    // Visit the children of this block in the dominator tree.
    for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
         I != E; ++I) {
      Changed |= VisitNode(*I, TLSBaseAddrReg);
    }

    return Changed;
  }

  // Replace the TLS_base_addr instruction I with a copy from
  // TLSBaseAddrReg, returning the new instruction.
  MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
                                       unsigned TLSBaseAddrReg) {
    MachineFunction *MF = I->getParent()->getParent();
    const AArch64TargetMachine *TM =
        static_cast<const AArch64TargetMachine *>(&MF->getTarget());
    const AArch64InstrInfo *TII = TM->getInstrInfo();

    // Insert a Copy from TLSBaseAddrReg to x0, which is where the rest of the
    // code sequence assumes the address will be.
    MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
                                 TII->get(TargetOpcode::COPY),
                                 AArch64::X0)
                             .addReg(TLSBaseAddrReg);

    // Erase the TLS_base_addr instruction.
    I->eraseFromParent();

    return Copy;
  }

  // Create a virtual register in *TLSBaseAddrReg, and populate it by
  // inserting a copy instruction after I. Returns the new instruction.
  MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
    MachineFunction *MF = I->getParent()->getParent();
    const AArch64TargetMachine *TM =
        static_cast<const AArch64TargetMachine *>(&MF->getTarget());
    const AArch64InstrInfo *TII = TM->getInstrInfo();

    // Create a virtual register for the TLS base address.
    MachineRegisterInfo &RegInfo = MF->getRegInfo();
    *TLSBaseAddrReg = RegInfo.createVirtualRegister(&AArch64::GPR64RegClass);

    // Insert a copy from X0 to TLSBaseAddrReg for later.
    MachineInstr *Next = I->getNextNode();
    MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
                                 TII->get(TargetOpcode::COPY),
                                 *TLSBaseAddrReg)
                             .addReg(AArch64::X0);

    return Copy;
  }

  virtual const char *getPassName() const {
    return "Local Dynamic TLS Access Clean-up";
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
} // end anonymous namespace

char LDTLSCleanup::ID = 0;
FunctionPass *
llvm::createAArch64CleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }