//===- AArch64InstrInfo.cpp - AArch64 Instruction Information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"

#include <algorithm>
#include <climits>
#include <cstring>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      Subtarget(STI) {}
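
/// copyPhysReg - emit instructions to copy SrcReg into DestReg. The sequence
/// depends on the registers involved: stack-pointer copies use ADD #0, NZCV
/// transfers use MRS/MSR, GPR copies use ORR with the zero register, mixed
/// GPR/FPR copies use FMOV, and FPR128 copies use a NEON ORR or, when NEON is
/// unavailable, a stack store/reload.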
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  unsigned Opc = 0;
  unsigned ZeroReg = 0;
  if (DestReg == AArch64::XSP || SrcReg == AArch64::XSP) {
    // E.g. ADD xDst, xsp, #0 (, lsl #0)
    BuildMI(MBB, I, DL, get(AArch64::ADDxxi_lsl0_s), DestReg)
      .addReg(SrcReg)
      .addImm(0);
    return;
  } else if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
    // E.g. ADD wDST, wsp, #0 (, lsl #0)
    BuildMI(MBB, I, DL, get(AArch64::ADDwwi_lsl0_s), DestReg)
      .addReg(SrcReg)
      .addImm(0);
    return;
  } else if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg));
    // E.g. MSR NZCV, xDST
    BuildMI(MBB, I, DL, get(AArch64::MSRix))
      .addImm(A64SysReg::NZCV)
      .addReg(SrcReg);
  } else if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg));
    // E.g. MRS xDST, NZCV
    BuildMI(MBB, I, DL, get(AArch64::MRSxi), DestReg)
      .addImm(A64SysReg::NZCV);
  } else if (AArch64::GPR64RegClass.contains(DestReg)) {
    if (AArch64::GPR64RegClass.contains(SrcReg)) {
      Opc = AArch64::ORRxxx_lsl;
      ZeroReg = AArch64::XZR;
    } else {
      assert(AArch64::FPR64RegClass.contains(SrcReg));
      BuildMI(MBB, I, DL, get(AArch64::FMOVxd), DestReg)
        .addReg(SrcReg);
      return;
    }
  } else if (AArch64::GPR32RegClass.contains(DestReg)) {
    if (AArch64::GPR32RegClass.contains(SrcReg)) {
      Opc = AArch64::ORRwww_lsl;
      ZeroReg = AArch64::WZR;
    } else {
      assert(AArch64::FPR32RegClass.contains(SrcReg));
      BuildMI(MBB, I, DL, get(AArch64::FMOVws), DestReg)
        .addReg(SrcReg);
      return;
    }
  } else if (AArch64::FPR32RegClass.contains(DestReg)) {
    if (AArch64::FPR32RegClass.contains(SrcReg)) {
      BuildMI(MBB, I, DL, get(AArch64::FMOVss), DestReg)
        .addReg(SrcReg);
      return;
    } else {
      assert(AArch64::GPR32RegClass.contains(SrcReg));
      BuildMI(MBB, I, DL, get(AArch64::FMOVsw), DestReg)
        .addReg(SrcReg);
      return;
    }
  } else if (AArch64::FPR64RegClass.contains(DestReg)) {
    if (AArch64::FPR64RegClass.contains(SrcReg)) {
      BuildMI(MBB, I, DL, get(AArch64::FMOVdd), DestReg)
        .addReg(SrcReg);
      return;
    } else {
      assert(AArch64::GPR64RegClass.contains(SrcReg));
      BuildMI(MBB, I, DL, get(AArch64::FMOVdx), DestReg)
        .addReg(SrcReg);
      return;
    }
  } else if (AArch64::FPR128RegClass.contains(DestReg)) {
    assert(AArch64::FPR128RegClass.contains(SrcReg));

    // If NEON is enabled, we use ORR to implement this copy.
    // If NEON isn't available, emit STR and LDR to handle this.
    if (getSubTarget().hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRvvv_16B), DestReg)
        .addReg(SrcReg)
        .addReg(SrcReg);
      return;
    } else {
      // Store the source with pre-decrement, then reload it into the
      // destination with post-increment, leaving XSP unchanged overall.
      BuildMI(MBB, I, DL, get(AArch64::LSFP128_PreInd_STR), AArch64::XSP)
        .addReg(SrcReg)
        .addReg(AArch64::XSP)
        .addImm(0x1ff & -16);

      BuildMI(MBB, I, DL, get(AArch64::LSFP128_PostInd_LDR), DestReg)
        .addReg(AArch64::XSP, RegState::Define)
        .addReg(AArch64::XSP)
        .addImm(16);
      return;
    }
  } else {
    llvm_unreachable("Unknown register class in copyPhysReg");
  }

  // E.g. ORR xDst, xzr, xSrc, lsl #0
  BuildMI(MBB, I, DL, get(Opc), DestReg)
    .addReg(ZeroReg)
    .addReg(SrcReg, getKillRegState(KillSrc))
    .addImm(0);
}

/// Does the Opcode represent a conditional branch that we can remove and
/// re-add at the end of a basic block?
static bool isCondBranch(unsigned Opc) {
  return Opc == AArch64::Bcc || Opc == AArch64::CBZw || Opc == AArch64::CBZx ||
         Opc == AArch64::CBNZw || Opc == AArch64::CBNZx ||
         Opc == AArch64::TBZwii || Opc == AArch64::TBZxii ||
         Opc == AArch64::TBNZwii || Opc == AArch64::TBNZxii;
}

/// Takes apart a given conditional branch MachineInstr (see isCondBranch),
/// setting TBB to the destination basic block and populating the Cond vector
/// with data necessary to recreate the conditional branch at a later
/// date. First element will be the opcode, and subsequent ones define the
/// conditions being branched on in an instruction-specific manner.
static void classifyCondBranch(MachineInstr *I, MachineBasicBlock *&TBB,
                               SmallVectorImpl<MachineOperand> &Cond) {
  switch (I->getOpcode()) {
  case AArch64::Bcc:
  case AArch64::CBZw:
  case AArch64::CBZx:
  case AArch64::CBNZw:
  case AArch64::CBNZx:
    // These instructions just have one predicate operand in position 0 (either
    // a condition code or a register being compared).
    Cond.push_back(MachineOperand::CreateImm(I->getOpcode()));
    Cond.push_back(I->getOperand(0));
    TBB = I->getOperand(1).getMBB();
    break;
  case AArch64::TBZwii:
  case AArch64::TBZxii:
  case AArch64::TBNZwii:
  case AArch64::TBNZxii:
    // These have two predicate operands: a register and a bit position.
    Cond.push_back(MachineOperand::CreateImm(I->getOpcode()));
    Cond.push_back(I->getOperand(0));
    Cond.push_back(I->getOperand(1));
    TBB = I->getOperand(2).getMBB();
    break;
  default:
    llvm_unreachable("Unknown conditional branch to classify");
  }
}
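
/// AnalyzeBranch - analyze the terminators of MBB. Returns false on success,
/// setting TBB/FBB and filling Cond for a conditional tail; returns true when
/// the terminator sequence (e.g. an indirect branch) cannot be understood.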
bool
AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastOpc == AArch64::Bimm) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranch(LastOpc)) {
      classifyCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && LastOpc == AArch64::Bimm) {
    while (SecondLastOpc == AArch64::Bimm) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (LastOpc == AArch64::Bimm) {
    if (SecondLastOpc == AArch64::Bcc) {
      TBB = SecondLastInst->getOperand(1).getMBB();
      Cond.push_back(MachineOperand::CreateImm(AArch64::Bcc));
      Cond.push_back(SecondLastInst->getOperand(0));
      FBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (isCondBranch(SecondLastOpc)) {
      classifyCondBranch(SecondLastInst, TBB, Cond);
      FBB = LastInst->getOperand(0).getMBB();
      return false;
    }
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (SecondLastOpc == AArch64::Bimm && LastOpc == AArch64::Bimm) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
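
/// ReverseBranchCondition - invert the condition held in Cond (as created by
/// classifyCondBranch): Bcc gets an inverted condition code, while CBZ/CBNZ
/// and TBZ/TBNZ opcodes are exchanged. Returns false to indicate success.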
bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  switch (Cond[0].getImm()) {
  case AArch64::Bcc: {
    A64CC::CondCodes CC = static_cast<A64CC::CondCodes>(Cond[1].getImm());
    CC = A64InvertCondCode(CC);
    Cond[1].setImm(CC);
    return false;
  }
  case AArch64::CBZw:
    Cond[0].setImm(AArch64::CBNZw);
    return false;
  case AArch64::CBZx:
    Cond[0].setImm(AArch64::CBNZx);
    return false;
  case AArch64::CBNZw:
    Cond[0].setImm(AArch64::CBZw);
    return false;
  case AArch64::CBNZx:
    Cond[0].setImm(AArch64::CBZx);
    return false;
  case AArch64::TBZwii:
    Cond[0].setImm(AArch64::TBNZwii);
    return false;
  case AArch64::TBZxii:
    Cond[0].setImm(AArch64::TBNZxii);
    return false;
  case AArch64::TBNZwii:
    Cond[0].setImm(AArch64::TBZwii);
    return false;
  case AArch64::TBNZxii:
    Cond[0].setImm(AArch64::TBZxii);
    return false;
  default:
    llvm_unreachable("Unknown branch type");
  }
}
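
/// InsertBranch - insert an unconditional (Bimm) and/or conditional branch at
/// the end of MBB as described by TBB, FBB and Cond, returning the number of
/// instructions added.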
unsigned
AArch64InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  if (FBB == 0 && Cond.empty()) {
    BuildMI(&MBB, DL, get(AArch64::Bimm)).addMBB(TBB);
    return 1;
  } else if (FBB == 0) {
    MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
    for (int i = 1, e = Cond.size(); i != e; ++i)
      MIB.addOperand(Cond[i]);
    MIB.addMBB(TBB);
    return 1;
  }

  MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
  for (int i = 1, e = Cond.size(); i != e; ++i)
    MIB.addOperand(Cond[i]);
  MIB.addMBB(TBB);

  BuildMI(&MBB, DL, get(AArch64::Bimm)).addMBB(FBB);
  return 2;
}
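
/// RemoveBranch - delete the branch instructions at the end of MBB (at most
/// one unconditional and one conditional), returning how many were removed.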
unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (I->getOpcode() != AArch64::Bimm && !isCondBranch(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranch(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();

  return 2;
}
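
/// expandPostRAPseudo - expand pseudo-instructions after register allocation.
/// The only case handled here is TLSDESC_BLRx, which becomes a BLRx bundled
/// with a TLSDESCCALL marker carrying the original symbol operand.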
bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock &MBB = *MI.getParent();

  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case AArch64::TLSDESC_BLRx: {
    MachineInstr *NewMI =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), get(AArch64::TLSDESCCALL))
            .addOperand(MI.getOperand(1));
    MI.setDesc(get(AArch64::BLRx));

    llvm::finalizeBundle(MBB, NewMI, *++MBBI);
    return true;
  }
  }

  return false;
}
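
/// storeRegToStackSlot - store SrcReg to a stack slot, picking the store
/// opcode from the size and type of the register class; D-register tuples are
/// spilled through the equivalent Q-tuple ST1 instructions.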
void
AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  MachineMemOperand *MMO
    = MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOStore,
                              MFI.getObjectSize(FrameIdx),
                              Align);

  unsigned StoreOp = 0;
  if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
    switch (RC->getSize()) {
    case 4: StoreOp = AArch64::LS32_STR; break;
    case 8: StoreOp = AArch64::LS64_STR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else if (RC->hasType(MVT::f32) || RC->hasType(MVT::f64) ||
             RC->hasType(MVT::f128)) {
    switch (RC->getSize()) {
    case 4: StoreOp = AArch64::LSFP32_STR; break;
    case 8: StoreOp = AArch64::LSFP64_STR; break;
    case 16: StoreOp = AArch64::LSFP128_STR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else { // The spill of D tuples is implemented by Q tuples
    if (RC == &AArch64::QPairRegClass)
      StoreOp = AArch64::ST1x2_16B;
    else if (RC == &AArch64::QTripleRegClass)
      StoreOp = AArch64::ST1x3_16B;
    else if (RC == &AArch64::QQuadRegClass)
      StoreOp = AArch64::ST1x4_16B;
    else
      llvm_unreachable("Unknown reg class");

    MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp));
    // Vector store has different operands from other store instructions.
    NewMI.addFrameIndex(FrameIdx)
         .addReg(SrcReg, getKillRegState(isKill))
         .addMemOperand(MMO);
    return;
  }

  MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp));
  NewMI.addReg(SrcReg, getKillRegState(isKill))
       .addFrameIndex(FrameIdx)
       .addImm(0)
       .addMemOperand(MMO);
}
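
/// loadRegFromStackSlot - load DestReg from a stack slot, mirroring
/// storeRegToStackSlot's opcode selection.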
void
AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned DestReg, int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  MachineMemOperand *MMO
    = MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOLoad,
                              MFI.getObjectSize(FrameIdx),
                              Align);

  unsigned LoadOp = 0;
  if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
    switch (RC->getSize()) {
    case 4: LoadOp = AArch64::LS32_LDR; break;
    case 8: LoadOp = AArch64::LS64_LDR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else if (RC->hasType(MVT::f32) || RC->hasType(MVT::f64) ||
             RC->hasType(MVT::f128)) {
    switch (RC->getSize()) {
    case 4: LoadOp = AArch64::LSFP32_LDR; break;
    case 8: LoadOp = AArch64::LSFP64_LDR; break;
    case 16: LoadOp = AArch64::LSFP128_LDR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else { // The spill of D tuples is implemented by Q tuples
    if (RC == &AArch64::QPairRegClass)
      LoadOp = AArch64::LD1x2_16B;
    else if (RC == &AArch64::QTripleRegClass)
      LoadOp = AArch64::LD1x3_16B;
    else if (RC == &AArch64::QQuadRegClass)
      LoadOp = AArch64::LD1x4_16B;
    else
      llvm_unreachable("Unknown reg class");

    MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(LoadOp), DestReg);
    // Vector load has different operands from other load instructions.
    NewMI.addFrameIndex(FrameIdx)
         .addMemOperand(MMO);
    return;
  }

  MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(LoadOp), DestReg);
  NewMI.addFrameIndex(FrameIdx)
       .addImm(0)
       .addMemOperand(MMO);
}
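
/// estimateRSStackLimit - estimate the offset limit the register scavenger
/// must cope with: scan every frame-index operand in the function and take
/// the smallest of the per-instruction maximum offsets.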
unsigned AArch64InstrInfo::estimateRSStackLimit(MachineFunction &MF) const {
  unsigned Limit = (1 << 16) - 1;
  for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (!I->getOperand(i).isFI()) continue;

        // When using ADDxxi_lsl0_s to get the address of a stack object, 0xfff
        // is the largest offset guaranteed to fit in the immediate offset.
        if (I->getOpcode() == AArch64::ADDxxi_lsl0_s) {
          Limit = std::min(Limit, 0xfffu);
          break;
        }

        int AccessScale, MinOffset, MaxOffset;
        getAddressConstraints(*I, AccessScale, MinOffset, MaxOffset);
        Limit = std::min(Limit, static_cast<unsigned>(MaxOffset));

        break; // At most one FI per instruction
      }
    }
  }

  return Limit;
}
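
/// getAddressConstraints - report the addressing constraints of a load or
/// store: the scale of its immediate (the access size in bytes) and the
/// minimum and maximum byte offsets it can encode.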
void AArch64InstrInfo::getAddressConstraints(const MachineInstr &MI,
                                             int &AccessScale, int &MinOffset,
                                             int &MaxOffset) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unknown load/store kind");
  case TargetOpcode::DBG_VALUE:
    AccessScale = 1;
    MinOffset = INT_MIN;
    MaxOffset = INT_MAX;
    return;
  case AArch64::LS8_LDR: case AArch64::LS8_STR:
  case AArch64::LSFP8_LDR: case AArch64::LSFP8_STR:
  case AArch64::LDRSBw:
  case AArch64::LDRSBx:
    AccessScale = 1;
    MinOffset = 0;
    MaxOffset = 0xfff;
    return;
  case AArch64::LS16_LDR: case AArch64::LS16_STR:
  case AArch64::LSFP16_LDR: case AArch64::LSFP16_STR:
  case AArch64::LDRSHw:
  case AArch64::LDRSHx:
    AccessScale = 2;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LS32_LDR: case AArch64::LS32_STR:
  case AArch64::LSFP32_LDR: case AArch64::LSFP32_STR:
  case AArch64::LDRSWx:
  case AArch64::LDPSWx:
    AccessScale = 4;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LS64_LDR: case AArch64::LS64_STR:
  case AArch64::LSFP64_LDR: case AArch64::LSFP64_STR:
    AccessScale = 8;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LSFP128_LDR: case AArch64::LSFP128_STR:
    AccessScale = 16;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LSPair32_LDR: case AArch64::LSPair32_STR:
  case AArch64::LSFPPair32_LDR: case AArch64::LSFPPair32_STR:
    AccessScale = 4;
    MinOffset = -0x40 * AccessScale;
    MaxOffset = 0x3f * AccessScale;
    return;
  case AArch64::LSPair64_LDR: case AArch64::LSPair64_STR:
  case AArch64::LSFPPair64_LDR: case AArch64::LSFPPair64_STR:
    AccessScale = 8;
    MinOffset = -0x40 * AccessScale;
    MaxOffset = 0x3f * AccessScale;
    return;
  case AArch64::LSFPPair128_LDR: case AArch64::LSFPPair128_STR:
    AccessScale = 16;
    MinOffset = -0x40 * AccessScale;
    MaxOffset = 0x3f * AccessScale;
    return;
  case AArch64::LD1x2_16B: case AArch64::ST1x2_16B:
    AccessScale = 32;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LD1x3_16B: case AArch64::ST1x3_16B:
    AccessScale = 48;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  case AArch64::LD1x4_16B: case AArch64::ST1x4_16B:
    AccessScale = 64;
    MinOffset = 0;
    MaxOffset = 0xfff * AccessScale;
    return;
  }
}
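
/// getInstSizeInBytes - compute the size of a MachineInstr, with special
/// handling for inline assembly, bundles and the zero-size pseudos whose
/// MCInstrDesc carries no size.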
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MCInstrDesc &MCID = MI.getDesc();
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction &MF = *MBB.getParent();
  const MCAsmInfo &MAI = *MF.getTarget().getMCAsmInfo();

  if (MCID.getSize())
    return MCID.getSize();

  // Inline assembly has to be measured from its string.
  if (MI.getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(), MAI);

  switch (MI.getOpcode()) {
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::PROLOG_LABEL:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  case AArch64::TLSDESCCALL:
    return 0;
  default:
    llvm_unreachable("Unknown instruction class");
  }
}

unsigned AArch64InstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI;
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

bool llvm::rewriteA64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const AArch64InstrInfo &TII) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();

  MFI.getObjectOffset(FrameRegIdx);
  llvm_unreachable("Unimplemented rewriteFrameIndex");
}
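
/// emitRegUpdate - materialize DstReg = SrcReg +/- NumBytes. Adjustments that
/// fit in 24 bits use one or two add/sub immediates; larger ones build the
/// absolute offset in ScratchReg with a movz/movk sequence and then add or
/// subtract it.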
void llvm::emitRegUpdate(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI,
                         DebugLoc dl, const TargetInstrInfo &TII,
                         unsigned DstReg, unsigned SrcReg, unsigned ScratchReg,
                         int64_t NumBytes, MachineInstr::MIFlag MIFlags) {
  if (NumBytes == 0 && DstReg == SrcReg)
    return;
  else if (abs64(NumBytes) & ~0xffffff) {
    // Generically, we have to materialize the offset into a temporary register
    // and subtract it. There are a couple of ways this could be done, for now
    // we'll use a movz/movk or movn/movk sequence.
    uint64_t Bits = static_cast<uint64_t>(abs64(NumBytes));
    BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVZxii), ScratchReg)
      .addImm(0xffff & Bits).addImm(0)
      .setMIFlags(MIFlags);

    Bits >>= 16;
    if (Bits & 0xffff) {
      BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVKxii), ScratchReg)
        .addReg(ScratchReg)
        .addImm(0xffff & Bits).addImm(1)
        .setMIFlags(MIFlags);
    }

    Bits >>= 16;
    if (Bits & 0xffff) {
      BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVKxii), ScratchReg)
        .addReg(ScratchReg)
        .addImm(0xffff & Bits).addImm(2)
        .setMIFlags(MIFlags);
    }

    Bits >>= 16;
    if (Bits & 0xffff) {
      BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVKxii), ScratchReg)
        .addReg(ScratchReg)
        .addImm(0xffff & Bits).addImm(3)
        .setMIFlags(MIFlags);
    }

    // ADD DST, SRC, xTMP (, lsl #0)
    unsigned AddOp = NumBytes > 0 ? AArch64::ADDxxx_uxtx : AArch64::SUBxxx_uxtx;
    BuildMI(MBB, MBBI, dl, TII.get(AddOp), DstReg)
      .addReg(SrcReg, RegState::Kill)
      .addReg(ScratchReg, RegState::Kill)
      .addImm(0)
      .setMIFlags(MIFlags);
    return;
  }

  // Now we know that the adjustment can be done in at most two add/sub
  // (immediate) instructions, which is always more efficient than a
  // literal-pool load, or even a hypothetical movz/movk/add sequence.

  // Decide whether we're doing addition or subtraction.
  unsigned LowOp, HighOp;
  if (NumBytes >= 0) {
    LowOp = AArch64::ADDxxi_lsl0_s;
    HighOp = AArch64::ADDxxi_lsl12_s;
  } else {
    LowOp = AArch64::SUBxxi_lsl0_s;
    HighOp = AArch64::SUBxxi_lsl12_s;
    NumBytes = abs64(NumBytes);
  }

  // If we're here, at the very least a move needs to be produced, which just
  // happens to be materializable by an ADD.
  if ((NumBytes & 0xfff) || NumBytes == 0) {
    BuildMI(MBB, MBBI, dl, TII.get(LowOp), DstReg)
      .addReg(SrcReg, RegState::Kill)
      .addImm(NumBytes & 0xfff)
      .setMIFlags(MIFlags);

    // Next update should use the register we've just defined.
    SrcReg = DstReg;
  }

  if (NumBytes & 0xfff000) {
    BuildMI(MBB, MBBI, dl, TII.get(HighOp), DstReg)
      .addReg(SrcReg, RegState::Kill)
      .addImm(NumBytes >> 12)
      .setMIFlags(MIFlags);
  }
}
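
/// emitSPUpdate - adjust the stack pointer by NumBytes, built on top of
/// emitRegUpdate with X16 as the scratch register.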
void llvm::emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                        DebugLoc dl, const TargetInstrInfo &TII,
                        unsigned ScratchReg, int64_t NumBytes,
                        MachineInstr::MIFlag MIFlags) {
  emitRegUpdate(MBB, MI, dl, TII, AArch64::XSP, AArch64::XSP, AArch64::X16,
                NumBytes, MIFlags);
}
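
/// LDTLSCleanup (below) folds redundant local-dynamic TLS base-address
/// computations: the first TLSDESC call to _TLS_MODULE_BASE_ in a dominator
/// subtree is kept and its result cached in a virtual register, and any
/// dominated calls are replaced with copies from that register.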
namespace {
  struct LDTLSCleanup : public MachineFunctionPass {
    static char ID;
    LDTLSCleanup() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      AArch64MachineFunctionInfo *MFI =
          MF.getInfo<AArch64MachineFunctionInfo>();
      if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
        // No point folding accesses if there aren't at least two.
        return false;
      }

      MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
      return VisitNode(DT->getRootNode(), 0);
    }

    // Visit the dominator subtree rooted at Node in pre-order.
    // If TLSBaseAddrReg is non-null, then use that to replace any
    // TLS_base_addr instructions. Otherwise, create the register
    // when the first such instruction is seen, and then use it
    // as we encounter more instructions.
    bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
      MachineBasicBlock *BB = Node->getBlock();
      bool Changed = false;

      // Traverse the current block.
      for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
           ++I) {
        switch (I->getOpcode()) {
        case AArch64::TLSDESC_BLRx:
          // Make sure it's a local dynamic access.
          if (!I->getOperand(1).isSymbol() ||
              strcmp(I->getOperand(1).getSymbolName(), "_TLS_MODULE_BASE_"))
            break;

          if (TLSBaseAddrReg)
            I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
          else
            I = SetRegister(I, &TLSBaseAddrReg);
          Changed = true;
          break;
        default:
          break;
        }
      }

      // Visit the children of this block in the dominator tree.
      for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
           I != E; ++I) {
        Changed |= VisitNode(*I, TLSBaseAddrReg);
      }

      return Changed;
    }

    // Replace the TLS_base_addr instruction I with a copy from
    // TLSBaseAddrReg, returning the new instruction.
    MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
                                         unsigned TLSBaseAddrReg) {
      MachineFunction *MF = I->getParent()->getParent();
      const AArch64TargetMachine *TM =
          static_cast<const AArch64TargetMachine *>(&MF->getTarget());
      const AArch64InstrInfo *TII = TM->getInstrInfo();

      // Insert a Copy from TLSBaseAddrReg to x0, which is where the rest of
      // the code sequence assumes the address will be.
      MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY),
                                   AArch64::X0)
                               .addReg(TLSBaseAddrReg);

      // Erase the TLS_base_addr instruction.
      I->eraseFromParent();

      return Copy;
    }

    // Create a virtual register in *TLSBaseAddrReg, and populate it by
    // inserting a copy instruction after I. Returns the new instruction.
    MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
      MachineFunction *MF = I->getParent()->getParent();
      const AArch64TargetMachine *TM =
          static_cast<const AArch64TargetMachine *>(&MF->getTarget());
      const AArch64InstrInfo *TII = TM->getInstrInfo();

      // Create a virtual register for the TLS base address.
      MachineRegisterInfo &RegInfo = MF->getRegInfo();
      *TLSBaseAddrReg = RegInfo.createVirtualRegister(&AArch64::GPR64RegClass);

      // Insert a copy from X0 to TLSBaseAddrReg for later.
      MachineInstr *Next = I->getNextNode();
      MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY),
                                   *TLSBaseAddrReg)
                               .addReg(AArch64::X0);

      return Copy;
    }

    virtual const char *getPassName() const {
      return "Local Dynamic TLS Access Clean-up";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char LDTLSCleanup::ID = 0;
FunctionPass *
llvm::createAArch64CleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }