//===-- ARMConstantIslandPass.cpp - ARM constant islands ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that splits the constant pool up into 'islands'
// which are scattered throughout the function. This is required due to the
// limited pc-relative displacements that ARM has.
//
//===----------------------------------------------------------------------===//
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

#define DEBUG_TYPE "arm-cp-islands"

STATISTIC(NumCPEs,       "Number of constpool entries");
STATISTIC(NumSplit,      "Number of uncond branches inserted");
STATISTIC(NumCBrFixed,   "Number of cond branches fixed");
STATISTIC(NumUBrFixed,   "Number of uncond branches fixed");
STATISTIC(NumTBs,        "Number of table branches generated");
STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
STATISTIC(NumCBZ,        "Number of CBZ / CBNZ formed");
STATISTIC(NumJTMoved,    "Number of jump table destination blocks moved");
STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
static cl::opt<bool>
AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
          cl::desc("Adjust basic block layout to better use TB[BH]"));
/// UnknownPadding - Return the worst case padding that could result from
/// unknown offset bits. This does not include alignment padding caused by
/// known offset bits.
///
/// @param LogAlign log2(alignment)
/// @param KnownBits Number of known low offset bits.
static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
  if (KnownBits < LogAlign)
    return (1u << LogAlign) - (1u << KnownBits);
  return 0;
}
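// For example (illustrative): with LogAlign = 2 (4-byte alignment) but only
// KnownBits = 1 low offset bit known to be exact, up to
// (1 << 2) - (1 << 1) = 2 bytes of padding may appear that the offset
// bookkeeping cannot see; once KnownBits >= LogAlign, no hidden padding is
// possible and the result is 0.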
namespace {
/// ARMConstantIslands - Due to limited PC-relative displacements, ARM
/// requires constant pool entries to be scattered among the instructions
/// inside a function. To do this, it completely ignores the normal LLVM
/// constant pool; instead, it places constants wherever it feels like with
/// special instructions.
///
/// The terminology used in this pass includes:
///   Islands - Clumps of constants placed in the function.
///   Water   - Potential places where an island could be formed.
///   CPE     - A constant pool entry that has been placed somewhere, which
///             tracks a list of users.
class ARMConstantIslands : public MachineFunctionPass {
  /// BasicBlockInfo - Information about the offset and size of a single
  /// basic block.
  struct BasicBlockInfo {
    /// Offset - Distance from the beginning of the function to the beginning
    /// of this basic block.
    ///
    /// Offsets are computed assuming worst case padding before an aligned
    /// block. This means that subtracting basic block offsets always gives a
    /// conservative estimate of the real distance which may be smaller.
    ///
    /// Because worst case padding is used, the computed offset of an aligned
    /// block may not actually be aligned.
    unsigned Offset;

    /// Size - Size of the basic block in bytes. If the block contains
    /// inline assembly, this is a worst case estimate.
    ///
    /// The size does not include any alignment padding whether from the
    /// beginning of the block, or from an aligned jump table at the end.
    unsigned Size;

    /// KnownBits - The number of low bits in Offset that are known to be
    /// exact. The remaining bits of Offset are an upper bound.
    uint8_t KnownBits;

    /// Unalign - When non-zero, the block contains instructions (inline asm)
    /// of unknown size. The real size may be smaller than Size bytes by a
    /// multiple of 1 << Unalign.
    uint8_t Unalign;

    /// PostAlign - When non-zero, the block terminator contains a .align
    /// directive, so the end of the block is aligned to 1 << PostAlign
    /// bytes.
    uint8_t PostAlign;

    BasicBlockInfo() : Offset(0), Size(0), KnownBits(0), Unalign(0),
      PostAlign(0) {}
    /// Compute the number of known offset bits internally to this block.
    /// This number should be used to predict worst case padding when
    /// splitting the block.
    unsigned internalKnownBits() const {
      unsigned Bits = Unalign ? Unalign : KnownBits;
      // If the block size isn't a multiple of the known bits, assume the
      // worst case padding.
      if (Size & ((1u << Bits) - 1))
        Bits = countTrailingZeros(Size);
      return Bits;
    }
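    // For example (illustrative): a block with KnownBits = 2 but Size = 6
    // (0b110) is not a multiple of 4 bytes, so only countTrailingZeros(6) = 1
    // low bit of an offset computed past this block can be trusted.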
    /// Compute the offset immediately following this block. If LogAlign is
    /// specified, return the offset the successor block will get if it has
    /// this alignment.
    unsigned postOffset(unsigned LogAlign = 0) const {
      unsigned PO = Offset + Size;
      unsigned LA = std::max(unsigned(PostAlign), LogAlign);
      if (!LA)
        return PO;
      // Add alignment padding from the terminator.
      return PO + UnknownPadding(LA, internalKnownBits());
    }
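    // For example (illustrative): Offset = 0x100, Size = 0x36, and a
    // 4-byte-aligned successor (LogAlign = 2) with one known low offset bit
    // give postOffset(2) = 0x136 + 2 = 0x138 after worst-case padding.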
    /// Compute the number of known low bits of postOffset. If this block
    /// contains inline asm, the number of known bits drops to the
    /// instruction alignment. An aligned terminator may increase the number
    /// of known bits.
    /// If LogAlign is given, also consider the alignment of the next block.
    unsigned postKnownBits(unsigned LogAlign = 0) const {
      return std::max(std::max(unsigned(PostAlign), LogAlign),
                      internalKnownBits());
    }
  };
  std::vector<BasicBlockInfo> BBInfo;

  /// WaterList - A sorted list of basic blocks where islands could be placed
  /// (i.e. blocks that don't fall through to the following block, due
  /// to a return, unreachable, or unconditional branch).
  std::vector<MachineBasicBlock*> WaterList;

  /// NewWaterList - The subset of WaterList that was created since the
  /// previous iteration by inserting unconditional branches.
  SmallSet<MachineBasicBlock*, 4> NewWaterList;

  typedef std::vector<MachineBasicBlock*>::iterator water_iterator;
  /// CPUser - One user of a constant pool, keeping the machine instruction
  /// pointer, the constant pool being referenced, and the max displacement
  /// allowed from the instruction to the CP. The HighWaterMark records the
  /// highest basic block where a new CPEntry can be placed. To ensure this
  /// pass terminates, the CP entries are initially placed at the end of the
  /// function and then move monotonically to lower addresses. The
  /// exception to this rule is when the current CP entry for a particular
  /// CPUser is out of range, but there is another CP entry for the same
  /// constant value in range. We want to use the existing in-range CP
  /// entry, but if it later moves out of range, the search for new water
  /// should resume where it left off. The HighWaterMark is used to record
  /// that point.
  struct CPUser {
    MachineInstr *MI;
    MachineInstr *CPEMI;
    MachineBasicBlock *HighWaterMark;
    unsigned MaxDisp;
    bool NegOk;
    bool IsSoImm;
    bool KnownAlignment;
    CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
           bool neg, bool soimm)
      : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm),
        KnownAlignment(false) {
      HighWaterMark = CPEMI->getParent();
    }
    /// getMaxDisp - Returns the maximum displacement supported by MI.
    /// Correct for unknown alignment.
    /// Conservatively subtract 2 bytes to handle weird alignment effects.
    unsigned getMaxDisp() const {
      return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2;
    }
  };
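  // For example (illustrative): a user with a raw +/-4095-byte range gets
  // getMaxDisp() = 4093 when its alignment is known, and 4091 when two more
  // bytes must be reserved for the unknown (PC mod 4) rounding.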
  /// CPUsers - Keep track of all of the machine instructions that use various
  /// constant pools and their max displacement.
  std::vector<CPUser> CPUsers;

  /// CPEntry - One per constant pool entry, keeping the machine instruction
  /// pointer, the constpool index, and the number of CPUser's which
  /// reference this entry.
  struct CPEntry {
    MachineInstr *CPEMI;
    unsigned CPI;
    unsigned RefCount;
    CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
      : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
  };

  /// CPEntries - Keep track of all of the constant pool entry machine
  /// instructions. For each original constpool index (i.e. those that existed
  /// upon entry to this pass), it keeps a vector of entries. Original
  /// elements are cloned as we go along; the clones are put in the vector of
  /// the original element, but have distinct CPIs.
  ///
  /// The first half of CPEntries contains generic constants, the second half
  /// contains jump tables. Use getCombinedIndex on a generic CPEMI to look up
  /// which vector it will be in here.
  std::vector<std::vector<CPEntry> > CPEntries;

  /// Maps a JT index to the offset in CPEntries containing copies of that
  /// table. The equivalent map for a CONSTPOOL_ENTRY is the identity.
  DenseMap<int, int> JumpTableEntryIndices;

  /// Maps a JT index to the LEA that actually uses the index to calculate its
  /// address.
  DenseMap<int, int> JumpTableUserIndices;
  /// ImmBranch - One per immediate branch, keeping the machine instruction
  /// pointer, conditional or unconditional, the max displacement,
  /// and (if isCond is true) the corresponding unconditional branch
  /// opcode.
  struct ImmBranch {
    MachineInstr *MI;
    unsigned MaxDisp : 31;
    bool isCond : 1;
    unsigned UncondBr;
    ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, unsigned ubr)
      : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
  };

  /// ImmBranches - Keep track of all the immediate branch instructions.
  std::vector<ImmBranch> ImmBranches;
  /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
  SmallVector<MachineInstr*, 4> PushPopMIs;

  /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
  SmallVector<MachineInstr*, 4> T2JumpTables;

  /// HasFarJump - True if any far jump instruction has been emitted during
  /// the branch fix up pass.
  bool HasFarJump;

  MachineFunction *MF;
  MachineConstantPool *MCP;
  const ARMBaseInstrInfo *TII;
  const ARMSubtarget *STI;
  ARMFunctionInfo *AFI;
  bool isThumb;
  bool isThumb1;
  bool isThumb2;

public:
  static char ID;
  ARMConstantIslands() : MachineFunctionPass(ID) {}
  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "ARM constant island placement and branch shortening pass";
  }

private:
  void doInitialConstPlacement(std::vector<MachineInstr *> &CPEMIs);
  void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
  bool BBHasFallthrough(MachineBasicBlock *MBB);
  CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
  unsigned getCPELogAlign(const MachineInstr *CPEMI);
  void scanFunctionJumpTables();
  void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
  MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
  void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
  void adjustBBOffsetsAfter(MachineBasicBlock *BB);
  bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
  unsigned getCombinedIndex(const MachineInstr *CPEMI);
  int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
  bool findAvailableWater(CPUser &U, unsigned UserOffset,
                          water_iterator &WaterIter);
  void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
                      MachineBasicBlock *&NewMBB);
  bool handleConstantPoolUser(unsigned CPUserIndex);
  void removeDeadCPEMI(MachineInstr *CPEMI);
  bool removeUnusedCPEntries();
  bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
                        MachineInstr *CPEMI, unsigned Disp, bool NegOk,
                        bool DoDump = false);
  bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
                      CPUser &U, unsigned &Growth);
  bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
  bool fixupImmediateBr(ImmBranch &Br);
  bool fixupConditionalBr(ImmBranch &Br);
  bool fixupUnconditionalBr(ImmBranch &Br);
  bool undoLRSpillRestore();
  bool mayOptimizeThumb2Instruction(const MachineInstr *MI) const;
  bool optimizeThumb2Instructions();
  bool optimizeThumb2Branches();
  bool reorderThumb2JumpTables();
  bool preserveBaseRegister(MachineInstr *JumpMI, MachineInstr *LEAMI,
                            unsigned &DeadSize, bool &CanDeleteLEA,
                            bool &BaseRegKill);
  bool optimizeThumb2JumpTables();
  MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
                                                MachineBasicBlock *JTBB);

  void computeBlockSize(MachineBasicBlock *MBB);
  unsigned getOffsetOf(MachineInstr *MI) const;
  unsigned getUserOffset(CPUser&) const;
  void dumpBBs();
  void verify();

  bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
                       unsigned Disp, bool NegativeOK, bool IsSoImm = false);
  bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
                       const CPUser &U) {
    return isOffsetInRange(UserOffset, TrialOffset,
                           U.getMaxDisp(), U.NegOk, U.IsSoImm);
  }
};
char ARMConstantIslands::ID = 0;
} // end anonymous namespace
/// verify - check BBOffsets, BBSizes, alignment of islands
void ARMConstantIslands::verify() {
#ifndef NDEBUG
  for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = &*MBBI;
    unsigned MBBId = MBB->getNumber();
    assert(!MBBId || BBInfo[MBBId - 1].postOffset() <= BBInfo[MBBId].Offset);
  }
  DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
  for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
    CPUser &U = CPUsers[i];
    unsigned UserOffset = getUserOffset(U);
    // Verify offset using the real max displacement without the safety
    // adjustment.
    if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
                         /* DoDump = */ true)) {
      DEBUG(dbgs() << "OK\n");
      continue;
    }
    DEBUG(dbgs() << "Out of range.\n");
    dumpBBs();
    llvm_unreachable("Constant pool entry out of range!");
  }
#endif
}
/// print block size and offset information - debugging
void ARMConstantIslands::dumpBBs() {
  DEBUG({
    for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) {
      const BasicBlockInfo &BBI = BBInfo[J];
      dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
             << " kb=" << unsigned(BBI.KnownBits)
             << " ua=" << unsigned(BBI.Unalign)
             << " pa=" << unsigned(BBI.PostAlign)
             << format(" size=%#x\n", BBInfo[J].Size);
    }
  });
}
/// createARMConstantIslandPass - returns an instance of the constpool
/// island pass.
FunctionPass *llvm::createARMConstantIslandPass() {
  return new ARMConstantIslands();
}
bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  MCP = mf.getConstantPool();

  DEBUG(dbgs() << "***** ARMConstantIslands: "
               << MCP->getConstants().size() << " CP entries, aligned to "
               << MCP->getConstantPoolAlignment() << " bytes *****\n");

  STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
  TII = STI->getInstrInfo();
  AFI = MF->getInfo<ARMFunctionInfo>();

  isThumb = AFI->isThumbFunction();
  isThumb1 = AFI->isThumb1OnlyFunction();
  isThumb2 = AFI->isThumb2Function();

  HasFarJump = false;
  // This pass invalidates liveness information when it splits basic blocks.
  MF->getRegInfo().invalidateLiveness();

  // Renumber all of the machine basic blocks in the function, guaranteeing
  // that the numbers agree with the position of the block in the function.
  MF->RenumberBlocks();

  // Try to reorder and otherwise adjust the block layout to make good use
  // of the TB[BH] instructions.
  bool MadeChange = false;
  if (isThumb2 && AdjustJumpTableBlocks) {
    scanFunctionJumpTables();
    MadeChange |= reorderThumb2JumpTables();
    // Data is out of date, so clear it. It'll be re-computed later.
    T2JumpTables.clear();
    // Blocks may have shifted around. Keep the numbering up to date.
    MF->RenumberBlocks();
  }
  // Perform the initial placement of the constant pool entries. To start with,
  // we put them all at the end of the function.
  std::vector<MachineInstr*> CPEMIs;
  if (!MCP->isEmpty())
    doInitialConstPlacement(CPEMIs);

  if (MF->getJumpTableInfo())
    doInitialJumpTablePlacement(CPEMIs);

  /// The next UID to take is the first unused one.
  AFI->initPICLabelUId(CPEMIs.size());

  // Do the initial scan of the function, building up information about the
  // sizes of each block, the location of all the water, and finding all of the
  // constant pool users.
  initializeFunctionInfo(CPEMIs);
  CPEMIs.clear();
  DEBUG(dumpBBs());

  // Functions with jump tables need an alignment of 4 because they use the ADR
  // instruction, which aligns the PC to 4 bytes before adding an offset.
  if (!T2JumpTables.empty())
    MF->ensureAlignment(2);

  /// Remove dead constant pool entries.
  MadeChange |= removeUnusedCPEntries();
  // Iteratively place constant pool entries and fix up branches until there
  // is no change.
  unsigned NoCPIters = 0, NoBRIters = 0;
  while (true) {
    DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
    bool CPChange = false;
    for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
      CPChange |= handleConstantPoolUser(i);
    if (CPChange && ++NoCPIters > 30)
      report_fatal_error("Constant Island pass failed to converge!");
    DEBUG(dumpBBs());

    // Clear NewWaterList now. If we split a block for branches, it should
    // appear as "new water" for the next iteration of constant pool placement.
    NewWaterList.clear();

    DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
    bool BRChange = false;
    for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
      BRChange |= fixupImmediateBr(ImmBranches[i]);
    if (BRChange && ++NoBRIters > 30)
      report_fatal_error("Branch Fix Up pass failed to converge!");
    DEBUG(dumpBBs());

    if (!CPChange && !BRChange)
      break;
    MadeChange = true;
  }
  // Shrink 32-bit Thumb2 branch, load, and store instructions.
  if (isThumb2 && !STI->prefers32BitThumb())
    MadeChange |= optimizeThumb2Instructions();

  // After a while, this might be made debug-only, but it is not expensive.
  verify();

  // If LR has been forced spilled and no far jump (i.e. BL) has been issued,
  // undo the spill / restore of LR if possible.
  if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
    MadeChange |= undoLRSpillRestore();
  // Save the mapping between original and cloned constpool entries.
  for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
    for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
      const CPEntry & CPE = CPEntries[i][j];
      if (CPE.CPEMI && CPE.CPEMI->getOperand(1).isCPI())
        AFI->recordCPEClone(i, CPE.CPI);
    }
  }

  DEBUG(dbgs() << '\n'; dumpBBs());

  BBInfo.clear();
  WaterList.clear();
  CPUsers.clear();
  CPEntries.clear();
  JumpTableEntryIndices.clear();
  JumpTableUserIndices.clear();
  ImmBranches.clear();
  PushPopMIs.clear();
  T2JumpTables.clear();

  return MadeChange;
}
/// \brief Perform the initial placement of the regular constant pool entries.
/// To start with, we put them all at the end of the function.
void
ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs) {
  // Create the basic block to hold the CPE's.
  MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
  MF->push_back(BB);

  // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
  unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());

  // Mark the basic block as required by the const-pool.
  BB->setAlignment(MaxAlign);

  // The function needs to be as aligned as the basic blocks. The linker may
  // move functions around based on their alignment.
  MF->ensureAlignment(BB->getAlignment());

  // Order the entries in BB by descending alignment. That ensures correct
  // alignment of all entries as long as BB is sufficiently aligned. Keep
  // track of the insertion point for each alignment. We are going to bucket
  // sort the entries as they are created.
  SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());

  // Add all of the constants from the constant pool to the end block, use an
  // identity mapping of CPI's to CPE's.
  const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();

  const DataLayout &TD = MF->getDataLayout();
  for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
    unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
    assert(Size >= 4 && "Too small constant pool entry");
    unsigned Align = CPs[i].getAlignment();
    assert(isPowerOf2_32(Align) && "Invalid alignment");
    // Verify that all constant pool entries are a multiple of their alignment.
    // If not, we would have to pad them out so that instructions stay aligned.
    assert((Size % Align) == 0 && "CP Entry not multiple of 4 bytes!");

    // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
    unsigned LogAlign = Log2_32(Align);
    MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
    MachineInstr *CPEMI =
      BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
        .addImm(i).addConstantPoolIndex(i).addImm(Size);
    CPEMIs.push_back(CPEMI);

    // Ensure that future entries with higher alignment get inserted before
    // CPEMI. This is bucket sort with iterators.
    for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
      if (InsPoint[a] == InsAt)
        InsPoint[a] = CPEMI;

    // Add a new CPEntry, but no corresponding CPUser yet.
    CPEntries.emplace_back(1, CPEntry(CPEMI, i));
    ++NumCPEs;
    DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
                 << Size << ", align = " << Align <<'\n');
  }
  DEBUG(BB->dump());
}
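// For example (illustrative): entries with alignments 16, 4, 16 and 8 bytes
// end up laid out as 16, 16, 8, 4; each InsPoint[a] iterator remembers where
// the next entry of alignment 2^a must go so the descending order (and hence
// padding-free layout) is preserved.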
/// \brief Do initial placement of the jump tables. Because Thumb2's TBB and TBH
/// instructions can be made more efficient if the jump table immediately
/// follows the instruction, it's best to place them immediately next to their
/// jumps to begin with. In almost all cases they'll never be moved from that
/// position.
void ARMConstantIslands::doInitialJumpTablePlacement(
    std::vector<MachineInstr *> &CPEMIs) {
  unsigned i = CPEntries.size();
  auto MJTI = MF->getJumpTableInfo();
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();

  MachineBasicBlock *LastCorrectlyNumberedBB = nullptr;
  for (MachineBasicBlock &MBB : *MF) {
    auto MI = MBB.getLastNonDebugInstr();
    if (MI == MBB.end())
      continue;

    unsigned JTOpcode;
    switch (MI->getOpcode()) {
    default:
      continue;
    case ARM::BR_JTadd:
    case ARM::BR_JTr:
    case ARM::tBR_JTr:
    case ARM::BR_JTm:
      JTOpcode = ARM::JUMPTABLE_ADDRS;
      break;
    case ARM::t2BR_JT:
      JTOpcode = ARM::JUMPTABLE_INSTS;
      break;
    case ARM::t2TBB_JT:
      JTOpcode = ARM::JUMPTABLE_TBB;
      break;
    case ARM::t2TBH_JT:
      JTOpcode = ARM::JUMPTABLE_TBH;
      break;
    }

    unsigned NumOps = MI->getDesc().getNumOperands();
    MachineOperand JTOp =
      MI->getOperand(NumOps - (MI->isPredicable() ? 2 : 1));
    unsigned JTI = JTOp.getIndex();
    unsigned Size = JT[JTI].MBBs.size() * sizeof(uint32_t);
    MachineBasicBlock *JumpTableBB = MF->CreateMachineBasicBlock();
    MF->insert(std::next(MachineFunction::iterator(MBB)), JumpTableBB);
    MachineInstr *CPEMI = BuildMI(*JumpTableBB, JumpTableBB->begin(),
                                  DebugLoc(), TII->get(JTOpcode))
                              .addImm(i++)
                              .addJumpTableIndex(JTI)
                              .addImm(Size);
    CPEMIs.push_back(CPEMI);
    CPEntries.emplace_back(1, CPEntry(CPEMI, JTI));
    JumpTableEntryIndices.insert(std::make_pair(JTI, CPEntries.size() - 1));
    if (!LastCorrectlyNumberedBB)
      LastCorrectlyNumberedBB = &MBB;
  }

  // If we did anything then we need to renumber the subsequent blocks.
  if (LastCorrectlyNumberedBB)
    MF->RenumberBlocks(LastCorrectlyNumberedBB);
}
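// For example (illustrative): a t2TBB_JT dispatching a 3-case switch gets a
// JUMPTABLE_TBB pseudo of Size = 3 * sizeof(uint32_t) = 12 bytes here; this
// is conservative, since a TBB table really holds byte offsets and may later
// be shrunk by optimizeThumb2JumpTables().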
/// BBHasFallthrough - Return true if the specified basic block can fallthrough
/// into the block immediately after it.
bool ARMConstantIslands::BBHasFallthrough(MachineBasicBlock *MBB) {
  // Get the next machine basic block in the function.
  MachineFunction::iterator MBBI = MBB->getIterator();
  // Can't fall off end of function.
  if (std::next(MBBI) == MBB->getParent()->end())
    return false;

  MachineBasicBlock *NextBB = &*std::next(MBBI);
  if (std::find(MBB->succ_begin(), MBB->succ_end(), NextBB) == MBB->succ_end())
    return false;

  // Try to analyze the end of the block. A potential fallthrough may already
  // have an unconditional branch for whatever reason.
  MachineBasicBlock *TBB, *FBB;
  SmallVector<MachineOperand, 4> Cond;
  bool TooDifficult = TII->AnalyzeBranch(*MBB, TBB, FBB, Cond);
  return TooDifficult || FBB == nullptr;
}
/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
/// look up the corresponding CPEntry.
ARMConstantIslands::CPEntry
*ARMConstantIslands::findConstPoolEntry(unsigned CPI,
                                        const MachineInstr *CPEMI) {
  std::vector<CPEntry> &CPEs = CPEntries[CPI];
  // Number of entries per constpool index should be small, just do a
  // linear search.
  for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
    if (CPEs[i].CPEMI == CPEMI)
      return &CPEs[i];
  }
  return nullptr;
}
/// getCPELogAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI. Alignment is measured in log2(bytes) units.
unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
  switch (CPEMI->getOpcode()) {
  case ARM::CONSTPOOL_ENTRY:
    break;
  case ARM::JUMPTABLE_TBB:
    return 0;
  case ARM::JUMPTABLE_TBH:
  case ARM::JUMPTABLE_INSTS:
    return 1;
  case ARM::JUMPTABLE_ADDRS:
    return 2;
  default:
    llvm_unreachable("unknown constpool entry kind");
  }

  unsigned CPI = getCombinedIndex(CPEMI);
  assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
  unsigned Align = MCP->getConstants()[CPI].getAlignment();
  assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
  return Log2_32(Align);
}
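// For example (illustrative): a TBB table of byte offsets needs no alignment
// (log2 = 0), TBH and Thumb instruction tables need 2-byte alignment
// (log2 = 1), address tables need log2 = 2, and a CONSTPOOL_ENTRY holding a
// double with getAlignment() = 8 yields Log2_32(8) = 3.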
/// scanFunctionJumpTables - Do a scan of the function, building up
/// information about the sizes of each block and the locations of all
/// the jump tables.
void ARMConstantIslands::scanFunctionJumpTables() {
  for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I)
      if (I->isBranch() && I->getOpcode() == ARM::t2BR_JT)
        T2JumpTables.push_back(I);
  }
}
/// initializeFunctionInfo - Do the initial scan of the function, building up
/// information about the sizes of each block, the location of all the water,
/// and finding all of the constant pool users.
void ARMConstantIslands::
initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
  BBInfo.clear();
  BBInfo.resize(MF->getNumBlockIDs());

  // First thing, compute the size of all basic blocks, and see if the function
  // has any inline assembly in it. If so, we have to be conservative about
  // alignment assumptions, as we don't know for sure the size of any
  // instructions in the inline assembly.
  for (MachineBasicBlock &MBB : *MF)
    computeBlockSize(&MBB);

  // The known bits of the entry block offset are determined by the function
  // alignment.
  BBInfo.front().KnownBits = MF->getAlignment();

  // Compute block offsets and known bits.
  adjustBBOffsetsAfter(&MF->front());

  // Now go back through the instructions and build up our data structures.
  for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;

    // If this block doesn't fall through into the next MBB, then this is
    // 'water' where a constant pool island could be placed.
    if (!BBHasFallthrough(&MBB))
      WaterList.push_back(&MBB);

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      if (I->isDebugValue())
        continue;
      unsigned Opc = I->getOpcode();
      if (I->isBranch()) {
        bool isCond = false;
        unsigned Bits = 0;
        unsigned Scale = 1;
        int UOpc = Opc;
        switch (Opc) {
        default:
          continue;  // Ignore other JT branches
        case ARM::t2BR_JT:
          T2JumpTables.push_back(I);
          continue;   // Does not get an entry in ImmBranches
        case ARM::Bcc:
          isCond = true;
          UOpc = ARM::B;
          // Fallthrough
        case ARM::B:
          Bits = 24;
          Scale = 4;
          break;
        case ARM::tBcc:
          isCond = true;
          UOpc = ARM::tB;
          Bits = 8;
          Scale = 2;
          break;
        case ARM::tB:
          Bits = 11;
          Scale = 2;
          break;
        case ARM::t2Bcc:
          isCond = true;
          UOpc = ARM::t2B;
          Bits = 20;
          Scale = 2;
          break;
        case ARM::t2B:
          Bits = 24;
          Scale = 2;
          break;
        }

        // Record this immediate branch.
        unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
        ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
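        // For example (illustrative): a Thumb1 tB encodes an 11-bit signed
        // halfword offset, so MaxOffs = ((1 << 10) - 1) * 2 = 2046 bytes,
        // while an ARM B with Bits = 24, Scale = 4 reaches about +/-32MB.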
      }

      if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
        PushPopMIs.push_back(I);

      if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
          Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
          Opc == ARM::JUMPTABLE_TBH)
        continue;
      // Scan the instructions for constant pool operands.
      for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
        if (I->getOperand(op).isCPI() || I->getOperand(op).isJTI()) {
          // We found one. The addressing mode tells us the max displacement
          // from the PC that this instruction permits.

          // Basic size info comes from the TSFlags field.
          unsigned Bits = 0;
          unsigned Scale = 1;
          bool NegOk = false;
          bool IsSoImm = false;

          switch (Opc) {
          default:
            llvm_unreachable("Unknown addressing mode for CP reference!");

          // Taking the address of a CP entry.
          case ARM::LEApcrel:
          case ARM::LEApcrelJT:
            // This takes a SoImm, which is an 8-bit immediate rotated. We'll
            // pretend the maximum offset is 255 * 4. Since each instruction
            // is 4 bytes wide, this is always correct. We'll check for other
            // displacements that fit in a SoImm as well.
            Bits = 8;
            Scale = 4;
            NegOk = true;
            IsSoImm = true;
            break;
          case ARM::t2LEApcrel:
          case ARM::t2LEApcrelJT:
            Bits = 12;
            NegOk = true;
            break;
          case ARM::tLEApcrel:
          case ARM::tLEApcrelJT:
            Bits = 8;
            Scale = 4;
            break;

          case ARM::LDRBi12:
          case ARM::LDRi12:
          case ARM::LDRcp:
          case ARM::t2LDRpci:
            Bits = 12;  // +-offset_12
            NegOk = true;
            break;

          case ARM::tLDRpci:
            Bits = 8;
            Scale = 4;  // +(offset_8*4)
            break;

          case ARM::VLDRD:
          case ARM::VLDRS:
            Bits = 8;
            Scale = 4;  // +-(offset_8*4)
            NegOk = true;
            break;
          }

          // Remember that this is a user of a CP entry.
          unsigned CPI = I->getOperand(op).getIndex();
          if (I->getOperand(op).isJTI()) {
            JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
            CPI = JumpTableEntryIndices[CPI];
          }
          MachineInstr *CPEMI = CPEMIs[CPI];
          unsigned MaxOffs = ((1 << Bits)-1) * Scale;
          CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk, IsSoImm));

          // Increment corresponding CPEntry reference count.
          CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
          assert(CPE && "Cannot find a corresponding CPEntry!");
          CPE->RefCount++;

          // Instructions can only use one CP entry, don't bother scanning the
          // rest of the operands.
          break;
        }
    }
  }
}
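// For example (illustrative): a t2LDRpci user (Bits = 12, Scale = 1) gets
// MaxDisp = 4095 bytes and a tLDRpci user (Bits = 8, Scale = 4) gets
// 255 * 4 = 1020 bytes; CPUser::getMaxDisp() later subtracts the safety
// margin from these raw encodable ranges.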
/// computeBlockSize - Compute the size and some alignment information for MBB.
/// This function updates BBInfo directly.
void ARMConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
  BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
  BBI.Size = 0;
  BBI.Unalign = 0;
  BBI.PostAlign = 0;

  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
       ++I) {
    BBI.Size += TII->GetInstSizeInBytes(I);
    // For inline asm, GetInstSizeInBytes returns a conservative estimate.
    // The actual size may be smaller, but still a multiple of the instr size.
    if (I->isInlineAsm())
      BBI.Unalign = isThumb ? 1 : 2;
    // Also consider instructions that may be shrunk later.
    else if (isThumb && mayOptimizeThumb2Instruction(I))
      BBI.Unalign = 1;
  }

  // tBR_JTr contains a .align 2 directive.
  if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
    BBI.PostAlign = 2;
    MBB->getParent()->ensureAlignment(2);
  }
}
/// getOffsetOf - Return the current offset of the specified machine
/// instruction from the start of the function. This offset changes as stuff
/// is moved around inside the function.
unsigned ARMConstantIslands::getOffsetOf(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();

  // The offset is composed of two things: the sum of the sizes of all MBB's
  // before this instruction's block, and the offset from the start of the
  // block it is in.
  unsigned Offset = BBInfo[MBB->getNumber()].Offset;

  // Sum instructions before MI in MBB.
  for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
    assert(I != MBB->end() && "Didn't find MI in its own basic block?");
    Offset += TII->GetInstSizeInBytes(I);
  }
  return Offset;
}
/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
/// ID.
static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
                              const MachineBasicBlock *RHS) {
  return LHS->getNumber() < RHS->getNumber();
}
/// updateForInsertedWaterBlock - When a block is newly inserted into the
/// machine function, it upsets all of the block numbers. Renumber the blocks
/// and update the arrays that parallel this numbering.
void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
  // Renumber the MBB's to keep them consecutive.
  NewBB->getParent()->RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());

  // Next, update WaterList. Specifically, we need to add NewMBB as having
  // available water after it.
  water_iterator IP =
    std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
                     CompareMBBNumbers);
  WaterList.insert(IP, NewBB);
}
/// Split the basic block containing MI into two blocks, which are joined by
/// an unconditional branch. Update data structures and renumber blocks to
/// account for this change and returns the newly created block.
MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
  MachineBasicBlock *OrigBB = MI->getParent();

  // Create a new MBB for the code after the OrigBB.
  MachineBasicBlock *NewBB =
    MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
  MachineFunction::iterator MBBI = ++OrigBB->getIterator();
  MF->insert(MBBI, NewBB);

  // Splice the instructions starting with MI over to NewBB.
  NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());

  // Add an unconditional branch from OrigBB to NewBB.
  // Note the new unconditional branch is not being recorded.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond to anything in the source.
  unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
  if (!isThumb)
    BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
  else
    BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB)
      .addImm(ARMCC::AL).addReg(0);
  ++NumSplit;

  // Update the CFG. All succs of OrigBB are now succs of NewBB.
  NewBB->transferSuccessors(OrigBB);

  // OrigBB branches to NewBB.
  OrigBB->addSuccessor(NewBB);

  // Update internal data structures to account for the newly inserted MBB.
  // This is almost the same as updateForInsertedWaterBlock, except that
  // the Water goes after OrigBB, not NewBB.
  MF->RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());

  // Next, update WaterList. Specifically, we need to add OrigMBB as having
  // available water after it (but not if it's already there, which happens
  // when splitting before a conditional branch that is followed by an
  // unconditional branch - in that case we want to insert NewBB).
  water_iterator IP =
    std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
                     CompareMBBNumbers);
  MachineBasicBlock* WaterBB = *IP;
  if (WaterBB == OrigBB)
    WaterList.insert(std::next(IP), NewBB);
  else
    WaterList.insert(IP, OrigBB);
  NewWaterList.insert(OrigBB);

  // Figure out how large the OrigBB is. As the first half of the original
  // block, it cannot contain a tablejump. The size includes
  // the new jump we added. (It should be possible to do this without
  // recounting everything, but it's very confusing, and this is rarely
  // done.)
  computeBlockSize(OrigBB);

  // Figure out how large the NewMBB is. As the second half of the original
  // block, it may contain a tablejump.
  computeBlockSize(NewBB);

  // All BBOffsets following these blocks must be modified.
  adjustBBOffsetsAfter(OrigBB);

  return NewBB;
}
/// getUserOffset - Compute the offset of U.MI as seen by the hardware
/// displacement computation. Update U.KnownAlignment to match its current
/// basic block location.
unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
  unsigned UserOffset = getOffsetOf(U.MI);
  const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
  unsigned KnownBits = BBI.internalKnownBits();

  // The value read from PC is offset from the actual instruction address.
  UserOffset += (isThumb ? 4 : 8);

  // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
  // Make sure U.getMaxDisp() returns a constrained range.
  U.KnownAlignment = (KnownBits >= 2);

  // On Thumb, offsets == 2 mod 4 are rounded down by the hardware for
  // purposes of the displacement computation; compensate for that here.
  // For unknown alignments, getMaxDisp() constrains the range instead.
  if (isThumb && U.KnownAlignment)
    UserOffset &= ~3u;

  return UserOffset;
}
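// For example (illustrative): a Thumb user at raw offset 0x10e reads
// PC = 0x10e + 4 = 0x112, and with known (mod 4) alignment the hardware
// effectively uses 0x112 & ~3u = 0x110 as the displacement base.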
/// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
/// reference) is within MaxDisp of TrialOffset (a proposed location of a
/// constant pool entry).
/// UserOffset is computed by getUserOffset above to include PC adjustments. If
/// the mod 4 alignment of UserOffset is not known, the uncertainty must be
/// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
                                         unsigned TrialOffset, unsigned MaxDisp,
                                         bool NegativeOK, bool IsSoImm) {
  if (UserOffset <= TrialOffset) {
    // User before the Trial.
    if (TrialOffset - UserOffset <= MaxDisp)
      return true;
    // FIXME: Make use of the full range of soimm values.
  } else if (NegativeOK) {
    if (UserOffset - TrialOffset <= MaxDisp)
      return true;
    // FIXME: Make use of the full range of soimm values.
  }
  return false;
}
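// For example (illustrative): UserOffset = 0x200, TrialOffset = 0x1000 and
// MaxDisp = 4092 is in range going forward (0xe00 = 3584 <= 4092); the same
// distance backwards is only acceptable when NegativeOK is set.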
/// isWaterInRange - Returns true if a CPE placed after the specified
/// Water (a basic block) will be in range for the specific MI.
///
/// Compute how much the function will grow by inserting a CPE after Water.
bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
                                        MachineBasicBlock* Water, CPUser &U,
                                        unsigned &Growth) {
  unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
  unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
  unsigned NextBlockOffset, NextBlockAlignment;
  MachineFunction::const_iterator NextBlock = Water->getIterator();
  if (++NextBlock == MF->end()) {
    NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
    NextBlockAlignment = 0;
  } else {
    NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
    NextBlockAlignment = NextBlock->getAlignment();
  }
  unsigned Size = U.CPEMI->getOperand(2).getImm();
  unsigned CPEEnd = CPEOffset + Size;

  // The CPE may be able to hide in the alignment padding before the next
  // block. It may also cause more padding to be required if it is more aligned
  // than the next block.
  if (CPEEnd > NextBlockOffset) {
    Growth = CPEEnd - NextBlockOffset;
    // Compute the padding that would go at the end of the CPE to align the
    // next block.
    Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);

    // If the CPE is to be inserted before the instruction, that will raise
    // the offset of the instruction. Also account for unknown alignment padding
    // in blocks between CPE and the user.
    if (CPEOffset < UserOffset)
      UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
  } else
    // CPE fits in existing padding.
    Growth = 0;

  return isOffsetInRange(UserOffset, CPEOffset, U);
}
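// For example (illustrative): an 8-byte CPE ending at CPEEnd = 0x104 in
// front of an 8-byte-aligned block at 0x100 yields Growth = 4 plus
// OffsetToAlignment(0x104, 8) = 4 bytes of fresh padding, 8 bytes in total.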
/// isCPEntryInRange - Returns true if the distance between specific MI and
/// specific ConstPool entry instruction can fit in MI's displacement field.
bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
                                          MachineInstr *CPEMI, unsigned MaxDisp,
                                          bool NegOk, bool DoDump) {
  unsigned CPEOffset = getOffsetOf(CPEMI);

  if (DoDump) {
    DEBUG({
      unsigned Block = MI->getParent()->getNumber();
      const BasicBlockInfo &BBI = BBInfo[Block];
      dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
             << " max delta=" << MaxDisp
             << format(" insn address=%#x", UserOffset)
             << " in BB#" << Block << ": "
             << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
             << format("CPE address=%#x offset=%+d: ", CPEOffset,
                       int(CPEOffset-UserOffset));
    });
  }

  return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
}
/// BBIsJumpedOver - Return true if the specified basic block's only predecessor
/// unconditionally branches to its only successor.
static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
  if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
    return false;

  MachineBasicBlock *Succ = *MBB->succ_begin();
  MachineBasicBlock *Pred = *MBB->pred_begin();
  MachineInstr *PredMI = &Pred->back();
  if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
      || PredMI->getOpcode() == ARM::t2B)
    return PredMI->getOperand(0).getMBB() == Succ;
  return false;
}
void ARMConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
  unsigned BBNum = BB->getNumber();
  for (unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
    // Get the offset and known bits at the end of the layout predecessor.
    // Include the alignment of the current block.
    unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment();
    unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
    unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);

    // This is where block i begins. Stop if the offset is already correct,
    // and we have updated 2 blocks. This is the maximum number of blocks
    // changed before calling this function.
    if (i > BBNum + 2 &&
        BBInfo[i].Offset == Offset &&
        BBInfo[i].KnownBits == KnownBits)
      break;

    BBInfo[i].Offset = Offset;
    BBInfo[i].KnownBits = KnownBits;
  }
}
/// decrementCPEReferenceCount - find the constant pool entry with index CPI
/// and instruction CPEMI, and decrement its refcount. If the refcount
/// becomes 0 remove the entry and instruction. Returns true if we removed
/// the entry, false if we didn't.
bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
                                                    MachineInstr *CPEMI) {
  // Find the old entry. Eliminate it if it is no longer used.
  CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
  assert(CPE && "Unexpected!");
  if (--CPE->RefCount == 0) {
    removeDeadCPEMI(CPEMI);
    CPE->CPEMI = nullptr;
    return true;
  }
  return false;
}
unsigned ARMConstantIslands::getCombinedIndex(const MachineInstr *CPEMI) {
  if (CPEMI->getOperand(1).isCPI())
    return CPEMI->getOperand(1).getIndex();

  return JumpTableEntryIndices[CPEMI->getOperand(1).getIndex()];
}
/// findInRangeCPEntry - see if the currently referenced CPE is in range;
/// if not, see if an in-range clone of the CPE is in range, and if so,
/// change the data structures so the user references the clone. Returns:
/// 0 = no existing entry found
/// 1 = entry found, and there were no code insertions or deletions
/// 2 = entry found, and there were code insertions or deletions
int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
{
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;

  // Check to see if the CPE is already in-range.
  if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
                       true)) {
    DEBUG(dbgs() << "In range\n");
    return 1;
  }

  // No. Look for previously created clones of the CPE that are in range.
  unsigned CPI = getCombinedIndex(CPEMI);
  std::vector<CPEntry> &CPEs = CPEntries[CPI];
  for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
    // We already tried this one
    if (CPEs[i].CPEMI == CPEMI)
      continue;
    // Removing CPEs can leave empty entries, skip
    if (CPEs[i].CPEMI == nullptr)
      continue;
    if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
                         U.NegOk)) {
      DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
                   << CPEs[i].CPI << "\n");
      // Point the CPUser node to the replacement
      U.CPEMI = CPEs[i].CPEMI;
      // Change the CPI in the instruction operand to refer to the clone.
      for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
        if (UserMI->getOperand(j).isCPI()) {
          UserMI->getOperand(j).setIndex(CPEs[i].CPI);
          break;
        }
      // Adjust the refcount of the clone...
      CPEs[i].RefCount++;
      // ...and the original. If we didn't remove the old entry, none of the
      // addresses changed, so we don't need another pass.
      return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
    }
  }
  return 0;
}
/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
/// the specific unconditional branch instruction.
static inline unsigned getUnconditionalBrDisp(int Opc) {
  switch (Opc) {
  case ARM::tB:
    return ((1<<10)-1)*2;
  case ARM::t2B:
    return ((1<<23)-1)*2;
  default:
    break;
  }

  return ((1<<23)-1)*4;
}
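// For example (illustrative): tB encodes an 11-bit signed halfword offset,
// so it reaches ((1 << 10) - 1) * 2 = 2046 bytes; t2B and B extend this to
// roughly +/-16MB and +/-32MB respectively.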
/// findAvailableWater - Look for an existing entry in the WaterList in which
/// we can place the CPE referenced from U so it's within range of U's MI.
/// Returns true if found, false if not. If it returns true, WaterIter
/// is set to the WaterList entry. For Thumb, prefer water that will not
/// introduce padding to water that will. To ensure that this pass
/// terminates, the CPE location for a particular CPUser is only allowed to
/// move to a lower address, so search backward from the end of the list and
/// prefer the first water that is in range.
bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
                                            water_iterator &WaterIter) {
  if (WaterList.empty())
    return false;

  unsigned BestGrowth = ~0u;
  for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
       --IP) {
    MachineBasicBlock* WaterBB = *IP;
    // Check if water is in range and is either at a lower address than the
    // current "high water mark" or a new water block that was created since
    // the previous iteration by inserting an unconditional branch. In the
    // latter case, we want to allow resetting the high water mark back to
    // this new water since we haven't seen it before. Inserting branches
    // should be relatively uncommon and when it does happen, we want to be
    // sure to take advantage of it for all the CPEs near that block, so that
    // we don't insert more branches than necessary.
    unsigned Growth;
    if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
        (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
         NewWaterList.count(WaterBB) || WaterBB == U.MI->getParent()) &&
        Growth < BestGrowth) {
      // This is the least amount of required padding seen so far.
      BestGrowth = Growth;
      WaterIter = IP;
      DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber()
                   << " Growth=" << Growth << '\n');

      // Keep looking unless it is perfect.
      if (BestGrowth == 0)
        return true;
    }
    if (IP == B)
      break;
  }
  return BestGrowth != ~0u;
}
/// createNewWater - No existing WaterList entry will work for
/// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
/// block is used if in range, and the conditional branch munged so control
/// flow is correct. Otherwise the block is split to create a hole with an
/// unconditional branch around it. In either case NewMBB is set to a
/// block following which the new island can be inserted (the WaterList
/// is not adjusted).
void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
                                        unsigned UserOffset,
                                        MachineBasicBlock *&NewMBB) {
  CPUser &U = CPUsers[CPUserIndex];
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;
  unsigned CPELogAlign = getCPELogAlign(CPEMI);
  MachineBasicBlock *UserMBB = UserMI->getParent();
  const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];

  // If the block does not end in an unconditional branch already, and if the
  // end of the block is within range, make new water there. (The addition
  // below is for the unconditional branch we will be adding: 4 bytes on ARM +
  // Thumb2, 2 on Thumb1.)
  if (BBHasFallthrough(UserMBB)) {
    // Size of branch to insert.
    unsigned Delta = isThumb1 ? 2 : 4;
    // Compute the offset where the CPE will begin.
    unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;

    if (isOffsetInRange(UserOffset, CPEOffset, U)) {
      DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
            << format(", expected CPE offset %#x\n", CPEOffset));
      NewMBB = &*++UserMBB->getIterator();
      // Add an unconditional branch from UserMBB to fallthrough block. Record
      // it for branch lengthening; this new branch will not get out of range,
      // but if the preceding conditional branch is out of range, the targets
      // will be exchanged, and the altered branch may be out of range, so the
      // machinery has to know about it.
      int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
      if (!isThumb)
        BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
      else
        BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB)
          .addImm(ARMCC::AL).addReg(0);
      unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
      ImmBranches.push_back(ImmBranch(&UserMBB->back(),
                                      MaxDisp, false, UncondBr));
      computeBlockSize(UserMBB);
      adjustBBOffsetsAfter(UserMBB);
      return;
    }
  }
  // What a big block. Find a place within the block to split it. This is a
  // little tricky on Thumb1 since instructions are 2 bytes and constant pool
  // entries are 4 bytes: if instruction I references island CPE, and
  // instruction I+1 references CPE', it will not work well to put CPE as far
  // forward as possible, since then CPE' cannot immediately follow it (that
  // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
  // need to create a new island. So, we make a first guess, then walk through
  // the instructions between the one currently being looked at and the
  // possible insertion point, and make sure any other instructions that
  // reference CPEs will be able to use the same island area; if not, we back
  // up the insertion point.

  // Try to split the block so it's fully aligned. Compute the latest split
  // point where we can add a 4-byte branch instruction, and then align to
  // LogAlign which is the largest possible alignment in the function.
  unsigned LogAlign = MF->getAlignment();
  assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
  unsigned KnownBits = UserBBI.internalKnownBits();
  unsigned UPad = UnknownPadding(LogAlign, KnownBits);
  unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
  DEBUG(dbgs() << format("Split in middle of big block before %#x",
                         BaseInsertOffset));

  // The 4 in the following is for the unconditional branch we'll be inserting
  // (allows for long branch on Thumb1). Alignment of the island is handled
  // inside isOffsetInRange.
  BaseInsertOffset -= 4;

  DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
               << " la=" << LogAlign
               << " kb=" << KnownBits
               << " up=" << UPad << '\n');

  // This could point off the end of the block if we've already got constant
  // pool entries following this block; only the last one is in the water list.
  // Back past any possible branches (allow for a conditional and a maximally
  // long unconditional).
  if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
    // Ensure BaseInsertOffset is larger than the offset of the instruction
    // following UserMI so that the loop which searches for the split point
    // iterates at least once.
    BaseInsertOffset =
      std::max(UserBBI.postOffset() - UPad - 8,
               UserOffset + TII->GetInstSizeInBytes(UserMI) + 1);
    DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
  }
  unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
    CPEMI->getOperand(2).getImm();
  MachineBasicBlock::iterator MI = UserMI;
  ++MI;
  unsigned CPUIndex = CPUserIndex+1;
  unsigned NumCPUsers = CPUsers.size();
  MachineInstr *LastIT = nullptr;
  for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
       Offset < BaseInsertOffset;
       Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) {
    assert(MI != UserMBB->end() && "Fell off end of block");
    if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
      CPUser &U = CPUsers[CPUIndex];
      if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
        // Shift the insertion point by one unit of alignment so it is within
        // reach.
        BaseInsertOffset -= 1u << LogAlign;
        EndInsertOffset  -= 1u << LogAlign;
      }
      // This is overly conservative, as we don't account for CPEMIs being
      // reused within the block, but it doesn't matter much. Also assume CPEs
      // are added in order with alignment padding. We may eventually be able
      // to pack the aligned CPEs better.
      EndInsertOffset += U.CPEMI->getOperand(2).getImm();
      CPUIndex++;
    }

    // Remember the last IT instruction.
    if (MI->getOpcode() == ARM::t2IT)
      LastIT = MI;
  }

  --MI;

  // Avoid splitting an IT block.
  if (LastIT) {
    unsigned PredReg = 0;
    ARMCC::CondCodes CC = getITInstrPredicate(MI, PredReg);
    if (CC != ARMCC::AL)
      MI = LastIT;
  }

  // We really must not split an IT block.
  DEBUG(unsigned PredReg;
        assert(!isThumb || getITInstrPredicate(MI, PredReg) == ARMCC::AL));

  NewMBB = splitBlockBeforeInstr(MI);
}
/// handleConstantPoolUser - Analyze the specified user, checking to see if it
/// is out-of-range. If so, pick up the constant pool value and move it some
/// place in-range. Return true if we changed any addresses (thus must run
/// another pass of branch lengthening), false otherwise.
bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
  CPUser &U = CPUsers[CPUserIndex];
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;
  unsigned CPI = getCombinedIndex(CPEMI);
  unsigned Size = CPEMI->getOperand(2).getImm();
  // Compute this only once, it's expensive.
  unsigned UserOffset = getUserOffset(U);

  // See if the current entry is within range, or there is a clone of it
  // in range.
  int result = findInRangeCPEntry(U, UserOffset);
  if (result == 1) return false;
  else if (result == 2) return true;

  // No existing clone of this CPE is within range.
  // We will be generating a new clone. Get a UID for it.
  unsigned ID = AFI->createPICLabelUId();

  // Look for water where we can place this CPE.
  MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
  MachineBasicBlock *NewMBB;
  water_iterator IP;
  if (findAvailableWater(U, UserOffset, IP)) {
    DEBUG(dbgs() << "Found water in range\n");
    MachineBasicBlock *WaterBB = *IP;

    // If the original WaterList entry was "new water" on this iteration,
    // propagate that to the new island. This is just keeping NewWaterList
    // updated to match the WaterList, which will be updated below.
    if (NewWaterList.erase(WaterBB))
      NewWaterList.insert(NewIsland);

    // The new CPE goes before the following block (NewMBB).
    NewMBB = &*++WaterBB->getIterator();
  } else {
    DEBUG(dbgs() << "No water found\n");
    createNewWater(CPUserIndex, UserOffset, NewMBB);

    // splitBlockBeforeInstr adds to WaterList, which is important when it is
    // called while handling branches so that the water will be seen on the
    // next iteration for constant pools, but in this context, we don't want
    // it. Check for this so it will be removed from the WaterList.
    // Also remove any entry from NewWaterList.
    MachineBasicBlock *WaterBB = &*--NewMBB->getIterator();
    IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
    if (IP != WaterList.end())
      NewWaterList.erase(WaterBB);

    // We are adding new water. Update NewWaterList.
    NewWaterList.insert(NewIsland);
  }

  // Remove the original WaterList entry; we want subsequent insertions in
  // this vicinity to go after the one we're about to insert. This
  // considerably reduces the number of times we have to move the same CPE
  // more than once and is also important to ensure the algorithm terminates.
  if (IP != WaterList.end())
    WaterList.erase(IP);

  // Okay, we know we can put an island before NewMBB now, do it!
  MF->insert(NewMBB->getIterator(), NewIsland);

  // Update internal data structures to account for the newly inserted MBB.
  updateForInsertedWaterBlock(NewIsland);

  // Now that we have an island to add the CPE to, clone the original CPE and
  // add it to the island.
  U.HighWaterMark = NewIsland;
  U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc())
                .addImm(ID).addOperand(CPEMI->getOperand(1)).addImm(Size);
  CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
  ++NumCPEs;

  // Decrement the old entry, and remove it if refcount becomes 0.
  decrementCPEReferenceCount(CPI, CPEMI);

  // Mark the basic block as aligned as required by the const-pool entry.
  NewIsland->setAlignment(getCPELogAlign(U.CPEMI));

  // Increase the size of the island block to account for the new entry.
  BBInfo[NewIsland->getNumber()].Size += Size;
  adjustBBOffsetsAfter(&*--NewIsland->getIterator());

  // Finally, change the CPI in the instruction operand to be ID.
  for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
    if (UserMI->getOperand(i).isCPI()) {
      UserMI->getOperand(i).setIndex(ID);
      break;
    }

  DEBUG(dbgs() << "  Moved CPE to #" << ID << " CPI=" << CPI
        << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));

  return true;
}
/// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
/// sizes and offsets of impacted basic blocks.
void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
  MachineBasicBlock *CPEBB = CPEMI->getParent();
  unsigned Size = CPEMI->getOperand(2).getImm();
  CPEMI->eraseFromParent();
  BBInfo[CPEBB->getNumber()].Size -= Size;
  // All succeeding offsets have the current size value added in, fix this.
  if (CPEBB->empty()) {
    BBInfo[CPEBB->getNumber()].Size = 0;

    // This block no longer needs to be aligned.
    CPEBB->setAlignment(0);
  } else
    // Entries are sorted by descending alignment, so realign from the front.
    CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));

  adjustBBOffsetsAfter(CPEBB);
  // An island has only one predecessor BB and one successor BB. Check if
  // this BB's predecessor jumps directly to this BB's successor. This
  // shouldn't happen currently.
  assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
  // FIXME: remove the empty blocks after all the work is done?
}
/// removeUnusedCPEntries - Remove constant pool entries whose refcounts
/// are zero.
bool ARMConstantIslands::removeUnusedCPEntries() {
  bool MadeChange = false;
  for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
    std::vector<CPEntry> &CPEs = CPEntries[i];
    for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
      if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
        removeDeadCPEMI(CPEs[j].CPEMI);
        CPEs[j].CPEMI = nullptr;
        MadeChange = true;
      }
    }
  }
  return MadeChange;
}
/// isBBInRange - Returns true if the distance between the given MI and the
/// given BB fits in MI's displacement field.
bool ARMConstantIslands::isBBInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
                                     unsigned MaxDisp) {
  unsigned PCAdj      = isThumb ? 4 : 8;
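  // (The hardware reads PC as the branch address plus 8 in ARM state and
  // plus 4 in Thumb state, so displacements are measured from there.)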
  unsigned BrOffset   = getOffsetOf(MI) + PCAdj;
  unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;

  DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
               << " from BB#" << MI->getParent()->getNumber()
               << " max delta=" << MaxDisp
               << " from " << getOffsetOf(MI) << " to " << DestOffset
               << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
  if (BrOffset <= DestOffset) {
    // Branch before the Dest.
    if (DestOffset - BrOffset <= MaxDisp)
      return true;
  } else {
    if (BrOffset - DestOffset <= MaxDisp)
      return true;
  }
  return false;
}
/// fixupImmediateBr - Fix up an immediate branch whose destination is too far
/// away to fit in its displacement field.
bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();

  // Check to see if the DestBB is already in-range.
  if (isBBInRange(MI, DestBB, Br.MaxDisp))
    return false;

  if (!Br.isCond)
    return fixupUnconditionalBr(Br);
  return fixupConditionalBr(Br);
}
/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
/// too far away to fit in its displacement field. If the LR register has been
/// spilled in the epilogue, then we can use BL to implement a far jump.
/// Otherwise, add an intermediate branch instruction to a branch.
bool
ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *MBB = MI->getParent();
  if (!isThumb1)
    llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");

  // Use BL to implement far jump.
  Br.MaxDisp = (1 << 21) * 2;
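  // ((1 << 21) * 2 is 4 MiB: the reach of the 32-bit Thumb BL encoding that
  // tBfar expands to.)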
  MI->setDesc(TII->get(ARM::tBfar));
  BBInfo[MBB->getNumber()].Size += 2;
  adjustBBOffsetsAfter(MBB);
  HasFarJump = true;
  ++NumUBrFixed;

  DEBUG(dbgs() << "  Changed B to long jump " << *MI);

  return true;
}
/// fixupConditionalBr - Fix up a conditional branch whose destination is too
/// far away to fit in its displacement field. It is converted to an inverse
/// conditional branch + an unconditional branch to the destination.
bool
ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();

  // Add an unconditional branch to the destination and invert the branch
  // condition to jump over it:
  // blt L1
  // =>
  // bge L2
  // b   L1
  // L2:
  ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
  CC = ARMCC::getOppositeCondition(CC);
  unsigned CCReg = MI->getOperand(2).getReg();

  // If the branch is at the end of its MBB and that has a fall-through block,
  // direct the updated conditional branch to the fall-through block. Otherwise,
  // split the MBB before the next instruction.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *BMI = &MBB->back();
  bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);

  ++NumCBrFixed;
  if (BMI != MI) {
    if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
        BMI->getOpcode() == Br.UncondBr) {
      // Last MI in the BB is an unconditional branch. Can we simply invert the
      // condition and swap destinations:
      // beq L1
      // b   L2
      // =>
      // bne L2
      // b   L1
      MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
      if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
        DEBUG(dbgs() << "  Invert Bcc condition and swap its destination with "
                     << *BMI);
        BMI->getOperand(0).setMBB(DestBB);
        MI->getOperand(0).setMBB(NewDest);
        MI->getOperand(1).setImm(CC);
        return true;
      }
    }
  }

  if (NeedSplit) {
    splitBlockBeforeInstr(MI);
    // No need for the branch to the next block. We're adding an unconditional
    // branch to the destination.
    int delta = TII->GetInstSizeInBytes(&MBB->back());
    BBInfo[MBB->getNumber()].Size -= delta;
    MBB->back().eraseFromParent();
    // BBInfo[SplitBB].Offset is wrong temporarily, fixed below.
  }
  MachineBasicBlock *NextBB = &*++MBB->getIterator();

  DEBUG(dbgs() << "  Insert B to BB#" << DestBB->getNumber()
               << " also invert condition and change dest. to BB#"
               << NextBB->getNumber() << "\n");

  // Insert a new conditional branch and a new unconditional branch.
  // Also update the ImmBranch as well as adding a new entry for the new branch.
  BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
    .addMBB(NextBB).addImm(CC).addReg(CCReg);
  Br.MI = &MBB->back();
  BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
  if (isThumb)
    BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB)
            .addImm(ARMCC::AL).addReg(0);
  else
    BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
  BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
  unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
  ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));

  // Remove the old conditional branch. It may or may not still be in MBB.
  BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
  MI->eraseFromParent();
  adjustBBOffsetsAfter(MBB);
  return true;
}
/// undoLRSpillRestore - Remove Thumb push / pop instructions that only spill
/// LR / restore LR to pc. FIXME: This is done here because it's only possible
/// to do this if tBfar is not used.
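/// (E.g. an epilogue "pop {pc}" that exists only to support a potential far
/// jump is rewritten back to "bx lr" once we know no tBfar was needed.)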
bool ARMConstantIslands::undoLRSpillRestore() {
  bool MadeChange = false;
  for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
    MachineInstr *MI = PushPopMIs[i];
    // First two operands are predicates.
    if (MI->getOpcode() == ARM::tPOP_RET &&
        MI->getOperand(2).getReg() == ARM::PC &&
        MI->getNumExplicitOperands() == 3) {
      // Create the new insn and copy the predicate from the old.
      BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET))
        .addOperand(MI->getOperand(0))
        .addOperand(MI->getOperand(1));
      MI->eraseFromParent();
      MadeChange = true;
    }
  }
  return MadeChange;
}
// mayOptimizeThumb2Instruction - Returns true if optimizeThumb2Instructions
// below may shrink MI.
bool
ARMConstantIslands::mayOptimizeThumb2Instruction(const MachineInstr *MI) const {
  switch(MI->getOpcode()) {
    // optimizeThumb2Instructions.
    case ARM::t2LEApcrel:
    case ARM::t2LDRpci:
    // optimizeThumb2Branches.
    case ARM::t2B:
    case ARM::t2Bcc:
    case ARM::tBcc:
    // optimizeThumb2JumpTables.
    case ARM::t2BR_JT:
      return true;
  }
  return false;
}
bool ARMConstantIslands::optimizeThumb2Instructions() {
  bool MadeChange = false;

  // Shrink ADR and LDR from constantpool.
  for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
    CPUser &U = CPUsers[i];
    unsigned Opcode = U.MI->getOpcode();
    unsigned NewOpc = 0;
    unsigned Scale = 1;
    unsigned Bits = 0;
    switch (Opcode) {
    default: break;
    case ARM::t2LEApcrel:
      if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
        NewOpc = ARM::tLEApcrel;
        Bits = 8;
        Scale = 4;
      }
      break;
    case ARM::t2LDRpci:
      if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
        NewOpc = ARM::tLDRpci;
        Bits = 8;
        Scale = 4;
      }
      break;
    }

    if (!NewOpc)
      continue;

    unsigned UserOffset = getUserOffset(U);
    unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
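    // E.g. with Bits = 8 and Scale = 4 the 16-bit encodings reach
    // ((1 << 8) - 1) * 4 = 1020 bytes, versus 4095 bytes for the 32-bit
    // forms they replace.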

    // Be conservative with inline asm.
    if (!U.KnownAlignment)
      MaxOffs -= 2;

    // FIXME: Check if offset is multiple of scale if scale is not 4.
    if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
      DEBUG(dbgs() << "Shrink: " << *U.MI);
      U.MI->setDesc(TII->get(NewOpc));
      MachineBasicBlock *MBB = U.MI->getParent();
      BBInfo[MBB->getNumber()].Size -= 2;
      adjustBBOffsetsAfter(MBB);
      ++NumT2CPShrunk;
      MadeChange = true;
    }
  }

  MadeChange |= optimizeThumb2Branches();
  MadeChange |= optimizeThumb2JumpTables();
  return MadeChange;
}
bool ARMConstantIslands::optimizeThumb2Branches() {
  bool MadeChange = false;

  // The order in which branches appear in ImmBranches is approximately their
  // order within the function body. By visiting later branches first, we reduce
  // the distance between earlier forward branches and their targets, making it
  // more likely that the cbn?z optimization, which can only apply to forward
  // branches, will succeed.
  for (unsigned i = ImmBranches.size(); i != 0; --i) {
    ImmBranch &Br = ImmBranches[i-1];
    unsigned Opcode = Br.MI->getOpcode();
    unsigned NewOpc = 0;
    unsigned Scale = 1;
    unsigned Bits = 0;
    switch (Opcode) {
    default: break;
    case ARM::t2B:
      NewOpc = ARM::tB;
      Bits = 11;
      Scale = 2;
      break;
    case ARM::t2Bcc:
      NewOpc = ARM::tBcc;
      Bits = 8;
      Scale = 2;
      break;
    }
    if (NewOpc) {
      unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
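      // E.g. tB (Bits = 11) reaches ((1 << 10) - 1) * 2 = 2046 bytes and
      // tBcc (Bits = 8) reaches 254, far less than their 32-bit versions.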
      MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
      if (isBBInRange(Br.MI, DestBB, MaxOffs)) {
        DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
        Br.MI->setDesc(TII->get(NewOpc));
        MachineBasicBlock *MBB = Br.MI->getParent();
        BBInfo[MBB->getNumber()].Size -= 2;
        adjustBBOffsetsAfter(MBB);
        ++NumT2BrShrunk;
        MadeChange = true;
      }
    }
    Opcode = Br.MI->getOpcode();
    if (Opcode != ARM::tBcc)
      continue;

    // If the conditional branch doesn't kill CPSR, then CPSR can be liveout
    // so this transformation is not safe.
    if (!Br.MI->killsRegister(ARM::CPSR))
      continue;

    NewOpc = 0;
    unsigned PredReg = 0;
    ARMCC::CondCodes Pred = getInstrPredicate(Br.MI, PredReg);
    if (Pred == ARMCC::EQ)
      NewOpc = ARM::tCBZ;
    else if (Pred == ARMCC::NE)
      NewOpc = ARM::tCBNZ;
    if (!NewOpc)
      continue;
    MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
    // Check if the distance is within 126. Subtract starting offset by 2
    // because the cmp will be eliminated.
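    // (CBZ/CBNZ encode a 6-bit unsigned offset scaled by 2, so only forward
    // branches of at most 126 bytes can be formed.)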
    unsigned BrOffset = getOffsetOf(Br.MI) + 4 - 2;
    unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
    if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
      MachineBasicBlock::iterator CmpMI = Br.MI;
      if (CmpMI != Br.MI->getParent()->begin()) {
        --CmpMI;
        if (CmpMI->getOpcode() == ARM::tCMPi8) {
          unsigned Reg = CmpMI->getOperand(0).getReg();
          Pred = getInstrPredicate(CmpMI, PredReg);
          if (Pred == ARMCC::AL &&
              CmpMI->getOperand(1).getImm() == 0 &&
              isARMLowRegister(Reg)) {
            MachineBasicBlock *MBB = Br.MI->getParent();
            DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
            MachineInstr *NewBR =
              BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
              .addReg(Reg).addMBB(DestBB,Br.MI->getOperand(0).getTargetFlags());
            CmpMI->eraseFromParent();
            Br.MI->eraseFromParent();
            Br.MI = NewBR;
            BBInfo[MBB->getNumber()].Size -= 2;
            adjustBBOffsetsAfter(MBB);
            ++NumCBZ;
            MadeChange = true;
          }
        }
      }
    }
  }

  return MadeChange;
}
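// Matches the add-with-shifted-register (t2ADDrs) that combines a scaled
// jump-table index with the table base; anything else is rejected so that
// callers stay conservative.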
static bool isSimpleIndexCalc(MachineInstr &I, unsigned EntryReg,
                              unsigned BaseReg) {
  if (I.getOpcode() != ARM::t2ADDrs)
    return false;

  if (I.getOperand(0).getReg() != EntryReg)
    return false;

  if (I.getOperand(1).getReg() != BaseReg)
    return false;

  // FIXME: what about CC and IdxReg?
  return true;
}
/// \brief While trying to form a TBB/TBH instruction, we may (if the table
/// doesn't immediately follow the BR_JT) need access to the start of the
/// jump-table. We know one instruction that produces such a register; this
/// function works out whether that definition can be preserved to the BR_JT,
/// possibly by removing an intervening addition (which is usually needed to
/// calculate the actual entry to jump to).
bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI,
                                              MachineInstr *LEAMI,
                                              unsigned &DeadSize,
                                              bool &CanDeleteLEA,
                                              bool &BaseRegKill) {
  if (JumpMI->getParent() != LEAMI->getParent())
    return false;

  // Now we hope that we have at least these instructions in the basic block:
  //     BaseReg = t2LEA ...
  //     ...
  //     EntryReg = t2ADDrs BaseReg, ...
  //     ...
  //     t2BR_JT EntryReg
  //
  // We have to be very conservative about what we recognise here though. The
  // main perturbing factors to watch out for are:
  //    + Spills at any point in the chain: not direct problems but we would
  //      expect a blocking Def of the spilled register so in practice what we
  //      can do is limited.
  //    + EntryReg == BaseReg: this is the one situation we should allow a Def
  //      of BaseReg, but only if the t2ADDrs can be removed.
  //    + Some instruction other than t2ADDrs computing the entry. Not seen in
  //      the wild, but we should be careful.
  unsigned EntryReg = JumpMI->getOperand(0).getReg();
  unsigned BaseReg = LEAMI->getOperand(0).getReg();

  CanDeleteLEA = true;
  BaseRegKill = false;
  MachineInstr *RemovableAdd = nullptr;
  MachineBasicBlock::iterator I(LEAMI);
  for (++I; &*I != JumpMI; ++I) {
    if (isSimpleIndexCalc(*I, EntryReg, BaseReg)) {
      RemovableAdd = &*I;
      break;
    }
    for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
      const MachineOperand &MO = I->getOperand(K);
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (MO.isDef() && MO.getReg() == BaseReg)
        return false;
      if (MO.isUse() && MO.getReg() == BaseReg) {
        BaseRegKill = BaseRegKill || MO.isKill();
        CanDeleteLEA = false;
      }
    }
  }

  if (!RemovableAdd)
    return true;
  // Check the add really is removable, and that nothing else in the block
  // clobbers BaseReg.
  for (++I; &*I != JumpMI; ++I) {
    for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
      const MachineOperand &MO = I->getOperand(K);
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (MO.isDef() && MO.getReg() == BaseReg)
        return false;
      if (MO.isUse() && MO.getReg() == EntryReg)
        RemovableAdd = nullptr;
    }
  }

  if (RemovableAdd) {
    RemovableAdd->eraseFromParent();
    DeadSize += 4;
  } else if (BaseReg == EntryReg) {
    // The add wasn't removable, but clobbered the base for the TBB. So we
    // can't preserve it.
    return false;
  }

  // We reached the end of the block without seeing another definition of
  // BaseReg (except, possibly the t2ADDrs, which was removed). BaseReg can be
  // used in the TBB/TBH if necessary.
  return true;
}
/// \brief Returns whether CPEMI is the first instruction in the block
/// immediately following JTMI (assumed to be a TBB or TBH terminator). If so,
/// we can switch the first register to PC and usually remove the address
/// calculation that preceded it.
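/// (This works because a 4-byte Thumb2 TBB/TBH reads pc as its own address
/// plus 4, which is exactly where an immediately following table starts.)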
static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
  MachineFunction::iterator MBB = JTMI->getParent()->getIterator();
  MachineFunction *MF = MBB->getParent();
  ++MBB;

  return MBB != MF->end() && MBB->begin() != MBB->end() &&
         &*MBB->begin() == CPEMI;
}
/// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
/// jumptables when possible.
bool ARMConstantIslands::optimizeThumb2JumpTables() {
  bool MadeChange = false;

  // FIXME: After the tables are shrunk, can we get rid of some of the
  // constantpool tables?
  MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  if (!MJTI) return false;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
    MachineInstr *MI = T2JumpTables[i];
    const MCInstrDesc &MCID = MI->getDesc();
    unsigned NumOps = MCID.getNumOperands();
    unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
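    // (The jump-table index is the last operand before any trailing
    // predicate operands, so step back past those when present.)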
    MachineOperand JTOP = MI->getOperand(JTOpIdx);
    unsigned JTI = JTOP.getIndex();
    assert(JTI < JT.size());

    bool ByteOk = true;
    bool HalfWordOk = true;
    unsigned JTOffset = getOffsetOf(MI) + 4;
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
    for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
      MachineBasicBlock *MBB = JTBBs[j];
      unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
      // Negative offset is not ok. FIXME: We should change BB layout to make
      // sure all the branches are forward.
      if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
        ByteOk = false;
      unsigned TBHLimit = ((1<<16)-1)*2;
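      // (A TBB entry is a byte scaled by 2, reaching at most 510 bytes past
      // the table base; a TBH halfword entry reaches 131070 bytes.)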
      if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
        HalfWordOk = false;
      if (!ByteOk && !HalfWordOk)
        break;
    }

    if (!ByteOk && !HalfWordOk)
      continue;
    MachineBasicBlock *MBB = MI->getParent();
    if (!MI->getOperand(0).isKill()) // FIXME: needed now?
      continue;
    unsigned IdxReg = MI->getOperand(1).getReg();
    bool IdxRegKill = MI->getOperand(1).isKill();

    CPUser &User = CPUsers[JumpTableUserIndices[JTI]];
    unsigned DeadSize = 0;
    bool CanDeleteLEA = false;
    bool BaseRegKill = false;
    bool PreservedBaseReg =
        preserveBaseRegister(MI, User.MI, DeadSize, CanDeleteLEA, BaseRegKill);
    if (!jumpTableFollowsTB(MI, User.CPEMI) && !PreservedBaseReg)
      continue;

    DEBUG(dbgs() << "Shrink JT: " << *MI);
    MachineInstr *CPEMI = User.CPEMI;
    unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
    MachineBasicBlock::iterator MI_JT = MI;
    MachineInstr *NewJTMI =
        BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
            .addReg(User.MI->getOperand(0).getReg(),
                    getKillRegState(BaseRegKill))
            .addReg(IdxReg, getKillRegState(IdxRegKill))
            .addJumpTableIndex(JTI, JTOP.getTargetFlags())
            .addImm(CPEMI->getOperand(0).getImm());
    DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI);
    unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
    CPEMI->setDesc(TII->get(JTOpc));

    if (jumpTableFollowsTB(MI, User.CPEMI)) {
      NewJTMI->getOperand(0).setReg(ARM::PC);
      NewJTMI->getOperand(0).setIsKill(false);

      if (CanDeleteLEA) {
        User.MI->eraseFromParent();
        DeadSize += 4;

        // The LEA was eliminated, the TBB instruction becomes the only new
        // user of the jump table.
        User.MI = NewJTMI;
        User.MaxDisp = 4;
        User.NegOk = false;
        User.IsSoImm = false;
        User.KnownAlignment = false;
      } else {
        // The LEA couldn't be eliminated, so we must add another CPUser to
        // record the TBB or TBH use.
        int CPEntryIdx = JumpTableEntryIndices[JTI];
        auto &CPEs = CPEntries[CPEntryIdx];
        auto Entry = std::find_if(CPEs.begin(), CPEs.end(), [&](CPEntry &E) {
          return E.CPEMI == User.CPEMI;
        });
        ++Entry->RefCount;
        CPUsers.emplace_back(CPUser(NewJTMI, User.CPEMI, 4, false, false));
      }
    }
    unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
    unsigned OrigSize = TII->GetInstSizeInBytes(MI);
    MI->eraseFromParent();

    int Delta = OrigSize - NewSize + DeadSize;
    BBInfo[MBB->getNumber()].Size -= Delta;
    adjustBBOffsetsAfter(MBB);

    ++NumTBs;
    MadeChange = true;
  }

  return MadeChange;
}
/// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
/// jump tables always branch forwards, since that's what tbb and tbh need.
bool ARMConstantIslands::reorderThumb2JumpTables() {
  bool MadeChange = false;

  MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  if (!MJTI) return false;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
    MachineInstr *MI = T2JumpTables[i];
    const MCInstrDesc &MCID = MI->getDesc();
    unsigned NumOps = MCID.getNumOperands();
    unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
    MachineOperand JTOP = MI->getOperand(JTOpIdx);
    unsigned JTI = JTOP.getIndex();
    assert(JTI < JT.size());

    // We prefer if target blocks for the jump table come after the jump
    // instruction so we can use TB[BH]. Loop through the target blocks
    // and try to adjust them so that this is true.
    int JTNumber = MI->getParent()->getNumber();
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
    for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
      MachineBasicBlock *MBB = JTBBs[j];
      int DTNumber = MBB->getNumber();

      if (DTNumber < JTNumber) {
        // The destination precedes the switch. Try to move the block forward
        // so we have a positive offset.
        MachineBasicBlock *NewBB =
          adjustJTTargetBlockForward(MBB, MI->getParent());
        if (NewBB)
          MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}
MachineBasicBlock *ARMConstantIslands::
adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
  // If the destination block is terminated by an unconditional branch,
  // try to move it; otherwise, create a new block following the jump
  // table that branches back to the actual target. This is a very simple
  // heuristic. FIXME: We can definitely improve it.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  SmallVector<MachineOperand, 4> CondPrior;
  MachineFunction::iterator BBi = BB->getIterator();
  MachineFunction::iterator OldPrior = std::prev(BBi);

  // If the block terminator isn't analyzable, don't try to move the block.
  bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond);

  // If the block ends in an unconditional branch, move it. The prior block
  // has to have an analyzable terminator for us to move this one. Be paranoid
  // and make sure we're not trying to move the entry block of the function.
  if (!B && Cond.empty() && BB != &MF->front() &&
      !TII->AnalyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
    BB->moveAfter(JTBB);
    OldPrior->updateTerminator();
    BB->updateTerminator();
    // Update numbering to account for the block being moved.
    MF->RenumberBlocks();
    ++NumJTMoved;
    return nullptr;
  }
  // Create a new MBB for the code after the jump BB.
  MachineBasicBlock *NewBB =
    MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
  MachineFunction::iterator MBBI = ++JTBB->getIterator();
  MF->insert(MBBI, NewBB);

  // Add an unconditional branch from NewBB to BB.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond directly to anything in the source.
  assert(isThumb2 && "Adjusting for TB[BH] but not in Thumb2?");
  BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B)).addMBB(BB)
      .addImm(ARMCC::AL).addReg(0);

  // Update internal data structures to account for the newly inserted MBB.
  MF->RenumberBlocks(NewBB);
  // Update the CFG.
  NewBB->addSuccessor(BB);
  JTBB->removeSuccessor(BB);
  JTBB->addSuccessor(NewBB);

  ++NumJTInserted;
  return NewBB;
}