1 //===-- ARMConstantIslandPass.cpp - ARM constant islands ------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains a pass that splits the constant pool up into 'islands'
11 // which are scattered throughout the function. This is required due to the
12 // limited pc-relative displacements that ARM has.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "arm-cp-islands"
18 #include "ARMMachineFunctionInfo.h"
19 #include "Thumb2InstrInfo.h"
20 #include "MCTargetDesc/ARMAddressingModes.h"
21 #include "llvm/CodeGen/MachineConstantPool.h"
22 #include "llvm/CodeGen/MachineFunctionPass.h"
23 #include "llvm/CodeGen/MachineJumpTableInfo.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/Target/TargetData.h"
26 #include "llvm/Target/TargetMachine.h"
27 #include "llvm/Support/Debug.h"
28 #include "llvm/Support/ErrorHandling.h"
29 #include "llvm/Support/Format.h"
30 #include "llvm/Support/raw_ostream.h"
31 #include "llvm/ADT/SmallSet.h"
32 #include "llvm/ADT/SmallVector.h"
33 #include "llvm/ADT/STLExtras.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/Support/CommandLine.h"
39 STATISTIC(NumCPEs, "Number of constpool entries");
40 STATISTIC(NumSplit, "Number of uncond branches inserted");
41 STATISTIC(NumCBrFixed, "Number of cond branches fixed");
42 STATISTIC(NumUBrFixed, "Number of uncond branches fixed");
43 STATISTIC(NumTBs, "Number of table branches generated");
44 STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
45 STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
46 STATISTIC(NumCBZ, "Number of CBZ / CBNZ formed");
47 STATISTIC(NumJTMoved, "Number of jump table destination blocks moved");
48 STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
52 AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
53 cl::desc("Adjust basic block layout to better use TB[BH]"));
55 // FIXME: This option should be removed once it has received sufficient testing.
57 AlignConstantIslands("arm-align-constant-islands", cl::Hidden, cl::init(true),
58 cl::desc("Align constant islands in code"));
60 /// UnknownPadding - Return the worst case padding that could result from
61 /// unknown offset bits. This does not include alignment padding caused by
62 /// known offset bits.
64 /// @param LogAlign log2(alignment)
65 /// @param KnownBits Number of known low offset bits.
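/// For example (illustrative): with LogAlign = 3 and KnownBits = 1, the
/// unknown bits could force up to (1 << 3) - (1 << 1) = 6 bytes of padding;
/// when KnownBits >= LogAlign, no extra padding is possible.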
66 static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
67 if (KnownBits < LogAlign)
68 return (1u << LogAlign) - (1u << KnownBits);
72 /// WorstCaseAlign - Assuming only the low KnownBits bits in Offset are exact,
73 /// add padding such that:
75 /// 1. The result is aligned to 1 << LogAlign.
77 /// 2. No other value of the unknown bits would require more padding.
79 /// This may add more padding than is required to satisfy just one of the
80 /// constraints. It is necessary to compute alignment this way to guarantee
81 /// that we don't underestimate the padding before an aligned block. If the
82 /// real padding before a block is larger than we think, constant pool entries
83 /// may go out of range.
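/// For example (illustrative): WorstCaseAlign(0x11, 3, 1) first adds the
/// worst-case unknown padding of 6 bytes (0x11 + 6 = 0x17) and then rounds up
/// to the next 8-byte boundary, yielding 0x18.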
84 static inline unsigned WorstCaseAlign(unsigned Offset, unsigned LogAlign,
86 // Add the worst possible padding that the unknown bits could cause.
87 Offset += UnknownPadding(LogAlign, KnownBits);
89 // Then align the result.
90 return RoundUpToAlignment(Offset, 1u << LogAlign);
94 /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
95 /// requires constant pool entries to be scattered among the instructions
96 /// inside a function. To do this, it completely ignores the normal LLVM
97 /// constant pool; instead, it places constants wherever it feels like with
98 /// special instructions.
100 /// The terminology used in this pass includes:
101 /// Islands - Clumps of constants placed in the function.
102 /// Water - Potential places where an island could be formed.
103 /// CPE - A constant pool entry that has been placed somewhere, which
104 /// tracks a list of users.
105 class ARMConstantIslands : public MachineFunctionPass {
106 /// BasicBlockInfo - Information about the offset and size of a single
108 struct BasicBlockInfo {
109 /// Offset - Distance from the beginning of the function to the beginning
110 /// of this basic block.
112 /// The offset is always aligned as required by the basic block.
115 /// Size - Size of the basic block in bytes. If the block contains
116 /// inline assembly, this is a worst case estimate.
118 /// The size does not include any alignment padding whether from the
119 /// beginning of the block, or from an aligned jump table at the end.
122 /// KnownBits - The number of low bits in Offset that are known to be
123 /// exact. The remaining bits of Offset are an upper bound.
126 /// Unalign - When non-zero, the block contains instructions (inline asm)
127 /// of unknown size. The real size may be smaller than Size bytes by a
128 /// multiple of 1 << Unalign.
131 /// PostAlign - When non-zero, the block terminator contains a .align
132 /// directive, so the end of the block is aligned to 1 << PostAlign bytes.
136 BasicBlockInfo() : Offset(0), Size(0), KnownBits(0), Unalign(0),
139 /// Compute the number of known offset bits internally to this block.
140 /// This number should be used to predict worst case padding when
141 /// splitting the block.
142 unsigned internalKnownBits() const {
143 return Unalign ? Unalign : KnownBits;
146 /// Compute the offset immediately following this block. If LogAlign is
147 /// specified, return the offset the successor block will get if it has this alignment.
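/// For example (illustrative): a block at Offset 0x100 with Size 0x22,
/// PostAlign 0, and all low offset bits known has postOffset(2) = 0x124,
/// i.e. the raw end offset 0x122 rounded up to a 4-byte boundary.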
149 unsigned postOffset(unsigned LogAlign = 0) const {
150 unsigned PO = Offset + Size;
151 unsigned LA = std::max(unsigned(PostAlign), LogAlign);
154 // Add alignment padding from the terminator.
155 return WorstCaseAlign(PO, LA, internalKnownBits());
158 /// Compute the number of known low bits of postOffset. If this block
159 /// contains inline asm, the number of known bits drops to the
160 /// instruction alignment. An aligned terminator may increase the number of known bits.
162 /// If LogAlign is given, also consider the alignment of the next block.
163 unsigned postKnownBits(unsigned LogAlign = 0) const {
164 return std::max(std::max(unsigned(PostAlign), LogAlign),
165 internalKnownBits());
169 std::vector<BasicBlockInfo> BBInfo;
171 /// WaterList - A sorted list of basic blocks where islands could be placed
172 /// (i.e. blocks that don't fall through to the following block, due
173 /// to a return, unreachable, or unconditional branch).
174 std::vector<MachineBasicBlock*> WaterList;
176 /// NewWaterList - The subset of WaterList that was created since the
177 /// previous iteration by inserting unconditional branches.
178 SmallSet<MachineBasicBlock*, 4> NewWaterList;
180 typedef std::vector<MachineBasicBlock*>::iterator water_iterator;
182 /// CPUser - One user of a constant pool, keeping the machine instruction
183 /// pointer, the constant pool being referenced, and the max displacement
184 /// allowed from the instruction to the CP. The HighWaterMark records the
185 /// highest basic block where a new CPEntry can be placed. To ensure this
186 /// pass terminates, the CP entries are initially placed at the end of the
187 /// function and then move monotonically to lower addresses. The
188 /// exception to this rule is when the current CP entry for a particular
189 /// CPUser is out of range, but there is another CP entry for the same
190 /// constant value in range. We want to use the existing in-range CP
191 /// entry, but if it later moves out of range, the search for new water
192 /// should resume where it left off. The HighWaterMark is used to record that point.
197 MachineBasicBlock *HighWaterMark;
204 CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
205 bool neg, bool soimm)
206 : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm),
207 KnownAlignment(false) {
208 HighWaterMark = CPEMI->getParent();
210 /// getMaxDisp - Returns the maximum displacement supported by MI.
211 /// Correct for unknown alignment.
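/// For example (illustrative): a user with a 12-bit offset field has
/// MaxDisp = 4095; when its alignment is not known to be a multiple of 4,
/// getMaxDisp() conservatively returns 4093.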
212 unsigned getMaxDisp() const {
213 return KnownAlignment ? MaxDisp : MaxDisp - 2;
217 /// CPUsers - Keep track of all of the machine instructions that use various
218 /// constant pools and their max displacement.
219 std::vector<CPUser> CPUsers;
221 /// CPEntry - One per constant pool entry, keeping the machine instruction
222 /// pointer, the constpool index, and the number of CPUser's which
223 /// reference this entry.
228 CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
229 : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
232 /// CPEntries - Keep track of all of the constant pool entry machine
233 /// instructions. For each original constpool index (i.e. those that
234 /// existed upon entry to this pass), it keeps a vector of entries.
235 /// Original elements are cloned as we go along; the clones are
236 /// put in the vector of the original element, but have distinct CPIs.
237 std::vector<std::vector<CPEntry> > CPEntries;
239 /// ImmBranch - One per immediate branch, keeping the machine instruction
240 /// pointer, conditional or unconditional, the max displacement,
241 /// and (if isCond is true) the corresponding unconditional branch opcode.
245 unsigned MaxDisp : 31;
248 ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, int ubr)
249 : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
252 /// ImmBranches - Keep track of all the immediate branch instructions.
254 std::vector<ImmBranch> ImmBranches;
256 /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
258 SmallVector<MachineInstr*, 4> PushPopMIs;
260 /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
261 SmallVector<MachineInstr*, 4> T2JumpTables;
263 /// HasFarJump - True if any far jump instruction has been emitted during
264 /// the branch fix up pass.
268 MachineConstantPool *MCP;
269 const ARMBaseInstrInfo *TII;
270 const ARMSubtarget *STI;
271 ARMFunctionInfo *AFI;
277 ARMConstantIslands() : MachineFunctionPass(ID) {}
279 virtual bool runOnMachineFunction(MachineFunction &MF);
281 virtual const char *getPassName() const {
282 return "ARM constant island placement and branch shortening pass";
286 void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
287 CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
288 unsigned getCPELogAlign(const MachineInstr *CPEMI);
289 void scanFunctionJumpTables();
290 void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
291 MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
292 void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
293 void adjustBBOffsetsAfter(MachineBasicBlock *BB);
294 bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
295 int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
296 bool findAvailableWater(CPUser&U, unsigned UserOffset,
297 water_iterator &WaterIter);
298 void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
299 MachineBasicBlock *&NewMBB);
300 bool handleConstantPoolUser(unsigned CPUserIndex);
301 void removeDeadCPEMI(MachineInstr *CPEMI);
302 bool removeUnusedCPEntries();
303 bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
304 MachineInstr *CPEMI, unsigned Disp, bool NegOk,
305 bool DoDump = false);
306 bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
307 CPUser &U, unsigned &Growth);
308 bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
309 bool fixupImmediateBr(ImmBranch &Br);
310 bool fixupConditionalBr(ImmBranch &Br);
311 bool fixupUnconditionalBr(ImmBranch &Br);
312 bool undoLRSpillRestore();
313 bool mayOptimizeThumb2Instruction(const MachineInstr *MI) const;
314 bool optimizeThumb2Instructions();
315 bool optimizeThumb2Branches();
316 bool reorderThumb2JumpTables();
317 bool optimizeThumb2JumpTables();
318 MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
319 MachineBasicBlock *JTBB);
321 void computeBlockSize(MachineBasicBlock *MBB);
322 unsigned getOffsetOf(MachineInstr *MI) const;
323 unsigned getUserOffset(CPUser&) const;
327 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
328 unsigned Disp, bool NegativeOK, bool IsSoImm = false);
329 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
331 return isOffsetInRange(UserOffset, TrialOffset,
332 U.getMaxDisp(), U.NegOk, U.IsSoImm);
335 char ARMConstantIslands::ID = 0;
338 /// verify - check BBOffsets, BBSizes, alignment of islands
339 void ARMConstantIslands::verify() {
341 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
343 MachineBasicBlock *MBB = MBBI;
344 unsigned Align = MBB->getAlignment();
345 unsigned MBBId = MBB->getNumber();
346 assert(BBInfo[MBBId].Offset % (1u << Align) == 0);
347 assert(!MBBId || BBInfo[MBBId - 1].postOffset() <= BBInfo[MBBId].Offset);
349 DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
350 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
351 CPUser &U = CPUsers[i];
352 unsigned UserOffset = getUserOffset(U);
353 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp(), U.NegOk,
354 /* DoDump = */ true)) {
355 DEBUG(dbgs() << "OK\n");
358 DEBUG(dbgs() << "Out of range.\n");
361 llvm_unreachable("Constant pool entry out of range!");
366 /// print block size and offset information - debugging
367 void ARMConstantIslands::dumpBBs() {
369 for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
370 const BasicBlockInfo &BBI = BBInfo[J];
371 dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
372 << " kb=" << unsigned(BBI.KnownBits)
373 << " ua=" << unsigned(BBI.Unalign)
374 << " pa=" << unsigned(BBI.PostAlign)
375 << format(" size=%#x\n", BBInfo[J].Size);
380 /// createARMConstantIslandPass - returns an instance of the constpool island pass.
382 FunctionPass *llvm::createARMConstantIslandPass() {
383 return new ARMConstantIslands();
386 bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
388 MCP = mf.getConstantPool();
390 DEBUG(dbgs() << "***** ARMConstantIslands: "
391 << MCP->getConstants().size() << " CP entries, aligned to "
392 << MCP->getConstantPoolAlignment() << " bytes *****\n");
394 TII = (const ARMBaseInstrInfo*)MF->getTarget().getInstrInfo();
395 AFI = MF->getInfo<ARMFunctionInfo>();
396 STI = &MF->getTarget().getSubtarget<ARMSubtarget>();
398 isThumb = AFI->isThumbFunction();
399 isThumb1 = AFI->isThumb1OnlyFunction();
400 isThumb2 = AFI->isThumb2Function();
404 // This pass invalidates liveness information when it splits basic blocks.
405 MF->getRegInfo().invalidateLiveness();
407 // Renumber all of the machine basic blocks in the function, guaranteeing that
408 // the numbers agree with the position of the block in the function.
409 MF->RenumberBlocks();
411 // Try to reorder and otherwise adjust the block layout to make good use
412 // of the TB[BH] instructions.
413 bool MadeChange = false;
414 if (isThumb2 && AdjustJumpTableBlocks) {
415 scanFunctionJumpTables();
416 MadeChange |= reorderThumb2JumpTables();
417 // Data is out of date, so clear it. It'll be re-computed later.
418 T2JumpTables.clear();
419 // Blocks may have shifted around. Keep the numbering up to date.
420 MF->RenumberBlocks();
423 // Thumb1 functions containing constant pools get 4-byte alignment.
424 // This is so we can keep exact track of where the alignment padding goes.
426 // ARM and Thumb2 functions need to be 4-byte aligned.
428 MF->EnsureAlignment(2); // 2 = log2(4)
430 // Perform the initial placement of the constant pool entries. To start with,
431 // we put them all at the end of the function.
432 std::vector<MachineInstr*> CPEMIs;
434 doInitialPlacement(CPEMIs);
436 /// The next UID to take is the first unused one.
437 AFI->initPICLabelUId(CPEMIs.size());
439 // Do the initial scan of the function, building up information about the
440 // sizes of each block, the location of all the water, and finding all of the
441 // constant pool users.
442 initializeFunctionInfo(CPEMIs);
447 /// Remove dead constant pool entries.
448 MadeChange |= removeUnusedCPEntries();
450 // Iteratively place constant pool entries and fix up branches until there is no change.
452 unsigned NoCPIters = 0, NoBRIters = 0;
454 DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
455 bool CPChange = false;
456 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
457 CPChange |= handleConstantPoolUser(i);
458 if (CPChange && ++NoCPIters > 30)
459 report_fatal_error("Constant Island pass failed to converge!");
462 // Clear NewWaterList now. If we split a block for branches, it should
463 // appear as "new water" for the next iteration of constant pool placement.
464 NewWaterList.clear();
466 DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
467 bool BRChange = false;
468 for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
469 BRChange |= fixupImmediateBr(ImmBranches[i]);
470 if (BRChange && ++NoBRIters > 30)
471 report_fatal_error("Branch Fix Up pass failed to converge!");
474 if (!CPChange && !BRChange)
479 // Shrink 32-bit Thumb2 branch, load, and store instructions.
480 if (isThumb2 && !STI->prefers32BitThumb())
481 MadeChange |= optimizeThumb2Instructions();
483 // After a while, this might be made debug-only, but it is not expensive.
486 // If LR has been forced spilled and no far jump (i.e. BL) has been issued,
487 // undo the spill / restore of LR if possible.
488 if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
489 MadeChange |= undoLRSpillRestore();
491 // Save the mapping between original and cloned constpool entries.
492 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
493 for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
494 const CPEntry & CPE = CPEntries[i][j];
495 AFI->recordCPEClone(i, CPE.CPI);
499 DEBUG(dbgs() << '\n'; dumpBBs());
507 T2JumpTables.clear();
512 /// doInitialPlacement - Perform the initial placement of the constant pool
513 /// entries. To start with, we put them all at the end of the function.
515 ARMConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
516 // Create the basic block to hold the CPE's.
517 MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
520 // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
521 unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
523 // Mark the basic block as required by the const-pool.
524 // If AlignConstantIslands isn't set, use 4-byte alignment for everything.
525 BB->setAlignment(AlignConstantIslands ? MaxAlign : 2);
527 // The function needs to be as aligned as the basic blocks. The linker may
528 // move functions around based on their alignment.
529 MF->EnsureAlignment(BB->getAlignment());
531 // Order the entries in BB by descending alignment. That ensures correct
532 // alignment of all entries as long as BB is sufficiently aligned. Keep
533 // track of the insertion point for each alignment. We are going to bucket
534 // sort the entries as they are created.
535 SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
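// For example (illustrative): with MaxAlign = 3 every InsPoint slot starts at
// BB->end(). When a 4-byte aligned entry is inserted at InsPoint[2], the loop
// below redirects InsPoint[3] to it, so any later 8-byte aligned entry is
// placed before it, keeping the island sorted by descending alignment.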
537 // Add all of the constants from the constant pool to the end block, use an
538 // identity mapping of CPI's to CPE's.
539 const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
541 const TargetData &TD = *MF->getTarget().getTargetData();
542 for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
543 unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
544 assert(Size >= 4 && "Too small constant pool entry");
545 unsigned Align = CPs[i].getAlignment();
546 assert(isPowerOf2_32(Align) && "Invalid alignment");
547 // Verify that all constant pool entries are a multiple of their alignment.
548 // If not, we would have to pad them out so that instructions stay aligned.
549 assert((Size % Align) == 0 && "CP Entry not a multiple of its alignment!");
551 // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
552 unsigned LogAlign = Log2_32(Align);
553 MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
554 MachineInstr *CPEMI =
555 BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
556 .addImm(i).addConstantPoolIndex(i).addImm(Size);
557 CPEMIs.push_back(CPEMI);
559 // Ensure that future entries with higher alignment get inserted before
560 // CPEMI. This is bucket sort with iterators.
561 for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
562 if (InsPoint[a] == InsAt)
565 // Add a new CPEntry, but no corresponding CPUser yet.
566 std::vector<CPEntry> CPEs;
567 CPEs.push_back(CPEntry(CPEMI, i));
568 CPEntries.push_back(CPEs);
570 DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
571 << Size << ", align = " << Align <<'\n');
576 /// BBHasFallthrough - Return true if the specified basic block can fall through
577 /// into the block immediately after it.
578 static bool BBHasFallthrough(MachineBasicBlock *MBB) {
579 // Get the next machine basic block in the function.
580 MachineFunction::iterator MBBI = MBB;
581 // Can't fall off end of function.
582 if (llvm::next(MBBI) == MBB->getParent()->end())
585 MachineBasicBlock *NextBB = llvm::next(MBBI);
586 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
587 E = MBB->succ_end(); I != E; ++I)
594 /// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
595 /// look up the corresponding CPEntry.
596 ARMConstantIslands::CPEntry
597 *ARMConstantIslands::findConstPoolEntry(unsigned CPI,
598 const MachineInstr *CPEMI) {
599 std::vector<CPEntry> &CPEs = CPEntries[CPI];
600 // Number of entries per constpool index should be small, just do a linear search.
602 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
603 if (CPEs[i].CPEMI == CPEMI)
609 /// getCPELogAlign - Returns the required alignment of the constant pool entry
610 /// represented by CPEMI. Alignment is measured in log2(bytes) units.
611 unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
612 assert(CPEMI && CPEMI->getOpcode() == ARM::CONSTPOOL_ENTRY);
614 // Everything is 4-byte aligned unless AlignConstantIslands is set.
615 if (!AlignConstantIslands)
618 unsigned CPI = CPEMI->getOperand(1).getIndex();
619 assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
620 unsigned Align = MCP->getConstants()[CPI].getAlignment();
621 assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
622 return Log2_32(Align);
625 /// scanFunctionJumpTables - Do a scan of the function, building up
626 /// information about the sizes of each block and the locations of all the jump tables.
628 void ARMConstantIslands::scanFunctionJumpTables() {
629 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
631 MachineBasicBlock &MBB = *MBBI;
633 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
635 if (I->isBranch() && I->getOpcode() == ARM::t2BR_JT)
636 T2JumpTables.push_back(I);
640 /// initializeFunctionInfo - Do the initial scan of the function, building up
641 /// information about the sizes of each block, the location of all the water,
642 /// and finding all of the constant pool users.
643 void ARMConstantIslands::
644 initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
646 BBInfo.resize(MF->getNumBlockIDs());
648 // First thing, compute the size of all basic blocks, and see if the function
649 // has any inline assembly in it. If so, we have to be conservative about
650 // alignment assumptions, as we don't know for sure the size of any
651 // instructions in the inline assembly.
652 for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I)
655 // The known bits of the entry block offset are determined by the function alignment.
657 BBInfo.front().KnownBits = MF->getAlignment();
659 // Compute block offsets and known bits.
660 adjustBBOffsetsAfter(MF->begin());
662 // Now go back through the instructions and build up our data structures.
663 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
665 MachineBasicBlock &MBB = *MBBI;
667 // If this block doesn't fall through into the next MBB, then this is
668 // 'water' where a constant pool island could be placed.
669 if (!BBHasFallthrough(&MBB))
670 WaterList.push_back(&MBB);
672 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
674 if (I->isDebugValue())
677 int Opc = I->getOpcode();
685 continue; // Ignore other JT branches
687 T2JumpTables.push_back(I);
688 continue; // Does not get an entry in ImmBranches
719 // Record this immediate branch.
720 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
721 ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
724 if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
725 PushPopMIs.push_back(I);
727 if (Opc == ARM::CONSTPOOL_ENTRY)
730 // Scan the instructions for constant pool operands.
731 for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
732 if (I->getOperand(op).isCPI()) {
733 // We found one. The addressing mode tells us the max displacement
734 // from the PC that this instruction permits.
736 // Basic size info comes from the TSFlags field.
740 bool IsSoImm = false;
744 llvm_unreachable("Unknown addressing mode for CP reference!");
746 // Taking the address of a CP entry.
748 // This takes a SoImm, which is an 8-bit immediate rotated. We'll
749 // pretend the maximum offset is 255 * 4. Since each instruction
750 // is 4 bytes wide, this is always correct. We'll check for other
751 // displacements that fit in a SoImm as well.
757 case ARM::t2LEApcrel:
769 Bits = 12; // +-offset_12
775 Scale = 4; // +(offset_8*4)
781 Scale = 4; // +-(offset_8*4)
786 // Remember that this is a user of a CP entry.
787 unsigned CPI = I->getOperand(op).getIndex();
788 MachineInstr *CPEMI = CPEMIs[CPI];
789 unsigned MaxOffs = ((1 << Bits)-1) * Scale;
790 CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk, IsSoImm));
792 // Increment corresponding CPEntry reference count.
793 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
794 assert(CPE && "Cannot find a corresponding CPEntry!");
797 // Instructions can only use one CP entry, don't bother scanning the
798 // rest of the operands.
805 /// computeBlockSize - Compute the size and some alignment information for MBB.
806 /// This function updates BBInfo directly.
807 void ARMConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
808 BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
813 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
815 BBI.Size += TII->GetInstSizeInBytes(I);
816 // For inline asm, GetInstSizeInBytes returns a conservative estimate.
817 // The actual size may be smaller, but still a multiple of the instr size.
818 if (I->isInlineAsm())
819 BBI.Unalign = isThumb ? 1 : 2;
820 // Also consider instructions that may be shrunk later.
821 else if (isThumb && mayOptimizeThumb2Instruction(I))
825 // tBR_JTr contains a .align 2 directive.
826 if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
828 MBB->getParent()->EnsureAlignment(2);
832 /// getOffsetOf - Return the current offset of the specified machine instruction
833 /// from the start of the function. This offset changes as stuff is moved
834 /// around inside the function.
835 unsigned ARMConstantIslands::getOffsetOf(MachineInstr *MI) const {
836 MachineBasicBlock *MBB = MI->getParent();
838 // The offset is composed of two things: the sum of the sizes of all MBB's
839 // before this instruction's block, and the offset from the start of the block it is in.
841 unsigned Offset = BBInfo[MBB->getNumber()].Offset;
843 // Sum instructions before MI in MBB.
844 for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
845 assert(I != MBB->end() && "Didn't find MI in its own basic block?");
846 Offset += TII->GetInstSizeInBytes(I);
851 /// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB ID.
853 static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
854 const MachineBasicBlock *RHS) {
855 return LHS->getNumber() < RHS->getNumber();
858 /// updateForInsertedWaterBlock - When a block is newly inserted into the
859 /// machine function, it upsets all of the block numbers. Renumber the blocks
860 /// and update the arrays that parallel this numbering.
861 void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
862 // Renumber the MBB's to keep them consecutive.
863 NewBB->getParent()->RenumberBlocks(NewBB);
865 // Insert an entry into BBInfo to align it properly with the (newly
866 // renumbered) block numbers.
867 BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
869 // Next, update WaterList. Specifically, we need to add NewBB as having
870 // available water after it.
872 std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
874 WaterList.insert(IP, NewBB);
878 /// Split the basic block containing MI into two blocks, which are joined by
879 /// an unconditional branch. Update data structures and renumber blocks to
880 /// account for this change, and return the newly created block.
881 MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
882 MachineBasicBlock *OrigBB = MI->getParent();
884 // Create a new MBB for the code after the OrigBB.
885 MachineBasicBlock *NewBB =
886 MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
887 MachineFunction::iterator MBBI = OrigBB; ++MBBI;
888 MF->insert(MBBI, NewBB);
890 // Splice the instructions starting with MI over to NewBB.
891 NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
893 // Add an unconditional branch from OrigBB to NewBB.
894 // Note the new unconditional branch is not being recorded.
895 // There doesn't seem to be meaningful DebugInfo available; this doesn't
896 // correspond to anything in the source.
897 unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
899 BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
901 BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB)
902 .addImm(ARMCC::AL).addReg(0);
905 // Update the CFG. All succs of OrigBB are now succs of NewBB.
906 NewBB->transferSuccessors(OrigBB);
908 // OrigBB branches to NewBB.
909 OrigBB->addSuccessor(NewBB);
911 // Update internal data structures to account for the newly inserted MBB.
912 // This is almost the same as updateForInsertedWaterBlock, except that
913 // the Water goes after OrigBB, not NewBB.
914 MF->RenumberBlocks(NewBB);
916 // Insert an entry into BBInfo to align it properly with the (newly
917 // renumbered) block numbers.
918 BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
920 // Next, update WaterList. Specifically, we need to add OrigBB as having
921 // available water after it (but not if it's already there, which happens
922 // when splitting before a conditional branch that is followed by an
923 // unconditional branch - in that case we want to insert NewBB).
925 std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
927 MachineBasicBlock* WaterBB = *IP;
928 if (WaterBB == OrigBB)
929 WaterList.insert(llvm::next(IP), NewBB);
931 WaterList.insert(IP, OrigBB);
932 NewWaterList.insert(OrigBB);
934 // Figure out how large the OrigBB is. As the first half of the original
935 // block, it cannot contain a tablejump. The size includes
936 // the new jump we added. (It should be possible to do this without
937 // recounting everything, but it's very confusing, and this is rarely worth it.)
939 computeBlockSize(OrigBB);
941 // Figure out how large the NewMBB is. As the second half of the original
942 // block, it may contain a tablejump.
943 computeBlockSize(NewBB);
945 // All BBOffsets following these blocks must be modified.
946 adjustBBOffsetsAfter(OrigBB);
951 /// getUserOffset - Compute the offset of U.MI as seen by the hardware
952 /// displacement computation. Update U.KnownAlignment to match its current
953 /// basic block location.
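/// For example (illustrative): a Thumb user at offset 0x100 computes its
/// displacement from PC = 0x104, while an ARM user at the same offset would
/// use PC = 0x108.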
954 unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
955 unsigned UserOffset = getOffsetOf(U.MI);
956 const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
957 unsigned KnownBits = BBI.internalKnownBits();
959 // The value read from PC is offset from the actual instruction address.
960 UserOffset += (isThumb ? 4 : 8);
962 // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
963 // Make sure U.getMaxDisp() returns a constrained range.
964 U.KnownAlignment = (KnownBits >= 2);
966 // On Thumb, offsets==2 mod 4 are rounded down by the hardware for
967 // purposes of the displacement computation; compensate for that here.
968 // For unknown alignments, getMaxDisp() constrains the range instead.
969 if (isThumb && U.KnownAlignment)
975 /// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
976 /// reference) is within MaxDisp of TrialOffset (a proposed location of a
977 /// constant pool entry).
978 /// UserOffset is computed by getUserOffset above to include PC adjustments. If
979 /// the mod 4 alignment of UserOffset is not known, the uncertainty must be
980 /// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
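/// For example (illustrative): with MaxDisp = 255, a user at offset 0x200 can
/// reach a trial offset of 0x2F0 (forward displacement 0xF0), but can only
/// reach 0x180 (backward displacement 0x80) when NegativeOK is set.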
981 bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
982 unsigned TrialOffset, unsigned MaxDisp,
983 bool NegativeOK, bool IsSoImm) {
984 if (UserOffset <= TrialOffset) {
985 // User before the Trial.
986 if (TrialOffset - UserOffset <= MaxDisp)
988 // FIXME: Make use of the full range of soimm values.
989 } else if (NegativeOK) {
990 if (UserOffset - TrialOffset <= MaxDisp)
992 // FIXME: Make use of the full range of soimm values.
997 /// isWaterInRange - Returns true if a CPE placed after the specified
998 /// Water (a basic block) will be in range for the specific MI.
1000 /// Compute how much the function will grow by inserting a CPE after Water.
1001 bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
1002 MachineBasicBlock* Water, CPUser &U,
1004 unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
1005 unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
1006 unsigned NextBlockOffset, NextBlockAlignment;
1007 MachineFunction::const_iterator NextBlock = Water;
1008 if (++NextBlock == MF->end()) {
1009 NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
1010 NextBlockAlignment = 0;
1012 NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
1013 NextBlockAlignment = NextBlock->getAlignment();
1015 unsigned Size = U.CPEMI->getOperand(2).getImm();
1016 unsigned CPEEnd = CPEOffset + Size;
1018 // The CPE may be able to hide in the alignment padding before the next
1019 // block. It may also cause more padding to be required if it is more aligned
1020 // than the next block.
1021 if (CPEEnd > NextBlockOffset) {
1022 Growth = CPEEnd - NextBlockOffset;
1023 // Compute the padding that would go at the end of the CPE to align the next block.
1025 Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);
1027 // If the CPE is to be inserted before the instruction, that will raise
1028 // the offset of the instruction. Also account for unknown alignment padding
1029 // in blocks between CPE and the user.
1030 if (CPEOffset < UserOffset)
1031 UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
1033 // CPE fits in existing padding.
1036 return isOffsetInRange(UserOffset, CPEOffset, U);
1039 /// isCPEntryInRange - Returns true if the distance between specific MI and
1040 /// specific ConstPool entry instruction can fit in MI's displacement field.
1041 bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
1042 MachineInstr *CPEMI, unsigned MaxDisp,
1043 bool NegOk, bool DoDump) {
1044 unsigned CPEOffset = getOffsetOf(CPEMI);
1045 assert(CPEOffset % 4 == 0 && "Misaligned CPE");
1049 unsigned Block = MI->getParent()->getNumber();
1050 const BasicBlockInfo &BBI = BBInfo[Block];
1051 dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
1052 << " max delta=" << MaxDisp
1053 << format(" insn address=%#x", UserOffset)
1054 << " in BB#" << Block << ": "
1055 << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
1056 << format("CPE address=%#x offset=%+d: ", CPEOffset,
1057 int(CPEOffset-UserOffset));
1061 return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
1065 /// BBIsJumpedOver - Return true if the specified basic block's only predecessor
1066 /// unconditionally branches to its only successor.
1067 static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
1068 if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
1071 MachineBasicBlock *Succ = *MBB->succ_begin();
1072 MachineBasicBlock *Pred = *MBB->pred_begin();
1073 MachineInstr *PredMI = &Pred->back();
1074 if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
1075 || PredMI->getOpcode() == ARM::t2B)
1076 return PredMI->getOperand(0).getMBB() == Succ;
1081 void ARMConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
1082 unsigned BBNum = BB->getNumber();
1083 for(unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
1084 // Get the offset and known bits at the end of the layout predecessor.
1085 // Include the alignment of the current block.
1086 unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment();
1087 unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
1088 unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
1090 // This is where block i begins. Stop if the offset is already correct,
1091 // and we have updated 2 blocks. This is the maximum number of blocks
1092 // changed before calling this function.
1093 if (i > BBNum + 2 &&
1094 BBInfo[i].Offset == Offset &&
1095 BBInfo[i].KnownBits == KnownBits)
1098 BBInfo[i].Offset = Offset;
1099 BBInfo[i].KnownBits = KnownBits;
1103 /// decrementCPEReferenceCount - find the constant pool entry with index CPI
1104 /// and instruction CPEMI, and decrement its refcount. If the refcount
1105 /// becomes 0 remove the entry and instruction. Returns true if we removed
1106 /// the entry, false if we didn't.
1108 bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
1109 MachineInstr *CPEMI) {
1110 // Find the old entry. Eliminate it if it is no longer used.
1111 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
1112 assert(CPE && "Unexpected!");
1113 if (--CPE->RefCount == 0) {
1114 removeDeadCPEMI(CPEMI);
1122 /// findInRangeCPEntry - see if the currently referenced CPE is in range;
1123 /// if not, see if an in-range clone of the CPE is in range, and if so,
1124 /// change the data structures so the user references the clone. Returns:
1125 /// 0 = no existing entry found
1126 /// 1 = entry found, and there were no code insertions or deletions
1127 /// 2 = entry found, and there were code insertions or deletions
1128 int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
1130 MachineInstr *UserMI = U.MI;
1131 MachineInstr *CPEMI = U.CPEMI;
1133 // Check to see if the CPE is already in-range.
1134 if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
1136 DEBUG(dbgs() << "In range\n");
1140 // No. Look for previously created clones of the CPE that are in range.
1141 unsigned CPI = CPEMI->getOperand(1).getIndex();
1142 std::vector<CPEntry> &CPEs = CPEntries[CPI];
1143 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
1144 // We already tried this one
1145 if (CPEs[i].CPEMI == CPEMI)
1147 // Removing CPEs can leave empty entries, skip them.
1148 if (CPEs[i].CPEMI == NULL)
1150 if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
1152 DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
1153 << CPEs[i].CPI << "\n");
1154 // Point the CPUser node to the replacement
1155 U.CPEMI = CPEs[i].CPEMI;
1156 // Change the CPI in the instruction operand to refer to the clone.
1157 for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
1158 if (UserMI->getOperand(j).isCPI()) {
1159 UserMI->getOperand(j).setIndex(CPEs[i].CPI);
1162 // Adjust the refcount of the clone...
1164 // ...and the original. If we didn't remove the old entry, none of the
1165 // addresses changed, so we don't need another pass.
1166 return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
1172 /// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
1173 /// the specific unconditional branch instruction.
1174 static inline unsigned getUnconditionalBrDisp(int Opc) {
1177 return ((1<<10)-1)*2;
1179 return ((1<<23)-1)*2;
1184 return ((1<<23)-1)*4;
1187 /// findAvailableWater - Look for an existing entry in the WaterList in which
1188 /// we can place the CPE referenced from U so it's within range of U's MI.
1189 /// Returns true if found, false if not. If it returns true, WaterIter
1190 /// is set to the WaterList entry. For Thumb, prefer water that will not
1191 /// introduce padding to water that will. To ensure that this pass
1192 /// terminates, the CPE location for a particular CPUser is only allowed to
1193 /// move to a lower address, so search backward from the end of the list and
1194 /// prefer the first water that is in range.
1195 bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
1196 water_iterator &WaterIter) {
1197 if (WaterList.empty())
1200 unsigned BestGrowth = ~0u;
1201 for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();;
1203 MachineBasicBlock* WaterBB = *IP;
1204 // Check if water is in range and is either at a lower address than the
1205 // current "high water mark" or a new water block that was created since
1206 // the previous iteration by inserting an unconditional branch. In the
1207 // latter case, we want to allow resetting the high water mark back to
1208 // this new water since we haven't seen it before. Inserting branches
1209 // should be relatively uncommon and when it does happen, we want to be
1210 // sure to take advantage of it for all the CPEs near that block, so that
1211 // we don't insert more branches than necessary.
1213 if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
1214 (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
1215 NewWaterList.count(WaterBB)) && Growth < BestGrowth) {
1216 // This is the least amount of required padding seen so far.
1217 BestGrowth = Growth;
1219 DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber()
1220 << " Growth=" << Growth << '\n');
1222 // Keep looking unless it is perfect.
1223 if (BestGrowth == 0)
1229 return BestGrowth != ~0u;
1232 /// createNewWater - No existing WaterList entry will work for
1233 /// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
1234 /// block is used if in range, and the conditional branch munged so control
1235 /// flow is correct. Otherwise the block is split to create a hole with an
1236 /// unconditional branch around it. In either case NewMBB is set to a
1237 /// block following which the new island can be inserted (the WaterList
1238 /// is not adjusted).
1239 void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
1240 unsigned UserOffset,
1241 MachineBasicBlock *&NewMBB) {
1242 CPUser &U = CPUsers[CPUserIndex];
1243 MachineInstr *UserMI = U.MI;
1244 MachineInstr *CPEMI = U.CPEMI;
1245 unsigned CPELogAlign = getCPELogAlign(CPEMI);
1246 MachineBasicBlock *UserMBB = UserMI->getParent();
1247 const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
1249 // If the block does not end in an unconditional branch already, and if the
1250 // end of the block is within range, make new water there. (The addition
1251 // below is for the unconditional branch we will be adding: 4 bytes on ARM +
1252 // Thumb2, 2 on Thumb1.)
1253 if (BBHasFallthrough(UserMBB)) {
1254 // Size of branch to insert.
1255 unsigned Delta = isThumb1 ? 2 : 4;
1256 // End of UserBlock after adding a branch.
1257 unsigned UserBlockEnd = UserBBI.postOffset() + Delta;
1258 // Compute the offset where the CPE will begin.
1259 unsigned CPEOffset = WorstCaseAlign(UserBlockEnd, CPELogAlign,
1260 UserBBI.postKnownBits());
1262 if (isOffsetInRange(UserOffset, CPEOffset, U)) {
1263 DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
1264 << format(", expected CPE offset %#x\n", CPEOffset));
1265 NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
1266 // Add an unconditional branch from UserMBB to fallthrough block. Record
1267 // it for branch lengthening; this new branch will not get out of range,
1268 // but if the preceding conditional branch is out of range, the targets
1269 // will be exchanged, and the altered branch may be out of range, so the
1270 // machinery has to know about it.
1271 int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
1273 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
1275 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB)
1276 .addImm(ARMCC::AL).addReg(0);
1277 unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
1278 ImmBranches.push_back(ImmBranch(&UserMBB->back(),
1279 MaxDisp, false, UncondBr));
1280 BBInfo[UserMBB->getNumber()].Size += Delta;
1281 adjustBBOffsetsAfter(UserMBB);
1286 // What a big block. Find a place within the block to split it. This is a
1287 // little tricky on Thumb1 since instructions are 2 bytes and constant pool
1288 // entries are 4 bytes: if instruction I references island CPE, and
1289 // instruction I+1 references CPE', it will not work well to put CPE as far
1290 // forward as possible, since then CPE' cannot immediately follow it (that
1291 // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
1292 // need to create a new island. So, we make a first guess, then walk through
1293 // the instructions between the one currently being looked at and the
1294 // possible insertion point, and make sure any other instructions that
1295 // reference CPEs will be able to use the same island area; if not, we back
1296 // up the insertion point.
1298 // Try to split the block so it's fully aligned. Compute the latest split
1299 // point where we can add a 4-byte branch instruction, and then
1300 // WorstCaseAlign to LogAlign.
1301 unsigned LogAlign = MF->getAlignment();
1302 assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
1303 unsigned KnownBits = UserBBI.internalKnownBits();
1304 unsigned UPad = UnknownPadding(LogAlign, KnownBits);
1305 unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
1306 DEBUG(dbgs() << format("Split in middle of big block before %#x",
1309 // Account for alignment and unknown padding.
1310 BaseInsertOffset &= ~((1u << LogAlign) - 1);
1311 BaseInsertOffset -= UPad;
1313 // The 4 in the following is for the unconditional branch we'll be inserting
1314 // (allows for long branch on Thumb1). Alignment of the island is handled
1315 // inside isOffsetInRange.
1316 BaseInsertOffset -= 4;
1318 DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
1319 << " la=" << LogAlign
1320 << " kb=" << KnownBits
1321 << " up=" << UPad << '\n');
1323 // This could point off the end of the block if we've already got constant
1324 // pool entries following this block; only the last one is in the water list.
1325 // Back past any possible branches (allow for a conditional and a maximally
1326 // long unconditional).
1327 if (BaseInsertOffset >= BBInfo[UserMBB->getNumber()+1].Offset)
1328 BaseInsertOffset = BBInfo[UserMBB->getNumber()+1].Offset -
1330 unsigned EndInsertOffset =
1331 WorstCaseAlign(BaseInsertOffset + 4, LogAlign, KnownBits) +
1332 CPEMI->getOperand(2).getImm();
1333 MachineBasicBlock::iterator MI = UserMI;
1335 unsigned CPUIndex = CPUserIndex+1;
1336 unsigned NumCPUsers = CPUsers.size();
1337 MachineInstr *LastIT = 0;
1338 for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
1339 Offset < BaseInsertOffset;
1340 Offset += TII->GetInstSizeInBytes(MI),
1341 MI = llvm::next(MI)) {
1342 if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
1343 CPUser &U = CPUsers[CPUIndex];
1344 if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
1345 // Shift the insertion point by one unit of alignment so it is within reach.
1346 BaseInsertOffset -= 1u << LogAlign;
1347 EndInsertOffset -= 1u << LogAlign;
1349 // This is overly conservative, as we don't account for CPEMIs being
1350 // reused within the block, but it doesn't matter much. Also assume CPEs
1351 // are added in order with alignment padding. We may eventually be able
1352 // to pack the aligned CPEs better.
1353 EndInsertOffset = RoundUpToAlignment(EndInsertOffset,
1354 1u << getCPELogAlign(U.CPEMI)) +
1355 U.CPEMI->getOperand(2).getImm();
1359 // Remember the last IT instruction.
1360 if (MI->getOpcode() == ARM::t2IT)
1366 // Avoid splitting an IT block.
1368 unsigned PredReg = 0;
1369 ARMCC::CondCodes CC = getITInstrPredicate(MI, PredReg);
1370 if (CC != ARMCC::AL)
1373 NewMBB = splitBlockBeforeInstr(MI);
1376 /// handleConstantPoolUser - Analyze the specified user, checking to see if it
1377 /// is out-of-range. If so, pick up the constant pool value and move it some
1378 /// place in-range. Return true if we changed any addresses (thus must run
1379 /// another pass of branch lengthening), false otherwise.
1380 bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
1381 CPUser &U = CPUsers[CPUserIndex];
1382 MachineInstr *UserMI = U.MI;
1383 MachineInstr *CPEMI = U.CPEMI;
1384 unsigned CPI = CPEMI->getOperand(1).getIndex();
1385 unsigned Size = CPEMI->getOperand(2).getImm();
1386 // Compute this only once, it's expensive.
1387 unsigned UserOffset = getUserOffset(U);
1389 // See if the current entry is within range, or there is a clone of it that is in range.
1391 int result = findInRangeCPEntry(U, UserOffset);
1392 if (result==1) return false;
1393 else if (result==2) return true;
1395 // No existing clone of this CPE is within range.
1396 // We will be generating a new clone. Get a UID for it.
1397 unsigned ID = AFI->createPICLabelUId();
1399 // Look for water where we can place this CPE.
1400 MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
1401 MachineBasicBlock *NewMBB;
1403 if (findAvailableWater(U, UserOffset, IP)) {
1404 DEBUG(dbgs() << "Found water in range\n");
1405 MachineBasicBlock *WaterBB = *IP;
1407 // If the original WaterList entry was "new water" on this iteration,
1408 // propagate that to the new island. This is just keeping NewWaterList
1409 // updated to match the WaterList, which will be updated below.
1410 if (NewWaterList.count(WaterBB)) {
1411 NewWaterList.erase(WaterBB);
1412 NewWaterList.insert(NewIsland);
1414 // The new CPE goes before the following block (NewMBB).
1415 NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
1419 DEBUG(dbgs() << "No water found\n");
1420 createNewWater(CPUserIndex, UserOffset, NewMBB);
1422 // splitBlockBeforeInstr adds to WaterList, which is important when it is
1423 // called while handling branches so that the water will be seen on the
1424 // next iteration for constant pools, but in this context, we don't want
1425 // it. Check for this so it will be removed from the WaterList.
1426 // Also remove any entry from NewWaterList.
1427 MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB));
1428 IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
1429 if (IP != WaterList.end())
1430 NewWaterList.erase(WaterBB);
1432 // We are adding new water. Update NewWaterList.
1433 NewWaterList.insert(NewIsland);
1436 // Remove the original WaterList entry; we want subsequent insertions in
1437 // this vicinity to go after the one we're about to insert. This
1438 // considerably reduces the number of times we have to move the same CPE
1439 // more than once and is also important to ensure the algorithm terminates.
1440 if (IP != WaterList.end())
1441 WaterList.erase(IP);
1443 // Okay, we know we can put an island before NewMBB now, do it!
1444 MF->insert(NewMBB, NewIsland);
1446 // Update internal data structures to account for the newly inserted MBB.
1447 updateForInsertedWaterBlock(NewIsland);
1449 // Decrement the old entry, and remove it if refcount becomes 0.
1450 decrementCPEReferenceCount(CPI, CPEMI);
1452 // Now that we have an island to add the CPE to, clone the original CPE and
1453 // add it to the island.
1454 U.HighWaterMark = NewIsland;
1455 U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
1456 .addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
1457 CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
1460 // Mark the basic block as aligned as required by the const-pool entry.
1461 NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
1463 // Increase the size of the island block to account for the new entry.
1464 BBInfo[NewIsland->getNumber()].Size += Size;
1465 adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland)));
1467 // Finally, change the CPI in the instruction operand to be ID.
1468 for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
1469 if (UserMI->getOperand(i).isCPI()) {
1470 UserMI->getOperand(i).setIndex(ID);
1474 DEBUG(dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
1475 << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
1480 /// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
1481 /// sizes and offsets of impacted basic blocks.
1482 void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
1483 MachineBasicBlock *CPEBB = CPEMI->getParent();
1484 unsigned Size = CPEMI->getOperand(2).getImm();
1485 CPEMI->eraseFromParent();
1486 BBInfo[CPEBB->getNumber()].Size -= Size;
1487 // All succeeding offsets have the current size value added in, fix this.
1488 if (CPEBB->empty()) {
1489 BBInfo[CPEBB->getNumber()].Size = 0;
1491 // This block no longer needs to be aligned. <rdar://problem/10534709>.
1492 CPEBB->setAlignment(0);
1494 // Entries are sorted by descending alignment, so realign from the front.
1495 CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));
1497 adjustBBOffsetsAfter(CPEBB);
1498 // An island has only one predecessor BB and one successor BB. Check if
1499 // this BB's predecessor jumps directly to this BB's successor. This
1500 // shouldn't happen currently.
1501 assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
1502 // FIXME: remove the empty blocks after all the work is done?
1505 /// removeUnusedCPEntries - Remove constant pool entries whose refcounts are zero.
1507 bool ARMConstantIslands::removeUnusedCPEntries() {
1508 bool MadeChange = false;
1509 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
1510 std::vector<CPEntry> &CPEs = CPEntries[i];
1511 for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
1512 if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
1513 removeDeadCPEMI(CPEs[j].CPEMI);
1514 CPEs[j].CPEMI = NULL;
1522 /// isBBInRange - Returns true if the distance between specific MI and
1523 /// specific BB can fit in MI's displacement field.
1524 bool ARMConstantIslands::isBBInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
1526 unsigned PCAdj = isThumb ? 4 : 8;
1527 unsigned BrOffset = getOffsetOf(MI) + PCAdj;
1528 unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
1530 DEBUG(dbgs() << "Branch to destination BB#" << DestBB->getNumber()
1531 << " from BB#" << MI->getParent()->getNumber()
1532 << " max delta=" << MaxDisp
1533 << " from " << getOffsetOf(MI) << " to " << DestOffset
1534 << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
1536 if (BrOffset <= DestOffset) {
1537 // Branch before the Dest.
1538 if (DestOffset-BrOffset <= MaxDisp)
1541 if (BrOffset-DestOffset <= MaxDisp)
1547 /// fixupImmediateBr - Fix up an immediate branch whose destination is too far
1548 /// away to fit in its displacement field.
1549 bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
1550 MachineInstr *MI = Br.MI;
1551 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1553 // Check to see if the DestBB is already in-range.
1554 if (isBBInRange(MI, DestBB, Br.MaxDisp))
1558 return fixupUnconditionalBr(Br);
1559 return fixupConditionalBr(Br);
1562 /// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
1563 /// too far away to fit in its displacement field. If the LR register has been
1564 /// spilled in the epilogue, then we can use BL to implement a far jump.
1565 /// Otherwise, add an intermediate branch instruction to a branch.
1567 ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
1568 MachineInstr *MI = Br.MI;
1569 MachineBasicBlock *MBB = MI->getParent();
1571 llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");
1573 // Use BL to implement far jump.
1574 Br.MaxDisp = (1 << 21) * 2;
1575 MI->setDesc(TII->get(ARM::tBfar));
1576 BBInfo[MBB->getNumber()].Size += 2;
1577 adjustBBOffsetsAfter(MBB);
1581 DEBUG(dbgs() << " Changed B to long jump " << *MI);
1586 /// fixupConditionalBr - Fix up a conditional branch whose destination is too
1587 /// far away to fit in its displacement field. It is converted to an inverse
1588 /// conditional branch + an unconditional branch to the destination.
1589 bool
1590 ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
1591 MachineInstr *MI = Br.MI;
1592 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1594 // Add an unconditional branch to the destination and invert the branch
1595 // condition to jump over it:
1596 // blt L1
1597 // =>
1598 // bge L2
1599 // b   L1
1600 // L2:
1601 ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
1602 CC = ARMCC::getOppositeCondition(CC);
1603 unsigned CCReg = MI->getOperand(2).getReg();
1605 // If the branch is at the end of its MBB and that has a fall-through block,
1606 // direct the updated conditional branch to the fall-through block. Otherwise,
1607 // split the MBB before the next instruction.
1608 MachineBasicBlock *MBB = MI->getParent();
1609 MachineInstr *BMI = &MBB->back();
1610 bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
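// A split is only avoidable when the conditional branch is the last
// instruction in the block and the block falls through; then the inverted
// branch can simply target the existing fall-through successor.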
1612 ++NumCBrFixed;
1613 if (BMI != MI) {
1614 if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
1615 BMI->getOpcode() == Br.UncondBr) {
1616 // Last MI in the BB is an unconditional branch. Can we simply invert the
1617 // condition and swap destinations:
1618 // beq L1
1619 // b   L2
1620 // =>
1621 // bne L2
1622 // b   L1
1623 MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
1624 if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
1625 DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
1626 << *BMI);
1627 BMI->getOperand(0).setMBB(DestBB);
1628 MI->getOperand(0).setMBB(NewDest);
1629 MI->getOperand(1).setImm(CC);
1630 return true;
1631 }
1632 }
1633 }
1635 if (NeedSplit) {
1636 splitBlockBeforeInstr(MI);
1637 // No need for the branch to the next block. We're adding an unconditional
1638 // branch to the destination.
1639 int delta = TII->GetInstSizeInBytes(&MBB->back());
1640 BBInfo[MBB->getNumber()].Size -= delta;
1641 MBB->back().eraseFromParent();
1642 // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
1643 }
1644 MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
1646 DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
1647 << " also invert condition and change dest. to BB#"
1648 << NextBB->getNumber() << "\n");
1650 // Insert a new conditional branch and a new unconditional branch.
1651 // Also update the ImmBranch and add a new entry for the new branch.
1652 BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
1653 .addMBB(NextBB).addImm(CC).addReg(CCReg);
1654 Br.MI = &MBB->back();
1655 BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
1656 if (isThumb)
1657 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB)
1658 .addImm(ARMCC::AL).addReg(0);
1659 else
1660 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
1661 BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
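// Register the new unconditional branch as an ImmBranch too, so a later
// iteration can fix it up if its destination also turns out to be out of
// range.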
1662 unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
1663 ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
1665 // Remove the old conditional branch. It may or may not still be in MBB.
1666 BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
1667 MI->eraseFromParent();
1668 adjustBBOffsetsAfter(MBB);
1669 return true;
1670 }
1672 /// undoLRSpillRestore - Remove Thumb push / pop instructions that only spill
1673 /// LR / restore LR to pc. FIXME: This is done here because it's only possible
1674 /// to do this if tBfar is not used.
1675 bool ARMConstantIslands::undoLRSpillRestore() {
1676 bool MadeChange = false;
1677 for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
1678 MachineInstr *MI = PushPopMIs[i];
1679 // First two operands are predicates.
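// Match a tPOP_RET that restores only the PC, i.e. a "pop {pc}" whose sole
// purpose was returning through the spilled LR; it can be rewritten as a
// plain "bx lr".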
1680 if (MI->getOpcode() == ARM::tPOP_RET &&
1681 MI->getOperand(2).getReg() == ARM::PC &&
1682 MI->getNumExplicitOperands() == 3) {
1683 // Create the new insn and copy the predicate from the old.
1684 BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET))
1685 .addOperand(MI->getOperand(0))
1686 .addOperand(MI->getOperand(1));
1687 MI->eraseFromParent();
1688 MadeChange = true;
1689 }
1690 }
1691 return MadeChange;
1692 }
1694 // mayOptimizeThumb2Instruction - Returns true if optimizeThumb2Instructions
1695 // below may shrink MI.
1696 bool
1697 ARMConstantIslands::mayOptimizeThumb2Instruction(const MachineInstr *MI) const {
1698 switch(MI->getOpcode()) {
1699 // optimizeThumb2Instructions.
1700 case ARM::t2LEApcrel:
1701 case ARM::t2LDRpci:
1702 // optimizeThumb2Branches.
1703 case ARM::t2B:
1704 case ARM::t2Bcc:
1705 case ARM::tBcc:
1706 // optimizeThumb2JumpTables.
1707 case ARM::t2BR_JT:
1708 return true;
1709 }
1710 return false;
1711 }
1713 bool ARMConstantIslands::optimizeThumb2Instructions() {
1714 bool MadeChange = false;
1716 // Shrink ADR and LDR from constantpool.
1717 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
1718 CPUser &U = CPUsers[i];
1719 unsigned Opcode = U.MI->getOpcode();
1720 unsigned NewOpc = 0;
1721 unsigned Scale = 1;
1722 unsigned Bits = 0;
1723 switch (Opcode) {
1724 default: break;
1725 case ARM::t2LEApcrel:
1726 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1727 NewOpc = ARM::tLEApcrel;
1728 Bits = 8;
1729 Scale = 4;
1730 }
1731 break;
1732 case ARM::t2LDRpci:
1733 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1734 NewOpc = ARM::tLDRpci;
1735 Bits = 8;
1736 Scale = 4;
1737 }
1738 break;
1739 }
1741 if (!NewOpc)
1742 continue;
1744 unsigned UserOffset = getUserOffset(U);
1745 unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
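// For the 16-bit forms the offset field is 8 bits scaled by 4, so the
// constant pool entry must lie within 1020 bytes of the (4-byte aligned) PC.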
1747 // Be conservative with inline asm.
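// If the user's alignment isn't known exactly (e.g. it follows inline asm),
// the real offset may be up to 2 bytes worse than computed, so give up that
// much range.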
1748 if (!U.KnownAlignment)
1749 MaxOffs -= 2;
1751 // FIXME: Check if offset is multiple of scale if scale is not 4.
1752 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
1753 DEBUG(dbgs() << "Shrink: " << *U.MI);
1754 U.MI->setDesc(TII->get(NewOpc));
1755 MachineBasicBlock *MBB = U.MI->getParent();
1756 BBInfo[MBB->getNumber()].Size -= 2;
1757 adjustBBOffsetsAfter(MBB);
1758 ++NumT2CPShrunk;
1759 MadeChange = true;
1760 }
1761 }
1763 MadeChange |= optimizeThumb2Branches();
1764 MadeChange |= optimizeThumb2JumpTables();
1765 return MadeChange;
1766 }
1768 bool ARMConstantIslands::optimizeThumb2Branches() {
1769 bool MadeChange = false;
1771 for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i) {
1772 ImmBranch &Br = ImmBranches[i];
1773 unsigned Opcode = Br.MI->getOpcode();
1774 unsigned NewOpc = 0;
1792 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
1793 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1794 if (isBBInRange(Br.MI, DestBB, MaxOffs)) {
1795 DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
1796 Br.MI->setDesc(TII->get(NewOpc));
1797 MachineBasicBlock *MBB = Br.MI->getParent();
1798 BBInfo[MBB->getNumber()].Size -= 2;
1799 adjustBBOffsetsAfter(MBB);
1805 Opcode = Br.MI->getOpcode();
1806 if (Opcode != ARM::tBcc)
1807 continue;
1809 // If the conditional branch doesn't kill CPSR, then CPSR can be liveout
1810 // so this transformation is not safe.
1811 if (!Br.MI->killsRegister(ARM::CPSR))
1812 continue;
1814 NewOpc = 0;
1815 unsigned PredReg = 0;
1816 ARMCC::CondCodes Pred = getInstrPredicate(Br.MI, PredReg);
1817 if (Pred == ARMCC::EQ)
1818 NewOpc = ARM::tCBZ;
1819 else if (Pred == ARMCC::NE)
1820 NewOpc = ARM::tCBNZ;
1821 if (!NewOpc) continue;
1823 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1824 // Check if the distance is within 126. Subtract 2 from the starting offset
1825 // because the cmp will be eliminated.
1826 unsigned BrOffset = getOffsetOf(Br.MI) + 4 - 2;
1827 unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
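// CBZ / CBNZ can only branch forward, with a 6-bit offset in halfwords, i.e.
// at most 126 bytes past the instruction.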
1828 if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
1829 MachineBasicBlock::iterator CmpMI = Br.MI;
1830 if (CmpMI != Br.MI->getParent()->begin()) {
1831 --CmpMI;
1832 if (CmpMI->getOpcode() == ARM::tCMPi8) {
1833 unsigned Reg = CmpMI->getOperand(0).getReg();
1834 Pred = getInstrPredicate(CmpMI, PredReg);
1835 if (Pred == ARMCC::AL &&
1836 CmpMI->getOperand(1).getImm() == 0 &&
1837 isARMLowRegister(Reg)) {
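// The preceding instruction is an unpredicated "cmp rN, #0" on a low
// register, which is exactly the test CBZ / CBNZ performs implicitly, so the
// compare can be folded away.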
1838 MachineBasicBlock *MBB = Br.MI->getParent();
1839 DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
1840 MachineInstr *NewBR =
1841 BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
1842 .addReg(Reg).addMBB(DestBB,Br.MI->getOperand(0).getTargetFlags());
1843 CmpMI->eraseFromParent();
1844 Br.MI->eraseFromParent();
1845 Br.MI = NewBR;
1846 BBInfo[MBB->getNumber()].Size -= 2;
1847 adjustBBOffsetsAfter(MBB);
1859 /// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
1860 /// jumptables when it's possible.
1861 bool ARMConstantIslands::optimizeThumb2JumpTables() {
1862 bool MadeChange = false;
1864 // FIXME: After the tables are shrunk, can we get rid of some of the
1865 // constantpool tables?
1866 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1867 if (MJTI == 0) return false;
1869 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1870 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
1871 MachineInstr *MI = T2JumpTables[i];
1872 const MCInstrDesc &MCID = MI->getDesc();
1873 unsigned NumOps = MCID.getNumOperands();
1874 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 3 : 2);
1875 MachineOperand JTOP = MI->getOperand(JTOpIdx);
1876 unsigned JTI = JTOP.getIndex();
1877 assert(JTI < JT.size());
1879 bool ByteOk = true;
1880 bool HalfWordOk = true;
1881 unsigned JTOffset = getOffsetOf(MI) + 4;
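// TB[BH] table entries are unsigned halfword offsets added to PC+4, so every
// destination must lie forward of the branch and within 2*255 bytes for TBB
// or 2*65535 bytes for TBH.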
1882 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1883 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
1884 MachineBasicBlock *MBB = JTBBs[j];
1885 unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
1886 // Negative offset is not ok. FIXME: We should change BB layout to make
1887 // sure all the branches are forward.
1888 if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
1889 ByteOk = false;
1890 unsigned TBHLimit = ((1<<16)-1)*2;
1891 if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
1892 HalfWordOk = false;
1893 if (!ByteOk && !HalfWordOk)
1894 break;
1895 }
1897 if (ByteOk || HalfWordOk) {
1898 MachineBasicBlock *MBB = MI->getParent();
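// Operand 0 of the jump-table branch holds the computed entry address
// (produced by the add/lea sequence we locate below); operand 1 holds the
// index into the table.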
1899 unsigned BaseReg = MI->getOperand(0).getReg();
1900 bool BaseRegKill = MI->getOperand(0).isKill();
1903 unsigned IdxReg = MI->getOperand(1).getReg();
1904 bool IdxRegKill = MI->getOperand(1).isKill();
1906 // Scan backwards to find the instruction that defines the base
1907 // register. Due to post-RA scheduling, we can't count on it
1908 // immediately preceding the branch instruction.
1909 MachineBasicBlock::iterator PrevI = MI;
1910 MachineBasicBlock::iterator B = MBB->begin();
1911 while (PrevI != B && !PrevI->definesRegister(BaseReg))
1912 --PrevI;
1914 // If for some reason we didn't find it, we can't do anything, so
1915 // just skip this one.
1916 if (!PrevI->definesRegister(BaseReg))
1917 continue;
1919 MachineInstr *AddrMI = PrevI;
1921 // Examine the instruction that calculates the jumptable entry address.
1922 // Make sure it only defines the base register and kills any uses
1923 // other than the index register.
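// AddrMI will be deleted below, so bail out if it defines anything besides
// the base register or reads a register (other than the index) without
// killing it.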
1924 for (unsigned k = 0, eee = AddrMI->getNumOperands(); k != eee; ++k) {
1925 const MachineOperand &MO = AddrMI->getOperand(k);
1926 if (!MO.isReg() || !MO.getReg())
1928 if (MO.isDef() && MO.getReg() != BaseReg) {
1932 if (MO.isUse() && !MO.isKill() && MO.getReg() != IdxReg) {
1940 // Now scan back again to find the tLEApcrel or t2LEApcrelJT instruction
1941 // that gave us the initial base register definition.
1942 for (--PrevI; PrevI != B && !PrevI->definesRegister(BaseReg); --PrevI)
1943 ;
1945 // The instruction should be a tLEApcrel or t2LEApcrelJT; we want
1946 // to delete it as well.
1947 MachineInstr *LeaMI = PrevI;
1948 if ((LeaMI->getOpcode() != ARM::tLEApcrelJT &&
1949 LeaMI->getOpcode() != ARM::t2LEApcrelJT) ||
1950 LeaMI->getOperand(0).getReg() != BaseReg)
1951 continue;
1956 DEBUG(dbgs() << "Shrink JT: " << *MI << " addr: " << *AddrMI
1957 << " lea: " << *LeaMI);
1958 unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
1959 MachineInstr *NewJTMI = BuildMI(MBB, MI->getDebugLoc(), TII->get(Opc))
1960 .addReg(IdxReg, getKillRegState(IdxRegKill))
1961 .addJumpTableIndex(JTI, JTOP.getTargetFlags())
1962 .addImm(MI->getOperand(JTOpIdx+1).getImm());
1963 DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI);
1964 // FIXME: Insert an "ALIGN" instruction to ensure the next instruction
1965 // is 2-byte aligned. For now, asm printer will fix it up.
1966 unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
1967 unsigned OrigSize = TII->GetInstSizeInBytes(AddrMI);
1968 OrigSize += TII->GetInstSizeInBytes(LeaMI);
1969 OrigSize += TII->GetInstSizeInBytes(MI);
1971 AddrMI->eraseFromParent();
1972 LeaMI->eraseFromParent();
1973 MI->eraseFromParent();
1975 int delta = OrigSize - NewSize;
1976 BBInfo[MBB->getNumber()].Size -= delta;
1977 adjustBBOffsetsAfter(MBB);
1987 /// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
1988 /// jump tables always branch forwards, since that's what tbb and tbh need.
1989 bool ARMConstantIslands::reorderThumb2JumpTables() {
1990 bool MadeChange = false;
1992 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1993 if (MJTI == 0) return false;
1995 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1996 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
1997 MachineInstr *MI = T2JumpTables[i];
1998 const MCInstrDesc &MCID = MI->getDesc();
1999 unsigned NumOps = MCID.getNumOperands();
2000 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 3 : 2);
2001 MachineOperand JTOP = MI->getOperand(JTOpIdx);
2002 unsigned JTI = JTOP.getIndex();
2003 assert(JTI < JT.size());
2005 // We prefer that the target blocks for the jump table come after the jump
2006 // instruction so we can use TB[BH]. Loop through the target blocks
2007 // and try to adjust them so that this holds.
2008 int JTNumber = MI->getParent()->getNumber();
2009 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2010 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
2011 MachineBasicBlock *MBB = JTBBs[j];
2012 int DTNumber = MBB->getNumber();
2014 if (DTNumber < JTNumber) {
2015 // The destination precedes the switch. Try to move the block forward
2016 // so we have a positive offset.
2017 MachineBasicBlock *NewBB =
2018 adjustJTTargetBlockForward(MBB, MI->getParent());
2019 if (NewBB)
2020 MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
2021 MadeChange = true;
2029 MachineBasicBlock *ARMConstantIslands::
2030 adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
2031 // If the destination block is terminated by an unconditional branch,
2032 // try to move it; otherwise, create a new block following the jump
2033 // table that branches back to the actual target. This is a very simple
2034 // heuristic. FIXME: We can definitely improve it.
2035 MachineBasicBlock *TBB = 0, *FBB = 0;
2036 SmallVector<MachineOperand, 4> Cond;
2037 SmallVector<MachineOperand, 4> CondPrior;
2038 MachineFunction::iterator BBi = BB;
2039 MachineFunction::iterator OldPrior = prior(BBi);
2041 // If the block terminator isn't analyzable, don't try to move the block
2042 bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond);
2044 // If the block ends in an unconditional branch, move it. The prior block
2045 // has to have an analyzable terminator for us to move this one. Be paranoid
2046 // and make sure we're not trying to move the entry block of the function.
2047 if (!B && Cond.empty() && BB != MF->begin() &&
2048 !TII->AnalyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
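// Moving BB changes what OldPrior falls through to, so both terminators are
// rewritten below; that is why OldPrior must have an analyzable branch as
// well.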
2049 BB->moveAfter(JTBB);
2050 OldPrior->updateTerminator();
2051 BB->updateTerminator();
2052 // Update numbering to account for the block being moved.
2053 MF->RenumberBlocks();
2054 ++NumJTMoved;
2055 return NULL;
2056 }
2058 // Create a new MBB for the code after the jump BB.
2059 MachineBasicBlock *NewBB =
2060 MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
2061 MachineFunction::iterator MBBI = JTBB; ++MBBI;
2062 MF->insert(MBBI, NewBB);
2064 // Add an unconditional branch from NewBB to BB.
2065 // There doesn't seem to be meaningful DebugInfo available; this doesn't
2066 // correspond directly to anything in the source.
2067 assert (isThumb2 && "Adjusting for TB[BH] but not in Thumb2?");
2068 BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B)).addMBB(BB)
2069 .addImm(ARMCC::AL).addReg(0);
2071 // Update internal data structures to account for the newly inserted MBB.
2072 MF->RenumberBlocks(NewBB);
2075 NewBB->addSuccessor(BB);
2076 JTBB->removeSuccessor(BB);
2077 JTBB->addSuccessor(NewBB);