//===- RegisterCoalescer.cpp - Generic Register Coalescing Interface -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the generic RegisterCoalescer interface, the common
// interface used by all clients and implementations of register coalescing.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regcoalescing"
#include "RegisterCoalescer.h"
#include "VirtRegMap.h"
#include "LiveDebugVariables.h"

#include "llvm/Pass.h"
#include "llvm/Value.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>

using namespace llvm;
STATISTIC(numJoins    , "Number of interval joins performed");
STATISTIC(numCrossRCs , "Number of cross class joins performed");
STATISTIC(numCommutes , "Number of instructions commuted");
STATISTIC(numExtends  , "Number of copies extended");
STATISTIC(NumReMats   , "Number of instructions re-materialized");
STATISTIC(numPeep     , "Number of identity moves eliminated after coalescing");
STATISTIC(numAborts   , "Number of times interval joining aborted");
static cl::opt<bool>
EnableJoining("join-liveintervals",
              cl::desc("Coalesce copies (default=true)"),
              cl::init(true));

static cl::opt<bool>
DisableCrossClassJoin("disable-cross-class-join",
              cl::desc("Avoid coalescing cross register class copies"),
              cl::init(false), cl::Hidden);

static cl::opt<bool>
EnablePhysicalJoin("join-physregs",
              cl::desc("Join physical register copies"),
              cl::init(false), cl::Hidden);

static cl::opt<bool>
VerifyCoalescing("verify-coalescing",
              cl::desc("Verify machine instrs before and after register coalescing"),
              cl::Hidden);
INITIALIZE_PASS_BEGIN(RegisterCoalescer, "simple-register-coalescing",
                      "Simple Register Coalescing", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing",
                    "Simple Register Coalescing", false, false)

char RegisterCoalescer::ID = 0;
static unsigned compose(const TargetRegisterInfo &tri, unsigned a, unsigned b) {
  if (!a) return b;
  if (!b) return a;
  return tri.composeSubRegIndices(a, b);
}
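
// isMoveInstr - Return true if MI is a COPY or SUBREG_TO_REG instruction,
// reporting the source and destination registers and their subregister
// indices. For SUBREG_TO_REG the destination subindex is composed with the
// index of the inserted value, so the pair describes the copy's full effect.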
static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI,
                        unsigned &Src, unsigned &Dst,
                        unsigned &SrcSub, unsigned &DstSub) {
  if (MI->isCopy()) {
    Dst = MI->getOperand(0).getReg();
    DstSub = MI->getOperand(0).getSubReg();
    Src = MI->getOperand(1).getReg();
    SrcSub = MI->getOperand(1).getSubReg();
  } else if (MI->isSubregToReg()) {
    Dst = MI->getOperand(0).getReg();
    DstSub = compose(tri, MI->getOperand(0).getSubReg(),
                     MI->getOperand(3).getImm());
    Src = MI->getOperand(2).getReg();
    SrcSub = MI->getOperand(2).getSubReg();
  } else
    return false;
  return true;
}
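
// setRegisters - Normalize the copy MI into this CoalescerPair. The
// invariants established below: a physical register always ends up as
// dstReg_, a subregister index on a physreg is folded into the register
// itself, and for two virtual registers newRC_ is a register class both
// sides can be constrained to. Returns false if MI cannot be coalesced.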
bool CoalescerPair::setRegisters(const MachineInstr *MI) {
  srcReg_ = dstReg_ = subIdx_ = 0;
  newRC_ = 0;
  flipped_ = crossClass_ = false;

  unsigned Src, Dst, SrcSub, DstSub;
  if (!isMoveInstr(tri_, MI, Src, Dst, SrcSub, DstSub))
    return false;
  partial_ = SrcSub || DstSub;

  // If one register is a physreg, it must be Dst.
  if (TargetRegisterInfo::isPhysicalRegister(Src)) {
    if (TargetRegisterInfo::isPhysicalRegister(Dst))
      return false;
    std::swap(Src, Dst);
    std::swap(SrcSub, DstSub);
    flipped_ = true;
  }

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

  if (TargetRegisterInfo::isPhysicalRegister(Dst)) {
    // Eliminate DstSub on a physreg.
    if (DstSub) {
      Dst = tri_.getSubReg(Dst, DstSub);
      if (!Dst) return false;
      DstSub = 0;
    }

    // Eliminate SrcSub by picking a corresponding Dst superregister.
    if (SrcSub) {
      Dst = tri_.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src));
      if (!Dst) return false;
      SrcSub = 0;
    } else if (!MRI.getRegClass(Src)->contains(Dst)) {
      return false;
    }
  } else {
    // Both registers are virtual.

    // Both registers have subreg indices.
    if (SrcSub && DstSub) {
      // For now we only handle the case of identical indices in commensurate
      // registers: Dreg:ssub_1 + Dreg:ssub_1 -> Dreg
      // FIXME: Handle Qreg:ssub_3 + Dreg:ssub_1 as QReg:dsub_1 + Dreg.
      if (SrcSub != DstSub)
        return false;
      const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
      const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
      if (!getCommonSubClass(DstRC, SrcRC))
        return false;
      SrcSub = DstSub = 0;
    }

    // There can be no SrcSub.
    if (SrcSub) {
      std::swap(Src, Dst);
      DstSub = SrcSub;
      SrcSub = 0;
      assert(!flipped_ && "Unexpected flip");
      flipped_ = true;
    }

    // Find the new register class.
    const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
    if (DstSub)
      newRC_ = tri_.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
    else
      newRC_ = getCommonSubClass(DstRC, SrcRC);
    if (!newRC_)
      return false;
    crossClass_ = newRC_ != DstRC || newRC_ != SrcRC;
  }
  // Check our invariants
  assert(TargetRegisterInfo::isVirtualRegister(Src) && "Src must be virtual");
  assert(!(TargetRegisterInfo::isPhysicalRegister(Dst) && DstSub) &&
         "Cannot have a physical SubIdx");
  srcReg_ = Src;
  dstReg_ = Dst;
  subIdx_ = DstSub;
  return true;
}
bool CoalescerPair::flip() {
  if (subIdx_ || TargetRegisterInfo::isPhysicalRegister(dstReg_))
    return false;
  std::swap(srcReg_, dstReg_);
  flipped_ = !flipped_;
  return true;
}
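
// isCoalescable - Return true if MI is a copy with the same effect as the
// copy this pair was constructed from: the same (normalized) source and
// destination registers with compatible subregister indices.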
bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
  if (!MI)
    return false;
  unsigned Src, Dst, SrcSub, DstSub;
  if (!isMoveInstr(tri_, MI, Src, Dst, SrcSub, DstSub))
    return false;

  // Find the virtual register that is srcReg_.
  if (Dst == srcReg_) {
    std::swap(Src, Dst);
    std::swap(SrcSub, DstSub);
  } else if (Src != srcReg_) {
    return false;
  }

  // Now check that Dst matches dstReg_.
  if (TargetRegisterInfo::isPhysicalRegister(dstReg_)) {
    if (!TargetRegisterInfo::isPhysicalRegister(Dst))
      return false;
    assert(!subIdx_ && "Inconsistent CoalescerPair state.");
    // DstSub could be set for a physreg from INSERT_SUBREG.
    if (DstSub)
      Dst = tri_.getSubReg(Dst, DstSub);
    // Full copy of Dst.
    if (!SrcSub)
      return dstReg_ == Dst;
    // This is a partial register copy. Check that the parts match.
    return tri_.getSubReg(dstReg_, SrcSub) == Dst;
  }

  // dstReg_ is virtual.
  if (dstReg_ != Dst)
    return false;
  // Registers match, do the subregisters line up?
  return compose(tri_, subIdx_, SrcSub) == DstSub;
}
void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(StrongPHIEliminationID);
  AU.addPreservedID(PHIEliminationID);
  AU.addPreservedID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void RegisterCoalescer::markAsJoined(MachineInstr *CopyMI) {
  /// Joined copies are not deleted immediately, but kept in JoinedCopies.
  JoinedCopies.insert(CopyMI);

  /// Mark all register operands of CopyMI as <undef> so they won't affect dead
  /// code elimination.
  for (MachineInstr::mop_iterator I = CopyMI->operands_begin(),
       E = CopyMI->operands_end(); I != E; ++I)
    if (I->isReg())
      I->setIsUndef(true);
}
/// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy with IntA
/// being the source and IntB being the dest, thus this defines a value number
/// in IntB. If the source value number (in IntA) is defined by a copy from B,
/// see if we can merge these two pieces of B into a single value number,
/// eliminating a copy. For example:
///
///  A3 = B0
///    ...
///  B1 = A3      <- this copy
///
/// In this case, B0 can be extended to where the B1 copy lives, allowing the B1
/// value number to be replaced with B0 (which simplifies the B liveinterval).
///
/// This returns true if an interval was modified.
///
bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
                                             MachineInstr *CopyMI) {
  // Bail if there is no dst interval - can happen when merging physical subreg
  // operations.
  if (!li_->hasInterval(CP.getDstReg()))
    return false;

  LiveInterval &IntA =
    li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
  LiveInterval &IntB =
    li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();

  // BValNo is a value number in B that is defined by a copy from A. 'B1' in
  // the example above.
  LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
  if (BLR == IntB.end()) return false;
  VNInfo *BValNo = BLR->valno;

  // Get the location that B is defined at. Two options: either this value has
  // an unknown definition point or it is defined at CopyIdx. If unknown, we
  // can't process it.
  if (!BValNo->isDefByCopy()) return false;
  assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");

  // AValNo is the value number in A that defines the copy, A3 in the example.
  SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
  LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
  // The live range might not exist after fun with physreg coalescing.
  if (ALR == IntA.end()) return false;
  VNInfo *AValNo = ALR->valno;
  // If it's re-defined by an early clobber somewhere in the live range, then
  // it's not safe to eliminate the copy. FIXME: This is a temporary workaround.
  // For example:
  //  172     %ECX<def> = MOV32rr %reg1039<kill>
  //  180     INLINEASM <es:subl $5,$1
  //            sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9,
  //            36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
  //  188     %EAX<def> = MOV32rr %EAX<kill>
  //  196     %ECX<def> = MOV32rr %ECX<kill>
  //  204     %ECX<def> = MOV32rr %ECX<kill>
  //  212     %EAX<def> = MOV32rr %EAX<kill>
  //  220     %EAX<def> = MOV32rr %EAX
  //  228     %reg1039<def> = MOV32rr %ECX<kill>
  // The early clobber operand ties ECX input to the ECX def.
  //
  // The live interval of ECX is represented as this:
  // %reg20,inf = [46,47:1)[174,230:0)  0@174-(230)  1@46-(47)
  // The coalescer has no idea there was a def in the middle of [174,230].
  if (AValNo->hasRedefByEC())
    return false;

  // If AValNo is defined as a copy from IntB, we can potentially process this.
  // Get the instruction that defines this value number.
  if (!CP.isCoalescable(AValNo->getCopy()))
    return false;

  // Get the LiveRange in IntB that this value number starts with.
  LiveInterval::iterator ValLR =
    IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
  if (ValLR == IntB.end())
    return false;

  // Make sure that the end of the live range is inside the same block as
  // CopyMI.
  MachineInstr *ValLREndInst =
    li_->getInstructionFromIndex(ValLR->end.getPrevSlot());
  if (!ValLREndInst || ValLREndInst->getParent() != CopyMI->getParent())
    return false;

  // Okay, we now know that ValLR ends in the same block that the CopyMI
  // live-range starts. If there are no intervening live ranges between them in
  // IntB, we can merge them.
  if (ValLR+1 != BLR) return false;

  // If a live interval is a physical register, conservatively check if any
  // of its aliases is overlapping the live interval of the virtual register.
  // If so, do not coalesce.
  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
    for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS)
      if (li_->hasInterval(*AS) && IntA.overlaps(li_->getInterval(*AS))) {
        DEBUG({
            dbgs() << "\t\tInterfere with alias ";
            li_->getInterval(*AS).print(dbgs(), tri_);
          });
        return false;
      }
  }

  DEBUG({
      dbgs() << "Extending: ";
      IntB.print(dbgs(), tri_);
    });

  SlotIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
  // We are about to delete CopyMI, so need to remove it as the 'instruction
  // that defines this value #'. Update the valnum with the new defining
  // instruction #.
  BValNo->def = FillerStart;
  BValNo->setCopy(0);

  // Okay, we can merge them. We need to insert a new liverange:
  // [ValLR.end, BLR.begin) of either value number, then we merge the
  // two value numbers.
  IntB.addRange(LiveRange(FillerStart, FillerEnd, BValNo));

  // If the IntB live range is assigned to a physical register, and if that
  // physreg has sub-registers, update their live intervals as well.
  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
    for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
      if (!li_->hasInterval(*SR))
        continue;
      LiveInterval &SRLI = li_->getInterval(*SR);
      SRLI.addRange(LiveRange(FillerStart, FillerEnd,
                              SRLI.getNextValue(FillerStart, 0,
                                                li_->getVNInfoAllocator())));
    }
  }

  // Okay, merge "B1" into the same value number as "B0".
  if (BValNo != ValLR->valno) {
    // If B1 is killed by a PHI, then the merged live range must also be killed
    // by the same PHI, as B0 and B1 can not overlap.
    bool HasPHIKill = BValNo->hasPHIKill();
    IntB.MergeValueNumberInto(BValNo, ValLR->valno);
    if (HasPHIKill)
      ValLR->valno->setHasPHIKill(true);
  }
  DEBUG({
      dbgs() << "   result = ";
      IntB.print(dbgs(), tri_);
      dbgs() << "\n";
    });

  // If the source instruction was killing the source register before the
  // merge, unset the isKill marker given the live range has been extended.
  int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
  if (UIdx != -1)
    ValLREndInst->getOperand(UIdx).setIsKill(false);

  // If the copy instruction was killing the destination register before the
  // merge, find the last use and trim the live range. That will also add the
  // isKill marker.
  if (ALR->end == CopyIdx)
    li_->shrinkToUses(&IntA);

  ++numExtends;
  return true;
}
/// HasOtherReachingDefs - Return true if there are definitions of IntB
/// other than BValNo val# that can reach uses of AValNo val# of IntA.
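///
/// For example (illustrative, not from the original source): if AValNo covers
/// [100,200) in IntA and IntB contains a range [150,170) belonging to a value
/// other than BValNo, that foreign def of B overlaps A's live range, so this
/// returns true and the caller must give up on the transformation.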
bool RegisterCoalescer::HasOtherReachingDefs(LiveInterval &IntA,
                                             LiveInterval &IntB,
                                             VNInfo *AValNo,
                                             VNInfo *BValNo) {
  for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
       AI != AE; ++AI) {
    if (AI->valno != AValNo) continue;
    LiveInterval::Ranges::iterator BI =
      std::upper_bound(IntB.ranges.begin(), IntB.ranges.end(), AI->start);
    if (BI != IntB.ranges.begin())
      --BI;
    for (; BI != IntB.ranges.end() && AI->end >= BI->start; ++BI) {
      if (BI->valno == BValNo)
        continue;
      if (BI->start <= AI->start && BI->end > AI->start)
        return true;
      if (BI->start > AI->start && BI->start < AI->end)
        return true;
    }
  }
  return false;
}
/// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy with
/// IntA being the source and IntB being the dest, thus this defines a value
/// number in IntB. If the source value number (in IntA) is defined by a
/// commutable instruction and its other operand is coalesced to the copy dest
/// register, see if we can transform the copy into a noop by commuting the
/// definition. For example,
///
///  A3 = op A2 B0<kill>
///    ...
///  B1 = A3      <- this copy
///    ...
///     = op A3   <- more uses
///
/// ==>
///
///  B2 = op B0 A2<kill>
///    ...
///  B1 = B2      <- now an identity copy
///    ...
///     = op B2   <- more uses
///
/// This returns true if an interval was modified.
///
bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
                                                 MachineInstr *CopyMI) {
  // FIXME: For now, only eliminate the copy by commuting its def when the
  // source register is a virtual register. We want to guard against cases
  // where the copy is a back edge copy and commuting the def lengthens the
  // live interval of the source register to the entire loop.
  if (CP.isPhys() && CP.isFlipped())
    return false;

  // Bail if there is no dst interval.
  if (!li_->hasInterval(CP.getDstReg()))
    return false;

  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();

  LiveInterval &IntA =
    li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
  LiveInterval &IntB =
    li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());

  // BValNo is a value number in B that is defined by a copy from A. 'B1' in
  // the example above.
  VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx);
  if (!BValNo || !BValNo->isDefByCopy())
    return false;

  assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");

  // AValNo is the value number in A that defines the copy, A3 in the example.
  VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getUseIndex());
  assert(AValNo && "COPY source not live");

  // If other defs can reach uses of this def, then it's not safe to perform
  // the optimization.
  if (AValNo->isPHIDef() || AValNo->isUnused() || AValNo->hasPHIKill())
    return false;
  MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def);
  if (!DefMI)
    return false;
  const MCInstrDesc &MCID = DefMI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // If DefMI is a two-address instruction then commuting it will change the
  // destination register.
  int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg);
  assert(DefIdx != -1);
  unsigned UseOpIdx;
  if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))
    return false;
  unsigned Op1, Op2, NewDstIdx;
  if (!tii_->findCommutedOpIndices(DefMI, Op1, Op2))
    return false;
  if (Op1 == UseOpIdx)
    NewDstIdx = Op2;
  else if (Op2 == UseOpIdx)
    NewDstIdx = Op1;
  else
    return false;

  MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
  unsigned NewReg = NewDstMO.getReg();
  if (NewReg != IntB.reg || !NewDstMO.isKill())
    return false;

  // Make sure there are no other definitions of IntB that would reach the
  // uses which the new definition can reach.
  if (HasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
    return false;

  // Abort if the aliases of IntB.reg have values that are not simply the
  // clobbers from the superreg.
  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg))
    for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS)
      if (li_->hasInterval(*AS) &&
          HasOtherReachingDefs(IntA, li_->getInterval(*AS), AValNo, 0))
        return false;

  // If some of the uses of IntA.reg is already coalesced away, return false.
  // It's not possible to determine whether it's safe to perform the coalescing.
  for (MachineRegisterInfo::use_nodbg_iterator UI =
         mri_->use_nodbg_begin(IntA.reg),
       UE = mri_->use_nodbg_end(); UI != UE; ++UI) {
    MachineInstr *UseMI = &*UI;
    SlotIndex UseIdx = li_->getInstructionIndex(UseMI);
    LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
    if (ULR == IntA.end())
      continue;
    if (ULR->valno == AValNo && JoinedCopies.count(UseMI))
      return false;
  }

  DEBUG(dbgs() << "\tRemoveCopyByCommutingDef: " << AValNo->def << '\t'
               << *DefMI);

  // At this point we have decided that it is legal to do this
  // transformation. Start by commuting the instruction.
  MachineBasicBlock *MBB = DefMI->getParent();
  MachineInstr *NewMI = tii_->commuteInstruction(DefMI);
  if (!NewMI)
    return false;
  if (TargetRegisterInfo::isVirtualRegister(IntA.reg) &&
      TargetRegisterInfo::isVirtualRegister(IntB.reg) &&
      !mri_->constrainRegClass(IntB.reg, mri_->getRegClass(IntA.reg)))
    return false;
  if (NewMI != DefMI) {
    li_->ReplaceMachineInstrInMaps(DefMI, NewMI);
    MBB->insert(DefMI, NewMI);
    MBB->erase(DefMI);
  }
  unsigned OpIdx = NewMI->findRegisterUseOperandIdx(IntA.reg, false);
  NewMI->getOperand(OpIdx).setIsKill();

  // If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g.
  // A = or A, B
  // ...
  // B = A
  // ...
  // C = A<kill>
  // ...
  //   = B
  //
  // then do not add kills of A to the newly created B interval.

  // Update uses of IntA of the specific Val# with IntB.
  for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
         UE = mri_->use_end(); UI != UE;) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    ++UI;
    if (JoinedCopies.count(UseMI))
      continue;
    if (UseMI->isDebugValue()) {
      // FIXME These don't have an instruction index. Not clear we have enough
      // info to decide whether to do this replacement or not. For now do it.
      UseMO.setReg(NewReg);
      continue;
    }
    SlotIndex UseIdx = li_->getInstructionIndex(UseMI).getUseIndex();
    LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
    if (ULR == IntA.end() || ULR->valno != AValNo)
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(NewReg))
      UseMO.substPhysReg(NewReg, *tri_);
    else
      UseMO.setReg(NewReg);
    if (UseMI == CopyMI)
      continue;
    if (!UseMI->isCopy())
      continue;
    if (UseMI->getOperand(0).getReg() != IntB.reg ||
        UseMI->getOperand(0).getSubReg())
      continue;

    // This copy will become a noop. If it's defining a new val#, merge it into
    // BValNo.
    SlotIndex DefIdx = UseIdx.getDefIndex();
    VNInfo *DVNI = IntB.getVNInfoAt(DefIdx);
    if (!DVNI)
      continue;
    DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
    assert(DVNI->def == DefIdx);
    BValNo = IntB.MergeValueNumberInto(BValNo, DVNI);
    markAsJoined(UseMI);
  }

  // Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
  // is updated.
  VNInfo *ValNo = BValNo;
  ValNo->def = AValNo->def;
  ValNo->setCopy(0);
  for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
       AI != AE; ++AI) {
    if (AI->valno != AValNo) continue;
    IntB.addRange(LiveRange(AI->start, AI->end, ValNo));
  }
  DEBUG(dbgs() << "\t\textended: " << IntB << '\n');

  IntA.removeValNo(AValNo);
  DEBUG(dbgs() << "\t\ttrimmed:  " << IntA << '\n');
  ++numCommutes;
  return true;
}
/// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial
/// computation, replace the copy by rematerializing the definition.
bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
                                                bool preserveSrcInt,
                                                unsigned DstReg,
                                                unsigned DstSubIdx,
                                                MachineInstr *CopyMI) {
  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getUseIndex();
  LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
  assert(SrcLR != SrcInt.end() && "Live range not found!");
  VNInfo *ValNo = SrcLR->valno;
  // If other defs can reach uses of this def, then it's not safe to perform
  // the optimization.
  if (ValNo->isPHIDef() || ValNo->isUnused() || ValNo->hasPHIKill())
    return false;
  MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
  if (!DefMI)
    return false;
  assert(DefMI && "Defining instruction disappeared");
  const MCInstrDesc &MCID = DefMI->getDesc();
  if (!MCID.isAsCheapAsAMove())
    return false;
  if (!tii_->isTriviallyReMaterializable(DefMI, AA))
    return false;
  bool SawStore = false;
  if (!DefMI->isSafeToMove(tii_, AA, SawStore))
    return false;
  if (MCID.getNumDefs() != 1)
    return false;
  if (!DefMI->isImplicitDef()) {
    // Make sure the copy destination register class fits the instruction
    // definition register class. The mismatch can happen as a result of earlier
    // extract_subreg, insert_subreg, subreg_to_reg coalescing.
    const TargetRegisterClass *RC = tii_->getRegClass(MCID, 0, tri_);
    if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
      if (mri_->getRegClass(DstReg) != RC)
        return false;
    } else if (!RC->contains(DstReg))
      return false;
  }

  // If destination register has a sub-register index on it, make sure it
  // matches the instruction register class.
  if (DstSubIdx) {
    const MCInstrDesc &MCID = DefMI->getDesc();
    if (MCID.getNumDefs() != 1)
      return false;
    const TargetRegisterClass *DstRC = mri_->getRegClass(DstReg);
    const TargetRegisterClass *DstSubRC =
      DstRC->getSubRegisterRegClass(DstSubIdx);
    const TargetRegisterClass *DefRC = tii_->getRegClass(MCID, 0, tri_);
    if (DefRC == DstRC)
      DstSubIdx = 0;
    else if (DefRC != DstSubRC)
      return false;
  }

  RemoveCopyFlag(DstReg, CopyMI);

  MachineBasicBlock *MBB = CopyMI->getParent();
  MachineBasicBlock::iterator MII =
    llvm::next(MachineBasicBlock::iterator(CopyMI));
  tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, *tri_);
  MachineInstr *NewMI = prior(MII);

  // CopyMI may have implicit operands, transfer them over to the newly
  // rematerialized instruction. And update implicit def interval valnos.
  for (unsigned i = CopyMI->getDesc().getNumOperands(),
         e = CopyMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = CopyMI->getOperand(i);
    if (MO.isReg() && MO.isImplicit())
      NewMI->addOperand(MO);
    if (MO.isDef())
      RemoveCopyFlag(MO.getReg(), CopyMI);
  }

  NewMI->copyImplicitOps(CopyMI);
  li_->ReplaceMachineInstrInMaps(CopyMI, NewMI);
  CopyMI->eraseFromParent();
  ReMatCopies.insert(CopyMI);
  ReMatDefs.insert(DefMI);
  DEBUG(dbgs() << "Remat: " << *NewMI);
  ++NumReMats;

  // The source interval can become smaller because we removed a use.
  if (preserveSrcInt)
    li_->shrinkToUses(&SrcInt);

  return true;
}
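
// Typical candidate for the rematerialization above (illustrative): a
// constant materialization such as "%reg1024<def> = MOV32ri 42" is
// as-cheap-as-a-move and has no side effects, so instead of copying
// %reg1024 we can emit a fresh MOV32ri 42 directly into DstReg.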
/// UpdateRegDefsUses - Replace all defs and uses of SrcReg with DstReg and
/// update the subregister number if it is not zero. If DstReg is a
/// physical register and the existing subregister number of the def / use
/// being updated is not zero, make sure to set it to the correct physical
/// subregister.
void
RegisterCoalescer::UpdateRegDefsUses(const CoalescerPair &CP) {
  bool DstIsPhys = CP.isPhys();
  unsigned SrcReg = CP.getSrcReg();
  unsigned DstReg = CP.getDstReg();
  unsigned SubIdx = CP.getSubIdx();

  // Update LiveDebugVariables.
  ldv_->renameRegister(SrcReg, DstReg, SubIdx);

  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg);
       MachineInstr *UseMI = I.skipInstruction();) {
    // A PhysReg copy that won't be coalesced can perhaps be rematerialized
    // instead.
    if (DstIsPhys) {
      if (UseMI->isCopy() &&
          !UseMI->getOperand(1).getSubReg() &&
          !UseMI->getOperand(0).getSubReg() &&
          UseMI->getOperand(1).getReg() == SrcReg &&
          UseMI->getOperand(0).getReg() != SrcReg &&
          UseMI->getOperand(0).getReg() != DstReg &&
          !JoinedCopies.count(UseMI) &&
          ReMaterializeTrivialDef(li_->getInterval(SrcReg), false,
                                  UseMI->getOperand(0).getReg(), 0, UseMI))
        continue;
    }

    SmallVector<unsigned,8> Ops;
    bool Reads, Writes;
    tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
    bool Kills = false, Deads = false;

    // Replace SrcReg with DstReg in all UseMI operands.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = UseMI->getOperand(Ops[i]);
      Kills |= MO.isKill();
      Deads |= MO.isDead();

      if (DstIsPhys)
        MO.substPhysReg(DstReg, *tri_);
      else
        MO.substVirtReg(DstReg, SubIdx, *tri_);
    }

    // This instruction is a copy that will be removed.
    if (JoinedCopies.count(UseMI))
      continue;

    // If UseMI was a simple SrcReg def, make sure we didn't turn it into a
    // read-modify-write of DstReg.
    if (Deads)
      UseMI->addRegisterDead(DstReg, tri_);
    else if (!Reads && Writes)
      UseMI->addRegisterDefined(DstReg, tri_);

    // Kill flags apply to the whole physical register.
    if (DstIsPhys && Kills)
      UseMI->addRegisterKilled(DstReg, tri_);

    DEBUG({
        dbgs() << "\t\tupdated: ";
        if (!UseMI->isDebugValue())
          dbgs() << li_->getInstructionIndex(UseMI) << "\t";
        dbgs() << *UseMI;
      });
  }
}
/// removeIntervalIfEmpty - Check if the live interval of a physical register
/// is empty, if so remove it and also remove the empty intervals of its
/// sub-registers. Return true if live interval is removed.
static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *li_,
                                  const TargetRegisterInfo *tri_) {
  if (li.empty()) {
    if (TargetRegisterInfo::isPhysicalRegister(li.reg))
      for (const unsigned* SR = tri_->getSubRegisters(li.reg); *SR; ++SR) {
        if (!li_->hasInterval(*SR))
          continue;
        LiveInterval &sli = li_->getInterval(*SR);
        if (sli.empty())
          li_->removeInterval(*SR);
      }
    li_->removeInterval(li.reg);
    return true;
  }
  return false;
}
/// RemoveDeadDef - If a def of a live interval is now determined dead, remove
/// the val# it defines. If the live interval becomes empty, remove it as well.
bool RegisterCoalescer::RemoveDeadDef(LiveInterval &li,
                                      MachineInstr *DefMI) {
  SlotIndex DefIdx = li_->getInstructionIndex(DefMI).getDefIndex();
  LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
  if (DefIdx != MLR->valno->def)
    return false;
  li.removeValNo(MLR->valno);
  return removeIntervalIfEmpty(li, li_, tri_);
}
void RegisterCoalescer::RemoveCopyFlag(unsigned DstReg,
                                       const MachineInstr *CopyMI) {
  SlotIndex DefIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
  if (li_->hasInterval(DstReg)) {
    LiveInterval &LI = li_->getInterval(DstReg);
    if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
      if (LR->valno->def == DefIdx)
        LR->valno->setCopy(0);
  }
  if (!TargetRegisterInfo::isPhysicalRegister(DstReg))
    return;
  for (const unsigned* AS = tri_->getAliasSet(DstReg); *AS; ++AS) {
    if (!li_->hasInterval(*AS))
      continue;
    LiveInterval &LI = li_->getInterval(*AS);
    if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
      if (LR->valno->def == DefIdx)
        LR->valno->setCopy(0);
  }
}
/// shouldJoinPhys - Return true if a copy involving a physreg should be joined.
/// We need to be careful about coalescing a source physical register with a
/// virtual register. Once the coalescing is done, it cannot be broken and these
/// are not spillable! If the destination interval uses are far away, think
/// twice about coalescing them!
bool RegisterCoalescer::shouldJoinPhys(CoalescerPair &CP) {
  bool Allocatable = li_->isAllocatable(CP.getDstReg());
  LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg());

  /// Always join simple intervals that are defined by a single copy from a
  /// reserved register. This doesn't increase register pressure, so it is
  /// always beneficial.
  if (!Allocatable && CP.isFlipped() && JoinVInt.containsOneValue())
    return true;

  if (!EnablePhysicalJoin) {
    DEBUG(dbgs() << "\tPhysreg joins disabled.\n");
    return false;
  }

  // Only coalesce to allocatable physregs, we don't want to risk modifying
  // reserved registers.
  if (!Allocatable) {
    DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
    return false;  // Not coalescable.
  }

  // Don't join with physregs that have a ridiculous number of live
  // ranges. The data structure performance is really bad when that
  // happens.
  if (li_->hasInterval(CP.getDstReg()) &&
      li_->getInterval(CP.getDstReg()).ranges.size() > 1000) {
    ++numAborts;
    DEBUG(dbgs()
          << "\tPhysical register live interval too complicated, abort!\n");
    return false;
  }

  // FIXME: Why are we skipping this test for partial copies?
  //        CodeGen/X86/phys_subreg_coalesce-3.ll needs it.
  if (!CP.isPartial()) {
    const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg());
    unsigned Threshold = RegClassInfo.getNumAllocatableRegs(RC) * 2;
    unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
    if (Length > Threshold) {
      ++numAborts;
      DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
      return false;
    }
  }
  return true;
}
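
// Illustrative numbers for the threshold check above (not from the original
// source): with a register class of 8 allocatable registers, Threshold is 16;
// a virtual register whose interval spans roughly 20 instructions would tie
// down the physreg for too long, so the join is rejected and the caller falls
// back to rematerialization.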
/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
/// two virtual registers from different register classes.
bool
RegisterCoalescer::isWinToJoinCrossClass(unsigned SrcReg,
                                         unsigned DstReg,
                                         const TargetRegisterClass *SrcRC,
                                         const TargetRegisterClass *DstRC,
                                         const TargetRegisterClass *NewRC) {
  unsigned NewRCCount = RegClassInfo.getNumAllocatableRegs(NewRC);
  // This heuristic is good enough in practice, but it's obviously not *right*.
  // 4 is a magic number that works well enough for x86, ARM, etc. It filters
  // out all but the most restrictive register classes.
  if (NewRCCount > 4 ||
      // Early exit if the function is fairly small, coalesce aggressively if
      // that's the case. For really special register classes with 3 or
      // fewer registers, be a bit more careful.
      (li_->getFuncInstructionCount() / NewRCCount) < 8)
    return true;
  LiveInterval &SrcInt = li_->getInterval(SrcReg);
  LiveInterval &DstInt = li_->getInterval(DstReg);
  unsigned SrcSize = li_->getApproximateInstructionCount(SrcInt);
  unsigned DstSize = li_->getApproximateInstructionCount(DstInt);

  // Coalesce aggressively if the intervals are small compared to the number of
  // registers in the new class. The number 4 is fairly arbitrary, chosen to be
  // less aggressive than the 8 used for the whole function size.
  const unsigned ThresSize = 4 * NewRCCount;
  if (SrcSize <= ThresSize && DstSize <= ThresSize)
    return true;

  // Estimate *register use density*. If it doubles or more, abort.
  unsigned SrcUses = std::distance(mri_->use_nodbg_begin(SrcReg),
                                   mri_->use_nodbg_end());
  unsigned DstUses = std::distance(mri_->use_nodbg_begin(DstReg),
                                   mri_->use_nodbg_end());
  unsigned NewUses = SrcUses + DstUses;
  unsigned NewSize = SrcSize + DstSize;
  if (SrcRC != NewRC && SrcSize > ThresSize) {
    unsigned SrcRCCount = RegClassInfo.getNumAllocatableRegs(SrcRC);
    if (NewUses*SrcSize*SrcRCCount > 2*SrcUses*NewSize*NewRCCount)
      return false;
  }
  if (DstRC != NewRC && DstSize > ThresSize) {
    unsigned DstRCCount = RegClassInfo.getNumAllocatableRegs(DstRC);
    if (NewUses*DstSize*DstRCCount > 2*DstUses*NewSize*NewRCCount)
      return false;
  }
  return true;
}
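
// Worked example for the density check above (illustrative numbers, not from
// the original source): suppose NewRCCount = 4, SrcSize = 20, DstSize = 10,
// SrcUses = 10, DstUses = 5, and SrcRCCount = 8. Then NewUses = 15 and
// NewSize = 30, so the test compares 15*20*8 = 2400 against 2*10*30*4 = 2400;
// since 2400 > 2400 is false, the use density has not doubled and the
// cross-class join is allowed.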
/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
/// which are the src/dst of the copy instruction CopyMI. This returns true
/// if the copy was successfully coalesced away. If it is not currently
/// possible to coalesce this interval, but it may become possible after
/// other copies are coalesced, 'Again' is set to true.
bool RegisterCoalescer::JoinCopy(MachineInstr *CopyMI, bool &Again) {
  Again = false;
  if (JoinedCopies.count(CopyMI) || ReMatCopies.count(CopyMI))
    return false; // Already done.

  DEBUG(dbgs() << li_->getInstructionIndex(CopyMI) << '\t' << *CopyMI);

  CoalescerPair CP(*tii_, *tri_);
  if (!CP.setRegisters(CopyMI)) {
    DEBUG(dbgs() << "\tNot coalescable.\n");
    return false;
  }

  // If they are already joined we continue.
  if (CP.getSrcReg() == CP.getDstReg()) {
    markAsJoined(CopyMI);
    DEBUG(dbgs() << "\tCopy already coalesced.\n");
    return false;  // Not coalescable.
  }

  DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), tri_)
               << " with " << PrintReg(CP.getDstReg(), tri_, CP.getSubIdx())
               << "\n");

  // Enforce policies.
  if (CP.isPhys()) {
    if (!shouldJoinPhys(CP)) {
      // Before giving up coalescing, if definition of source is defined by
      // trivial computation, try rematerializing it.
      if (!CP.isFlipped() &&
          ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true,
                                  CP.getDstReg(), 0, CopyMI))
        return true;
      return false;
    }
  } else {
    // Avoid constraining virtual register regclass too much.
    if (CP.isCrossClass()) {
      DEBUG(dbgs() << "\tCross-class to " << CP.getNewRC()->getName() << ".\n");
      if (DisableCrossClassJoin) {
        DEBUG(dbgs() << "\tCross-class joins disabled.\n");
        return false;
      }
      if (!isWinToJoinCrossClass(CP.getSrcReg(), CP.getDstReg(),
                                 mri_->getRegClass(CP.getSrcReg()),
                                 mri_->getRegClass(CP.getDstReg()),
                                 CP.getNewRC())) {
        DEBUG(dbgs() << "\tAvoid coalescing to constrained register class.\n");
        Again = true;  // May be possible to coalesce later.
        return false;
      }
    }

    // When possible, let DstReg be the larger interval.
    if (!CP.getSubIdx() && li_->getInterval(CP.getSrcReg()).ranges.size() >
                           li_->getInterval(CP.getDstReg()).ranges.size())
      CP.flip();
  }

  // Okay, attempt to join these two intervals. On failure, this returns false.
  // Otherwise, if one of the intervals being joined is a physreg, this method
  // always canonicalizes DstInt to be it. The output "SrcInt" will not have
  // been modified, so we can use this information below to update aliases.
  if (!JoinIntervals(CP)) {
    // Coalescing failed.

    // If definition of source is defined by trivial computation, try
    // rematerializing it.
    if (!CP.isFlipped() &&
        ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true,
                                CP.getDstReg(), 0, CopyMI))
      return true;

    // If we can eliminate the copy without merging the live ranges, do so now.
    if (!CP.isPartial()) {
      if (AdjustCopiesBackFrom(CP, CopyMI) ||
          RemoveCopyByCommutingDef(CP, CopyMI)) {
        markAsJoined(CopyMI);
        DEBUG(dbgs() << "\tTrivial!\n");
        return true;
      }
    }

    // Otherwise, we are unable to join the intervals.
    DEBUG(dbgs() << "\tInterference!\n");
    Again = true;  // May be possible to coalesce later.
    return false;
  }

  // Coalescing to a virtual register that is of a sub-register class of the
  // other. Make sure the resulting register is set to the right register class.
  if (CP.isCrossClass()) {
    ++numCrossRCs;
    mri_->setRegClass(CP.getDstReg(), CP.getNewRC());
  }

  // Remember to delete the copy instruction.
  markAsJoined(CopyMI);

  UpdateRegDefsUses(CP);

  // If we have extended the live range of a physical register, make sure we
  // update live-in lists as well.
  if (CP.isPhys()) {
    SmallVector<MachineBasicBlock*, 16> BlockSeq;
    // JoinIntervals invalidates the VNInfos in SrcInt, but we only need the
    // ranges for this, and they are preserved.
    LiveInterval &SrcInt = li_->getInterval(CP.getSrcReg());
    for (LiveInterval::const_iterator I = SrcInt.begin(), E = SrcInt.end();
         I != E; ++I) {
      li_->findLiveInMBBs(I->start, I->end, BlockSeq);
      for (unsigned idx = 0, size = BlockSeq.size(); idx != size; ++idx) {
        MachineBasicBlock &block = *BlockSeq[idx];
        if (!block.isLiveIn(CP.getDstReg()))
          block.addLiveIn(CP.getDstReg());
      }
      BlockSeq.clear();
    }
  }

  // SrcReg is guaranteed to be the register whose live interval is being
  // merged.
  li_->removeInterval(CP.getSrcReg());

  // Update regalloc hint.
  tri_->UpdateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *mf_);

  DEBUG({
      LiveInterval &DstInt = li_->getInterval(CP.getDstReg());
      dbgs() << "\tJoined. Result = ";
      DstInt.print(dbgs(), tri_);
      dbgs() << "\n";
    });

  ++numJoins;
  return true;
}
/// ComputeUltimateVN - Assuming we are going to join two live intervals,
/// compute what the resultant value numbers for each value in the input two
/// ranges will be. This is complicated by copies between the two which can
/// and will commonly cause multiple value numbers to be merged into one.
///
/// VN is the value number that we're trying to resolve. NewVNInfo keeps
/// track of the value-number assignments for the result LiveInterval.
/// ThisFromOther/OtherFromThis are sets that keep track of whether a value
/// in this or other is a copy from the opposite set.
/// ThisValNoAssignments/OtherValNoAssignments keep track of value #'s that
/// have already been assigned.
///
/// ThisFromOther[x] - If x is defined as a copy from the other interval, this
/// contains the value number the copy is from.
///
static unsigned ComputeUltimateVN(VNInfo *VNI,
                                  SmallVector<VNInfo*, 16> &NewVNInfo,
                                  DenseMap<VNInfo*, VNInfo*> &ThisFromOther,
                                  DenseMap<VNInfo*, VNInfo*> &OtherFromThis,
                                  SmallVector<int, 16> &ThisValNoAssignments,
                                  SmallVector<int, 16> &OtherValNoAssignments) {
  unsigned VN = VNI->id;

  // If the VN has already been computed, just return it.
  if (ThisValNoAssignments[VN] >= 0)
    return ThisValNoAssignments[VN];
  assert(ThisValNoAssignments[VN] != -2 && "Cyclic value numbers");

  // If this val is not a copy from the other val, then it must be a new value
  // number in the destination.
  DenseMap<VNInfo*, VNInfo*>::iterator I = ThisFromOther.find(VNI);
  if (I == ThisFromOther.end()) {
    NewVNInfo.push_back(VNI);
    return ThisValNoAssignments[VN] = NewVNInfo.size()-1;
  }
  VNInfo *OtherValNo = I->second;

  // Otherwise, this *is* a copy from the RHS. If the other side has already
  // been computed, return it.
  if (OtherValNoAssignments[OtherValNo->id] >= 0)
    return ThisValNoAssignments[VN] = OtherValNoAssignments[OtherValNo->id];

  // Mark this value number as currently being computed, then ask what the
  // ultimate value # of the other value is.
  ThisValNoAssignments[VN] = -2;
  unsigned UltimateVN =
    ComputeUltimateVN(OtherValNo, NewVNInfo, OtherFromThis, ThisFromOther,
                      OtherValNoAssignments, ThisValNoAssignments);
  return ThisValNoAssignments[VN] = UltimateVN;
}
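
// Illustrative trace of the recursion above (not from the original source):
// suppose LHS value L0 is a copy of RHS value R0, and R0 is not a copy from
// the LHS. Resolving L0 first marks ThisValNoAssignments[L0] = -2, then
// recurses into R0, which appends R0 to NewVNInfo and receives index 0; the
// recursion then assigns that ultimate value number 0 to both L0 and R0,
// merging the two values.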
// Find out if we have something like
// A = X
// B = X
// if so, we can pretend this is actually
// A = X
// B = A
// which allows us to coalesce A and B.
// VNI is the definition of B. LR is the live range of A that includes
// the slot just before B. If we return true, we add "B = X" to DupCopies.
static bool RegistersDefinedFromSameValue(LiveIntervals &li,
                                          const TargetRegisterInfo &tri,
                                          CoalescerPair &CP,
                                          VNInfo *VNI,
                                          LiveRange *LR,
                                          SmallVector<MachineInstr*, 8> &DupCopies) {
  // FIXME: This is very conservative. For example, we don't handle
  // physical registers.

  MachineInstr *MI = VNI->getCopy();

  if (!MI->isFullCopy() || CP.isPartial() || CP.isPhys())
    return false;

  unsigned Dst = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();

  // FIXME: If "B = X" kills X, we have to move the kill back to its
  // previous use. For now we just avoid the optimization in that case.
  LiveInterval &SrcInt = li.getInterval(Src);
  if (SrcInt.killedAt(VNI->def))
    return false;

  if (!TargetRegisterInfo::isVirtualRegister(Src) ||
      !TargetRegisterInfo::isVirtualRegister(Dst))
    return false;

  unsigned A = CP.getDstReg();
  unsigned B = CP.getSrcReg();

  if (B == Dst)
    std::swap(A, B);
  assert(Dst == A);

  VNInfo *Other = LR->valno;
  if (!Other->isDefByCopy())
    return false;
  const MachineInstr *OtherMI = Other->getCopy();

  if (!OtherMI->isFullCopy())
    return false;

  unsigned OtherDst = OtherMI->getOperand(0).getReg();
  unsigned OtherSrc = OtherMI->getOperand(1).getReg();

  if (!TargetRegisterInfo::isVirtualRegister(OtherSrc) ||
      !TargetRegisterInfo::isVirtualRegister(OtherDst))
    return false;

  assert(OtherDst == B);

  if (Src != OtherSrc)
    return false;

  // If the copies use two different value numbers of X, we cannot merge
  // A and B.
  if (SrcInt.getVNInfoAt(Other->def) != SrcInt.getVNInfoAt(VNI->def))
    return false;

  DupCopies.push_back(MI);

  return true;
}
/// JoinIntervals - Attempt to join these two intervals. On failure, this
/// returns false.
bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
  LiveInterval &RHS = li_->getInterval(CP.getSrcReg());
  DEBUG({ dbgs() << "\t\tRHS = "; RHS.print(dbgs(), tri_); dbgs() << "\n"; });

  // If a live interval is a physical register, check for interference with any
  // aliases. The interference check implemented here is a bit more conservative
  // than the full interference check below. We allow overlapping live ranges
  // only when one is a copy of the other.
  if (CP.isPhys()) {
    for (const unsigned *AS = tri_->getAliasSet(CP.getDstReg()); *AS; ++AS){
      if (!li_->hasInterval(*AS))
        continue;
      const LiveInterval &LHS = li_->getInterval(*AS);
      LiveInterval::const_iterator LI = LHS.begin();
      for (LiveInterval::const_iterator RI = RHS.begin(), RE = RHS.end();
           RI != RE; ++RI) {
        LI = std::lower_bound(LI, LHS.end(), RI->start);
        // Does LHS have an overlapping live range starting before RI?
        if ((LI != LHS.begin() && LI[-1].end > RI->start) &&
            (RI->start != RI->valno->def ||
             !CP.isCoalescable(li_->getInstructionFromIndex(RI->start)))) {
          DEBUG({
              dbgs() << "\t\tInterference from alias: ";
              LHS.print(dbgs(), tri_);
              dbgs() << "\n\t\tOverlap at " << RI->start << " and no copy.\n";
            });
          return false;
        }

        // Check that LHS ranges beginning in this range are copies.
        for (; LI != LHS.end() && LI->start < RI->end; ++LI) {
          if (LI->start != LI->valno->def ||
              !CP.isCoalescable(li_->getInstructionFromIndex(LI->start))) {
            DEBUG({
                dbgs() << "\t\tInterference from alias: ";
                LHS.print(dbgs(), tri_);
                dbgs() << "\n\t\tDef at " << LI->start << " is not a copy.\n";
              });
            return false;
          }
        }
      }
    }
  }

  // Compute the final value assignment, assuming that the live ranges can be
  // coalesced.
  SmallVector<int, 16> LHSValNoAssignments;
  SmallVector<int, 16> RHSValNoAssignments;
  DenseMap<VNInfo*, VNInfo*> LHSValsDefinedFromRHS;
  DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
  SmallVector<VNInfo*, 16> NewVNInfo;

  SmallVector<MachineInstr*, 8> DupCopies;

  LiveInterval &LHS = li_->getOrCreateInterval(CP.getDstReg());
  DEBUG({ dbgs() << "\t\tLHS = "; LHS.print(dbgs(), tri_); dbgs() << "\n"; });

  // Loop over the value numbers of the LHS, seeing if any are defined from
  // the RHS.
  for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
       i != e; ++i) {
    VNInfo *VNI = *i;
    if (VNI->isUnused() || !VNI->isDefByCopy()) // Src not defined by a copy?
      continue;

    // Never join with a register that has EarlyClobber redefs.
    if (VNI->hasRedefByEC())
      return false;

    // Figure out the value # from the RHS.
    LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
    // The copy could be to an aliased physreg.
    if (!lr) continue;

    // DstReg is known to be a register in the LHS interval. If the src is
    // from the RHS interval, we can use its value #.
    MachineInstr *MI = VNI->getCopy();
    if (!CP.isCoalescable(MI) &&
        !RegistersDefinedFromSameValue(*li_, *tri_, CP, VNI, lr, DupCopies))
      continue;

    LHSValsDefinedFromRHS[VNI] = lr->valno;
  }

  // Loop over the value numbers of the RHS, seeing if any are defined from
  // the LHS.
  for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
       i != e; ++i) {
    VNInfo *VNI = *i;
    if (VNI->isUnused() || !VNI->isDefByCopy()) // Src not defined by a copy?
      continue;

    // Never join with a register that has EarlyClobber redefs.
    if (VNI->hasRedefByEC())
      return false;

    // Figure out the value # from the LHS.
    LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
    // The copy could be to an aliased physreg.
    if (!lr) continue;

    // DstReg is known to be a register in the RHS interval. If the src is
    // from the LHS interval, we can use its value #.
    MachineInstr *MI = VNI->getCopy();
    if (!CP.isCoalescable(MI) &&
        !RegistersDefinedFromSameValue(*li_, *tri_, CP, VNI, lr, DupCopies))
      continue;

    RHSValsDefinedFromLHS[VNI] = lr->valno;
  }

  LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
  RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
  NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());

  for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
       i != e; ++i) {
    VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
      continue;
    ComputeUltimateVN(VNI, NewVNInfo,
                      LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
                      LHSValNoAssignments, RHSValNoAssignments);
  }
  for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
       i != e; ++i) {
    VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
      continue;
    // If this value number isn't a copy from the LHS, it's a new number.
    if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
      NewVNInfo.push_back(VNI);
      RHSValNoAssignments[VN] = NewVNInfo.size()-1;
      continue;
    }

    ComputeUltimateVN(VNI, NewVNInfo,
                      RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
                      RHSValNoAssignments, LHSValNoAssignments);
  }

  // Armed with the mappings of LHS/RHS values to ultimate values, walk the
  // interval lists to see if these intervals are coalescable.
  LiveInterval::const_iterator I = LHS.begin();
  LiveInterval::const_iterator IE = LHS.end();
  LiveInterval::const_iterator J = RHS.begin();
  LiveInterval::const_iterator JE = RHS.end();

  // Skip ahead until the first place of potential sharing.
  if (I != IE && J != JE) {
    if (I->start < J->start) {
      I = std::upper_bound(I, IE, J->start);
      if (I != LHS.begin()) --I;
    } else if (J->start < I->start) {
      J = std::upper_bound(J, JE, I->start);
      if (J != RHS.begin()) --J;
    }
  }

  while (I != IE && J != JE) {
    // Determine if these two live ranges overlap.
    bool Overlaps;
    if (I->start < J->start) {
      Overlaps = I->end > J->start;
    } else {
      Overlaps = J->end > I->start;
    }

    // If so, check value # info to determine if they are really different.
    if (Overlaps) {
      // If the live range overlap will map to the same value number in the
      // result liverange, we can still coalesce them. If not, we can't.
      if (LHSValNoAssignments[I->valno->id] !=
          RHSValNoAssignments[J->valno->id])
        return false;
      // If it's re-defined by an early clobber somewhere in the live range,
      // then conservatively abort coalescing.
      if (NewVNInfo[LHSValNoAssignments[I->valno->id]]->hasRedefByEC())
        return false;
    }

    if (I->end < J->end)
      ++I;
    else
      ++J;
  }

  // Update kill info. Some live ranges are extended due to copy coalescing.
  for (DenseMap<VNInfo*, VNInfo*>::iterator I = LHSValsDefinedFromRHS.begin(),
         E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
    VNInfo *VNI = I->first;
    unsigned LHSValID = LHSValNoAssignments[VNI->id];
    if (VNI->hasPHIKill())
      NewVNInfo[LHSValID]->setHasPHIKill(true);
  }

  // Update kill info. Some live ranges are extended due to copy coalescing.
  for (DenseMap<VNInfo*, VNInfo*>::iterator I = RHSValsDefinedFromLHS.begin(),
         E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
    VNInfo *VNI = I->first;
    unsigned RHSValID = RHSValNoAssignments[VNI->id];
    if (VNI->hasPHIKill())
      NewVNInfo[RHSValID]->setHasPHIKill(true);
  }

  if (LHSValNoAssignments.empty())
    LHSValNoAssignments.push_back(-1);
  if (RHSValNoAssignments.empty())
    RHSValNoAssignments.push_back(-1);

  for (SmallVector<MachineInstr*, 8>::iterator I = DupCopies.begin(),
         E = DupCopies.end(); I != E; ++I) {
    MachineInstr *MI = *I;

    // We have pretended that the assignment to B in
    // A = X
    // B = X
    // was actually a copy from A. Now that we decided to coalesce A and B,
    // transform the code into
    // A = X
    // B = A
    // and mark the X as coalesced to keep the illusion.
    unsigned Src = MI->getOperand(1).getReg();
    MI->getOperand(0).substVirtReg(Src, 0, *tri_);

    markAsJoined(MI);
  }

  // If we get here, we know that we can coalesce the live ranges. Ask the
  // intervals to coalesce themselves now.
  LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
           mri_);
  return true;
}
namespace {
  // DepthMBBCompare - Comparison predicate that sorts first based on the loop
  // depth of the basic block (the unsigned), and then on the MBB number.
  struct DepthMBBCompare {
    typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair;
    bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const {
      // Deeper loops first
      if (LHS.first != RHS.first)
        return LHS.first > RHS.first;

      // Prefer blocks that are more connected in the CFG. This takes care of
      // the most difficult copies first while intervals are short.
      unsigned cl = LHS.second->pred_size() + LHS.second->succ_size();
      unsigned cr = RHS.second->pred_size() + RHS.second->succ_size();
      if (cl != cr)
        return cl > cr;

      // As a last resort, sort by block number.
      return LHS.second->getNumber() < RHS.second->getNumber();
    }
  };
} // end anonymous namespace
void RegisterCoalescer::CopyCoalesceInMBB(MachineBasicBlock *MBB,
                                          std::vector<MachineInstr*> &TryAgain) {
  DEBUG(dbgs() << MBB->getName() << ":\n");

  SmallVector<MachineInstr*, 8> VirtCopies;
  SmallVector<MachineInstr*, 8> PhysCopies;
  SmallVector<MachineInstr*, 8> ImpDefCopies;
  for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
       MII != E;) {
    MachineInstr *Inst = MII++;

    // If this isn't a copy nor an extract_subreg, we can't join intervals.
    unsigned SrcReg, DstReg;
    if (Inst->isCopy()) {
      DstReg = Inst->getOperand(0).getReg();
      SrcReg = Inst->getOperand(1).getReg();
    } else if (Inst->isSubregToReg()) {
      DstReg = Inst->getOperand(0).getReg();
      SrcReg = Inst->getOperand(2).getReg();
    } else
      continue;

    bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
    bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
    if (li_->hasInterval(SrcReg) && li_->getInterval(SrcReg).empty())
      ImpDefCopies.push_back(Inst);
    else if (SrcIsPhys || DstIsPhys)
      PhysCopies.push_back(Inst);
    else
      VirtCopies.push_back(Inst);
  }

  // Try coalescing implicit copies and insert_subreg <undef> first,
  // followed by copies to / from physical registers, then finally copies
  // from virtual registers to virtual registers.
  for (unsigned i = 0, e = ImpDefCopies.size(); i != e; ++i) {
    MachineInstr *TheCopy = ImpDefCopies[i];
    bool Again = false;
    if (!JoinCopy(TheCopy, Again))
      if (Again)
        TryAgain.push_back(TheCopy);
  }
  for (unsigned i = 0, e = PhysCopies.size(); i != e; ++i) {
    MachineInstr *TheCopy = PhysCopies[i];
    bool Again = false;
    if (!JoinCopy(TheCopy, Again))
      if (Again)
        TryAgain.push_back(TheCopy);
  }
  for (unsigned i = 0, e = VirtCopies.size(); i != e; ++i) {
    MachineInstr *TheCopy = VirtCopies[i];
    bool Again = false;
    if (!JoinCopy(TheCopy, Again))
      if (Again)
        TryAgain.push_back(TheCopy);
  }
}
void RegisterCoalescer::joinIntervals() {
  DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");

  std::vector<MachineInstr*> TryAgainList;
  if (loopInfo->empty()) {
    // If there are no loops in the function, join intervals in function order.
    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
         I != E; ++I)
      CopyCoalesceInMBB(I, TryAgainList);
  } else {
    // Otherwise, join intervals in inner loops before other intervals.
    // Unfortunately we can't just iterate over loop hierarchy here because
    // there may be more MBB's than BB's. Collect MBB's for sorting.

    // Join intervals in the function prolog first. We want to join physical
    // registers with virtual registers before the intervals get too long.
    std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs;
    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();I != E;++I){
      MachineBasicBlock *MBB = I;
      MBBs.push_back(std::make_pair(loopInfo->getLoopDepth(MBB), I));
    }

    // Sort by loop depth.
    std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare());

    // Finally, join intervals in loop nest order.
    for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
      CopyCoalesceInMBB(MBBs[i].second, TryAgainList);
  }

  // Joining intervals can allow other intervals to be joined. Iteratively join
  // until we make no progress.
  bool ProgressMade = true;
  while (ProgressMade) {
    ProgressMade = false;

    for (unsigned i = 0, e = TryAgainList.size(); i != e; ++i) {
      MachineInstr *&TheCopy = TryAgainList[i];
      if (!TheCopy)
        continue;

      bool Again = false;
      bool Success = JoinCopy(TheCopy, Again);
      if (Success || !Again) {
        TheCopy = 0;   // Mark this one as done.
        ProgressMade = true;
      }
    }
  }
}
void RegisterCoalescer::releaseMemory() {
  JoinedCopies.clear();
  ReMatCopies.clear();
  ReMatDefs.clear();
}
bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &fn.getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  li_ = &getAnalysis<LiveIntervals>();
  ldv_ = &getAnalysis<LiveDebugVariables>();
  AA = &getAnalysis<AliasAnalysis>();
  loopInfo = &getAnalysis<MachineLoopInfo>();

  DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  if (VerifyCoalescing)
    mf_->verify(this, "Before register coalescing");

  RegClassInfo.runOnMachineFunction(fn);

  // Join (coalesce) intervals if requested.
  if (EnableJoining) {
    joinIntervals();
    DEBUG({
        dbgs() << "********** INTERVALS POST JOINING **********\n";
        for (LiveIntervals::iterator I = li_->begin(), E = li_->end();
             I != E; ++I) {
          I->second->print(dbgs(), tri_);
          dbgs() << "\n";
        }
      });
  }

  // Perform a final pass over the instructions and compute spill weights
  // and remove identity moves.
  SmallVector<unsigned, 4> DeadDefs;
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    MachineBasicBlock* mbb = mbbi;
    for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end();
         mii != mie; ) {
      MachineInstr *MI = mii;
      if (JoinedCopies.count(MI)) {
        // Delete all coalesced copies.
        bool DoDelete = true;
        assert(MI->isCopyLike() && "Unrecognized copy instruction");
        unsigned SrcReg = MI->getOperand(MI->isSubregToReg() ? 2 : 1).getReg();
        if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
            MI->getNumOperands() > 2)
          // Do not delete extract_subreg, insert_subreg of physical
          // registers unless the definition is dead. e.g.
          // %DO<def> = INSERT_SUBREG %D0<undef>, %S0<kill>, 1
          // or else the scavenger may complain. LowerSubregs will
          // delete them later.
          DoDelete = false;

        if (MI->allDefsAreDead()) {
          if (TargetRegisterInfo::isVirtualRegister(SrcReg) &&
              li_->hasInterval(SrcReg))
            li_->shrinkToUses(&li_->getInterval(SrcReg));
          DoDelete = true;
        }
        if (!DoDelete) {
          // We need the instruction to adjust liveness, so make it a KILL.
          if (MI->isSubregToReg()) {
            MI->RemoveOperand(3);
            MI->RemoveOperand(1);
          }
          MI->setDesc(tii_->get(TargetOpcode::KILL));
          mii = llvm::next(mii);
        } else {
          li_->RemoveMachineInstrFromMaps(MI);
          mii = mbbi->erase(mii);
          ++numPeep;
        }
        continue;
      }

      // Now check if this is a remat'ed def instruction which is now dead.
      if (ReMatDefs.count(MI)) {
        bool isDead = true;
        for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
          const MachineOperand &MO = MI->getOperand(i);
          if (!MO.isReg())
            continue;
          unsigned Reg = MO.getReg();
          if (!Reg)
            continue;
          if (TargetRegisterInfo::isVirtualRegister(Reg))
            DeadDefs.push_back(Reg);
          if (MO.isDead())
            continue;
          if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
              !mri_->use_nodbg_empty(Reg)) {
            isDead = false;
            break;
          }
        }
        if (isDead) {
          while (!DeadDefs.empty()) {
            unsigned DeadDef = DeadDefs.back();
            DeadDefs.pop_back();
            RemoveDeadDef(li_->getInterval(DeadDef), MI);
          }
          li_->RemoveMachineInstrFromMaps(mii);
          mii = mbbi->erase(mii);
          continue;
        } else
          DeadDefs.clear();
      }

      ++mii;

      // Check for now unnecessary kill flags.
      if (li_->isNotInMIMap(MI)) continue;
      SlotIndex DefIdx = li_->getInstructionIndex(MI).getDefIndex();
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isKill()) continue;
        unsigned reg = MO.getReg();
        if (!reg || !li_->hasInterval(reg)) continue;
        if (!li_->getInterval(reg).killedAt(DefIdx)) {
          MO.setIsKill(false);
          continue;
        }
        // When leaving a kill flag on a physreg, check if any subregs should
        // remain alive.
        if (!TargetRegisterInfo::isPhysicalRegister(reg))
          continue;
        for (const unsigned *SR = tri_->getSubRegisters(reg);
             unsigned S = *SR; ++SR)
          if (li_->hasInterval(S) && li_->getInterval(S).liveAt(DefIdx))
            MI->addRegisterDefined(S, tri_);
      }
    }
  }

  DEBUG(dump());
  DEBUG(ldv_->dump());
  if (VerifyCoalescing)
    mf_->verify(this, "After register coalescing");
  return true;
}
/// print - Implement the dump method.
void RegisterCoalescer::print(raw_ostream &O, const Module* m) const {
  li_->print(O, m);
}

RegisterCoalescer *llvm::createRegisterCoalescer() {
  return new RegisterCoalescer();
}