1 //===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// This file promotes memory references to be register references.  It promotes
11 // alloca instructions which only have loads and stores as uses (or that have
12 // PHI nodes which are only loaded from). An alloca is transformed by using
13 // dominator frontiers to place PHI nodes, then traversing the function in
14 // depth-first order to rewrite loads and stores as appropriate. This is just
15 // the standard SSA construction algorithm to construct "pruned" SSA form.
17 //===----------------------------------------------------------------------===//
19 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
20 #include "llvm/Analysis/Dominators.h"
21 #include "llvm/Instructions.h"
22 #include "llvm/Function.h"
23 #include "llvm/Constant.h"
24 #include "llvm/Support/CFG.h"
25 #include "llvm/Support/StableBasicBlockNumbering.h"
26 #include "Support/StringExtras.h"
29 /// isAllocaPromotable - Return true if this alloca is legal for promotion.
/// This is true if there are only loads and stores to the alloca... or if there
/// is a PHI node using the address which can be trivially transformed.
bool llvm::isAllocaPromotable(const AllocaInst *AI, const TargetData &TD) {
  // FIXME: If the memory unit is of pointer or integer type, we can permit
  // assignments to subsections of the memory unit.

  // Only allow direct loads and stores...
  for (Value::use_const_iterator UI = AI->use_begin(), UE = AI->use_end();
       UI != UE; ++UI)     // Loop over all of the uses of the alloca
    if (isa<LoadInst>(*UI)) {
      // A direct load of the alloca is always fine to promote.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
      if (SI->getOperand(0) == AI)
        return false;   // Don't allow a store OF the AI, only INTO the AI.
    } else if (const PHINode *PN = dyn_cast<PHINode>(*UI)) {
      // We only support PHI nodes in a few simple cases.  The PHI node is only
      // allowed to have one use, which must be a load instruction, and can only
      // use alloca instructions (no random pointers).  Also, there cannot be
      // any accesses to AI between the PHI node and the use of the PHI.
      if (!PN->hasOneUse()) return false;

      // Our transformation causes the unconditional loading of all pointer
      // operands to the PHI node.  Because this could cause a fault if there is
      // a critical edge in the CFG and if one of the pointers is illegal, we
      // refuse to promote PHI nodes unless they are obviously safe.  For now,
      // obviously safe means that all of the operands are allocas.
      //
      // If we wanted to extend this code to break critical edges, this
      // restriction could be relaxed, and we could even handle uses of the PHI
      // node that are volatile loads or stores.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (!isa<AllocaInst>(PN->getIncomingValue(i)))
          // NOTE(review): the 'return false;' body of this if appears elided
          // from this listing — confirm against the full source.

      // Now make sure the one user instruction is in the same basic block as
      // the PHI, and that there are no loads or stores between the PHI node and
      // its single use.
      // NOTE: this 'UI' intentionally shadows the outer use-iterator; it is a
      // basic-block iterator positioned at the PHI's single user.
      BasicBlock::const_iterator UI = cast<Instruction>(PN->use_back());
      if (!isa<LoadInst>(UI) || cast<LoadInst>(UI)->isVolatile()) return false;

      // Scan looking for memory accesses.
      // FIXME: this should REALLY use alias analysis.
      for (--UI; !isa<PHINode>(UI); --UI)   // walk back from the load to the PHI
        if (isa<LoadInst>(UI) || isa<StoreInst>(UI) || isa<CallInst>(UI))
          // NOTE(review): the 'return false;' body appears elided here too.

      // If we got this far, we can promote the PHI use.
    } else if (const SelectInst *SI = dyn_cast<SelectInst>(*UI)) {
      // We only support selects in a few simple cases.  The select is only
      // allowed to have one use, which must be a load instruction, and can only
      // use alloca instructions (no random pointers).  Also, there cannot be
      // any accesses to AI between the PHI node and the use of the PHI.
      if (!SI->hasOneUse()) return false;

      // Our transformation causes the unconditional loading of all pointer
      // operands of the select.  Because this could cause a fault if there is a
      // critical edge in the CFG and if one of the pointers is illegal, we
      // refuse to promote the select unless it is obviously safe.  For now,
      // obviously safe means that all of the operands are allocas.
      if (!isa<AllocaInst>(SI->getOperand(1)) ||
          !isa<AllocaInst>(SI->getOperand(2)))
        // NOTE(review): 'return false;' appears elided here.

      // Now make sure the one user instruction is in the same basic block as
      // the PHI, and that there are no loads or stores between the PHI node and
      // its single use.
      BasicBlock::const_iterator UI = cast<Instruction>(SI->use_back());
      if (!isa<LoadInst>(UI) || cast<LoadInst>(UI)->isVolatile()) return false;

      // Scan looking for memory accesses.
      // FIXME: this should REALLY use alias analysis.
      for (--UI; &*UI != SI; --UI)   // walk back from the load to the select
        if (isa<LoadInst>(UI) || isa<StoreInst>(UI) || isa<CallInst>(UI))
          // NOTE(review): 'return false;' appears elided here.

      // If we got this far, we can promote the select use.
    } else {   // NOTE(review): this else-arm line appears elided in the listing
      return false;   // Not a load, store, or promotable PHI?
// PromoteMem2Reg - Worker object that carries all the state needed to promote
// a batch of alloca instructions to SSA registers within one function.
// NOTE(review): the enclosing 'namespace {' line and the struct-closing '};'
// appear elided from this listing (see the end-of-namespace comment below).
struct PromoteMem2Reg {
  // Allocas - The alloca instructions being promoted
  std::vector<AllocaInst*> Allocas;
  // Analyses this transformation consumes (DF drives PHI placement).
  DominanceFrontier &DF;
  const TargetData &TD;

  // AllocaLookup - Reverse mapping of Allocas
  std::map<AllocaInst*, unsigned> AllocaLookup;

  // NewPhiNodes - The PhiNodes we're adding.  Indexed first by the block the
  // PHI lives in, then by alloca number (see AllocaLookup).
  std::map<BasicBlock*, std::vector<PHINode*> > NewPhiNodes;

  // Visited - The set of basic blocks the renamer has already visited.
  std::set<BasicBlock*> Visited;

  // BBNumbers - Contains a stable numbering of basic blocks to avoid
  // non-deterministic behavior.
  StableBasicBlockNumbering BBNumbers;

  // NOTE(review): the initializer list below references a 'DT' member
  // (presumably a DominatorTree& declared on a line elided from this listing)
  // — confirm against the full source.
  PromoteMem2Reg(const std::vector<AllocaInst*> &A, DominatorTree &dt,
                 DominanceFrontier &df, const TargetData &td)
    : Allocas(A), DT(dt), DF(df), TD(td) {}

  // MarkDominatingPHILive - Remove from DeadPHINodes any PHI node (for alloca
  // AllocaNum) that dominates a read in BB; see definition below.
  void MarkDominatingPHILive(BasicBlock *BB, unsigned AllocaNum,
                             std::set<PHINode*> &DeadPHINodes);
  // Single-block fast paths for allocas used in only one basic block.
  void PromoteLocallyUsedAlloca(BasicBlock *BB, AllocaInst *AI);
  void PromoteLocallyUsedAllocas(BasicBlock *BB,
                                 const std::vector<AllocaInst*> &AIs);
  // SSA rename walk over the CFG.
  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
                  std::vector<Value*> &IncVals);
  // Queue a PHI node for alloca AllocaIdx at the start of BB.
  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version,
                    std::set<PHINode*> &InsertedPHINodes);
}  // end of anonymous namespace
// run - Main driver.  Deletes dead allocas, transforms promotable PHI/select
// uses into loads, handles single-block allocas with a linear sweep, inserts
// PHI nodes on the iterated dominance frontier of each alloca's stores, prunes
// dead PHIs, and finally renames loads/stores via RenamePass.
void PromoteMem2Reg::run() {
  Function &F = *DF.getRoot()->getParent();

  // LocallyUsedAllocas - Keep track of all of the alloca instructions which are
  // only used in a single basic block.  These instructions can be efficiently
  // promoted by performing a single linear scan over that one block.  Since
  // individual basic blocks are sometimes large, we group together all allocas
  // that are live in a single basic block by the basic block they are live in.
  std::map<BasicBlock*, std::vector<AllocaInst*> > LocallyUsedAllocas;

  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
    AllocaInst *AI = Allocas[AllocaNum];

    assert(isAllocaPromotable(AI, TD) &&
           "Cannot promote non-promotable alloca!");
    assert(AI->getParent()->getParent() == &F &&
           "All allocas should be in the same function, which is same as DF!");

    if (AI->use_empty()) {
      // If there are no uses of the alloca, just delete it now.
      AI->getParent()->getInstList().erase(AI);

      // Remove the alloca from the Allocas list, since it has been processed
      Allocas[AllocaNum] = Allocas.back();
      // NOTE(review): the pop_back/--AllocaNum/continue statements that
      // normally follow this swap-and-pop appear elided from this listing.

    // Calculate the set of read and write-locations for each alloca.  This is
    // analogous to finding the 'uses' and 'definitions' of each variable.
    std::vector<BasicBlock*> DefiningBlocks;
    std::vector<BasicBlock*> UsingBlocks;

    BasicBlock *OnlyBlock = 0;
    bool OnlyUsedInOneBlock = true;

    // As we scan the uses of the alloca instruction, keep track of stores, and
    // decide whether all of the loads and stores to the alloca are within the
    // same basic block.
    for (Value::use_iterator U = AI->use_begin(), E = AI->use_end(); U != E; ++U) {
      Instruction *User = cast<Instruction>(*U);
      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        // Remember the basic blocks which define new values for the alloca
        DefiningBlocks.push_back(SI->getParent());
      } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
        // Otherwise it must be a load instruction, keep track of variable reads
        UsingBlocks.push_back(LI->getParent());
      } else if (SelectInst *SI = dyn_cast<SelectInst>(User)) {
        // Because of the restrictions we placed on Select instruction uses
        // above things are very simple.  Transform the PHI of addresses into a
        // select of loaded values.
        LoadInst *Load = cast<LoadInst>(SI->use_back());
        std::string LoadName = Load->getName(); Load->setName("");

        Value *TrueVal = new LoadInst(SI->getOperand(1),
                                      SI->getOperand(1)->getName()+".val", SI);
        Value *FalseVal = new LoadInst(SI->getOperand(2),
                                       SI->getOperand(2)->getName()+".val", SI);

        Value *NewSI = new SelectInst(SI->getOperand(0), TrueVal,
                                      FalseVal, Load->getName(), SI);
        Load->replaceAllUsesWith(NewSI);
        Load->getParent()->getInstList().erase(Load);
        SI->getParent()->getInstList().erase(SI);

        // Restart our scan of uses...
        DefiningBlocks.clear();

        // Because of the restrictions we placed on PHI node uses above, the PHI
        // node reads the block in any using predecessors.  Transform the PHI of
        // addresses into a PHI of loaded values.
        // NOTE(review): this PHI case is presumably a separate else-branch of
        // the dispatch above; the branch header line appears elided here.
        PHINode *PN = cast<PHINode>(User);
        assert(PN->hasOneUse() && "Cannot handle PHI Node with != 1 use!");
        LoadInst *PNUser = cast<LoadInst>(PN->use_back());
        std::string PNUserName = PNUser->getName(); PNUser->setName("");

        // Create the new PHI node and insert load instructions as appropriate.
        PHINode *NewPN = new PHINode(AI->getAllocatedType(), PNUserName, PN);
        std::map<BasicBlock*, LoadInst*> NewLoads;
        for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
          BasicBlock *Pred = PN->getIncomingBlock(i);
          LoadInst *&NewLoad = NewLoads[Pred];
          if (NewLoad == 0)  // Insert the new load in the predecessor
            NewLoad = new LoadInst(PN->getIncomingValue(i),
                                   PN->getIncomingValue(i)->getName()+".val",
                                   Pred->getTerminator());
          NewPN->addIncoming(NewLoad, Pred);

        // Remove the old load.
        PNUser->replaceAllUsesWith(NewPN);
        PNUser->getParent()->getInstList().erase(PNUser);

        // Remove the old PHI node.
        PN->getParent()->getInstList().erase(PN);

        // Restart our scan of uses...
        DefiningBlocks.clear();

      if (OnlyUsedInOneBlock) {
        // NOTE(review): the 'if (OnlyBlock == 0)' guard for the first
        // assignment appears elided from this listing.
          OnlyBlock = User->getParent();
        else if (OnlyBlock != User->getParent())
          OnlyUsedInOneBlock = false;

    // If the alloca is only read and written in one basic block, just perform a
    // linear sweep over the block to eliminate it.
    if (OnlyUsedInOneBlock) {
      LocallyUsedAllocas[OnlyBlock].push_back(AI);

      // Remove the alloca from the Allocas list, since it will be processed.
      Allocas[AllocaNum] = Allocas.back();

    // If we haven't computed a numbering for the BB's in the function, do so
    // now (lazily; only needed for the dominance-frontier path below).
    BBNumbers.compute(F);

    // Compute the locations where PhiNodes need to be inserted.  Look at the
    // dominance frontier of EACH basic-block we have a write in.
    unsigned CurrentVersion = 0;
    std::set<PHINode*> InsertedPHINodes;
    std::vector<unsigned> DFBlocks;
    while (!DefiningBlocks.empty()) {
      BasicBlock *BB = DefiningBlocks.back();
      DefiningBlocks.pop_back();

      // Look up the DF for this write, add it to PhiNodes
      DominanceFrontier::const_iterator it = DF.find(BB);
      if (it != DF.end()) {
        const DominanceFrontier::DomSetType &S = it->second;

        // In theory we don't need the indirection through the DFBlocks vector.
        // In practice, the order of calling QueuePhiNode would depend on the
        // (unspecified) ordering of basic blocks in the dominance frontier,
        // which would give PHI nodes non-deterministic subscripts.  Fix this by
        // processing blocks in order of their occurrence in the function.
        for (DominanceFrontier::DomSetType::iterator P = S.begin(), PE = S.end();
          DFBlocks.push_back(BBNumbers.getNumber(*P));

        // Sort by the block ordering in the function.
        std::sort(DFBlocks.begin(), DFBlocks.end());

        for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i) {
          BasicBlock *BB = BBNumbers.getBlock(DFBlocks[i]);
          // If a new PHI was queued here, BB is now a defining block too
          // (iterated dominance frontier).
          if (QueuePhiNode(BB, AllocaNum, CurrentVersion, InsertedPHINodes))
            DefiningBlocks.push_back(BB);

    // Now that we have inserted PHI nodes along the Iterated Dominance Frontier
    // of the writes to the variable, scan through the reads of the variable,
    // marking PHI nodes which are actually necessary as alive (by removing them
    // from the InsertedPHINodes set).  This is not perfect: there may be PHI
    // nodes marked alive because of loads which are dominated by stores, but
    // there will be no unmarked PHI nodes which are actually used.
    for (unsigned i = 0, e = UsingBlocks.size(); i != e; ++i)
      MarkDominatingPHILive(UsingBlocks[i], AllocaNum, InsertedPHINodes);

    // If there are any PHI nodes which are now known to be dead, remove them!
    for (std::set<PHINode*>::iterator I = InsertedPHINodes.begin(),
           E = InsertedPHINodes.end(); I != E; ++I) {
      // NOTE(review): the declaration of 'PN' (presumably 'PHINode *PN = *I;')
      // appears elided from this listing.
      std::vector<PHINode*> &BBPNs = NewPhiNodes[PN->getParent()];
      BBPNs[AllocaNum] = 0;

      // Check to see if we just removed the last inserted PHI node from this
      // basic block.  If so, remove the entry for the basic block.
      bool HasOtherPHIs = false;
      for (unsigned i = 0, e = BBPNs.size(); i != e; ++i)
        // NOTE(review): the loop body setting HasOtherPHIs, and the
        // '!HasOtherPHIs' guard on the erase below, appear elided.
        NewPhiNodes.erase(PN->getParent());

      PN->getParent()->getInstList().erase(PN);

    // Keep the reverse mapping of the 'Allocas' array.
    AllocaLookup[Allocas[AllocaNum]] = AllocaNum;

  // Process all allocas which are only used in a single basic block.
  for (std::map<BasicBlock*, std::vector<AllocaInst*> >::iterator I =
         LocallyUsedAllocas.begin(), E = LocallyUsedAllocas.end(); I != E; ++I) {
    const std::vector<AllocaInst*> &Allocas = I->second;
    assert(!Allocas.empty() && "empty alloca list??");

    // It's common for there to only be one alloca in the list.  Handle it
    // efficiently with the single-alloca scan.
    if (Allocas.size() == 1)
      PromoteLocallyUsedAlloca(I->first, Allocas[0]);
      // NOTE(review): an 'else' for the multi-alloca path appears elided.
      PromoteLocallyUsedAllocas(I->first, Allocas);

  // NOTE(review): the 'if (Allocas.empty())' guard for this early return
  // appears elided — as written it would return unconditionally; confirm.
  return;  // All of the allocas must have been trivial!

  // Set the incoming values for the basic block to be null values for all of
  // the alloca's.  We do this in case there is a load of a value that has not
  // been stored yet.  In this case, it will get this null value.
  std::vector<Value *> Values(Allocas.size());
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
    Values[i] = Constant::getNullValue(Allocas[i]->getAllocatedType());

  // Walks all basic blocks in the function performing the SSA rename algorithm
  // and inserting the phi nodes we marked as necessary
  RenamePass(F.begin(), 0, Values);

  // The renamer uses the Visited set to avoid infinite loops.  Clear it now.

  // Remove the allocas themselves from the function...
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
    Instruction *A = Allocas[i];

    // If there are any uses of the alloca instructions left, they must be in
    // sections of dead code that were not processed on the dominance frontier.
    // Just delete the users now.
    A->replaceAllUsesWith(Constant::getNullValue(A->getType()));
    A->getParent()->getInstList().erase(A);

  // At this point, the renamer has added entries to PHI nodes for all reachable
  // code.  Unfortunately, there may be blocks which are not reachable, which
  // the renamer hasn't traversed.  If this is the case, the PHI nodes may not
  // have incoming values for all predecessors.  Loop over all PHI nodes we have
  // created, inserting null constants if they are missing any incoming values.
  for (std::map<BasicBlock*, std::vector<PHINode *> >::iterator I =
         NewPhiNodes.begin(), E = NewPhiNodes.end(); I != E; ++I) {

    std::vector<BasicBlock*> Preds(pred_begin(I->first), pred_end(I->first));
    std::vector<PHINode*> &PNs = I->second;
    assert(!PNs.empty() && "Empty PHI node list??");

    // Only do work here if the PHI nodes are missing incoming values.  We
    // know that all PHI nodes that were inserted in a block will have the same
    // number of incoming values, so we can just check any PHI node.
    // NOTE(review): the declaration of 'FirstPHI' (presumably 'PHINode
    // *FirstPHI;') appears elided from this listing.
    for (unsigned i = 0; (FirstPHI = PNs[i]) == 0; ++i)

    if (Preds.size() != FirstPHI->getNumIncomingValues()) {
      // Ok, now we know that all of the PHI nodes are missing entries for some
      // basic blocks.  Start by sorting the incoming predecessors for efficient
      // binary search.
      std::sort(Preds.begin(), Preds.end());

      // Now we loop through all BB's which have entries in FirstPHI and remove
      // them from the Preds list.
      for (unsigned i = 0, e = FirstPHI->getNumIncomingValues(); i != e; ++i) {
        // Do a log(n) search of the Preds list for the entry we want.
        std::vector<BasicBlock*>::iterator EntIt =
          std::lower_bound(Preds.begin(), Preds.end(),
                           FirstPHI->getIncomingBlock(i));
        assert(EntIt != Preds.end() && *EntIt == FirstPHI->getIncomingBlock(i) &&
               "PHI node has entry for a block which is not a predecessor!");

      // At this point, the blocks left in the preds list must have dummy
      // entries inserted into every PHI nodes for the block.
      for (unsigned i = 0, e = PNs.size(); i != e; ++i)
        if (PHINode *PN = PNs[i]) {
          Value *NullVal = Constant::getNullValue(PN->getType());
          for (unsigned pred = 0, e = Preds.size(); pred != e; ++pred)
            PN->addIncoming(NullVal, Preds[pred]);
459 // MarkDominatingPHILive - Mem2Reg wants to construct "pruned" SSA form, not
460 // "minimal" SSA form. To do this, it inserts all of the PHI nodes on the IDF
461 // as usual (inserting the PHI nodes in the DeadPHINodes set), then processes
462 // each read of the variable. For each block that reads the variable, this
463 // function is called, which removes used PHI nodes from the DeadPHINodes set.
464 // After all of the reads have been processed, any PHI nodes left in the
465 // DeadPHINodes set are removed.
void PromoteMem2Reg::MarkDominatingPHILive(BasicBlock *BB, unsigned AllocaNum,
                                           std::set<PHINode*> &DeadPHINodes) {
  // Scan the immediate dominators of this block looking for a block which has a
  // PHI node for Alloca num.  If we find it, mark the PHI node as being alive!
  for (DominatorTree::Node *N = DT[BB]; N; N = N->getIDom()) {
    BasicBlock *DomBB = N->getBlock();
    std::map<BasicBlock*, std::vector<PHINode*> >::iterator
      I = NewPhiNodes.find(DomBB);
    if (I != NewPhiNodes.end() && I->second[AllocaNum]) {
      // Ok, we found an inserted PHI node which dominates this value.
      PHINode *DominatingPHI = I->second[AllocaNum];

      // Find out if we previously thought it was dead.
      std::set<PHINode*>::iterator DPNI = DeadPHINodes.find(DominatingPHI);
      if (DPNI != DeadPHINodes.end()) {
        // Ok, until now, we thought this PHI node was dead.  Mark it as being
        // alive by removing it from the dead set.
        DeadPHINodes.erase(DPNI);

        // Now that we have marked the PHI node alive, also mark any PHI nodes
        // which it might use as being alive as well.
        // NOTE(review): the loop condition/increment ('PI != PE; ++PI)') appears
        // elided from this listing.
        for (pred_iterator PI = pred_begin(DomBB), PE = pred_end(DomBB);
          MarkDominatingPHILive(*PI, AllocaNum, DeadPHINodes);
496 /// PromoteLocallyUsedAlloca - Many allocas are only used within a single basic
497 /// block. If this is the case, avoid traversing the CFG and inserting a lot of
498 /// potentially useless PHI nodes by just performing a single linear pass over
499 /// the basic block using the Alloca.
void PromoteMem2Reg::PromoteLocallyUsedAlloca(BasicBlock *BB, AllocaInst *AI) {
  assert(!AI->use_empty() && "There are no uses of the alloca!");

  // Handle degenerate cases quickly.
  if (AI->hasOneUse()) {
    Instruction *U = cast<Instruction>(AI->use_back());
    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      // Must be a load of uninitialized value.
      LI->replaceAllUsesWith(Constant::getNullValue(AI->getAllocatedType()));
      // NOTE(review): the '} else {' separating the load and store cases
      // appears elided from this listing.
      // Otherwise it must be a store which is never read.
      assert(isa<StoreInst>(U));
      BB->getInstList().erase(U);

    // General case: linear scan of the block, tracking the current value.
    // Uses of the uninitialized memory location shall get zero...
    Value *CurVal = Constant::getNullValue(AI->getAllocatedType());

    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
      Instruction *Inst = I++;   // advance before possibly erasing Inst
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        if (LI->getOperand(0) == AI) {
          // Loads just return the "current value"...
          LI->replaceAllUsesWith(CurVal);
          BB->getInstList().erase(LI);
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        if (SI->getOperand(1) == AI) {
          // Store updates the "current value"...
          CurVal = SI->getOperand(0);
          BB->getInstList().erase(SI);

  // After traversing the basic block, there should be no more uses of the
  // alloca, remove it now.
  assert(AI->use_empty() && "Uses of alloca from more than one BB??");
  AI->getParent()->getInstList().erase(AI);
543 /// PromoteLocallyUsedAllocas - This method is just like
544 /// PromoteLocallyUsedAlloca, except that it processes multiple alloca
545 /// instructions in parallel. This is important in cases where we have large
546 /// basic blocks, as we don't want to rescan the entire basic block for each
547 /// alloca which is locally used in it (which might be a lot).
void PromoteMem2Reg::
PromoteLocallyUsedAllocas(BasicBlock *BB, const std::vector<AllocaInst*> &AIs) {
  // CurValues maps each tracked alloca to its current value within the scan;
  // null means "not yet stored" (reads get a null constant lazily below).
  std::map<AllocaInst*, Value*> CurValues;
  for (unsigned i = 0, e = AIs.size(); i != e; ++i)
    CurValues[AIs[i]] = 0; // Insert with null value

  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
    Instruction *Inst = I++;   // advance before possibly erasing Inst
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // Is this a load of an alloca we are tracking?
      if (AllocaInst *AI = dyn_cast<AllocaInst>(LI->getOperand(0))) {
        std::map<AllocaInst*, Value*>::iterator AIt = CurValues.find(AI);
        if (AIt != CurValues.end()) {
          // Loads just return the "current value"...
          if (AIt->second == 0)   // Uninitialized value??
            AIt->second = Constant::getNullValue(AIt->first->getAllocatedType());
          LI->replaceAllUsesWith(AIt->second);
          BB->getInstList().erase(LI);

    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (AllocaInst *AI = dyn_cast<AllocaInst>(SI->getOperand(1))) {
        std::map<AllocaInst*, Value*>::iterator AIt = CurValues.find(AI);
        if (AIt != CurValues.end()) {
          // Store updates the "current value"...
          AIt->second = SI->getOperand(0);
          BB->getInstList().erase(SI);
// QueuePhiNode - Queues a phi-node to be added to a basic-block for a specific
// Alloca.  Returns true if there wasn't already a phi-node for that variable.
bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
                                  // NOTE(review): the 'unsigned &Version'
                                  // parameter (see declaration in the class
                                  // above) appears elided from this listing;
                                  // 'Version' is used below.
                                  std::set<PHINode*> &InsertedPHINodes) {
  // Look up the basic-block in question
  std::vector<PHINode*> &BBPNs = NewPhiNodes[BB];
  if (BBPNs.empty()) BBPNs.resize(Allocas.size());

  // If the BB already has a phi node added for the i'th alloca then we're done!
  if (BBPNs[AllocaNo]) return false;

  // Create a PhiNode using the dereferenced type... and add the phi-node to the
  // start of the basic block.
  BBPNs[AllocaNo] = new PHINode(Allocas[AllocaNo]->getAllocatedType(),
                                Allocas[AllocaNo]->getName() + "." +
                                utostr(Version++), BB->begin());
  InsertedPHINodes.insert(BBPNs[AllocaNo]);
606 // RenamePass - Recursively traverse the CFG of the function, renaming loads and
607 // stores to the allocas which we are promoting. IncomingVals indicates what
608 // value each Alloca contains on exit from the predecessor block Pred.
void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
                                std::vector<Value*> &IncomingVals) {
  // If this BB needs a PHI node, update the PHI node for each variable we need
  // PHI nodes for.
  std::map<BasicBlock*, std::vector<PHINode *> >::iterator
    BBPNI = NewPhiNodes.find(BB);
  if (BBPNI != NewPhiNodes.end()) {
    std::vector<PHINode *> &BBPNs = BBPNI->second;
    for (unsigned k = 0; k != BBPNs.size(); ++k)
      if (PHINode *PN = BBPNs[k]) {
        // Add this incoming value to the PHI node.
        PN->addIncoming(IncomingVals[k], Pred);

        // The currently active variable for this block is now the PHI.
        IncomingVals[k] = PN;

  // don't revisit nodes
  if (Visited.count(BB)) return;
  // NOTE(review): the 'Visited.insert(BB);' that should follow this check
  // appears elided from this listing.

  for (BasicBlock::iterator II = BB->begin(); !isa<TerminatorInst>(II); ) {
    Instruction *I = II++; // get the instruction, increment iterator

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      if (AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand())) {
        std::map<AllocaInst*, unsigned>::iterator AI = AllocaLookup.find(Src);
        if (AI != AllocaLookup.end()) {
          Value *V = IncomingVals[AI->second];

          // walk the use list of this load and replace all uses with r
          LI->replaceAllUsesWith(V);
          BB->getInstList().erase(LI);

    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Delete this instruction and mark the name as the current holder of the
      // value being stored.
      if (AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand())) {
        std::map<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
        if (ai != AllocaLookup.end()) {
          // what value were we writing?
          IncomingVals[ai->second] = SI->getOperand(0);
          BB->getInstList().erase(SI);

  // Recurse to our successors.  Each successor gets its own copy of the
  // incoming values so sibling branches do not interfere.
  TerminatorInst *TI = BB->getTerminator();
  for (unsigned i = 0; i != TI->getNumSuccessors(); i++) {
    std::vector<Value*> OutgoingVals(IncomingVals);
    RenamePass(TI->getSuccessor(i), BB, OutgoingVals);
671 /// PromoteMemToReg - Promote the specified list of alloca instructions into
672 /// scalar registers, inserting PHI nodes as appropriate. This function makes
673 /// use of DominanceFrontier information. This function does not modify the CFG
674 /// of the function at all. All allocas must be from the same function.
void llvm::PromoteMemToReg(const std::vector<AllocaInst*> &Allocas,
                           DominatorTree &DT, DominanceFrontier &DF,
                           const TargetData &TD) {
  // If there is nothing to do, bail out...
  if (Allocas.empty()) return;
  // Delegate all of the work to the stateful helper object.
  PromoteMem2Reg(Allocas, DT, DF, TD).run();