1 //===-- DwarfEHPrepare - Prepare exception handling for code generation ---===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass mulches exception handling code into a form adapted to code
11 // generation. Required if using dwarf exception handling.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "dwarfehprepare"
16 #include "llvm/Function.h"
17 #include "llvm/Instructions.h"
18 #include "llvm/IntrinsicInst.h"
19 #include "llvm/Module.h"
20 #include "llvm/Pass.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/Dominators.h"
23 #include "llvm/CodeGen/Passes.h"
24 #include "llvm/MC/MCAsmInfo.h"
25 #include "llvm/Target/TargetLowering.h"
26 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
27 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
// Pass-wide statistics counters, printed under -stats in the
// "dwarfehprepare" group (DEBUG_TYPE above). Each is incremented by the
// corresponding transformation below.
30 STATISTIC(NumLandingPadsSplit, "Number of landing pads split");
31 STATISTIC(NumUnwindsLowered, "Number of unwind instructions lowered");
32 STATISTIC(NumExceptionValuesMoved, "Number of eh.exception calls moved");
33 STATISTIC(NumStackTempsIntroduced, "Number of stack temporaries introduced");
// DwarfEHPrepare - FunctionPass that rewrites IR-level exception handling
// (landing pads, unwind instructions, eh.exception/eh.selector intrinsics)
// into the shape the DWARF-EH code generator expects.
// NOTE(review): this is an elided line-numbered listing. Several member
// declarations referenced by the methods below (presumably Constant *URoR,
// DominatorTree *DT, Function *F, and a 'BBSet LandingPads' member) plus
// assorted braces and return statements fall in the gaps between the
// numbered lines -- confirm against the full source.
36 class DwarfEHPrepare : public FunctionPass {
// Target hooks: TM supplies MCAsmInfo (see NormalizeLandingPads), TLI the
// rewind libcall name/calling convention (see LowerUnwinds).
37 const TargetMachine *TM;
38 const TargetLowering *TLI;
41 // The eh.exception intrinsic.
42 Function *ExceptionValueIntrinsic;
44 // The eh.selector intrinsic.
45 Function *SelectorIntrinsic;
47 // _Unwind_Resume_or_Rethrow call.
// NOTE(review): the URoR member declaration itself (orig line 48) is elided.
50 // The EH language-specific catch-all type.
51 GlobalVariable *EHCatchAllValue;
53 // _Unwind_Resume or the target equivalent.
54 Constant *RewindFunction;
56 // Dominator info is used when turning stack temporaries into registers.
// NOTE(review): the DominatorTree *DT declaration (orig line 57) is elided.
58 DominanceFrontier *DF;
60 // The function we are running on.
// NOTE(review): the Function *F declaration (orig line 61) is elided.
63 // The landing pads for this function.
64 typedef SmallPtrSet<BasicBlock*, 8> BBSet;
// NOTE(review): the 'BBSet LandingPads;' member (orig line 65) is elided.
67 // Stack temporary used to hold eh.exception values.
68 AllocaInst *ExceptionValueVar;
// Worker routines, one per transformation driven by runOnFunction().
70 bool NormalizeLandingPads();
72 bool MoveExceptionValueCalls();
73 bool FinishStackTemporaries();
74 bool PromoteStackTemporaries();
76 Instruction *CreateExceptionValueCall(BasicBlock *BB);
77 Instruction *CreateValueLoad(BasicBlock *BB);
79 /// CreateReadOfExceptionValue - Return the result of the eh.exception
80 /// intrinsic by calling the intrinsic if in a landing pad, or loading it
81 /// from the exception value variable otherwise.
82 Instruction *CreateReadOfExceptionValue(BasicBlock *BB) {
83 return LandingPads.count(BB) ?
84 CreateExceptionValueCall(BB) : CreateValueLoad(BB);
87 /// CleanupSelectors - Any remaining eh.selector intrinsic calls which still
88 /// use the ".llvm.eh.catch.all.value" call need to convert to using its
89 /// initializer instead.
90 bool CleanupSelectors();
92 bool IsACleanupSelector(IntrinsicInst *);
94 /// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
95 void FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels);
97 /// FindAllURoRInvokes - Find all URoR invokes in the function.
98 void FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes);
100 /// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow"
101 /// calls. The "unwind" part of these invokes jump to a landing pad within
102 /// the current function. This is a candidate to merge the selector
103 /// associated with the URoR invoke with the one from the URoR's landing
105 bool HandleURoRInvokes();
107 /// FindSelectorAndURoR - Find the eh.selector call and URoR call associated
108 /// with the eh.exception call. This recursively looks past instructions
109 /// which don't change the EH pointer value, like casts or PHI nodes.
110 bool FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
111 SmallPtrSet<IntrinsicInst*, 8> &SelCalls);
113 /// DoMem2RegPromotion - Take an alloca call and promote it from memory to a
// Returns false when V is not a promotable alloca; on success runs mem2reg
// on just that one alloca (NOTE(review): the 'return true;' and closing
// brace are elided).
115 bool DoMem2RegPromotion(Value *V) {
116 AllocaInst *AI = dyn_cast<AllocaInst>(V);
117 if (!AI || !isAllocaPromotable(AI)) return false;
119 // Turn the alloca into a register.
120 std::vector<AllocaInst*> Allocas(1, AI);
121 PromoteMemToReg(Allocas, *DT, *DF);
125 /// PromoteStoreInst - Perform Mem2Reg on a StoreInst.
// Operand 1 of a StoreInst is its pointer operand, i.e. the alloca being
// stored to; requires DT and DF to already be available.
126 bool PromoteStoreInst(StoreInst *SI) {
127 if (!SI || !DT || !DF) return false;
128 if (DoMem2RegPromotion(SI->getOperand(1)))
133 /// PromoteEHPtrStore - Promote the storing of an EH pointer into a
134 /// register. This should get rid of the store and subsequent loads.
// NOTE(review): the StoreInst *SI declaration, the 'restart:'-style loop
// re-entry, and the loop body's tail (orig lines 137, 139-142, 146-158)
// are elided here.
135 bool PromoteEHPtrStore(IntrinsicInst *II) {
136 if (!DT || !DF) return false;
138 bool Changed = false;
143 for (Value::use_iterator
144 I = II->use_begin(), E = II->use_end(); I != E; ++I) {
145 SI = dyn_cast<StoreInst>(I);
149 if (!PromoteStoreInst(SI))
159 static char ID; // Pass identification, replacement for typeid.
// Constructor: caches the target lowering object and zero-initializes all
// lazily-resolved intrinsic/global caches. 'fast' is accepted for the
// createDwarfEHPass factory; its storage (if any) is on an elided line.
160 DwarfEHPrepare(const TargetMachine *tm, bool fast) :
161 FunctionPass(&ID), TM(tm), TLI(TM->getTargetLowering()),
163 ExceptionValueIntrinsic(0), SelectorIntrinsic(0),
164 URoR(0), EHCatchAllValue(0), RewindFunction(0) {}
166 virtual bool runOnFunction(Function &Fn);
168 // getAnalysisUsage - We need dominance frontiers for memory promotion.
169 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
171 AU.addRequired<DominatorTree>();
172 AU.addPreserved<DominatorTree>();
174 AU.addRequired<DominanceFrontier>();
175 AU.addPreserved<DominanceFrontier>();
178 const char *getPassName() const {
179 return "Exception handling preparation";
183 } // end anonymous namespace
// Out-of-line definition of the pass-identification anchor declared in the
// class ('static char ID').
185 char DwarfEHPrepare::ID = 0;
// Factory used by the codegen pipeline to instantiate this pass; 'fast' is
// forwarded straight to the constructor.
// NOTE(review): the function's closing brace (orig lines 189-190) is elided.
187 FunctionPass *llvm::createDwarfEHPass(const TargetMachine *tm, bool fast) {
188 return new DwarfEHPrepare(tm, fast);
191 /// IsACleanupSelector - Return true if the intrinsic instruction is a clean-up
192 /// selector instruction.
// NOTE(review): elided listing -- orig lines 196-199, 202, 205-207, 209 and
// 214+ (intermediate returns/comments and the closing braces) are missing;
// confirm control flow against the full source.
193 bool DwarfEHPrepare::IsACleanupSelector(IntrinsicInst *II) {
194 unsigned NumOps = II->getNumOperands();
// A selector with only 3 operands carries no typeinfo/filter clauses at
// all; the code below treats that as an implicit clean-up.
195 bool IsCleanUp = (NumOps == 3);
// Operand 3 is the first clause after the exception pointer and the
// personality function; a ConstantInt there encodes a filter length.
200 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(3))) {
201 unsigned Val = CI->getZExtValue();
203 if (Val == 0 || Val + 3 == NumOps) {
204 // If the value is 0 or the selector has only filters in it, then it's
208 assert(Val + 3 < NumOps && "Ill-formed eh.selector!");
// Exactly one operand follows the filter: a trailing zero marks the
// selector as a clean-up ("catch-all"-terminated) selector.
210 if (Val + 4 == NumOps) {
211 if (ConstantInt *FinalVal =
212 dyn_cast<ConstantInt>(II->getOperand(NumOps - 1)))
213 return FinalVal->isZero();
221 /// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
222 void DwarfEHPrepare::
223 FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels) {
// Walk every use of the module-level eh.selector declaration; each use is
// assumed (via cast<>) to be an intrinsic call.
224 for (Value::use_iterator
225 I = SelectorIntrinsic->use_begin(),
226 E = SelectorIntrinsic->use_end(); I != E; ++I) {
227 IntrinsicInst *II = cast<IntrinsicInst>(I);
// Skip selector calls that live in other functions than F.
229 if (II->getParent()->getParent() != F)
// NOTE(review): the 'continue', the 'Sels.insert(II);' body, and the
// closing braces fall on elided lines -- confirm against the full source.
232 if (IsACleanupSelector(II))
237 /// FindAllURoRInvokes - Find all URoR invokes in the function.
238 void DwarfEHPrepare::
239 FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes) {
// Collect every InvokeInst whose callee is the cached
// _Unwind_Resume_or_Rethrow declaration; plain CallInst users are ignored
// by the dyn_cast. (Closing braces are on elided lines of this listing.)
240 for (Value::use_iterator
241 I = URoR->use_begin(),
242 E = URoR->use_end(); I != E; ++I) {
243 if (InvokeInst *II = dyn_cast<InvokeInst>(I))
244 URoRInvokes.insert(II);
248 /// CleanupSelectors - Any remaining eh.selector intrinsic calls which still use
249 /// the ".llvm.eh.catch.all.value" call need to convert to using its
250 /// initializer instead.
251 bool DwarfEHPrepare::CleanupSelectors() {
// Nothing to rewrite if the module never defined the catch-all global.
252 if (!EHCatchAllValue) return false;
// Lazily resolve the eh.selector declaration.
// NOTE(review): the 'SelectorIntrinsic =' assignment (orig line 255) and the
// if-block's closing brace are on elided lines.
254 if (!SelectorIntrinsic) {
256 Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_selector);
257 if (!SelectorIntrinsic) return false;
260 bool Changed = false;
261 for (Value::use_iterator
262 I = SelectorIntrinsic->use_begin(),
263 E = SelectorIntrinsic->use_end(); I != E; ++I) {
// Only rewrite selector calls in the current function F.
264 IntrinsicInst *Sel = dyn_cast<IntrinsicInst>(I);
265 if (!Sel || Sel->getParent()->getParent() != F) continue;
267 // Index of the ".llvm.eh.catch.all.value" variable.
268 unsigned OpIdx = Sel->getNumOperands() - 1;
269 GlobalVariable *GV = dyn_cast<GlobalVariable>(Sel->getOperand(OpIdx));
270 if (GV != EHCatchAllValue) continue;
// Replace the reference to the sentinel global with its initializer so
// the selector no longer depends on the global itself.
// NOTE(review): 'Changed = true;' and 'return Changed;' are on elided lines.
271 Sel->setOperand(OpIdx, EHCatchAllValue->getInitializer());
278 /// FindSelectorAndURoR - Find the eh.selector call associated with the
279 /// eh.exception call. And indicate if there is a URoR "invoke" associated with
280 /// the eh.exception call. This recursively looks past instructions which don't
281 /// change the EH pointer value, like casts or PHI nodes.
// NOTE(review): the 'bool' return-type line (orig 282) and the 'restart:'
// label targeted by the goto below (around orig 287-288) are on elided
// lines, as is the final 'return Changed;'.
283 DwarfEHPrepare::FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
284 SmallPtrSet<IntrinsicInst*, 8> &SelCalls) {
// Guards against infinite recursion through PHI cycles.
285 SmallPtrSet<PHINode*, 32> SeenPHIs;
286 bool Changed = false;
289 for (Value::use_iterator
290 I = Inst->use_begin(), E = Inst->use_end(); I != E; ++I) {
291 Instruction *II = dyn_cast<Instruction>(I);
292 if (!II || II->getParent()->getParent() != F) continue;
// Record eh.selector consumers of the EH pointer directly.
294 if (IntrinsicInst *Sel = dyn_cast<IntrinsicInst>(II)) {
295 if (Sel->getIntrinsicID() == Intrinsic::eh_selector)
296 SelCalls.insert(Sel);
// NOTE(review): the 'URoRInvoke = true;' body of this branch (orig 299)
// is elided.
297 } else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(II)) {
298 if (Invoke->getCalledFunction() == URoR)
// Casts don't change the EH pointer value: recurse through them.
300 } else if (CastInst *CI = dyn_cast<CastInst>(II)) {
301 Changed |= FindSelectorAndURoR(CI, URoRInvoke, SelCalls);
// Stores of the EH pointer are promoted to SSA form; a successful
// promotion invalidates the use list we are iterating, hence the goto.
302 } else if (StoreInst *SI = dyn_cast<StoreInst>(II)) {
303 if (!PromoteStoreInst(SI)) continue;
306 goto restart; // Uses may have changed, restart loop.
307 } else if (PHINode *PN = dyn_cast<PHINode>(II)) {
308 if (SeenPHIs.insert(PN))
309 // Don't process a PHI node more than once.
310 Changed |= FindSelectorAndURoR(PN, URoRInvoke, SelCalls);
317 /// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow" calls. The
318 /// "unwind" part of these invokes jump to a landing pad within the current
319 /// function. This is a candidate to merge the selector associated with the URoR
320 /// invoke with the one from the URoR's landing pad.
// NOTE(review): elided listing -- several assignments ('EHCatchAllValue =',
// 'SelectorIntrinsic =', 'URoRInvoke = true' handling, loop/brace closers,
// counters, and the final return) fall between the numbered lines.
321 bool DwarfEHPrepare::HandleURoRInvokes() {
322 if (!DT) return CleanupSelectors(); // We require DominatorTree information.
// Lazily resolve the catch-all sentinel global and eh.selector declaration;
// bail out (after selector cleanup where appropriate) if absent.
324 if (!EHCatchAllValue) {
326 F->getParent()->getNamedGlobal(".llvm.eh.catch.all.value");
327 if (!EHCatchAllValue) return false;
330 if (!SelectorIntrinsic) {
332 Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_selector);
333 if (!SelectorIntrinsic) return false;
337 URoR = F->getParent()->getFunction("_Unwind_Resume_or_Rethrow");
338 if (!URoR) return CleanupSelectors();
341 SmallPtrSet<IntrinsicInst*, 32> Sels;
342 SmallPtrSet<InvokeInst*, 32> URoRInvokes;
343 FindAllCleanupSelectors(Sels);
344 FindAllURoRInvokes(URoRInvokes);
346 SmallPtrSet<IntrinsicInst*, 32> SelsToConvert;
// Fast path: a clean-up selector whose block dominates (or equals) a URoR
// invoke's block is definitely associated with it -- mark for conversion.
348 for (SmallPtrSet<IntrinsicInst*, 32>::iterator
349 SI = Sels.begin(), SE = Sels.end(); SI != SE; ++SI) {
350 const BasicBlock *SelBB = (*SI)->getParent();
351 for (SmallPtrSet<InvokeInst*, 32>::iterator
352 UI = URoRInvokes.begin(), UE = URoRInvokes.end(); UI != UE; ++UI) {
353 const BasicBlock *URoRBB = (*UI)->getParent();
354 if (SelBB == URoRBB || DT->dominates(SelBB, URoRBB)) {
355 SelsToConvert.insert(*SI);
361 bool Changed = false;
363 if (Sels.size() != SelsToConvert.size()) {
364 // If we haven't been able to convert all of the clean-up selectors, then
365 // loop through the slow way to see if they still need to be converted.
366 if (!ExceptionValueIntrinsic) {
367 ExceptionValueIntrinsic =
368 Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_exception)
369 if (!ExceptionValueIntrinsic) return CleanupSelectors();
// Slow path: chase each eh.exception value in F to the selector and URoR
// users it feeds (see FindSelectorAndURoR).
372 for (Value::use_iterator
373 I = ExceptionValueIntrinsic->use_begin(),
374 E = ExceptionValueIntrinsic->use_end(); I != E; ++I) {
375 IntrinsicInst *EHPtr = dyn_cast<IntrinsicInst>(I);
376 if (!EHPtr || EHPtr->getParent()->getParent() != F) continue;
378 Changed |= PromoteEHPtrStore(EHPtr);
380 bool URoRInvoke = false;
381 SmallPtrSet<IntrinsicInst*, 8> SelCalls;
382 Changed |= FindSelectorAndURoR(EHPtr, URoRInvoke, SelCalls);
385 // This EH pointer is being used by an invoke of an URoR instruction and
386 // an eh.selector intrinsic call. If the eh.selector is a 'clean-up', we
387 // need to convert it to a 'catch-all'.
388 for (SmallPtrSet<IntrinsicInst*, 8>::iterator
389 SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI)
390 if (IsACleanupSelector(*SI))
391 SelsToConvert.insert(*SI);
396 if (!SelsToConvert.empty()) {
397 // Convert all clean-up eh.selectors, which are associated with "invokes" of
398 // URoR calls, into catch-all eh.selectors.
401 for (SmallPtrSet<IntrinsicInst*, 8>::iterator
402 SI = SelsToConvert.begin(), SE = SelsToConvert.end();
404 IntrinsicInst *II = *SI;
405 SmallVector<Value*, 8> Args;
407 // Use the exception object pointer and the personality function
408 // from the original selector.
409 Args.push_back(II->getOperand(1)); // Exception object pointer.
410 Args.push_back(II->getOperand(2)); // Personality function.
// E excludes a trailing ConstantInt (the clean-up marker) so only real
// filter IDs are copied over; the loop header itself is on elided lines.
413 unsigned E = II->getNumOperands() -
414 (isa<ConstantInt>(II->getOperand(II->getNumOperands() - 1)) ? 1 : 0);
416 // Add in any filter IDs.
418 Args.push_back(II->getOperand(I));
420 Args.push_back(EHCatchAllValue->getInitializer()); // Catch-all indicator.
422 CallInst *NewSelector =
423 CallInst::Create(SelectorIntrinsic, Args.begin(), Args.end(),
424 "eh.sel.catch.all", II);
// Preserve the original call's ABI-relevant properties.
426 NewSelector->setTailCall(II->isTailCall());
427 NewSelector->setAttributes(II->getAttributes());
428 NewSelector->setCallingConv(II->getCallingConv());
430 II->replaceAllUsesWith(NewSelector);
431 II->eraseFromParent();
// Finally, fold any remaining catch-all-global references into selectors.
435 Changed |= CleanupSelectors();
439 /// NormalizeLandingPads - Normalize and discover landing pads, noting them
440 /// in the LandingPads set. A landing pad is normal if the only CFG edges
441 /// that end at it are unwind edges from invoke instructions. If we inlined
442 /// through an invoke we could have a normal branch from the previous
443 /// unwind block through to the landing pad for the original invoke.
444 /// Abnormal landing pads are fixed up by redirecting all unwind edges to
445 /// a new basic block which falls through to the original.
// NOTE(review): elided listing -- various 'continue' statements, the
// InVal-reset on mismatch, the 'if (DT)/if (DF)' guards before splitBlock,
// 'Changed = true', and the final return are on missing lines.
446 bool DwarfEHPrepare::NormalizeLandingPads() {
447 bool Changed = false;
449 const MCAsmInfo *MAI = TM->getMCAsmInfo();
450 bool usingSjLjEH = MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;
// Scan every block; invoke terminators identify candidate landing pads via
// their unwind successor (successor 1).
452 for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
453 TerminatorInst *TI = I->getTerminator();
454 if (!isa<InvokeInst>(TI))
456 BasicBlock *LPad = TI->getSuccessor(1);
457 // Skip landing pads that have already been normalized.
458 if (LandingPads.count(LPad))
461 // Check that only invoke unwind edges end at the landing pad.
462 bool OnlyUnwoundTo = true;
463 bool SwitchOK = usingSjLjEH;
464 for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad);
466 TerminatorInst *PT = (*PI)->getTerminator();
467 // The SjLj dispatch block uses a switch instruction. This is effectively
468 // an unwind edge, so we can disregard it here. There will only ever
469 // be one dispatch, however, so if there are multiple switches, one
470 // of them truly is a normal edge, not an unwind edge.
471 if (SwitchOK && isa<SwitchInst>(PT)) {
// A non-invoke predecessor, or an invoke reaching here via its *normal*
// successor (successor 0), makes this an abnormal landing pad.
475 if (!isa<InvokeInst>(PT) || LPad == PT->getSuccessor(0)) {
476 OnlyUnwoundTo = false;
482 // Only unwind edges lead to the landing pad. Remember the landing pad.
483 LandingPads.insert(LPad);
487 // At least one normal edge ends at the landing pad. Redirect the unwind
488 // edges to a new basic block which falls through into this one.
490 // Create the new basic block.
491 BasicBlock *NewBB = BasicBlock::Create(F->getContext(),
492 LPad->getName() + "_unwind_edge");
494 // Insert it into the function right before the original landing pad.
495 LPad->getParent()->getBasicBlockList().insert(LPad, NewBB);
497 // Redirect unwind edges from the original landing pad to NewBB.
// Post-increment inside the loop because setSuccessor mutates the
// predecessor list being walked.
498 for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad); PI != PE; ) {
499 TerminatorInst *PT = (*PI++)->getTerminator();
500 if (isa<InvokeInst>(PT) && PT->getSuccessor(1) == LPad)
501 // Unwind to the new block.
502 PT->setSuccessor(1, NewBB);
505 // If there are any PHI nodes in LPad, we need to update them so that they
506 // merge incoming values from NewBB instead.
507 for (BasicBlock::iterator II = LPad->begin(); isa<PHINode>(II); ++II) {
508 PHINode *PN = cast<PHINode>(II);
509 pred_iterator PB = pred_begin(NewBB), PE = pred_end(NewBB);
511 // Check to see if all of the values coming in via unwind edges are the
512 // same. If so, we don't need to create a new PHI node.
513 Value *InVal = PN->getIncomingValueForBlock(*PB);
514 for (pred_iterator PI = PB; PI != PE; ++PI) {
515 if (PI != PB && InVal != PN->getIncomingValueForBlock(*PI)) {
522 // Different unwind edges have different values. Create a new PHI node
// NOTE(review): the insertion point argument of PHINode::Create (orig
// line 525) is elided.
524 PHINode *NewPN = PHINode::Create(PN->getType(), PN->getName()+".unwind",
526 // Add an entry for each unwind edge, using the value from the old PHI.
527 for (pred_iterator PI = PB; PI != PE; ++PI)
528 NewPN->addIncoming(PN->getIncomingValueForBlock(*PI), *PI);
530 // Now use this new PHI as the common incoming value for NewBB in PN.
534 // Revector exactly one entry in the PHI node to come from NewBB
535 // and delete all other entries that come from unwind edges. If
536 // there are both normal and unwind edges from the same predecessor,
537 // this leaves an entry for the normal edge.
538 for (pred_iterator PI = PB; PI != PE; ++PI)
539 PN->removeIncomingValue(*PI);
540 PN->addIncoming(InVal, NewBB);
543 // Add a fallthrough from NewBB to the original landing pad.
544 BranchInst::Create(LPad, NewBB);
546 // Now update DominatorTree and DominanceFrontier analysis information.
548 DT->splitBlock(NewBB);
550 DF->splitBlock(NewBB);
552 // Remember the newly constructed landing pad. The original landing pad
553 // LPad is no longer a landing pad now that all unwind edges have been
554 // revectored to NewBB.
555 LandingPads.insert(NewBB);
556 ++NumLandingPadsSplit;
563 /// LowerUnwinds - Turn unwind instructions into calls to _Unwind_Resume,
564 /// rethrowing any previously caught exception. This will crash horribly
565 /// at runtime if there is no such exception: using unwind to throw a new
566 /// exception is currently not supported.
567 bool DwarfEHPrepare::LowerUnwinds() {
// First collect all UnwindInsts so the rewrite loop below doesn't iterate
// over blocks it is mutating.
568 SmallVector<TerminatorInst*, 16> UnwindInsts;
570 for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
571 TerminatorInst *TI = I->getTerminator();
572 if (isa<UnwindInst>(TI))
573 UnwindInsts.push_back(TI);
576 if (UnwindInsts.empty()) return false;
578 // Find the rewind function if we didn't already.
// Signature: void(i8*) -- the single argument is the exception pointer.
// NOTE(review): the trailing FunctionType::get arguments (orig line 584)
// and the if-block close are elided.
579 if (!RewindFunction) {
580 LLVMContext &Ctx = UnwindInsts[0]->getContext();
581 std::vector<const Type*>
582 Params(1, Type::getInt8PtrTy(Ctx));
583 FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
585 const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
586 RewindFunction = F->getParent()->getOrInsertFunction(RewindName, FTy);
589 bool Changed = false;
591 for (SmallVectorImpl<TerminatorInst*>::iterator
592 I = UnwindInsts.begin(), E = UnwindInsts.end(); I != E; ++I) {
593 TerminatorInst *TI = *I;
595 // Replace the unwind instruction with a call to _Unwind_Resume (or the
596 // appropriate target equivalent) followed by an UnreachableInst.
598 // Create the call...
// Argument: the current exception value, read (or loaded) at this block.
// NOTE(review): the CallInst::Create name/insert-point arguments (orig
// line 601) are elided.
599 CallInst *CI = CallInst::Create(RewindFunction,
600 CreateReadOfExceptionValue(TI->getParent()),
602 CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
603 // ...followed by an UnreachableInst.
604 new UnreachableInst(TI->getContext(), TI);
606 // Nuke the unwind instruction.
// NOTE(review): '++NumUnwindsLowered', 'Changed = true', and the final
// 'return Changed;' are on elided lines.
607 TI->eraseFromParent();
615 /// MoveExceptionValueCalls - Ensure that eh.exception is only ever called from
616 /// landing pads by replacing calls outside of landing pads with loads from a
617 /// stack temporary. Move eh.exception calls inside landing pads to the start
618 /// of the landing pad (optional, but may make things simpler for later passes).
619 bool DwarfEHPrepare::MoveExceptionValueCalls() {
620 // If the eh.exception intrinsic is not declared in the module then there is
621 // nothing to do. Speed up compilation by checking for this common case.
// NOTE(review): the early 'return false;' body of this condition (orig
// line 624) is elided, as are the dead-call erase path, 'Changed = true',
// and the final return.
622 if (!ExceptionValueIntrinsic &&
623 !F->getParent()->getFunction(Intrinsic::getName(Intrinsic::eh_exception)))
626 bool Changed = false;
// The inner iterator is advanced (II++) before the current instruction can
// be erased, keeping the traversal valid.
628 for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
629 for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
630 if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
631 if (CI->getIntrinsicID() == Intrinsic::eh_exception) {
632 if (!CI->use_empty()) {
// Canonical read: a fresh call at the landing-pad start, or a load of
// the stack temporary outside landing pads.
633 Value *ExceptionValue = CreateReadOfExceptionValue(BB);
634 if (CI == ExceptionValue) {
635 // The call was at the start of a landing pad - leave it alone.
636 assert(LandingPads.count(BB) &&
637 "Created eh.exception call outside landing pad!");
640 CI->replaceAllUsesWith(ExceptionValue);
642 CI->eraseFromParent();
643 ++NumExceptionValuesMoved;
651 /// FinishStackTemporaries - If we introduced a stack variable to hold the
652 /// exception value then initialize it in each landing pad.
653 bool DwarfEHPrepare::FinishStackTemporaries() {
// No temporary was created by CreateValueLoad, so nothing to initialize.
// NOTE(review): the early 'return false;' (orig ~655), loop header tail,
// 'Changed = true', and final return are on elided lines.
654 if (!ExceptionValueVar)
658 bool Changed = false;
660 // Make sure that there is a store of the exception value at the start of
662 for (BBSet::iterator LI = LandingPads.begin(), LE = LandingPads.end();
// Read the exception value in the landing pad and immediately spill it to
// the shared stack slot so non-landing-pad readers can load it.
664 Instruction *ExceptionValue = CreateReadOfExceptionValue(*LI);
665 Instruction *Store = new StoreInst(ExceptionValue, ExceptionValueVar);
666 Store->insertAfter(ExceptionValue);
673 /// PromoteStackTemporaries - Turn any stack temporaries we introduced into
674 /// registers if possible.
// Requires both DominatorTree and DominanceFrontier; otherwise the alloca
// stays in memory. NOTE(review): the 'return true;'/'return false;' tail
// of this function is on elided lines.
675 bool DwarfEHPrepare::PromoteStackTemporaries() {
676 if (ExceptionValueVar && DT && DF && isAllocaPromotable(ExceptionValueVar)) {
677 // Turn the exception temporary into registers and phi nodes if possible.
678 std::vector<AllocaInst*> Allocas(1, ExceptionValueVar);
679 PromoteMemToReg(Allocas, *DT, *DF);
685 /// CreateExceptionValueCall - Insert a call to the eh.exception intrinsic at
686 /// the start of the basic block (unless there already is one, in which case
687 /// the existing call is returned).
688 Instruction *DwarfEHPrepare::CreateExceptionValueCall(BasicBlock *BB) {
// Insertion point: first real instruction (skipping PHIs and debug info).
689 Instruction *Start = BB->getFirstNonPHIOrDbg();
690 // Is this a call to eh.exception?
691 if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Start))
692 if (CI->getIntrinsicID() == Intrinsic::eh_exception)
693 // Reuse the existing call.
// NOTE(review): the 'return CI;' for the reuse case (orig line 694) is on
// an elided line.
696 // Find the eh.exception intrinsic if we didn't already.
697 if (!ExceptionValueIntrinsic)
698 ExceptionValueIntrinsic = Intrinsic::getDeclaration(F->getParent(),
699 Intrinsic::eh_exception);
702 return CallInst::Create(ExceptionValueIntrinsic, "eh.value.call", Start);
705 /// CreateValueLoad - Insert a load of the exception value stack variable
706 /// (creating it if necessary) at the start of the basic block (unless
707 /// there already is a load, in which case the existing load is returned).
708 Instruction *DwarfEHPrepare::CreateValueLoad(BasicBlock *BB) {
709 Instruction *Start = BB->getFirstNonPHIOrDbg();
710 // Is this a load of the exception temporary?
711 if (ExceptionValueVar)
712 if (LoadInst* LI = dyn_cast<LoadInst>(Start))
713 if (LI->getPointerOperand() == ExceptionValueVar)
714 // Reuse the existing load.
// NOTE(review): the 'return LI;' for the reuse case (orig ~715) is elided.
717 // Create the temporary if we didn't already.
// An i8* slot placed at the very start of the function's entry block, so
// it dominates all uses; counted in NumStackTempsIntroduced.
718 if (!ExceptionValueVar) {
719 ExceptionValueVar = new AllocaInst(PointerType::getUnqual(
720 Type::getInt8Ty(BB->getContext())), "eh.value", F->begin()->begin());
721 ++NumStackTempsIntroduced;
725 return new LoadInst(ExceptionValueVar, "eh.value.load", Start);
// Pass driver: runs each transformation in dependency order and ORs their
// change flags. NOTE(review): this definition continues past the visible
// listing (the 'F = &Fn;' initialization around orig line 735 and the
// function's tail -- presumably state cleanup and 'return Changed;' -- are
// not shown).
728 bool DwarfEHPrepare::runOnFunction(Function &Fn) {
729 bool Changed = false;
731 // Initialize internal state.
// Dominator analyses are optional here ('IfAvailable'); downstream helpers
// null-check DT/DF before using them.
732 DT = getAnalysisIfAvailable<DominatorTree>();
733 DF = getAnalysisIfAvailable<DominanceFrontier>();
734 ExceptionValueVar = 0;
737 // Ensure that only unwind edges end at landing pads (a landing pad is a
738 // basic block where an invoke unwind edge ends).
739 Changed |= NormalizeLandingPads();
741 // Turn unwind instructions into libcalls.
742 Changed |= LowerUnwinds();
744 // TODO: Move eh.selector calls to landing pads and combine them.
746 // Move eh.exception calls to landing pads.
747 Changed |= MoveExceptionValueCalls();
749 // Initialize any stack temporaries we introduced.
750 Changed |= FinishStackTemporaries();
752 // Turn any stack temporaries into registers if possible.
754 Changed |= PromoteStackTemporaries();
// Merge clean-up selectors associated with _Unwind_Resume_or_Rethrow.
756 Changed |= HandleURoRInvokes();