1 //===- SjLjEHPrepare.cpp - Eliminate Invoke & Unwind instructions ---------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This transformation is designed for use by code generators which use SjLj
11 // based exception handling.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "sjljehprepare"
16 #include "llvm/Transforms/Scalar.h"
17 #include "llvm/Constants.h"
18 #include "llvm/DerivedTypes.h"
19 #include "llvm/Instructions.h"
20 #include "llvm/Intrinsics.h"
21 #include "llvm/LLVMContext.h"
22 #include "llvm/Module.h"
23 #include "llvm/Pass.h"
24 #include "llvm/Analysis/Verifier.h"
25 #include "llvm/CodeGen/Passes.h"
26 #include "llvm/Target/TargetData.h"
27 #include "llvm/Target/TargetLowering.h"
28 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
29 #include "llvm/Transforms/Utils/Local.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/IRBuilder.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/ADT/DenseMap.h"
35 #include "llvm/ADT/SetVector.h"
36 #include "llvm/ADT/SmallPtrSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Statistic.h"
42 STATISTIC(NumInvokes, "Number of invokes replaced");
43 STATISTIC(NumSpilled, "Number of registers live across unwind edges");
46 class SjLjEHPrepare : public FunctionPass {
47 const TargetLowering *TLI;
49 Type *FunctionContextTy;
51 Constant *UnregisterFn;
52 Constant *BuiltinSetjmpFn;
53 Constant *FrameAddrFn;
54 Constant *StackAddrFn;
55 Constant *StackRestoreFn;
62 static char ID; // Pass identification, replacement for typeid
63 explicit SjLjEHPrepare(const TargetLowering *tli = NULL)
64 : FunctionPass(ID), TLI(tli) {}
65 bool doInitialization(Module &M);
66 bool runOnFunction(Function &F);
68 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
69 FunctionPass::getAnalysisUsage(AU);
71 const char *getPassName() const {
72 return "SJLJ Exception Handling preparation";
76 bool setupEntryBlockAndCallSites(Function &F);
77 void substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
79 Value *setupFunctionContext(Function &F, ArrayRef<LandingPadInst*> LPads);
80 void lowerIncomingArguments(Function &F);
81 void lowerAcrossUnwindEdges(Function &F, ArrayRef<InvokeInst*> Invokes);
82 void insertCallSiteStore(Instruction *I, int Number);
84 } // end anonymous namespace
86 char SjLjEHPrepare::ID = 0;
88 // Public Interface To the SjLjEHPrepare pass.
89 FunctionPass *llvm::createSjLjEHPreparePass(const TargetLowering *TLI) {
90 return new SjLjEHPrepare(TLI);
92 // doInitialization - Set up decalarations and types needed to process
94 bool SjLjEHPrepare::doInitialization(Module &M) {
95 // Build the function context structure.
96 // builtin_setjmp uses a five word jbuf
97 Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
98 Type *Int32Ty = Type::getInt32Ty(M.getContext());
100 StructType::get(VoidPtrTy, // __prev
101 Int32Ty, // call_site
102 ArrayType::get(Int32Ty, 4), // __data
103 VoidPtrTy, // __personality
105 ArrayType::get(VoidPtrTy, 5), // __jbuf
107 RegisterFn = M.getOrInsertFunction("_Unwind_SjLj_Register",
108 Type::getVoidTy(M.getContext()),
109 PointerType::getUnqual(FunctionContextTy),
112 M.getOrInsertFunction("_Unwind_SjLj_Unregister",
113 Type::getVoidTy(M.getContext()),
114 PointerType::getUnqual(FunctionContextTy),
116 FrameAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::frameaddress);
117 StackAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
118 StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
119 BuiltinSetjmpFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_setjmp);
120 LSDAAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_lsda);
121 CallSiteFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_callsite);
122 FuncCtxFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_functioncontext);
128 /// insertCallSiteStore - Insert a store of the call-site value to the
130 void SjLjEHPrepare::insertCallSiteStore(Instruction *I, int Number) {
131 IRBuilder<> Builder(I);
133 // Get a reference to the call_site field.
134 Type *Int32Ty = Type::getInt32Ty(I->getContext());
135 Value *Zero = ConstantInt::get(Int32Ty, 0);
136 Value *One = ConstantInt::get(Int32Ty, 1);
137 Value *Idxs[2] = { Zero, One };
138 Value *CallSite = Builder.CreateGEP(FuncCtx, Idxs, "call_site");
140 // Insert a store of the call-site number
141 ConstantInt *CallSiteNoC = ConstantInt::get(Type::getInt32Ty(I->getContext()),
143 Builder.CreateStore(CallSiteNoC, CallSite, true/*volatile*/);
146 /// markBlocksLiveIn - Insert BB and all of its predescessors into LiveBBs until
147 /// we reach blocks we've already seen.
148 static void markBlocksLiveIn(BasicBlock *BB, Instruction *Inst,
149 SmallPtrSet<BasicBlock*, 64> &LiveBBs,
150 SmallPtrSet<BasicBlock*, 4> &InvokesCrossed,
152 if (!LiveBBs.insert(BB)) return; // Already been here.
153 if (BB == Inst->getParent()) {
158 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
159 BasicBlock *Pred = *PI;
160 if (BB->isLandingPad() && BB != Inst->getParent()) {
161 InvokesCrossed.insert(Pred);
164 markBlocksLiveIn(Pred, Inst, LiveBBs, InvokesCrossed, FoundDef);
168 /// substituteLPadValues - Substitute the values returned by the landingpad
169 /// instruction with those returned by the personality function.
170 void SjLjEHPrepare::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
172 SmallVector<Value*, 8> UseWorkList(LPI->use_begin(), LPI->use_end());
173 while (!UseWorkList.empty()) {
174 Value *Val = UseWorkList.pop_back_val();
175 ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Val);
177 if (EVI->getNumIndices() != 1) continue;
178 if (*EVI->idx_begin() == 0)
179 EVI->replaceAllUsesWith(ExnVal);
180 else if (*EVI->idx_begin() == 1)
181 EVI->replaceAllUsesWith(SelVal);
182 if (EVI->getNumUses() == 0)
183 EVI->eraseFromParent();
186 if (LPI->getNumUses() == 0) return;
188 // There are still some uses of LPI. Construct an aggregate with the exception
189 // values and replace the LPI with that aggregate.
190 Type *LPadType = LPI->getType();
191 Value *LPadVal = UndefValue::get(LPadType);
193 Builder(llvm::next(BasicBlock::iterator(cast<Instruction>(SelVal))));
194 LPadVal = Builder.CreateInsertValue(LPadVal, ExnVal, 0, "lpad.val");
195 LPadVal = Builder.CreateInsertValue(LPadVal, SelVal, 1, "lpad.val");
197 LPI->replaceAllUsesWith(LPadVal);
200 /// setupFunctionContext - Allocate the function context on the stack and fill
201 /// it with all of the data that we know at this point.
202 Value *SjLjEHPrepare::
203 setupFunctionContext(Function &F, ArrayRef<LandingPadInst*> LPads) {
204 BasicBlock *EntryBB = F.begin();
206 // Create an alloca for the incoming jump buffer ptr and the new jump buffer
207 // that needs to be restored on all exits from the function. This is an alloca
208 // because the value needs to be added to the global context list.
210 TLI->getTargetData()->getPrefTypeAlignment(FunctionContextTy);
212 new AllocaInst(FunctionContextTy, 0, Align, "fn_context", EntryBB->begin());
214 // Fill in the function context structure.
215 Type *Int32Ty = Type::getInt32Ty(F.getContext());
216 Value *Zero = ConstantInt::get(Int32Ty, 0);
217 Value *One = ConstantInt::get(Int32Ty, 1);
218 Value *Two = ConstantInt::get(Int32Ty, 2);
219 Value *Three = ConstantInt::get(Int32Ty, 3);
220 Value *Four = ConstantInt::get(Int32Ty, 4);
222 Value *Idxs[2] = { Zero, 0 };
224 for (unsigned I = 0, E = LPads.size(); I != E; ++I) {
225 LandingPadInst *LPI = LPads[I];
226 IRBuilder<> Builder(LPI->getParent()->getFirstInsertionPt());
228 // Reference the __data field.
230 Value *FCData = Builder.CreateGEP(FuncCtx, Idxs, "__data");
232 // The exception values come back in context->__data[0].
234 Value *ExceptionAddr = Builder.CreateGEP(FCData, Idxs, "exception_gep");
235 Value *ExnVal = Builder.CreateLoad(ExceptionAddr, true, "exn_val");
236 ExnVal = Builder.CreateIntToPtr(ExnVal, Type::getInt8PtrTy(F.getContext()));
239 Value *SelectorAddr = Builder.CreateGEP(FCData, Idxs, "exn_selector_gep");
240 Value *SelVal = Builder.CreateLoad(SelectorAddr, true, "exn_selector_val");
242 substituteLPadValues(LPI, ExnVal, SelVal);
245 // Personality function
248 PersonalityFn = LPads[0]->getPersonalityFn();
249 Value *PersonalityFieldPtr =
250 GetElementPtrInst::Create(FuncCtx, Idxs, "pers_fn_gep",
251 EntryBB->getTerminator());
252 new StoreInst(PersonalityFn, PersonalityFieldPtr, true,
253 EntryBB->getTerminator());
256 Value *LSDA = CallInst::Create(LSDAAddrFn, "lsda_addr",
257 EntryBB->getTerminator());
259 Value *LSDAFieldPtr = GetElementPtrInst::Create(FuncCtx, Idxs, "lsda_gep",
260 EntryBB->getTerminator());
261 new StoreInst(LSDA, LSDAFieldPtr, true, EntryBB->getTerminator());
266 /// lowerIncomingArguments - To avoid having to handle incoming arguments
267 /// specially, we lower each arg to a copy instruction in the entry block. This
268 /// ensures that the argument value itself cannot be live out of the entry
270 void SjLjEHPrepare::lowerIncomingArguments(Function &F) {
271 BasicBlock::iterator AfterAllocaInsPt = F.begin()->begin();
272 while (isa<AllocaInst>(AfterAllocaInsPt) &&
273 isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsPt)->getArraySize()))
276 for (Function::arg_iterator
277 AI = F.arg_begin(), AE = F.arg_end(); AI != AE; ++AI) {
278 Type *Ty = AI->getType();
280 // Aggregate types can't be cast, but are legal argument types, so we have
281 // to handle them differently. We use an extract/insert pair as a
282 // lightweight method to achieve the same goal.
283 if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
284 Instruction *EI = ExtractValueInst::Create(AI, 0, "", AfterAllocaInsPt);
285 Instruction *NI = InsertValueInst::Create(AI, EI, 0);
287 AI->replaceAllUsesWith(NI);
289 // Set the operand of the instructions back to the AllocaInst.
290 EI->setOperand(0, AI);
291 NI->setOperand(0, AI);
293 // This is always a no-op cast because we're casting AI to AI->getType()
294 // so src and destination types are identical. BitCast is the only
297 new BitCastInst(AI, AI->getType(), AI->getName() + ".tmp",
299 AI->replaceAllUsesWith(NC);
301 // Set the operand of the cast instruction back to the AllocaInst.
302 // Normally it's forbidden to replace a CastInst's operand because it
303 // could cause the opcode to reflect an illegal conversion. However, we're
304 // replacing it here with the same value it was constructed with. We do
305 // this because the above replaceAllUsesWith() clobbered the operand, but
306 // we want this one to remain.
307 NC->setOperand(0, AI);
/// lowerAcrossUnwindEdges - Find all variables which are alive across an unwind
/// edge and spill them.
///
/// NOTE(review): this excerpt has interior lines elided (several statements
/// and closing braces are missing), so the comments below describe only the
/// visible logic; the elision points are marked.
void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
                                           ArrayRef<InvokeInst*> Invokes) {
  // (value, user) pairs whose user must be rewritten to reload the value from
  // a stack slot, and the slot allocated for each such pair.
  SmallVector<std::pair<Instruction*, Instruction*>, 32> ReloadUsers;
  DenseMap<std::pair<Instruction*, Instruction*>, AllocaInst*> AllocaMap;

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator
         BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator
           II = BB->begin(), IIE = BB->end(); II != IIE; ++II) {
      // Ignore obvious cases we don't have to handle. In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block. Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value (it will be lowered to a frame slot anyway).
      // NOTE(review): the body of this guard (presumably 'continue;') is
      // elided in this excerpt.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F.begin())

      // Avoid iterator invalidation by copying users to a temporary vector.
      // Only out-of-block users (and PHI users, whose effective location is
      // the predecessor block) matter here.
      SmallVector<Instruction*, 16> Users;
      for (Value::use_iterator
             UI = Inst->use_begin(), E = Inst->use_end(); UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);

      // Find all of the blocks that this value is live in.
      // Per-user maps: which blocks the value is live in for that user, and
      // which invoke blocks the liveness walk crossed.
      std::map<Instruction*, SmallPtrSet<BasicBlock*, 4> > InvokesCrossed;
      std::map<Instruction*, SmallPtrSet<BasicBlock*, 64> > LiveBBs;
      bool FoundDef = false;
      while (!Users.empty()) {
        Instruction *U = Users.pop_back_val();

        if (PHINode *PN = dyn_cast<PHINode>(U)) {
          // Uses for a PHI node occur in their predecessor block.
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              markBlocksLiveIn(PN->getIncomingBlock(i), Inst, LiveBBs[U],
                               InvokesCrossed[U], FoundDef);
          // NOTE(review): the non-PHI branch ('} else {') appears elided;
          // this call presumably handles ordinary users.
          markBlocksLiveIn(U->getParent(), Inst, LiveBBs[U],
                           InvokesCrossed[U], FoundDef);

      // If we hit the definition, resort to the dump-this-value-everywhere
      // NOTE(review): the rest of this comment and the FoundDef handling are
      // elided in this excerpt.

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock == BB) continue;

        for (std::map<Instruction*, SmallPtrSet<BasicBlock*, 64> >::iterator
               MI = LiveBBs.begin(), ME = LiveBBs.end(); MI != ME; ++MI) {
          if (MI->second.count(UnwindBlock)) {
            dbgs() << "SJLJ Spill: " << *Inst << " around "
                   << UnwindBlock->getName() << "\n";

      // If we decided we need a spill, do it.
      // NOTE(review): the 'if (NeedsSpill)' guard around this demotion
      // appears elided.
      DemoteRegToStack(*Inst, true);

      // We don't need this map anymore.
      InvokesCrossed.clear();

      // Go through the invokes the value crosses and insert a spill right
      // before the invoke.
      for (std::map<Instruction*, SmallPtrSet<BasicBlock*, 4> >::iterator
             MI = InvokesCrossed.begin(), ME = InvokesCrossed.end();
        Instruction *User = MI->first;
        SmallPtrSet<BasicBlock*, 4> &Crossings = MI->second;
        if (Crossings.empty()) continue;

        // Remember that this user must reload the value after the invoke.
        ReloadUsers.push_back(std::make_pair(Inst, User));

        AllocaInst *&Slot = AllocaMap[std::make_pair(Inst, User)];
        Slot = new AllocaInst(Inst->getType(), 0,
                              Inst->getName() + ".reg2mem",
                              F.getEntryBlock().begin());

        // Store the value to its slot just before each crossed invoke.
        for (SmallPtrSet<BasicBlock*, 4>::iterator
               CI = Crossings.begin(), CE = Crossings.end(); CI != CE; ++CI) {
          new StoreInst(Inst, Slot, (*CI)->getTerminator());

  // Now go through the instructions which were spilled and replace their uses
  // after a crossed invoke with a reload instruction.
  for (SmallVectorImpl<std::pair<Instruction*, Instruction*> >::iterator
         I = ReloadUsers.begin(), E = ReloadUsers.end(); I != E; ++I) {
    Instruction *User = I->second;
    AllocaInst *Slot = AllocaMap[*I];
    assert(Slot && "A spill slot hasn't been allocated yet!");

    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      // If this is a PHI node, we can't insert a load of the value before the
      // use. Instead insert the load in the predecessor block corresponding to
      // the incoming value.
      //
      // Note that if there are multiple edges from a basic block to this PHI
      // node that we cannot have multiple loads. The problem is that the
      // resulting PHI node will have multiple values (from each load) coming in
      // from the same block, which is illegal SSA form. For this reason, we
      // keep track of and reuse loads we insert.
      DenseMap<BasicBlock*, Value*> Loads;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (PN->getIncomingValue(i) == I->first) {
          Value *&V = Loads[PN->getIncomingBlock(i)];
          // NOTE(review): the 'only create a load once per block' guard
          // ('if (V == 0) {') appears elided here.
          // Insert the load into the predecessor block
          V = new LoadInst(Slot, I->first->getName() + ".reload", true,
                           PN->getIncomingBlock(i)->getTerminator());
          PN->setIncomingValue(i, V);

    // Non-PHI user: reload immediately before the use.
    LoadInst *Reload = new LoadInst(Slot, Slot->getName() + ".reload", User);
    User->replaceUsesOfWith(I->first, Reload);

  // Go through the landing pads and remove any PHIs there.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
    LandingPadInst *LPI = UnwindBlock->getLandingPadInst();

    // Place PHIs into a set to avoid invalidating the iterator.
    SmallPtrSet<PHINode*, 8> PHIsToDemote;
    for (BasicBlock::iterator
           PN = UnwindBlock->begin(); isa<PHINode>(PN); ++PN)
      PHIsToDemote.insert(cast<PHINode>(PN));
    if (PHIsToDemote.empty()) continue;

    // Demote the PHIs to the stack.
    for (SmallPtrSet<PHINode*, 8>::iterator
           I = PHIsToDemote.begin(), E = PHIsToDemote.end(); I != E; ++I)
      DemotePHIToStack(*I);

    // Move the landingpad instruction back to the top of the landing pad block.
    LPI->moveBefore(UnwindBlock->begin());
483 /// setupEntryBlockAndCallSites - Setup the entry block by creating and filling
484 /// the function context and marking the call sites with the appropriate
485 /// values. These values are used by the DWARF EH emitter.
486 bool SjLjEHPrepare::setupEntryBlockAndCallSites(Function &F) {
487 SmallVector<ReturnInst*, 16> Returns;
488 SmallVector<InvokeInst*, 16> Invokes;
489 SmallSetVector<LandingPadInst*, 16> LPads;
491 // Look through the terminators of the basic blocks to find invokes.
492 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
493 if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
494 Invokes.push_back(II);
495 LPads.insert(II->getUnwindDest()->getLandingPadInst());
496 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
497 Returns.push_back(RI);
500 if (Invokes.empty()) return false;
502 NumInvokes += Invokes.size();
504 lowerIncomingArguments(F);
505 lowerAcrossUnwindEdges(F, Invokes);
508 setupFunctionContext(F, makeArrayRef(LPads.begin(), LPads.end()));
509 BasicBlock *EntryBB = F.begin();
510 Type *Int32Ty = Type::getInt32Ty(F.getContext());
513 ConstantInt::get(Int32Ty, 0), 0
516 // Get a reference to the jump buffer.
517 Idxs[1] = ConstantInt::get(Int32Ty, 5);
518 Value *JBufPtr = GetElementPtrInst::Create(FuncCtx, Idxs, "jbuf_gep",
519 EntryBB->getTerminator());
521 // Save the frame pointer.
522 Idxs[1] = ConstantInt::get(Int32Ty, 0);
523 Value *FramePtr = GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_fp_gep",
524 EntryBB->getTerminator());
526 Value *Val = CallInst::Create(FrameAddrFn,
527 ConstantInt::get(Int32Ty, 0),
529 EntryBB->getTerminator());
530 new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());
532 // Save the stack pointer.
533 Idxs[1] = ConstantInt::get(Int32Ty, 2);
534 Value *StackPtr = GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_sp_gep",
535 EntryBB->getTerminator());
537 Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
538 new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());
540 // Call the setjmp instrinsic. It fills in the rest of the jmpbuf.
541 Value *SetjmpArg = CastInst::Create(Instruction::BitCast, JBufPtr,
542 Type::getInt8PtrTy(F.getContext()), "",
543 EntryBB->getTerminator());
544 CallInst::Create(BuiltinSetjmpFn, SetjmpArg, "", EntryBB->getTerminator());
546 // Store a pointer to the function context so that the back-end will know
547 // where to look for it.
548 Value *FuncCtxArg = CastInst::Create(Instruction::BitCast, FuncCtx,
549 Type::getInt8PtrTy(F.getContext()), "",
550 EntryBB->getTerminator());
551 CallInst::Create(FuncCtxFn, FuncCtxArg, "", EntryBB->getTerminator());
553 // At this point, we are all set up, update the invoke instructions to mark
554 // their call_site values.
555 for (unsigned I = 0, E = Invokes.size(); I != E; ++I) {
556 insertCallSiteStore(Invokes[I], I + 1);
558 ConstantInt *CallSiteNum =
559 ConstantInt::get(Type::getInt32Ty(F.getContext()), I + 1);
561 // Record the call site value for the back end so it stays associated with
563 CallInst::Create(CallSiteFn, CallSiteNum, "", Invokes[I]);
566 // Mark call instructions that aren't nounwind as no-action (call_site ==
567 // -1). Skip the entry block, as prior to then, no function context has been
568 // created for this function and any unexpected exceptions thrown will go
569 // directly to the caller's context, which is what we want anyway, so no need
570 // to do anything here.
571 for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;)
572 for (BasicBlock::iterator I = BB->begin(), end = BB->end(); I != end; ++I)
573 if (CallInst *CI = dyn_cast<CallInst>(I)) {
574 if (!CI->doesNotThrow())
575 insertCallSiteStore(CI, -1);
576 } else if (ResumeInst *RI = dyn_cast<ResumeInst>(I)) {
577 insertCallSiteStore(RI, -1);
580 // Register the function context and make sure it's known to not throw
581 CallInst *Register = CallInst::Create(RegisterFn, FuncCtx, "",
582 EntryBB->getTerminator());
583 Register->setDoesNotThrow();
585 // Following any allocas not in the entry block, update the saved SP in the
586 // jmpbuf to the new value.
587 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
590 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
591 if (CallInst *CI = dyn_cast<CallInst>(I)) {
592 if (CI->getCalledFunction() != StackRestoreFn)
594 } else if (!isa<AllocaInst>(I)) {
597 Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
598 StackAddr->insertAfter(I);
599 Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
600 StoreStackAddr->insertAfter(StackAddr);
604 // Finally, for any returns from this function, if this function contains an
605 // invoke, add a call to unregister the function context.
606 for (unsigned I = 0, E = Returns.size(); I != E; ++I)
607 CallInst::Create(UnregisterFn, FuncCtx, "", Returns[I]);
612 bool SjLjEHPrepare::runOnFunction(Function &F) {
613 bool Res = setupEntryBlockAndCallSites(F);
615 if (verifyFunction(F))
616 report_fatal_error("verifyFunction failed!");