//===-- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//
#include "llvm/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/Constants.h"
#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
30 //===----------------------------------------------------------------------===//
32 //===----------------------------------------------------------------------===//
34 User::op_iterator CallSite::getCallee() const {
35 Instruction *II(getInstruction());
37 ? cast<CallInst>(II)->op_end() - 1 // Skip Callee
38 : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee
41 //===----------------------------------------------------------------------===//
42 // TerminatorInst Class
43 //===----------------------------------------------------------------------===//
45 // Out of line virtual method, so the vtable, etc has a home.
46 TerminatorInst::~TerminatorInst() {
49 //===----------------------------------------------------------------------===//
50 // UnaryInstruction Class
51 //===----------------------------------------------------------------------===//
53 // Out of line virtual method, so the vtable, etc has a home.
54 UnaryInstruction::~UnaryInstruction() {
57 //===----------------------------------------------------------------------===//
59 //===----------------------------------------------------------------------===//
61 /// areInvalidOperands - Return a string if the specified operands are invalid
62 /// for a select operation, otherwise return null.
63 const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
64 if (Op1->getType() != Op2->getType())
65 return "both values to select must have same type";
67 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
69 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
70 return "vector select condition element type must be i1";
71 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
73 return "selected values for vector select must be vectors";
74 if (ET->getNumElements() != VT->getNumElements())
75 return "vector select requires selected vectors to have "
76 "the same vector length as select condition";
77 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
78 return "select condition must be i1 or <n x i1>";
84 //===----------------------------------------------------------------------===//
86 //===----------------------------------------------------------------------===//
88 PHINode::PHINode(const PHINode &PN)
89 : Instruction(PN.getType(), Instruction::PHI,
90 allocHungoffUses(PN.getNumOperands()), PN.getNumOperands()),
91 ReservedSpace(PN.getNumOperands()) {
92 std::copy(PN.op_begin(), PN.op_end(), op_begin());
93 std::copy(PN.block_begin(), PN.block_end(), block_begin());
94 SubclassOptionalData = PN.SubclassOptionalData;
101 Use *PHINode::allocHungoffUses(unsigned N) const {
102 // Allocate the array of Uses of the incoming values, followed by a pointer
103 // (with bottom bit set) to the User, followed by the array of pointers to
104 // the incoming basic blocks.
105 size_t size = N * sizeof(Use) + sizeof(Use::UserRef)
106 + N * sizeof(BasicBlock*);
107 Use *Begin = static_cast<Use*>(::operator new(size));
108 Use *End = Begin + N;
109 (void) new(End) Use::UserRef(const_cast<PHINode*>(this), 1);
110 return Use::initTags(Begin, End);
113 // removeIncomingValue - Remove an incoming value. This is useful if a
114 // predecessor basic block is deleted.
115 Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
116 Value *Removed = getIncomingValue(Idx);
118 // Move everything after this operand down.
120 // FIXME: we could just swap with the end of the list, then erase. However,
121 // clients might not expect this to happen. The code as it is thrashes the
122 // use/def lists, which is kinda lame.
123 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
124 std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);
126 // Nuke the last value.
130 // If the PHI node is dead, because it has zero entries, nuke it now.
131 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
132 // If anyone is using this PHI, make them use a dummy value instead...
133 replaceAllUsesWith(UndefValue::get(getType()));
139 /// growOperands - grow operands - This grows the operand list in response
140 /// to a push_back style of operation. This grows the number of ops by 1.5
143 void PHINode::growOperands() {
144 unsigned e = getNumOperands();
145 unsigned NumOps = e + e / 2;
146 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
148 Use *OldOps = op_begin();
149 BasicBlock **OldBlocks = block_begin();
151 ReservedSpace = NumOps;
152 OperandList = allocHungoffUses(ReservedSpace);
154 std::copy(OldOps, OldOps + e, op_begin());
155 std::copy(OldBlocks, OldBlocks + e, block_begin());
157 Use::zap(OldOps, OldOps + e, true);
160 /// hasConstantValue - If the specified PHI node always merges together the same
161 /// value, return the value, otherwise return null.
162 Value *PHINode::hasConstantValue() const {
163 // Exploit the fact that phi nodes always have at least one entry.
164 Value *ConstantValue = getIncomingValue(0);
165 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
166 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
167 if (ConstantValue != this)
168 return 0; // Incoming values not all the same.
169 // The case where the first value is this PHI.
170 ConstantValue = getIncomingValue(i);
172 if (ConstantValue == this)
173 return UndefValue::get(getType());
174 return ConstantValue;
177 //===----------------------------------------------------------------------===//
178 // LandingPadInst Implementation
179 //===----------------------------------------------------------------------===//
181 LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn,
182 unsigned NumReservedValues, const Twine &NameStr,
183 Instruction *InsertBefore)
184 : Instruction(RetTy, Instruction::LandingPad, 0, 0, InsertBefore) {
185 init(PersonalityFn, 1 + NumReservedValues, NameStr);
188 LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn,
189 unsigned NumReservedValues, const Twine &NameStr,
190 BasicBlock *InsertAtEnd)
191 : Instruction(RetTy, Instruction::LandingPad, 0, 0, InsertAtEnd) {
192 init(PersonalityFn, 1 + NumReservedValues, NameStr);
195 LandingPadInst::LandingPadInst(const LandingPadInst &LP)
196 : Instruction(LP.getType(), Instruction::LandingPad,
197 allocHungoffUses(LP.getNumOperands()), LP.getNumOperands()),
198 ReservedSpace(LP.getNumOperands()) {
199 Use *OL = OperandList, *InOL = LP.OperandList;
200 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
203 setCleanup(LP.isCleanup());
206 LandingPadInst::~LandingPadInst() {
210 LandingPadInst *LandingPadInst::Create(Type *RetTy, Value *PersonalityFn,
211 unsigned NumReservedClauses,
212 const Twine &NameStr,
213 Instruction *InsertBefore) {
214 return new LandingPadInst(RetTy, PersonalityFn, NumReservedClauses, NameStr,
218 LandingPadInst *LandingPadInst::Create(Type *RetTy, Value *PersonalityFn,
219 unsigned NumReservedClauses,
220 const Twine &NameStr,
221 BasicBlock *InsertAtEnd) {
222 return new LandingPadInst(RetTy, PersonalityFn, NumReservedClauses, NameStr,
226 void LandingPadInst::init(Value *PersFn, unsigned NumReservedValues,
227 const Twine &NameStr) {
228 ReservedSpace = NumReservedValues;
230 OperandList = allocHungoffUses(ReservedSpace);
231 OperandList[0] = PersFn;
236 /// growOperands - grow operands - This grows the operand list in response to a
237 /// push_back style of operation. This grows the number of ops by 2 times.
238 void LandingPadInst::growOperands(unsigned Size) {
239 unsigned e = getNumOperands();
240 if (ReservedSpace >= e + Size) return;
241 ReservedSpace = (e + Size / 2) * 2;
243 Use *NewOps = allocHungoffUses(ReservedSpace);
244 Use *OldOps = OperandList;
245 for (unsigned i = 0; i != e; ++i)
246 NewOps[i] = OldOps[i];
248 OperandList = NewOps;
249 Use::zap(OldOps, OldOps + e, true);
252 void LandingPadInst::addClause(Value *Val) {
253 unsigned OpNo = getNumOperands();
255 assert(OpNo < ReservedSpace && "Growing didn't work!");
257 OperandList[OpNo] = Val;
260 //===----------------------------------------------------------------------===//
261 // CallInst Implementation
262 //===----------------------------------------------------------------------===//
264 CallInst::~CallInst() {
267 void CallInst::init(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr) {
268 assert(NumOperands == Args.size() + 1 && "NumOperands not set up?");
273 cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
275 assert((Args.size() == FTy->getNumParams() ||
276 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
277 "Calling a function with bad signature!");
279 for (unsigned i = 0; i != Args.size(); ++i)
280 assert((i >= FTy->getNumParams() ||
281 FTy->getParamType(i) == Args[i]->getType()) &&
282 "Calling a function with a bad signature!");
285 std::copy(Args.begin(), Args.end(), op_begin());
289 void CallInst::init(Value *Func, const Twine &NameStr) {
290 assert(NumOperands == 1 && "NumOperands not set up?");
295 cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
297 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
303 CallInst::CallInst(Value *Func, const Twine &Name,
304 Instruction *InsertBefore)
305 : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
306 ->getElementType())->getReturnType(),
308 OperandTraits<CallInst>::op_end(this) - 1,
313 CallInst::CallInst(Value *Func, const Twine &Name,
314 BasicBlock *InsertAtEnd)
315 : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
316 ->getElementType())->getReturnType(),
318 OperandTraits<CallInst>::op_end(this) - 1,
323 CallInst::CallInst(const CallInst &CI)
324 : Instruction(CI.getType(), Instruction::Call,
325 OperandTraits<CallInst>::op_end(this) - CI.getNumOperands(),
326 CI.getNumOperands()) {
327 setAttributes(CI.getAttributes());
328 setTailCall(CI.isTailCall());
329 setCallingConv(CI.getCallingConv());
331 std::copy(CI.op_begin(), CI.op_end(), op_begin());
332 SubclassOptionalData = CI.SubclassOptionalData;
335 void CallInst::addAttribute(unsigned i, Attributes attr) {
336 AttributeSet PAL = getAttributes();
337 PAL = PAL.addAttr(getContext(), i, attr);
341 void CallInst::removeAttribute(unsigned i, Attributes attr) {
342 AttributeSet PAL = getAttributes();
343 PAL = PAL.removeAttr(getContext(), i, attr);
347 bool CallInst::hasFnAttr(Attributes::AttrVal A) const {
348 if (AttributeList.getParamAttributes(AttributeSet::FunctionIndex)
351 if (const Function *F = getCalledFunction())
352 return F->getParamAttributes(AttributeSet::FunctionIndex).hasAttribute(A);
356 bool CallInst::paramHasAttr(unsigned i, Attributes::AttrVal A) const {
357 if (AttributeList.getParamAttributes(i).hasAttribute(A))
359 if (const Function *F = getCalledFunction())
360 return F->getParamAttributes(i).hasAttribute(A);
364 /// IsConstantOne - Return true only if val is constant int 1
365 static bool IsConstantOne(Value *val) {
366 assert(val && "IsConstantOne does not work with NULL val");
367 return isa<ConstantInt>(val) && cast<ConstantInt>(val)->isOne();
370 static Instruction *createMalloc(Instruction *InsertBefore,
371 BasicBlock *InsertAtEnd, Type *IntPtrTy,
372 Type *AllocTy, Value *AllocSize,
373 Value *ArraySize, Function *MallocF,
375 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
376 "createMalloc needs either InsertBefore or InsertAtEnd");
378 // malloc(type) becomes:
379 // bitcast (i8* malloc(typeSize)) to type*
380 // malloc(type, arraySize) becomes:
381 // bitcast (i8 *malloc(typeSize*arraySize)) to type*
383 ArraySize = ConstantInt::get(IntPtrTy, 1);
384 else if (ArraySize->getType() != IntPtrTy) {
386 ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
389 ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
393 if (!IsConstantOne(ArraySize)) {
394 if (IsConstantOne(AllocSize)) {
395 AllocSize = ArraySize; // Operand * 1 = Operand
396 } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
397 Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
399 // Malloc arg is constant product of type size and array size
400 AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
402 // Multiply type size by the array size...
404 AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
405 "mallocsize", InsertBefore);
407 AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
408 "mallocsize", InsertAtEnd);
412 assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
413 // Create the call to Malloc.
414 BasicBlock* BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
415 Module* M = BB->getParent()->getParent();
416 Type *BPTy = Type::getInt8PtrTy(BB->getContext());
417 Value *MallocFunc = MallocF;
419 // prototype malloc as "void *malloc(size_t)"
420 MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy, NULL);
421 PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
422 CallInst *MCall = NULL;
423 Instruction *Result = NULL;
425 MCall = CallInst::Create(MallocFunc, AllocSize, "malloccall", InsertBefore);
427 if (Result->getType() != AllocPtrType)
428 // Create a cast instruction to convert to the right type...
429 Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
431 MCall = CallInst::Create(MallocFunc, AllocSize, "malloccall");
433 if (Result->getType() != AllocPtrType) {
434 InsertAtEnd->getInstList().push_back(MCall);
435 // Create a cast instruction to convert to the right type...
436 Result = new BitCastInst(MCall, AllocPtrType, Name);
439 MCall->setTailCall();
440 if (Function *F = dyn_cast<Function>(MallocFunc)) {
441 MCall->setCallingConv(F->getCallingConv());
442 if (!F->doesNotAlias(0)) F->setDoesNotAlias(0);
444 assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
449 /// CreateMalloc - Generate the IR for a call to malloc:
450 /// 1. Compute the malloc call's argument as the specified type's size,
451 /// possibly multiplied by the array size if the array size is not
453 /// 2. Call malloc with that argument.
454 /// 3. Bitcast the result of the malloc call to the specified type.
455 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
456 Type *IntPtrTy, Type *AllocTy,
457 Value *AllocSize, Value *ArraySize,
460 return createMalloc(InsertBefore, NULL, IntPtrTy, AllocTy, AllocSize,
461 ArraySize, MallocF, Name);
464 /// CreateMalloc - Generate the IR for a call to malloc:
465 /// 1. Compute the malloc call's argument as the specified type's size,
466 /// possibly multiplied by the array size if the array size is not
468 /// 2. Call malloc with that argument.
469 /// 3. Bitcast the result of the malloc call to the specified type.
470 /// Note: This function does not add the bitcast to the basic block, that is the
471 /// responsibility of the caller.
472 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
473 Type *IntPtrTy, Type *AllocTy,
474 Value *AllocSize, Value *ArraySize,
475 Function *MallocF, const Twine &Name) {
476 return createMalloc(NULL, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
477 ArraySize, MallocF, Name);
480 static Instruction* createFree(Value* Source, Instruction *InsertBefore,
481 BasicBlock *InsertAtEnd) {
482 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
483 "createFree needs either InsertBefore or InsertAtEnd");
484 assert(Source->getType()->isPointerTy() &&
485 "Can not free something of nonpointer type!");
487 BasicBlock* BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
488 Module* M = BB->getParent()->getParent();
490 Type *VoidTy = Type::getVoidTy(M->getContext());
491 Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
492 // prototype free as "void free(void*)"
493 Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy, NULL);
494 CallInst* Result = NULL;
495 Value *PtrCast = Source;
497 if (Source->getType() != IntPtrTy)
498 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
499 Result = CallInst::Create(FreeFunc, PtrCast, "", InsertBefore);
501 if (Source->getType() != IntPtrTy)
502 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
503 Result = CallInst::Create(FreeFunc, PtrCast, "");
505 Result->setTailCall();
506 if (Function *F = dyn_cast<Function>(FreeFunc))
507 Result->setCallingConv(F->getCallingConv());
512 /// CreateFree - Generate the IR for a call to the builtin free function.
513 Instruction * CallInst::CreateFree(Value* Source, Instruction *InsertBefore) {
514 return createFree(Source, InsertBefore, NULL);
517 /// CreateFree - Generate the IR for a call to the builtin free function.
518 /// Note: This function does not add the call to the basic block, that is the
519 /// responsibility of the caller.
520 Instruction* CallInst::CreateFree(Value* Source, BasicBlock *InsertAtEnd) {
521 Instruction* FreeCall = createFree(Source, NULL, InsertAtEnd);
522 assert(FreeCall && "CreateFree did not create a CallInst");
526 //===----------------------------------------------------------------------===//
527 // InvokeInst Implementation
528 //===----------------------------------------------------------------------===//
530 void InvokeInst::init(Value *Fn, BasicBlock *IfNormal, BasicBlock *IfException,
531 ArrayRef<Value *> Args, const Twine &NameStr) {
532 assert(NumOperands == 3 + Args.size() && "NumOperands not set up?");
535 Op<-1>() = IfException;
539 cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType());
541 assert(((Args.size() == FTy->getNumParams()) ||
542 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
543 "Invoking a function with bad signature");
545 for (unsigned i = 0, e = Args.size(); i != e; i++)
546 assert((i >= FTy->getNumParams() ||
547 FTy->getParamType(i) == Args[i]->getType()) &&
548 "Invoking a function with a bad signature!");
551 std::copy(Args.begin(), Args.end(), op_begin());
555 InvokeInst::InvokeInst(const InvokeInst &II)
556 : TerminatorInst(II.getType(), Instruction::Invoke,
557 OperandTraits<InvokeInst>::op_end(this)
558 - II.getNumOperands(),
559 II.getNumOperands()) {
560 setAttributes(II.getAttributes());
561 setCallingConv(II.getCallingConv());
562 std::copy(II.op_begin(), II.op_end(), op_begin());
563 SubclassOptionalData = II.SubclassOptionalData;
566 BasicBlock *InvokeInst::getSuccessorV(unsigned idx) const {
567 return getSuccessor(idx);
569 unsigned InvokeInst::getNumSuccessorsV() const {
570 return getNumSuccessors();
572 void InvokeInst::setSuccessorV(unsigned idx, BasicBlock *B) {
573 return setSuccessor(idx, B);
576 bool InvokeInst::hasFnAttr(Attributes::AttrVal A) const {
577 if (AttributeList.getParamAttributes(AttributeSet::FunctionIndex).
580 if (const Function *F = getCalledFunction())
581 return F->getParamAttributes(AttributeSet::FunctionIndex).hasAttribute(A);
585 bool InvokeInst::paramHasAttr(unsigned i, Attributes::AttrVal A) const {
586 if (AttributeList.getParamAttributes(i).hasAttribute(A))
588 if (const Function *F = getCalledFunction())
589 return F->getParamAttributes(i).hasAttribute(A);
593 void InvokeInst::addAttribute(unsigned i, Attributes attr) {
594 AttributeSet PAL = getAttributes();
595 PAL = PAL.addAttr(getContext(), i, attr);
599 void InvokeInst::removeAttribute(unsigned i, Attributes attr) {
600 AttributeSet PAL = getAttributes();
601 PAL = PAL.removeAttr(getContext(), i, attr);
605 LandingPadInst *InvokeInst::getLandingPadInst() const {
606 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
609 //===----------------------------------------------------------------------===//
610 // ReturnInst Implementation
611 //===----------------------------------------------------------------------===//
613 ReturnInst::ReturnInst(const ReturnInst &RI)
614 : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Ret,
615 OperandTraits<ReturnInst>::op_end(this) -
617 RI.getNumOperands()) {
618 if (RI.getNumOperands())
619 Op<0>() = RI.Op<0>();
620 SubclassOptionalData = RI.SubclassOptionalData;
623 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
624 : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
625 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
630 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
631 : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
632 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
637 ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
638 : TerminatorInst(Type::getVoidTy(Context), Instruction::Ret,
639 OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {
642 unsigned ReturnInst::getNumSuccessorsV() const {
643 return getNumSuccessors();
646 /// Out-of-line ReturnInst method, put here so the C++ compiler can choose to
647 /// emit the vtable for the class in this translation unit.
648 void ReturnInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
649 llvm_unreachable("ReturnInst has no successors!");
652 BasicBlock *ReturnInst::getSuccessorV(unsigned idx) const {
653 llvm_unreachable("ReturnInst has no successors!");
656 ReturnInst::~ReturnInst() {
659 //===----------------------------------------------------------------------===//
660 // ResumeInst Implementation
661 //===----------------------------------------------------------------------===//
663 ResumeInst::ResumeInst(const ResumeInst &RI)
664 : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Resume,
665 OperandTraits<ResumeInst>::op_begin(this), 1) {
666 Op<0>() = RI.Op<0>();
669 ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
670 : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
671 OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
675 ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
676 : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
677 OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
681 unsigned ResumeInst::getNumSuccessorsV() const {
682 return getNumSuccessors();
685 void ResumeInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
686 llvm_unreachable("ResumeInst has no successors!");
689 BasicBlock *ResumeInst::getSuccessorV(unsigned idx) const {
690 llvm_unreachable("ResumeInst has no successors!");
693 //===----------------------------------------------------------------------===//
694 // UnreachableInst Implementation
695 //===----------------------------------------------------------------------===//
697 UnreachableInst::UnreachableInst(LLVMContext &Context,
698 Instruction *InsertBefore)
699 : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
700 0, 0, InsertBefore) {
702 UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
703 : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
707 unsigned UnreachableInst::getNumSuccessorsV() const {
708 return getNumSuccessors();
711 void UnreachableInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
712 llvm_unreachable("UnreachableInst has no successors!");
715 BasicBlock *UnreachableInst::getSuccessorV(unsigned idx) const {
716 llvm_unreachable("UnreachableInst has no successors!");
719 //===----------------------------------------------------------------------===//
720 // BranchInst Implementation
721 //===----------------------------------------------------------------------===//
723 void BranchInst::AssertOK() {
725 assert(getCondition()->getType()->isIntegerTy(1) &&
726 "May only branch on boolean predicates!");
729 BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
730 : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
731 OperandTraits<BranchInst>::op_end(this) - 1,
733 assert(IfTrue != 0 && "Branch destination may not be null!");
736 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
737 Instruction *InsertBefore)
738 : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
739 OperandTraits<BranchInst>::op_end(this) - 3,
749 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
750 : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
751 OperandTraits<BranchInst>::op_end(this) - 1,
753 assert(IfTrue != 0 && "Branch destination may not be null!");
757 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
758 BasicBlock *InsertAtEnd)
759 : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
760 OperandTraits<BranchInst>::op_end(this) - 3,
771 BranchInst::BranchInst(const BranchInst &BI) :
772 TerminatorInst(Type::getVoidTy(BI.getContext()), Instruction::Br,
773 OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
774 BI.getNumOperands()) {
775 Op<-1>() = BI.Op<-1>();
776 if (BI.getNumOperands() != 1) {
777 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
778 Op<-3>() = BI.Op<-3>();
779 Op<-2>() = BI.Op<-2>();
781 SubclassOptionalData = BI.SubclassOptionalData;
784 void BranchInst::swapSuccessors() {
785 assert(isConditional() &&
786 "Cannot swap successors of an unconditional branch");
787 Op<-1>().swap(Op<-2>());
789 // Update profile metadata if present and it matches our structural
791 MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
792 if (!ProfileData || ProfileData->getNumOperands() != 3)
795 // The first operand is the name. Fetch them backwards and build a new one.
797 ProfileData->getOperand(0),
798 ProfileData->getOperand(2),
799 ProfileData->getOperand(1)
801 setMetadata(LLVMContext::MD_prof,
802 MDNode::get(ProfileData->getContext(), Ops));
805 BasicBlock *BranchInst::getSuccessorV(unsigned idx) const {
806 return getSuccessor(idx);
808 unsigned BranchInst::getNumSuccessorsV() const {
809 return getNumSuccessors();
811 void BranchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
812 setSuccessor(idx, B);
816 //===----------------------------------------------------------------------===//
817 // AllocaInst Implementation
818 //===----------------------------------------------------------------------===//
820 static Value *getAISize(LLVMContext &Context, Value *Amt) {
822 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
824 assert(!isa<BasicBlock>(Amt) &&
825 "Passed basic block into allocation size parameter! Use other ctor");
826 assert(Amt->getType()->isIntegerTy() &&
827 "Allocation array size is not an integer!");
832 AllocaInst::AllocaInst(Type *Ty, Value *ArraySize,
833 const Twine &Name, Instruction *InsertBefore)
834 : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
835 getAISize(Ty->getContext(), ArraySize), InsertBefore) {
837 assert(!Ty->isVoidTy() && "Cannot allocate void!");
841 AllocaInst::AllocaInst(Type *Ty, Value *ArraySize,
842 const Twine &Name, BasicBlock *InsertAtEnd)
843 : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
844 getAISize(Ty->getContext(), ArraySize), InsertAtEnd) {
846 assert(!Ty->isVoidTy() && "Cannot allocate void!");
850 AllocaInst::AllocaInst(Type *Ty, const Twine &Name,
851 Instruction *InsertBefore)
852 : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
853 getAISize(Ty->getContext(), 0), InsertBefore) {
855 assert(!Ty->isVoidTy() && "Cannot allocate void!");
859 AllocaInst::AllocaInst(Type *Ty, const Twine &Name,
860 BasicBlock *InsertAtEnd)
861 : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
862 getAISize(Ty->getContext(), 0), InsertAtEnd) {
864 assert(!Ty->isVoidTy() && "Cannot allocate void!");
868 AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
869 const Twine &Name, Instruction *InsertBefore)
870 : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
871 getAISize(Ty->getContext(), ArraySize), InsertBefore) {
873 assert(!Ty->isVoidTy() && "Cannot allocate void!");
877 AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
878 const Twine &Name, BasicBlock *InsertAtEnd)
879 : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
880 getAISize(Ty->getContext(), ArraySize), InsertAtEnd) {
882 assert(!Ty->isVoidTy() && "Cannot allocate void!");
886 // Out of line virtual method, so the vtable, etc has a home.
887 AllocaInst::~AllocaInst() {
890 void AllocaInst::setAlignment(unsigned Align) {
891 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
892 assert(Align <= MaximumAlignment &&
893 "Alignment is greater than MaximumAlignment!");
894 setInstructionSubclassData(Log2_32(Align) + 1);
895 assert(getAlignment() == Align && "Alignment representation error!");
898 bool AllocaInst::isArrayAllocation() const {
899 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
904 Type *AllocaInst::getAllocatedType() const {
905 return getType()->getElementType();
908 /// isStaticAlloca - Return true if this alloca is in the entry block of the
909 /// function and is a constant size. If so, the code generator will fold it
910 /// into the prolog/epilog code, so it is basically free.
911 bool AllocaInst::isStaticAlloca() const {
912 // Must be constant size.
913 if (!isa<ConstantInt>(getArraySize())) return false;
915 // Must be in the entry block.
916 const BasicBlock *Parent = getParent();
917 return Parent == &Parent->getParent()->front();
920 //===----------------------------------------------------------------------===//
921 // LoadInst Implementation
922 //===----------------------------------------------------------------------===//
/// Sanity-check this load's operand: it must have pointer type, and an
/// atomic load must carry an explicit (non-zero) alignment.
924 void LoadInst::AssertOK() {
925 assert(getOperand(0)->getType()->isPointerTy() &&
926 "Ptr must have pointer type.");
927 assert(!(isAtomic() && getAlignment() == 0) &&
928 "Alignment required for atomic load");
// Convenience constructor: the load's result type is the pointee type of
// Ptr; the load is made non-atomic (and this variant takes no alignment
// or volatility arguments). Inserts before an existing instruction.
931 LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
932 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
933 Load, Ptr, InsertBef) {
936 setAtomic(NotAtomic);
// Same as above, but appended to the end of a basic block.
941 LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
942 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
943 Load, Ptr, InsertAE) {
946 setAtomic(NotAtomic);
// Variants taking an explicit volatility flag; still non-atomic.
951 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
952 Instruction *InsertBef)
953 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
954 Load, Ptr, InsertBef) {
955 setVolatile(isVolatile);
957 setAtomic(NotAtomic);
962 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
963 BasicBlock *InsertAE)
964 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
965 Load, Ptr, InsertAE) {
966 setVolatile(isVolatile);
968 setAtomic(NotAtomic);
// Variants taking volatility and an explicit alignment; still non-atomic.
973 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
974 unsigned Align, Instruction *InsertBef)
975 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
976 Load, Ptr, InsertBef) {
977 setVolatile(isVolatile);
979 setAtomic(NotAtomic);
984 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
985 unsigned Align, BasicBlock *InsertAE)
986 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
987 Load, Ptr, InsertAE) {
988 setVolatile(isVolatile);
990 setAtomic(NotAtomic);
// Fully general variants: volatility, alignment, atomic ordering and
// synchronization scope are all caller-supplied.
995 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
996 unsigned Align, AtomicOrdering Order,
997 SynchronizationScope SynchScope,
998 Instruction *InsertBef)
999 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
1000 Load, Ptr, InsertBef) {
1001 setVolatile(isVolatile);
1002 setAlignment(Align);
1003 setAtomic(Order, SynchScope);
1008 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
1009 unsigned Align, AtomicOrdering Order,
1010 SynchronizationScope SynchScope,
1011 BasicBlock *InsertAE)
1012 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
1013 Load, Ptr, InsertAE) {
1014 setVolatile(isVolatile);
1015 setAlignment(Align);
1016 setAtomic(Order, SynchScope);
// Legacy const char* name variants; the name is only attached when it is
// non-null and non-empty.
1021 LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
1022 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
1023 Load, Ptr, InsertBef) {
1026 setAtomic(NotAtomic);
1028 if (Name && Name[0]) setName(Name);
1031 LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
1032 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
1033 Load, Ptr, InsertAE) {
1036 setAtomic(NotAtomic);
1038 if (Name && Name[0]) setName(Name);
1041 LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
1042 Instruction *InsertBef)
1043 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
1044 Load, Ptr, InsertBef) {
1045 setVolatile(isVolatile);
1047 setAtomic(NotAtomic);
1049 if (Name && Name[0]) setName(Name);
1052 LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
1053 BasicBlock *InsertAE)
1054 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
1055 Load, Ptr, InsertAE) {
1056 setVolatile(isVolatile);
1058 setAtomic(NotAtomic);
1060 if (Name && Name[0]) setName(Name);
/// setAlignment - Record the load's alignment. The alignment is stored
/// compactly as Log2(Align)+1 in bits 1-5 of the instruction's subclass
/// data (so an encoded value of 0 means "no alignment specified").
/// Align must be a power of two no greater than MaximumAlignment.
1063 void LoadInst::setAlignment(unsigned Align) {
1064 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
1065 assert(Align <= MaximumAlignment &&
1066 "Alignment is greater than MaximumAlignment!");
1067 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
1068 ((Log2_32(Align)+1)<<1));
1069 assert(getAlignment() == Align && "Alignment representation error!");
1072 //===----------------------------------------------------------------------===//
1073 // StoreInst Implementation
1074 //===----------------------------------------------------------------------===//
/// Sanity-check this store's operands: both must be non-null, the second
/// operand must be a pointer whose pointee type matches the stored value's
/// type, and an atomic store must carry an explicit (non-zero) alignment.
1076 void StoreInst::AssertOK() {
1077 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1078 assert(getOperand(1)->getType()->isPointerTy() &&
1079 "Ptr must have pointer type!");
1080 assert(getOperand(0)->getType() ==
1081 cast<PointerType>(getOperand(1)->getType())->getElementType()
1082 && "Ptr must be a pointer to Val type!");
1083 assert(!(isAtomic() && getAlignment() == 0) &&
1084 "Alignment required for atomic store");
// Convenience constructor: a store produces no value (void type) and has
// two operands (the value and the address). This variant is non-atomic
// and takes no volatility or alignment arguments.
1088 StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
1089 : Instruction(Type::getVoidTy(val->getContext()), Store,
1090 OperandTraits<StoreInst>::op_begin(this),
1091 OperandTraits<StoreInst>::operands(this),
1097 setAtomic(NotAtomic);
// Same as above, but appended to the end of a basic block.
1101 StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
1102 : Instruction(Type::getVoidTy(val->getContext()), Store,
1103 OperandTraits<StoreInst>::op_begin(this),
1104 OperandTraits<StoreInst>::operands(this),
1110 setAtomic(NotAtomic);
// Variant taking an explicit volatility flag; still non-atomic.
1114 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1115 Instruction *InsertBefore)
1116 : Instruction(Type::getVoidTy(val->getContext()), Store,
1117 OperandTraits<StoreInst>::op_begin(this),
1118 OperandTraits<StoreInst>::operands(this),
1122 setVolatile(isVolatile);
1124 setAtomic(NotAtomic);
// Variant taking volatility and an explicit alignment; still non-atomic.
1128 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1129 unsigned Align, Instruction *InsertBefore)
1130 : Instruction(Type::getVoidTy(val->getContext()), Store,
1131 OperandTraits<StoreInst>::op_begin(this),
1132 OperandTraits<StoreInst>::operands(this),
1136 setVolatile(isVolatile);
1137 setAlignment(Align);
1138 setAtomic(NotAtomic);
// Fully general variant: volatility, alignment, atomic ordering and
// synchronization scope are all caller-supplied.
1142 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1143 unsigned Align, AtomicOrdering Order,
1144 SynchronizationScope SynchScope,
1145 Instruction *InsertBefore)
1146 : Instruction(Type::getVoidTy(val->getContext()), Store,
1147 OperandTraits<StoreInst>::op_begin(this),
1148 OperandTraits<StoreInst>::operands(this),
1152 setVolatile(isVolatile);
1153 setAlignment(Align);
1154 setAtomic(Order, SynchScope);
// Insert-at-end counterparts of the three variants above.
1158 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1159 BasicBlock *InsertAtEnd)
1160 : Instruction(Type::getVoidTy(val->getContext()), Store,
1161 OperandTraits<StoreInst>::op_begin(this),
1162 OperandTraits<StoreInst>::operands(this),
1166 setVolatile(isVolatile);
1168 setAtomic(NotAtomic);
1172 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1173 unsigned Align, BasicBlock *InsertAtEnd)
1174 : Instruction(Type::getVoidTy(val->getContext()), Store,
1175 OperandTraits<StoreInst>::op_begin(this),
1176 OperandTraits<StoreInst>::operands(this),
1180 setVolatile(isVolatile);
1181 setAlignment(Align);
1182 setAtomic(NotAtomic);
1186 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1187 unsigned Align, AtomicOrdering Order,
1188 SynchronizationScope SynchScope,
1189 BasicBlock *InsertAtEnd)
1190 : Instruction(Type::getVoidTy(val->getContext()), Store,
1191 OperandTraits<StoreInst>::op_begin(this),
1192 OperandTraits<StoreInst>::operands(this),
1196 setVolatile(isVolatile);
1197 setAlignment(Align);
1198 setAtomic(Order, SynchScope);
/// setAlignment - Record the store's alignment. Stored compactly as
/// Log2(Align)+1 in bits 1-5 of the subclass data (0 = unspecified),
/// mirroring LoadInst::setAlignment. Align must be a power of two no
/// greater than MaximumAlignment.
1202 void StoreInst::setAlignment(unsigned Align) {
1203 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
1204 assert(Align <= MaximumAlignment &&
1205 "Alignment is greater than MaximumAlignment!");
1206 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
1207 ((Log2_32(Align)+1) << 1));
1208 assert(getAlignment() == Align && "Alignment representation error!");
1211 //===----------------------------------------------------------------------===//
1212 // AtomicCmpXchgInst Implementation
1213 //===----------------------------------------------------------------------===//
/// Shared constructor body: record ordering/scope and verify that the
/// three operands are non-null, that operand 0 is a pointer, and that the
/// compare value and new value both match the pointee type. A cmpxchg is
/// always atomic, so NotAtomic ordering is rejected.
1215 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1216 AtomicOrdering Ordering,
1217 SynchronizationScope SynchScope) {
1221 setOrdering(Ordering);
1222 setSynchScope(SynchScope);
1224 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1225 "All operands must be non-null!");
1226 assert(getOperand(0)->getType()->isPointerTy() &&
1227 "Ptr must have pointer type!");
1228 assert(getOperand(1)->getType() ==
1229 cast<PointerType>(getOperand(0)->getType())->getElementType()
1230 && "Ptr must be a pointer to Cmp type!");
1231 assert(getOperand(2)->getType() ==
1232 cast<PointerType>(getOperand(0)->getType())->getElementType()
1233 && "Ptr must be a pointer to NewVal type!");
1234 assert(Ordering != NotAtomic &&
1235 "AtomicCmpXchg instructions must be atomic!");
// The instruction's result type is the type of the compare value; both
// constructors defer all checking to Init().
1238 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1239 AtomicOrdering Ordering,
1240 SynchronizationScope SynchScope,
1241 Instruction *InsertBefore)
1242 : Instruction(Cmp->getType(), AtomicCmpXchg,
1243 OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1244 OperandTraits<AtomicCmpXchgInst>::operands(this),
1246 Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
1249 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1250 AtomicOrdering Ordering,
1251 SynchronizationScope SynchScope,
1252 BasicBlock *InsertAtEnd)
1253 : Instruction(Cmp->getType(), AtomicCmpXchg,
1254 OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1255 OperandTraits<AtomicCmpXchgInst>::operands(this),
1257 Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
1260 //===----------------------------------------------------------------------===//
1261 // AtomicRMWInst Implementation
1262 //===----------------------------------------------------------------------===//
/// Shared constructor body: record the read-modify-write operation,
/// ordering and scope, then verify the pointer operand and that Val
/// matches the pointee type. An atomicrmw is always atomic, so NotAtomic
/// ordering is rejected.
1264 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1265 AtomicOrdering Ordering,
1266 SynchronizationScope SynchScope) {
1269 setOperation(Operation);
1270 setOrdering(Ordering);
1271 setSynchScope(SynchScope);
1273 assert(getOperand(0) && getOperand(1) &&
1274 "All operands must be non-null!");
1275 assert(getOperand(0)->getType()->isPointerTy() &&
1276 "Ptr must have pointer type!");
1277 assert(getOperand(1)->getType() ==
1278 cast<PointerType>(getOperand(0)->getType())->getElementType()
1279 && "Ptr must be a pointer to Val type!");
1280 assert(Ordering != NotAtomic &&
1281 "AtomicRMW instructions must be atomic!");
// The instruction's result type is the operand value's type; both
// constructors defer all checking to Init().
1284 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1285 AtomicOrdering Ordering,
1286 SynchronizationScope SynchScope,
1287 Instruction *InsertBefore)
1288 : Instruction(Val->getType(), AtomicRMW,
1289 OperandTraits<AtomicRMWInst>::op_begin(this),
1290 OperandTraits<AtomicRMWInst>::operands(this),
1292 Init(Operation, Ptr, Val, Ordering, SynchScope);
1295 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1296 AtomicOrdering Ordering,
1297 SynchronizationScope SynchScope,
1298 BasicBlock *InsertAtEnd)
1299 : Instruction(Val->getType(), AtomicRMW,
1300 OperandTraits<AtomicRMWInst>::op_begin(this),
1301 OperandTraits<AtomicRMWInst>::operands(this),
1303 Init(Operation, Ptr, Val, Ordering, SynchScope);
1306 //===----------------------------------------------------------------------===//
1307 // FenceInst Implementation
1308 //===----------------------------------------------------------------------===//
/// A fence has no operands and produces no value (void); it only carries
/// an atomic ordering and a synchronization scope.
1310 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1311 SynchronizationScope SynchScope,
1312 Instruction *InsertBefore)
1313 : Instruction(Type::getVoidTy(C), Fence, 0, 0, InsertBefore) {
1314 setOrdering(Ordering);
1315 setSynchScope(SynchScope);
// Same as above, but appended to the end of a basic block.
1318 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1319 SynchronizationScope SynchScope,
1320 BasicBlock *InsertAtEnd)
1321 : Instruction(Type::getVoidTy(C), Fence, 0, 0, InsertAtEnd) {
1322 setOrdering(Ordering);
1323 setSynchScope(SynchScope);
1326 //===----------------------------------------------------------------------===//
1327 // GetElementPtrInst Implementation
1328 //===----------------------------------------------------------------------===//
/// init - Fill in the operand list: operand 0 is the base pointer and the
/// remaining operands are the indices, copied in order.
1330 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1331 const Twine &Name) {
1332 assert(NumOperands == 1 + IdxList.size() && "NumOperands not initialized?");
1333 OperandList[0] = Ptr;
1334 std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1);
/// Copy constructor - clones the operands and the optional-flag bits
/// (e.g. inbounds) of an existing GEP.
1338 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1339 : Instruction(GEPI.getType(), GetElementPtr,
1340 OperandTraits<GetElementPtrInst>::op_end(this)
1341 - GEPI.getNumOperands(),
1342 GEPI.getNumOperands()) {
1343 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1344 SubclassOptionalData = GEPI.SubclassOptionalData;
1347 /// getIndexedType - Returns the type of the element that would be accessed with
1348 /// a gep instruction with the specified parameters.
1350 /// The Idxs pointer should point to a continuous piece of memory containing the
1351 /// indices, either as Value* or uint64_t.
1353 /// A null type is returned if the indices are invalid for the specified
// Templated over the index kind (Value* or uint64_t) so the public
// getIndexedType overloads below can share one implementation. Walks the
// indexed composite types; the loop starts at index 1 because the first
// index steps over the pointer operand without changing the element type.
1356 template <typename IndexTy>
1357 static Type *getIndexedTypeInternal(Type *Ptr, ArrayRef<IndexTy> IdxList) {
1358 PointerType *PTy = dyn_cast<PointerType>(Ptr->getScalarType());
1359 if (!PTy) return 0; // Type isn't a pointer type!
1360 Type *Agg = PTy->getElementType();
1362 // Handle the special case of the empty set index set, which is always valid.
1363 if (IdxList.empty())
1366 // If there is at least one index, the top level type must be sized, otherwise
1367 // it cannot be 'stepped over'.
1368 if (!Agg->isSized())
1371 unsigned CurIdx = 1;
1372 for (; CurIdx != IdxList.size(); ++CurIdx) {
1373 CompositeType *CT = dyn_cast<CompositeType>(Agg);
1374 if (!CT || CT->isPointerTy()) return 0;
1375 IndexTy Index = IdxList[CurIdx];
1376 if (!CT->indexValid(Index)) return 0;
1377 Agg = CT->getTypeAtIndex(Index);
1379 return CurIdx == IdxList.size() ? Agg : 0;
// Public wrappers over getIndexedTypeInternal for the three index kinds.
1382 Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<Value *> IdxList) {
1383 return getIndexedTypeInternal(Ptr, IdxList);
1386 Type *GetElementPtrInst::getIndexedType(Type *Ptr,
1387 ArrayRef<Constant *> IdxList) {
1388 return getIndexedTypeInternal(Ptr, IdxList);
1391 Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<uint64_t> IdxList) {
1392 return getIndexedTypeInternal(Ptr, IdxList);
1395 /// hasAllZeroIndices - Return true if all of the indices of this GEP are
1396 /// zeros. If so, the result pointer and the first operand have the same
1397 /// value, just potentially different types.
1398 bool GetElementPtrInst::hasAllZeroIndices() const {
1399 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1400 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1401 if (!CI->isZero()) return false;
1409 /// hasAllConstantIndices - Return true if all of the indices of this GEP are
1410 /// constant integers. If so, the result pointer and the first operand have
1411 /// a constant offset between them.
1412 bool GetElementPtrInst::hasAllConstantIndices() const {
1413 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1414 if (!isa<ConstantInt>(getOperand(i)))
// Thin forwarders to GEPOperator, which owns the inbounds bit.
1420 void GetElementPtrInst::setIsInBounds(bool B) {
1421 cast<GEPOperator>(this)->setIsInBounds(B);
1424 bool GetElementPtrInst::isInBounds() const {
1425 return cast<GEPOperator>(this)->isInBounds();
/// accumulateConstantOffset - Walk the GEP's indices, adding each field's
/// struct-layout offset (for struct indices) or the index scaled by the
/// element's alloc size (for array/vector indices) into Offset. Offset's
/// bit width must match the pointer width for this address space.
1428 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
1429 APInt &Offset) const {
1430 assert(Offset.getBitWidth() ==
1431 DL.getPointerSizeInBits(getPointerAddressSpace()) &&
1432 "The offset must have exactly as many bits as our pointer.");
1434 for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
1435 GTI != GTE; ++GTI) {
1436 ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
1442 // Handle a struct index, which adds its field offset to the pointer.
1443 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1444 unsigned ElementIdx = OpC->getZExtValue();
1445 const StructLayout *SL = DL.getStructLayout(STy);
1446 Offset += APInt(Offset.getBitWidth(),
1447 SL->getElementOffset(ElementIdx));
1451 // For array or vector indices, scale the index by the size of the type.
1452 APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
1453 Offset += Index * APInt(Offset.getBitWidth(),
1454 DL.getTypeAllocSize(GTI.getIndexedType()));
1459 //===----------------------------------------------------------------------===//
1460 // ExtractElementInst Implementation
1461 //===----------------------------------------------------------------------===//
// The result type is the element type of the vector operand; operand
// validity is checked (under assertions) via isValidOperands.
1463 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1465 Instruction *InsertBef)
1466 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1468 OperandTraits<ExtractElementInst>::op_begin(this),
1470 assert(isValidOperands(Val, Index) &&
1471 "Invalid extractelement instruction operands!");
// Same as above, but appended to the end of a basic block.
1477 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1479 BasicBlock *InsertAE)
1480 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1482 OperandTraits<ExtractElementInst>::op_begin(this),
1484 assert(isValidOperands(Val, Index) &&
1485 "Invalid extractelement instruction operands!");
/// isValidOperands - Val must be a vector and Index an i32.
1493 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1494 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy(32))
1500 //===----------------------------------------------------------------------===//
1501 // InsertElementInst Implementation
1502 //===----------------------------------------------------------------------===//
// The result type is the (unchanged) vector type of the first operand;
// operand validity is checked (under assertions) via isValidOperands.
1504 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1506 Instruction *InsertBef)
1507 : Instruction(Vec->getType(), InsertElement,
1508 OperandTraits<InsertElementInst>::op_begin(this),
1510 assert(isValidOperands(Vec, Elt, Index) &&
1511 "Invalid insertelement instruction operands!");
// Same as above, but appended to the end of a basic block.
1518 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1520 BasicBlock *InsertAE)
1521 : Instruction(Vec->getType(), InsertElement,
1522 OperandTraits<InsertElementInst>::op_begin(this),
1524 assert(isValidOperands(Vec, Elt, Index) &&
1525 "Invalid insertelement instruction operands!");
/// isValidOperands - Vec must be a vector, Elt must have the vector's
/// element type, and Index must be an i32.
1533 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
1534 const Value *Index) {
1535 if (!Vec->getType()->isVectorTy())
1536 return false; // First operand of insertelement must be vector type.
1538 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1539 return false;// Second operand of insertelement must be vector element type.
1541 if (!Index->getType()->isIntegerTy(32))
1542 return false; // Third operand of insertelement must be i32.
1547 //===----------------------------------------------------------------------===//
1548 // ShuffleVectorInst Implementation
1549 //===----------------------------------------------------------------------===//
// The result is a vector with V1's element type but the mask's element
// count (a shuffle may widen or narrow the vector). Operand validity is
// checked (under assertions) via isValidOperands.
1551 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1553 Instruction *InsertBefore)
1554 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1555 cast<VectorType>(Mask->getType())->getNumElements()),
1557 OperandTraits<ShuffleVectorInst>::op_begin(this),
1558 OperandTraits<ShuffleVectorInst>::operands(this),
1560 assert(isValidOperands(V1, V2, Mask) &&
1561 "Invalid shuffle vector instruction operands!");
// Same as above, but appended to the end of a basic block.
1568 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1570 BasicBlock *InsertAtEnd)
1571 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1572 cast<VectorType>(Mask->getType())->getNumElements()),
1574 OperandTraits<ShuffleVectorInst>::op_begin(this),
1575 OperandTraits<ShuffleVectorInst>::operands(this),
1577 assert(isValidOperands(V1, V2, Mask) &&
1578 "Invalid shuffle vector instruction operands!");
/// isValidOperands - V1 and V2 must be vectors of the same type, and Mask
/// must be a vector of i32 whose constant entries (if any) are each undef
/// or less than twice V1's element count.
1586 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
1587 const Value *Mask) {
1588 // V1 and V2 must be vectors of the same type.
1589 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1592 // Mask must be vector of i32.
1593 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
1594 if (MaskTy == 0 || !MaskTy->getElementType()->isIntegerTy(32))
1597 // Check to see if Mask is valid.
1598 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
1601 if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
1602 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
1603 for (unsigned i = 0, e = MV->getNumOperands(); i != e; ++i) {
1604 if (ConstantInt *CI = dyn_cast<ConstantInt>(MV->getOperand(i))) {
1605 if (CI->uge(V1Size*2))
1607 } else if (!isa<UndefValue>(MV->getOperand(i))) {
1614 if (const ConstantDataSequential *CDS =
1615 dyn_cast<ConstantDataSequential>(Mask)) {
1616 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
1617 for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
1618 if (CDS->getElementAsInteger(i) >= V1Size*2)
1623 // The bitcode reader can create a place holder for a forward reference
1624 // used as the shuffle mask. When this occurs, the shuffle mask will
1625 // fall into this case and fail. To avoid this error, do this bit of
1626 // ugliness to allow such a mask pass.
1627 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Mask))
1628 if (CE->getOpcode() == Instruction::UserOp1)
1634 /// getMaskValue - Return the index from the shuffle mask for the specified
1635 /// output result. This is either -1 if the element is undef or a number less
1636 /// than 2*numelements.
1637 int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) {
1638 assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
// Fast path: packed constant data can be read directly.
1639 if (ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(Mask))
1640 return CDS->getElementAsInteger(i);
1641 Constant *C = Mask->getAggregateElement(i);
1642 if (isa<UndefValue>(C))
1644 return cast<ConstantInt>(C)->getZExtValue();
1647 /// getShuffleMask - Return the full mask for this instruction, where each
1648 /// element is the element number and undef's are returned as -1.
1649 void ShuffleVectorInst::getShuffleMask(Constant *Mask,
1650 SmallVectorImpl<int> &Result) {
1651 unsigned NumElts = Mask->getType()->getVectorNumElements();
// Fast path: packed constant data can be read element by element.
1653 if (ConstantDataSequential *CDS=dyn_cast<ConstantDataSequential>(Mask)) {
1654 for (unsigned i = 0; i != NumElts; ++i)
1655 Result.push_back(CDS->getElementAsInteger(i));
1658 for (unsigned i = 0; i != NumElts; ++i) {
1659 Constant *C = Mask->getAggregateElement(i);
1660 Result.push_back(isa<UndefValue>(C) ? -1 :
1661 cast<ConstantInt>(C)->getZExtValue());
1666 //===----------------------------------------------------------------------===//
1667 // InsertValueInst Class
1668 //===----------------------------------------------------------------------===//
/// init - Record the aggregate/value operands and stash the index list in
/// the Indices member (indices are data, not operands, for this
/// instruction). At least one index is required, and the inserted value's
/// type must match the type reached by indexing into the aggregate.
1670 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
1671 const Twine &Name) {
1672 assert(NumOperands == 2 && "NumOperands not initialized?");
1674 // There's no fundamental reason why we require at least one index
1675 // (other than weirdness with &*IdxBegin being invalid; see
1676 // getelementptr's init routine for example). But there's no
1677 // present need to support it.
1678 assert(Idxs.size() > 0 && "InsertValueInst must have at least one index");
1680 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
1681 Val->getType() && "Inserted value must match indexed type!");
1685 Indices.append(Idxs.begin(), Idxs.end());
/// Copy constructor - clones both operands, the index list, and the
/// optional-flag bits.
1689 InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
1690 : Instruction(IVI.getType(), InsertValue,
1691 OperandTraits<InsertValueInst>::op_begin(this), 2),
1692 Indices(IVI.Indices) {
1693 Op<0>() = IVI.getOperand(0);
1694 Op<1>() = IVI.getOperand(1);
1695 SubclassOptionalData = IVI.SubclassOptionalData;
1698 //===----------------------------------------------------------------------===//
1699 // ExtractValueInst Class
1700 //===----------------------------------------------------------------------===//
/// init - Stash the index list in the Indices member; like insertvalue,
/// the indices are data rather than operands. At least one index is
/// required.
1702 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
1703 assert(NumOperands == 1 && "NumOperands not initialized?");
1705 // There's no fundamental reason why we require at least one index.
1706 // But there's no present need to support it.
1707 assert(Idxs.size() > 0 && "ExtractValueInst must have at least one index");
1709 Indices.append(Idxs.begin(), Idxs.end());
/// Copy constructor - clones the aggregate operand, the index list, and
/// the optional-flag bits.
1713 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
1714 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
1715 Indices(EVI.Indices) {
1716 SubclassOptionalData = EVI.SubclassOptionalData;
1719 // getIndexedType - Returns the type of the element that would be extracted
1720 // with an extractvalue instruction with the specified parameters.
1722 // A null type is returned if the indices are invalid for the specified
1725 Type *ExtractValueInst::getIndexedType(Type *Agg,
1726 ArrayRef<unsigned> Idxs) {
1727 for (unsigned CurIdx = 0; CurIdx != Idxs.size(); ++CurIdx) {
1728 unsigned Index = Idxs[CurIdx];
1729 // We can't use CompositeType::indexValid(Index) here.
1730 // indexValid() always returns true for arrays because getelementptr allows
1731 // out-of-bounds indices. Since we don't allow those for extractvalue and
1732 // insertvalue we need to check array indexing manually.
1733 // Since the only other types we can index into are struct types it's just
1734 // as easy to check those manually as well.
1735 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
1736 if (Index >= AT->getNumElements())
1738 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
1739 if (Index >= ST->getNumElements())
1742 // Not a valid type to index into.
// Step into the selected element and continue with the next index.
1746 Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
1748 return const_cast<Type*>(Agg);
1751 //===----------------------------------------------------------------------===//
1752 // BinaryOperator Class
1753 //===----------------------------------------------------------------------===//
// Constructors allocate the two-operand instruction and defer opcode/type
// checking to init(iType).
1755 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
1756 Type *Ty, const Twine &Name,
1757 Instruction *InsertBefore)
1758 : Instruction(Ty, iType,
1759 OperandTraits<BinaryOperator>::op_begin(this),
1760 OperandTraits<BinaryOperator>::operands(this),
// Same as above, but appended to the end of a basic block.
1768 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
1769 Type *Ty, const Twine &Name,
1770 BasicBlock *InsertAtEnd)
1771 : Instruction(Ty, iType,
1772 OperandTraits<BinaryOperator>::op_begin(this),
1773 OperandTraits<BinaryOperator>::operands(this),
/// init - Assertion-only validation, dispatched on the opcode: operands
/// must share a type, the result type must equal the operand type, and
/// the type must be integer (or integer vector) for integer ops/shifts/
/// logical ops, and floating point (or FP vector) for FP ops.
1782 void BinaryOperator::init(BinaryOps iType) {
1783 Value *LHS = getOperand(0), *RHS = getOperand(1);
1784 (void)LHS; (void)RHS; // Silence warnings.
1785 assert(LHS->getType() == RHS->getType() &&
1786 "Binary operator operand types must match!");
1791 assert(getType() == LHS->getType() &&
1792 "Arithmetic operation should return same type as operands!");
1793 assert(getType()->isIntOrIntVectorTy() &&
1794 "Tried to create an integer operation on a non-integer type!");
1796 case FAdd: case FSub:
1798 assert(getType() == LHS->getType() &&
1799 "Arithmetic operation should return same type as operands!");
1800 assert(getType()->isFPOrFPVectorTy() &&
1801 "Tried to create a floating-point operation on a "
1802 "non-floating-point type!");
1806 assert(getType() == LHS->getType() &&
1807 "Arithmetic operation should return same type as operands!");
1808 assert((getType()->isIntegerTy() || (getType()->isVectorTy() &&
1809 cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
1810 "Incorrect operand type (not integer) for S/UDIV");
1813 assert(getType() == LHS->getType() &&
1814 "Arithmetic operation should return same type as operands!");
1815 assert(getType()->isFPOrFPVectorTy() &&
1816 "Incorrect operand type (not floating point) for FDIV");
1820 assert(getType() == LHS->getType() &&
1821 "Arithmetic operation should return same type as operands!");
1822 assert((getType()->isIntegerTy() || (getType()->isVectorTy() &&
1823 cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
1824 "Incorrect operand type (not integer) for S/UREM");
1827 assert(getType() == LHS->getType() &&
1828 "Arithmetic operation should return same type as operands!");
1829 assert(getType()->isFPOrFPVectorTy() &&
1830 "Incorrect operand type (not floating point) for FREM");
1835 assert(getType() == LHS->getType() &&
1836 "Shift operation should return same type as operands!");
1837 assert((getType()->isIntegerTy() ||
1838 (getType()->isVectorTy() &&
1839 cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
1840 "Tried to create a shift operation on a non-integral type!");
1844 assert(getType() == LHS->getType() &&
1845 "Logical operation should return same type as operands!");
1846 assert((getType()->isIntegerTy() ||
1847 (getType()->isVectorTy() &&
1848 cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
1849 "Tried to create a logical operation on a non-integral type!");
/// Create - Factory: the result type is taken from the (matching) operand
/// types.
1857 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
1859 Instruction *InsertBefore) {
1860 assert(S1->getType() == S2->getType() &&
1861 "Cannot create binary operator with two operands of differing type!");
1862 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
// Insert-at-end form: build unattached, then append to the block.
1865 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
1867 BasicBlock *InsertAtEnd) {
1868 BinaryOperator *Res = Create(Op, S1, S2, Name);
1869 InsertAtEnd->getInstList().push_back(Res);
// CreateNeg and friends - build negation as "sub 0, Op" (or "fsub -0.0,
// Op" for FNeg); getZeroValueForNegation picks the right zero for the
// type. NSW/NUW variants propagate the corresponding wrap flag.
1873 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
1874 Instruction *InsertBefore) {
1875 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
1876 return new BinaryOperator(Instruction::Sub,
1878 Op->getType(), Name, InsertBefore);
1881 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
1882 BasicBlock *InsertAtEnd) {
1883 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
1884 return new BinaryOperator(Instruction::Sub,
1886 Op->getType(), Name, InsertAtEnd);
1889 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
1890 Instruction *InsertBefore) {
1891 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
1892 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
1895 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
1896 BasicBlock *InsertAtEnd) {
1897 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
1898 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
1901 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
1902 Instruction *InsertBefore) {
1903 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
1904 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
1907 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
1908 BasicBlock *InsertAtEnd) {
1909 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
1910 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
1913 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
1914 Instruction *InsertBefore) {
1915 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
1916 return new BinaryOperator(Instruction::FSub, zero, Op,
1917 Op->getType(), Name, InsertBefore);
1920 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
1921 BasicBlock *InsertAtEnd) {
1922 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
1923 return new BinaryOperator(Instruction::FSub, zero, Op,
1924 Op->getType(), Name, InsertAtEnd);
// CreateNot - build bitwise-not as "xor Op, -1".
1927 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
1928 Instruction *InsertBefore) {
1929 Constant *C = Constant::getAllOnesValue(Op->getType());
1930 return new BinaryOperator(Instruction::Xor, Op, C,
1931 Op->getType(), Name, InsertBefore);
1934 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
1935 BasicBlock *InsertAtEnd) {
1936 Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
1937 return new BinaryOperator(Instruction::Xor, Op, AllOnes,
1938 Op->getType(), Name, InsertAtEnd);
1942 // isConstantAllOnes - Helper function for several functions below
1943 static inline bool isConstantAllOnes(const Value *V) {
1944 if (const Constant *C = dyn_cast<Constant>(V))
1945 return C->isAllOnesValue();
/// isNeg - Recognize the "sub -0/0, X" pattern produced by CreateNeg.
1949 bool BinaryOperator::isNeg(const Value *V) {
1950 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
1951 if (Bop->getOpcode() == Instruction::Sub)
1952 if (Constant* C = dyn_cast<Constant>(Bop->getOperand(0)))
1953 return C->isNegativeZeroValue();
/// isFNeg - Recognize the "fsub -0.0, X" pattern produced by CreateFNeg.
1957 bool BinaryOperator::isFNeg(const Value *V) {
1958 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
1959 if (Bop->getOpcode() == Instruction::FSub)
1960 if (Constant* C = dyn_cast<Constant>(Bop->getOperand(0)))
1961 return C->isNegativeZeroValue();
/// isNot - Recognize "xor X, -1" (all-ones on either side).
1965 bool BinaryOperator::isNot(const Value *V) {
1966 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
1967 return (Bop->getOpcode() == Instruction::Xor &&
1968 (isConstantAllOnes(Bop->getOperand(1)) ||
1969 isConstantAllOnes(Bop->getOperand(0))));
// getNegArgument / getFNegArgument - return the value being negated
// (operand 1 of the sub/fsub recognized by isNeg/isFNeg).
1973 Value *BinaryOperator::getNegArgument(Value *BinOp) {
1974 return cast<BinaryOperator>(BinOp)->getOperand(1);
1977 const Value *BinaryOperator::getNegArgument(const Value *BinOp) {
1978 return getNegArgument(const_cast<Value*>(BinOp));
1981 Value *BinaryOperator::getFNegArgument(Value *BinOp) {
1982 return cast<BinaryOperator>(BinOp)->getOperand(1);
1985 const Value *BinaryOperator::getFNegArgument(const Value *BinOp) {
1986 return getFNegArgument(const_cast<Value*>(BinOp));
// getNotArgument - return the value being inverted; the all-ones
// constant may be on either side of the xor.
1989 Value *BinaryOperator::getNotArgument(Value *BinOp) {
1990 assert(isNot(BinOp) && "getNotArgument on non-'not' instruction!");
1991 BinaryOperator *BO = cast<BinaryOperator>(BinOp);
1992 Value *Op0 = BO->getOperand(0);
1993 Value *Op1 = BO->getOperand(1);
1994 if (isConstantAllOnes(Op0)) return Op1;
1996 assert(isConstantAllOnes(Op1));
2000 const Value *BinaryOperator::getNotArgument(const Value *BinOp) {
2001 return getNotArgument(const_cast<Value*>(BinOp));
2005 // swapOperands - Exchange the two operands to this instruction. This
2006 // instruction is safe to use on any binary instruction and does not
2007 // modify the semantics of the instruction. If the instruction is
2008 // order dependent (SetLT f.e.) the opcode is changed.
// NOTE: returns true on FAILURE (non-commutative opcode, nothing swapped).
2010 bool BinaryOperator::swapOperands() {
2011 if (!isCommutative())
2012 return true; // Can't commute operands
2013 Op<0>().swap(Op<1>());
// Wrap/exact flag accessors - forwarded to the Operator subclasses that
// own the corresponding SubclassOptionalData bits.
2017 void BinaryOperator::setHasNoUnsignedWrap(bool b) {
2018 cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
2021 void BinaryOperator::setHasNoSignedWrap(bool b) {
2022 cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
2025 void BinaryOperator::setIsExact(bool b) {
2026 cast<PossiblyExactOperator>(this)->setIsExact(b);
2029 bool BinaryOperator::hasNoUnsignedWrap() const {
2030 return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
2033 bool BinaryOperator::hasNoSignedWrap() const {
2034 return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
2037 bool BinaryOperator::isExact() const {
2038 return cast<PossiblyExactOperator>(this)->isExact();
//===----------------------------------------------------------------------===//
//                             FPMathOperator Class
//===----------------------------------------------------------------------===//
2045 /// getFPAccuracy - Get the maximum error permitted by this operation in ULPs.
2046 /// An accuracy of 0.0 means that the operation should be performed with the
2047 /// default precision.
2048 float FPMathOperator::getFPAccuracy() const {
2050 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2053 ConstantFP *Accuracy = cast<ConstantFP>(MD->getOperand(0));
2054 return Accuracy->getValueAPF().convertToFloat();
//===----------------------------------------------------------------------===//
//                                CastInst Class
//===----------------------------------------------------------------------===//
2062 void CastInst::anchor() {}
2064 // Just determine if this cast only deals with integral->integral conversion.
2065 bool CastInst::isIntegerCast() const {
2066 switch (getOpcode()) {
2067 default: return false;
2068 case Instruction::ZExt:
2069 case Instruction::SExt:
2070 case Instruction::Trunc:
2072 case Instruction::BitCast:
2073 return getOperand(0)->getType()->isIntegerTy() &&
2074 getType()->isIntegerTy();
2078 bool CastInst::isLosslessCast() const {
2079 // Only BitCast can be lossless, exit fast if we're not BitCast
2080 if (getOpcode() != Instruction::BitCast)
2083 // Identity cast is always lossless
2084 Type* SrcTy = getOperand(0)->getType();
2085 Type* DstTy = getType();
2089 // Pointer to pointer is always lossless.
2090 if (SrcTy->isPointerTy())
2091 return DstTy->isPointerTy();
2092 return false; // Other types have no identity values
2095 /// This function determines if the CastInst does not require any bits to be
2096 /// changed in order to effect the cast. Essentially, it identifies cases where
2097 /// no code gen is necessary for the cast, hence the name no-op cast. For
2098 /// example, the following are all no-op casts:
2099 /// # bitcast i32* %x to i8*
2100 /// # bitcast <2 x i32> %x to <4 x i16>
2101 /// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
2102 /// @brief Determine if the described cast is a no-op.
2103 bool CastInst::isNoopCast(Instruction::CastOps Opcode,
2108 default: llvm_unreachable("Invalid CastOp");
2109 case Instruction::Trunc:
2110 case Instruction::ZExt:
2111 case Instruction::SExt:
2112 case Instruction::FPTrunc:
2113 case Instruction::FPExt:
2114 case Instruction::UIToFP:
2115 case Instruction::SIToFP:
2116 case Instruction::FPToUI:
2117 case Instruction::FPToSI:
2118 return false; // These always modify bits
2119 case Instruction::BitCast:
2120 return true; // BitCast never modifies bits.
2121 case Instruction::PtrToInt:
2122 return IntPtrTy->getScalarSizeInBits() ==
2123 DestTy->getScalarSizeInBits();
2124 case Instruction::IntToPtr:
2125 return IntPtrTy->getScalarSizeInBits() ==
2126 SrcTy->getScalarSizeInBits();
2130 /// @brief Determine if a cast is a no-op.
2131 bool CastInst::isNoopCast(Type *IntPtrTy) const {
2132 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
2135 /// This function determines if a pair of casts can be eliminated and what
2136 /// opcode should be used in the elimination. This assumes that there are two
2137 /// instructions like this:
2138 /// * %F = firstOpcode SrcTy %x to MidTy
2139 /// * %S = secondOpcode MidTy %F to DstTy
2140 /// The function returns a resultOpcode so these two casts can be replaced with:
2141 /// * %Replacement = resultOpcode %SrcTy %x to DstTy
2142 /// If no such cast is permited, the function returns 0.
2143 unsigned CastInst::isEliminableCastPair(
2144 Instruction::CastOps firstOp, Instruction::CastOps secondOp,
2145 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
2146 Type *DstIntPtrTy) {
2147 // Define the 144 possibilities for these two cast instructions. The values
2148 // in this matrix determine what to do in a given situation and select the
2149 // case in the switch below. The rows correspond to firstOp, the columns
2150 // correspond to secondOp. In looking at the table below, keep in mind
2151 // the following cast properties:
2153 // Size Compare Source Destination
2154 // Operator Src ? Size Type Sign Type Sign
2155 // -------- ------------ ------------------- ---------------------
2156 // TRUNC > Integer Any Integral Any
2157 // ZEXT < Integral Unsigned Integer Any
2158 // SEXT < Integral Signed Integer Any
2159 // FPTOUI n/a FloatPt n/a Integral Unsigned
2160 // FPTOSI n/a FloatPt n/a Integral Signed
2161 // UITOFP n/a Integral Unsigned FloatPt n/a
2162 // SITOFP n/a Integral Signed FloatPt n/a
2163 // FPTRUNC > FloatPt n/a FloatPt n/a
2164 // FPEXT < FloatPt n/a FloatPt n/a
2165 // PTRTOINT n/a Pointer n/a Integral Unsigned
2166 // INTTOPTR n/a Integral Unsigned Pointer n/a
2167 // BITCAST = FirstClass n/a FirstClass n/a
2169 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2170 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2171 // into "fptoui double to i64", but this loses information about the range
2172 // of the produced value (we no longer know the top-part is all zeros).
2173 // Further this conversion is often much more expensive for typical hardware,
2174 // and causes issues when building libgcc. We disallow fptosi+sext for the
2176 const unsigned numCastOps =
2177 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2178 static const uint8_t CastResults[numCastOps][numCastOps] = {
2179 // T F F U S F F P I B -+
2180 // R Z S P P I I T P 2 N T |
2181 // U E E 2 2 2 2 R E I T C +- secondOp
2182 // N X X U S F F N X N 2 V |
2183 // C T T I I P P C T T P T -+
2184 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3 }, // Trunc -+
2185 { 8, 1, 9,99,99, 2, 0,99,99,99, 2, 3 }, // ZExt |
2186 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3 }, // SExt |
2187 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3 }, // FPToUI |
2188 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3 }, // FPToSI |
2189 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4 }, // UIToFP +- firstOp
2190 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4 }, // SIToFP |
2191 { 99,99,99, 0, 0,99,99, 1, 0,99,99, 4 }, // FPTrunc |
2192 { 99,99,99, 2, 2,99,99,10, 2,99,99, 4 }, // FPExt |
2193 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3 }, // PtrToInt |
2194 { 99,99,99,99,99,99,99,99,99,13,99,12 }, // IntToPtr |
2195 { 5, 5, 5, 6, 6, 5, 5, 6, 6,11, 5, 1 }, // BitCast -+
2198 // If either of the casts are a bitcast from scalar to vector, disallow the
2199 // merging. However, bitcast of A->B->A are allowed.
2200 bool isFirstBitcast = (firstOp == Instruction::BitCast);
2201 bool isSecondBitcast = (secondOp == Instruction::BitCast);
2202 bool chainedBitcast = (SrcTy == DstTy && isFirstBitcast && isSecondBitcast);
2204 // Check if any of the bitcasts convert scalars<->vectors.
2205 if ((isFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2206 (isSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2207 // Unless we are bitcasing to the original type, disallow optimizations.
2208 if (!chainedBitcast) return 0;
2210 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2211 [secondOp-Instruction::CastOpsBegin];
2214 // categorically disallowed
2217 // allowed, use first cast's opcode
2220 // allowed, use second cast's opcode
2223 // no-op cast in second op implies firstOp as long as the DestTy
2224 // is integer and we are not converting between a vector and a
2226 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2230 // no-op cast in second op implies firstOp as long as the DestTy
2231 // is floating point.
2232 if (DstTy->isFloatingPointTy())
2236 // no-op cast in first op implies secondOp as long as the SrcTy
2238 if (SrcTy->isIntegerTy())
2242 // no-op cast in first op implies secondOp as long as the SrcTy
2243 // is a floating point.
2244 if (SrcTy->isFloatingPointTy())
2248 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size
2249 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2251 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
2252 unsigned MidSize = MidTy->getScalarSizeInBits();
2253 if (MidSize >= PtrSize)
2254 return Instruction::BitCast;
2258 // ext, trunc -> bitcast, if the SrcTy and DstTy are same size
2259 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2260 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2261 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2262 unsigned DstSize = DstTy->getScalarSizeInBits();
2263 if (SrcSize == DstSize)
2264 return Instruction::BitCast;
2265 else if (SrcSize < DstSize)
2269 case 9: // zext, sext -> zext, because sext can't sign extend after zext
2270 return Instruction::ZExt;
2272 // fpext followed by ftrunc is allowed if the bit size returned to is
2273 // the same as the original, in which case its just a bitcast
2275 return Instruction::BitCast;
2276 return 0; // If the types are not the same we can't eliminate it.
2278 // bitcast followed by ptrtoint is allowed as long as the bitcast
2279 // is a pointer to pointer cast.
2280 if (SrcTy->isPointerTy() && MidTy->isPointerTy())
2284 // inttoptr, bitcast -> intptr if bitcast is a ptr to ptr cast
2285 if (MidTy->isPointerTy() && DstTy->isPointerTy())
2289 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
2292 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
2293 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2294 unsigned DstSize = DstTy->getScalarSizeInBits();
2295 if (SrcSize <= PtrSize && SrcSize == DstSize)
2296 return Instruction::BitCast;
2300 // cast combination can't happen (error in input). This is for all cases
2301 // where the MidTy is not the same for the two cast instructions.
2302 llvm_unreachable("Invalid Cast Combination");
2304 llvm_unreachable("Error in CastResults table!!!");
2308 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
2309 const Twine &Name, Instruction *InsertBefore) {
2310 assert(castIsValid(op, S, Ty) && "Invalid cast!");
2311 // Construct and return the appropriate CastInst subclass
2313 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
2314 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
2315 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
2316 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
2317 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
2318 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
2319 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
2320 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
2321 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
2322 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
2323 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
2324 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
2325 default: llvm_unreachable("Invalid opcode provided");
2329 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
2330 const Twine &Name, BasicBlock *InsertAtEnd) {
2331 assert(castIsValid(op, S, Ty) && "Invalid cast!");
2332 // Construct and return the appropriate CastInst subclass
2334 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
2335 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
2336 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
2337 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
2338 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
2339 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
2340 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
2341 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
2342 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
2343 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
2344 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
2345 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
2346 default: llvm_unreachable("Invalid opcode provided");
2350 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
2352 Instruction *InsertBefore) {
2353 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2354 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2355 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
2358 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
2360 BasicBlock *InsertAtEnd) {
2361 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2362 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2363 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
2366 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
2368 Instruction *InsertBefore) {
2369 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2370 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2371 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
2374 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
2376 BasicBlock *InsertAtEnd) {
2377 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2378 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2379 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
2382 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
2384 Instruction *InsertBefore) {
2385 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2386 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2387 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
2390 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
2392 BasicBlock *InsertAtEnd) {
2393 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2394 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2395 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
2398 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
2400 BasicBlock *InsertAtEnd) {
2401 assert(S->getType()->isPointerTy() && "Invalid cast");
2402 assert((Ty->isIntegerTy() || Ty->isPointerTy()) &&
2405 if (Ty->isIntegerTy())
2406 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
2407 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2410 /// @brief Create a BitCast or a PtrToInt cast instruction
2411 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
2413 Instruction *InsertBefore) {
2414 assert(S->getType()->isPointerTy() && "Invalid cast");
2415 assert((Ty->isIntegerTy() || Ty->isPointerTy()) &&
2418 if (Ty->isIntegerTy())
2419 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
2420 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2423 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
2424 bool isSigned, const Twine &Name,
2425 Instruction *InsertBefore) {
2426 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
2427 "Invalid integer cast");
2428 unsigned SrcBits = C->getType()->getScalarSizeInBits();
2429 unsigned DstBits = Ty->getScalarSizeInBits();
2430 Instruction::CastOps opcode =
2431 (SrcBits == DstBits ? Instruction::BitCast :
2432 (SrcBits > DstBits ? Instruction::Trunc :
2433 (isSigned ? Instruction::SExt : Instruction::ZExt)));
2434 return Create(opcode, C, Ty, Name, InsertBefore);
2437 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
2438 bool isSigned, const Twine &Name,
2439 BasicBlock *InsertAtEnd) {
2440 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
2442 unsigned SrcBits = C->getType()->getScalarSizeInBits();
2443 unsigned DstBits = Ty->getScalarSizeInBits();
2444 Instruction::CastOps opcode =
2445 (SrcBits == DstBits ? Instruction::BitCast :
2446 (SrcBits > DstBits ? Instruction::Trunc :
2447 (isSigned ? Instruction::SExt : Instruction::ZExt)));
2448 return Create(opcode, C, Ty, Name, InsertAtEnd);
2451 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
2453 Instruction *InsertBefore) {
2454 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
2456 unsigned SrcBits = C->getType()->getScalarSizeInBits();
2457 unsigned DstBits = Ty->getScalarSizeInBits();
2458 Instruction::CastOps opcode =
2459 (SrcBits == DstBits ? Instruction::BitCast :
2460 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
2461 return Create(opcode, C, Ty, Name, InsertBefore);
2464 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
2466 BasicBlock *InsertAtEnd) {
2467 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
2469 unsigned SrcBits = C->getType()->getScalarSizeInBits();
2470 unsigned DstBits = Ty->getScalarSizeInBits();
2471 Instruction::CastOps opcode =
2472 (SrcBits == DstBits ? Instruction::BitCast :
2473 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
2474 return Create(opcode, C, Ty, Name, InsertAtEnd);
2477 // Check whether it is valid to call getCastOpcode for these types.
2478 // This routine must be kept in sync with getCastOpcode.
2479 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
2480 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
2483 if (SrcTy == DestTy)
2486 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
2487 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
2488 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
2489 // An element by element cast. Valid if casting the elements is valid.
2490 SrcTy = SrcVecTy->getElementType();
2491 DestTy = DestVecTy->getElementType();
2494 // Get the bit sizes, we'll need these
2495 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
2496 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
2498 // Run through the possibilities ...
2499 if (DestTy->isIntegerTy()) { // Casting to integral
2500 if (SrcTy->isIntegerTy()) { // Casting from integral
2502 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
2504 } else if (SrcTy->isVectorTy()) { // Casting from vector
2505 return DestBits == SrcBits;
2506 } else { // Casting from something else
2507 return SrcTy->isPointerTy();
2509 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
2510 if (SrcTy->isIntegerTy()) { // Casting from integral
2512 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
2514 } else if (SrcTy->isVectorTy()) { // Casting from vector
2515 return DestBits == SrcBits;
2516 } else { // Casting from something else
2519 } else if (DestTy->isVectorTy()) { // Casting to vector
2520 return DestBits == SrcBits;
2521 } else if (DestTy->isPointerTy()) { // Casting to pointer
2522 if (SrcTy->isPointerTy()) { // Casting from pointer
2524 } else if (SrcTy->isIntegerTy()) { // Casting from integral
2526 } else { // Casting from something else
2529 } else if (DestTy->isX86_MMXTy()) {
2530 if (SrcTy->isVectorTy()) {
2531 return DestBits == SrcBits; // 64-bit vector to MMX
2535 } else { // Casting to something else
2540 // Provide a way to get a "cast" where the cast opcode is inferred from the
2541 // types and size of the operand. This, basically, is a parallel of the
2542 // logic in the castIsValid function below. This axiom should hold:
2543 // castIsValid( getCastOpcode(Val, Ty), Val, Ty)
2544 // should not assert in castIsValid. In other words, this produces a "correct"
2545 // casting opcode for the arguments passed to it.
2546 // This routine must be kept in sync with isCastable.
2547 Instruction::CastOps
2548 CastInst::getCastOpcode(
2549 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
2550 Type *SrcTy = Src->getType();
2552 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
2553 "Only first class types are castable!");
2555 if (SrcTy == DestTy)
2558 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
2559 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
2560 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
2561 // An element by element cast. Find the appropriate opcode based on the
2563 SrcTy = SrcVecTy->getElementType();
2564 DestTy = DestVecTy->getElementType();
2567 // Get the bit sizes, we'll need these
2568 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
2569 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
2571 // Run through the possibilities ...
2572 if (DestTy->isIntegerTy()) { // Casting to integral
2573 if (SrcTy->isIntegerTy()) { // Casting from integral
2574 if (DestBits < SrcBits)
2575 return Trunc; // int -> smaller int
2576 else if (DestBits > SrcBits) { // its an extension
2578 return SExt; // signed -> SEXT
2580 return ZExt; // unsigned -> ZEXT
2582 return BitCast; // Same size, No-op cast
2584 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
2586 return FPToSI; // FP -> sint
2588 return FPToUI; // FP -> uint
2589 } else if (SrcTy->isVectorTy()) {
2590 assert(DestBits == SrcBits &&
2591 "Casting vector to integer of different width");
2592 return BitCast; // Same size, no-op cast
2594 assert(SrcTy->isPointerTy() &&
2595 "Casting from a value that is not first-class type");
2596 return PtrToInt; // ptr -> int
2598 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
2599 if (SrcTy->isIntegerTy()) { // Casting from integral
2601 return SIToFP; // sint -> FP
2603 return UIToFP; // uint -> FP
2604 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
2605 if (DestBits < SrcBits) {
2606 return FPTrunc; // FP -> smaller FP
2607 } else if (DestBits > SrcBits) {
2608 return FPExt; // FP -> larger FP
2610 return BitCast; // same size, no-op cast
2612 } else if (SrcTy->isVectorTy()) {
2613 assert(DestBits == SrcBits &&
2614 "Casting vector to floating point of different width");
2615 return BitCast; // same size, no-op cast
2617 llvm_unreachable("Casting pointer or non-first class to float");
2618 } else if (DestTy->isVectorTy()) {
2619 assert(DestBits == SrcBits &&
2620 "Illegal cast to vector (wrong type or size)");
2622 } else if (DestTy->isPointerTy()) {
2623 if (SrcTy->isPointerTy()) {
2624 return BitCast; // ptr -> ptr
2625 } else if (SrcTy->isIntegerTy()) {
2626 return IntToPtr; // int -> ptr
2628 llvm_unreachable("Casting pointer to other than pointer or int");
2629 } else if (DestTy->isX86_MMXTy()) {
2630 if (SrcTy->isVectorTy()) {
2631 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
2632 return BitCast; // 64-bit vector to MMX
2634 llvm_unreachable("Illegal cast to X86_MMX");
2636 llvm_unreachable("Casting to type that is not first-class");
//===----------------------------------------------------------------------===//
//                          CastInst SubClass Constructors
//===----------------------------------------------------------------------===//
2643 /// Check that the construction parameters for a CastInst are correct. This
2644 /// could be broken out into the separate constructors but it is useful to have
2645 /// it in one place and to eliminate the redundant code for getting the sizes
2646 /// of the types involved.
2648 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
2650 // Check for type sanity on the arguments
2651 Type *SrcTy = S->getType();
2652 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
2653 SrcTy->isAggregateType() || DstTy->isAggregateType())
2656 // Get the size of the types in bits, we'll need this later
2657 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2658 unsigned DstBitSize = DstTy->getScalarSizeInBits();
2660 // If these are vector types, get the lengths of the vectors (using zero for
2661 // scalar types means that checking that vector lengths match also checks that
2662 // scalars are not being converted to vectors or vectors to scalars).
2663 unsigned SrcLength = SrcTy->isVectorTy() ?
2664 cast<VectorType>(SrcTy)->getNumElements() : 0;
2665 unsigned DstLength = DstTy->isVectorTy() ?
2666 cast<VectorType>(DstTy)->getNumElements() : 0;
2668 // Switch on the opcode provided
2670 default: return false; // This is an input error
2671 case Instruction::Trunc:
2672 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
2673 SrcLength == DstLength && SrcBitSize > DstBitSize;
2674 case Instruction::ZExt:
2675 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
2676 SrcLength == DstLength && SrcBitSize < DstBitSize;
2677 case Instruction::SExt:
2678 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
2679 SrcLength == DstLength && SrcBitSize < DstBitSize;
2680 case Instruction::FPTrunc:
2681 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
2682 SrcLength == DstLength && SrcBitSize > DstBitSize;
2683 case Instruction::FPExt:
2684 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
2685 SrcLength == DstLength && SrcBitSize < DstBitSize;
2686 case Instruction::UIToFP:
2687 case Instruction::SIToFP:
2688 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
2689 SrcLength == DstLength;
2690 case Instruction::FPToUI:
2691 case Instruction::FPToSI:
2692 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
2693 SrcLength == DstLength;
2694 case Instruction::PtrToInt:
2695 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
2697 if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
2698 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
2700 return SrcTy->getScalarType()->isPointerTy() &&
2701 DstTy->getScalarType()->isIntegerTy();
2702 case Instruction::IntToPtr:
2703 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
2705 if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
2706 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
2708 return SrcTy->getScalarType()->isIntegerTy() &&
2709 DstTy->getScalarType()->isPointerTy();
2710 case Instruction::BitCast:
2711 // BitCast implies a no-op cast of type only. No bits change.
2712 // However, you can't cast pointers to anything but pointers.
2713 if (SrcTy->isPointerTy() != DstTy->isPointerTy())
2716 // Now we know we're not dealing with a pointer/non-pointer mismatch. In all
2717 // these cases, the cast is okay if the source and destination bit widths
2719 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
2723 TruncInst::TruncInst(
2724 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2725 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
2726 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
2729 TruncInst::TruncInst(
2730 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2731 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
2732 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
2736 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2737 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
2738 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
2742 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2743 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
2744 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
2747 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2748 ) : CastInst(Ty, SExt, S, Name, InsertBefore) {
2749 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
2753 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2754 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
2755 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
2758 FPTruncInst::FPTruncInst(
2759 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2760 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
2761 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
2764 FPTruncInst::FPTruncInst(
2765 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2766 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
2767 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
2770 FPExtInst::FPExtInst(
2771 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2772 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
2773 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
2776 FPExtInst::FPExtInst(
2777 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2778 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
2779 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
2782 UIToFPInst::UIToFPInst(
2783 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2784 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
2785 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
2788 UIToFPInst::UIToFPInst(
2789 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2790 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
2791 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
2794 SIToFPInst::SIToFPInst(
2795 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2796 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
2797 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
2800 SIToFPInst::SIToFPInst(
2801 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2802 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
2803 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
2806 FPToUIInst::FPToUIInst(
2807 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2808 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
2809 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
2812 FPToUIInst::FPToUIInst(
2813 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2814 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
2815 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
2818 FPToSIInst::FPToSIInst(
2819 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2820 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
2821 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
2824 FPToSIInst::FPToSIInst(
2825 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2826 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
2827 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
2830 PtrToIntInst::PtrToIntInst(
2831 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2832 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
2833 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
2836 PtrToIntInst::PtrToIntInst(
2837 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2838 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
2839 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
2842 IntToPtrInst::IntToPtrInst(
2843 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2844 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
2845 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
2848 IntToPtrInst::IntToPtrInst(
2849 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2850 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
2851 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
2854 BitCastInst::BitCastInst(
2855 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
2856 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
2857 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
2860 BitCastInst::BitCastInst(
2861 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
2862 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
2863 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
//===----------------------------------------------------------------------===//
//                               CmpInst Class
//===----------------------------------------------------------------------===//
2870 void CmpInst::anchor() {}
2872 CmpInst::CmpInst(Type *ty, OtherOps op, unsigned short predicate,
2873 Value *LHS, Value *RHS, const Twine &Name,
2874 Instruction *InsertBefore)
2875 : Instruction(ty, op,
2876 OperandTraits<CmpInst>::op_begin(this),
2877 OperandTraits<CmpInst>::operands(this),
2881 setPredicate((Predicate)predicate);
2885 CmpInst::CmpInst(Type *ty, OtherOps op, unsigned short predicate,
2886 Value *LHS, Value *RHS, const Twine &Name,
2887 BasicBlock *InsertAtEnd)
2888 : Instruction(ty, op,
2889 OperandTraits<CmpInst>::op_begin(this),
2890 OperandTraits<CmpInst>::operands(this),
2894 setPredicate((Predicate)predicate);
2899 CmpInst::Create(OtherOps Op, unsigned short predicate,
2900 Value *S1, Value *S2,
2901 const Twine &Name, Instruction *InsertBefore) {
2902 if (Op == Instruction::ICmp) {
2904 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
2907 return new ICmpInst(CmpInst::Predicate(predicate),
2912 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
2915 return new FCmpInst(CmpInst::Predicate(predicate),
2920 CmpInst::Create(OtherOps Op, unsigned short predicate, Value *S1, Value *S2,
2921 const Twine &Name, BasicBlock *InsertAtEnd) {
2922 if (Op == Instruction::ICmp) {
2923 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
2926 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
2930 void CmpInst::swapOperands() {
2931 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
2934 cast<FCmpInst>(this)->swapOperands();
2937 bool CmpInst::isCommutative() const {
2938 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
2939 return IC->isCommutative();
2940 return cast<FCmpInst>(this)->isCommutative();
2943 bool CmpInst::isEquality() const {
2944 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
2945 return IC->isEquality();
2946 return cast<FCmpInst>(this)->isEquality();
2950 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
2952 default: llvm_unreachable("Unknown cmp predicate!");
2953 case ICMP_EQ: return ICMP_NE;
2954 case ICMP_NE: return ICMP_EQ;
2955 case ICMP_UGT: return ICMP_ULE;
2956 case ICMP_ULT: return ICMP_UGE;
2957 case ICMP_UGE: return ICMP_ULT;
2958 case ICMP_ULE: return ICMP_UGT;
2959 case ICMP_SGT: return ICMP_SLE;
2960 case ICMP_SLT: return ICMP_SGE;
2961 case ICMP_SGE: return ICMP_SLT;
2962 case ICMP_SLE: return ICMP_SGT;
2964 case FCMP_OEQ: return FCMP_UNE;
2965 case FCMP_ONE: return FCMP_UEQ;
2966 case FCMP_OGT: return FCMP_ULE;
2967 case FCMP_OLT: return FCMP_UGE;
2968 case FCMP_OGE: return FCMP_ULT;
2969 case FCMP_OLE: return FCMP_UGT;
2970 case FCMP_UEQ: return FCMP_ONE;
2971 case FCMP_UNE: return FCMP_OEQ;
2972 case FCMP_UGT: return FCMP_OLE;
2973 case FCMP_ULT: return FCMP_OGE;
2974 case FCMP_UGE: return FCMP_OLT;
2975 case FCMP_ULE: return FCMP_OGT;
2976 case FCMP_ORD: return FCMP_UNO;
2977 case FCMP_UNO: return FCMP_ORD;
2978 case FCMP_TRUE: return FCMP_FALSE;
2979 case FCMP_FALSE: return FCMP_TRUE;
2983 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
2985 default: llvm_unreachable("Unknown icmp predicate!");
2986 case ICMP_EQ: case ICMP_NE:
2987 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
2989 case ICMP_UGT: return ICMP_SGT;
2990 case ICMP_ULT: return ICMP_SLT;
2991 case ICMP_UGE: return ICMP_SGE;
2992 case ICMP_ULE: return ICMP_SLE;
2996 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
2998 default: llvm_unreachable("Unknown icmp predicate!");
2999 case ICMP_EQ: case ICMP_NE:
3000 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3002 case ICMP_SGT: return ICMP_UGT;
3003 case ICMP_SLT: return ICMP_ULT;
3004 case ICMP_SGE: return ICMP_UGE;
3005 case ICMP_SLE: return ICMP_ULE;
3009 /// Initialize a set of values that all satisfy the condition with C.
3012 ICmpInst::makeConstantRange(Predicate pred, const APInt &C) {
3015 uint32_t BitWidth = C.getBitWidth();
3017 default: llvm_unreachable("Invalid ICmp opcode to ConstantRange ctor!");
3018 case ICmpInst::ICMP_EQ: Upper++; break;
3019 case ICmpInst::ICMP_NE: Lower++; break;
3020 case ICmpInst::ICMP_ULT:
3021 Lower = APInt::getMinValue(BitWidth);
3022 // Check for an empty-set condition.
3024 return ConstantRange(BitWidth, /*isFullSet=*/false);
3026 case ICmpInst::ICMP_SLT:
3027 Lower = APInt::getSignedMinValue(BitWidth);
3028 // Check for an empty-set condition.
3030 return ConstantRange(BitWidth, /*isFullSet=*/false);
3032 case ICmpInst::ICMP_UGT:
3033 Lower++; Upper = APInt::getMinValue(BitWidth); // Min = Next(Max)
3034 // Check for an empty-set condition.
3036 return ConstantRange(BitWidth, /*isFullSet=*/false);
3038 case ICmpInst::ICMP_SGT:
3039 Lower++; Upper = APInt::getSignedMinValue(BitWidth); // Min = Next(Max)
3040 // Check for an empty-set condition.
3042 return ConstantRange(BitWidth, /*isFullSet=*/false);
3044 case ICmpInst::ICMP_ULE:
3045 Lower = APInt::getMinValue(BitWidth); Upper++;
3046 // Check for a full-set condition.
3048 return ConstantRange(BitWidth, /*isFullSet=*/true);
3050 case ICmpInst::ICMP_SLE:
3051 Lower = APInt::getSignedMinValue(BitWidth); Upper++;
3052 // Check for a full-set condition.
3054 return ConstantRange(BitWidth, /*isFullSet=*/true);
3056 case ICmpInst::ICMP_UGE:
3057 Upper = APInt::getMinValue(BitWidth); // Min = Next(Max)
3058 // Check for a full-set condition.
3060 return ConstantRange(BitWidth, /*isFullSet=*/true);
3062 case ICmpInst::ICMP_SGE:
3063 Upper = APInt::getSignedMinValue(BitWidth); // Min = Next(Max)
3064 // Check for a full-set condition.
3066 return ConstantRange(BitWidth, /*isFullSet=*/true);
3069 return ConstantRange(Lower, Upper);
3072 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
3074 default: llvm_unreachable("Unknown cmp predicate!");
3075 case ICMP_EQ: case ICMP_NE:
3077 case ICMP_SGT: return ICMP_SLT;
3078 case ICMP_SLT: return ICMP_SGT;
3079 case ICMP_SGE: return ICMP_SLE;
3080 case ICMP_SLE: return ICMP_SGE;
3081 case ICMP_UGT: return ICMP_ULT;
3082 case ICMP_ULT: return ICMP_UGT;
3083 case ICMP_UGE: return ICMP_ULE;
3084 case ICMP_ULE: return ICMP_UGE;
3086 case FCMP_FALSE: case FCMP_TRUE:
3087 case FCMP_OEQ: case FCMP_ONE:
3088 case FCMP_UEQ: case FCMP_UNE:
3089 case FCMP_ORD: case FCMP_UNO:
3091 case FCMP_OGT: return FCMP_OLT;
3092 case FCMP_OLT: return FCMP_OGT;
3093 case FCMP_OGE: return FCMP_OLE;
3094 case FCMP_OLE: return FCMP_OGE;
3095 case FCMP_UGT: return FCMP_ULT;
3096 case FCMP_ULT: return FCMP_UGT;
3097 case FCMP_UGE: return FCMP_ULE;
3098 case FCMP_ULE: return FCMP_UGE;
3102 bool CmpInst::isUnsigned(unsigned short predicate) {
3103 switch (predicate) {
3104 default: return false;
3105 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
3106 case ICmpInst::ICMP_UGE: return true;
3110 bool CmpInst::isSigned(unsigned short predicate) {
3111 switch (predicate) {
3112 default: return false;
3113 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
3114 case ICmpInst::ICMP_SGE: return true;
3118 bool CmpInst::isOrdered(unsigned short predicate) {
3119 switch (predicate) {
3120 default: return false;
3121 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
3122 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
3123 case FCmpInst::FCMP_ORD: return true;
3127 bool CmpInst::isUnordered(unsigned short predicate) {
3128 switch (predicate) {
3129 default: return false;
3130 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
3131 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
3132 case FCmpInst::FCMP_UNO: return true;
3136 bool CmpInst::isTrueWhenEqual(unsigned short predicate) {
3138 default: return false;
3139 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3140 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3144 bool CmpInst::isFalseWhenEqual(unsigned short predicate) {
3146 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3147 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3148 default: return false;
3153 //===----------------------------------------------------------------------===//
3154 // SwitchInst Implementation
3155 //===----------------------------------------------------------------------===//
3157 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
3158 assert(Value && Default && NumReserved);
3159 ReservedSpace = NumReserved;
3161 OperandList = allocHungoffUses(ReservedSpace);
3163 OperandList[0] = Value;
3164 OperandList[1] = Default;
3167 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
3168 /// switch on and a default destination. The number of additional cases can
3169 /// be specified here to make memory allocation more efficient. This
3170 /// constructor can also autoinsert before another instruction.
3171 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3172 Instruction *InsertBefore)
3173 : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
3174 0, 0, InsertBefore) {
3175 init(Value, Default, 2+NumCases*2);
3178 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
3179 /// switch on and a default destination. The number of additional cases can
3180 /// be specified here to make memory allocation more efficient. This
3181 /// constructor also autoinserts at the end of the specified BasicBlock.
3182 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3183 BasicBlock *InsertAtEnd)
3184 : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
3185 0, 0, InsertAtEnd) {
3186 init(Value, Default, 2+NumCases*2);
3189 SwitchInst::SwitchInst(const SwitchInst &SI)
3190 : TerminatorInst(SI.getType(), Instruction::Switch, 0, 0) {
3191 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
3192 NumOperands = SI.getNumOperands();
3193 Use *OL = OperandList, *InOL = SI.OperandList;
3194 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
3196 OL[i+1] = InOL[i+1];
3198 TheSubsets = SI.TheSubsets;
3199 SubclassOptionalData = SI.SubclassOptionalData;
3202 SwitchInst::~SwitchInst() {
3207 /// addCase - Add an entry to the switch instruction...
3209 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
3210 IntegersSubsetToBB Mapping;
3212 // FIXME: Currently we work with ConstantInt based cases.
3213 // So inititalize IntItem container directly from ConstantInt.
3214 Mapping.add(IntItem::fromConstantInt(OnVal));
3215 IntegersSubset CaseRanges = Mapping.getCase();
3216 addCase(CaseRanges, Dest);
3219 void SwitchInst::addCase(IntegersSubset& OnVal, BasicBlock *Dest) {
3220 unsigned NewCaseIdx = getNumCases();
3221 unsigned OpNo = NumOperands;
3222 if (OpNo+2 > ReservedSpace)
3223 growOperands(); // Get more space!
3224 // Initialize some new operands.
3225 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
3226 NumOperands = OpNo+2;
3228 SubsetsIt TheSubsetsIt = TheSubsets.insert(TheSubsets.end(), OnVal);
3230 CaseIt Case(this, NewCaseIdx, TheSubsetsIt);
3231 Case.updateCaseValueOperand(OnVal);
3232 Case.setSuccessor(Dest);
3235 /// removeCase - This method removes the specified case and its successor
3236 /// from the switch instruction.
3237 void SwitchInst::removeCase(CaseIt& i) {
3238 unsigned idx = i.getCaseIndex();
3240 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
3242 unsigned NumOps = getNumOperands();
3243 Use *OL = OperandList;
3245 // Overwrite this case with the end of the list.
3246 if (2 + (idx + 1) * 2 != NumOps) {
3247 OL[2 + idx * 2] = OL[NumOps - 2];
3248 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
3251 // Nuke the last value.
3252 OL[NumOps-2].set(0);
3253 OL[NumOps-2+1].set(0);
3255 // Do the same with TheCases collection:
3256 if (i.SubsetIt != --TheSubsets.end()) {
3257 *i.SubsetIt = TheSubsets.back();
3258 TheSubsets.pop_back();
3260 TheSubsets.pop_back();
3261 i.SubsetIt = TheSubsets.end();
3264 NumOperands = NumOps-2;
3267 /// growOperands - grow operands - This grows the operand list in response
3268 /// to a push_back style of operation. This grows the number of ops by 3 times.
3270 void SwitchInst::growOperands() {
3271 unsigned e = getNumOperands();
3272 unsigned NumOps = e*3;
3274 ReservedSpace = NumOps;
3275 Use *NewOps = allocHungoffUses(NumOps);
3276 Use *OldOps = OperandList;
3277 for (unsigned i = 0; i != e; ++i) {
3278 NewOps[i] = OldOps[i];
3280 OperandList = NewOps;
3281 Use::zap(OldOps, OldOps + e, true);
3285 BasicBlock *SwitchInst::getSuccessorV(unsigned idx) const {
3286 return getSuccessor(idx);
3288 unsigned SwitchInst::getNumSuccessorsV() const {
3289 return getNumSuccessors();
3291 void SwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
3292 setSuccessor(idx, B);
3295 //===----------------------------------------------------------------------===//
3296 // IndirectBrInst Implementation
3297 //===----------------------------------------------------------------------===//
3299 void IndirectBrInst::init(Value *Address, unsigned NumDests) {
3300 assert(Address && Address->getType()->isPointerTy() &&
3301 "Address of indirectbr must be a pointer");
3302 ReservedSpace = 1+NumDests;
3304 OperandList = allocHungoffUses(ReservedSpace);
3306 OperandList[0] = Address;
3310 /// growOperands - grow operands - This grows the operand list in response
3311 /// to a push_back style of operation. This grows the number of ops by 2 times.
3313 void IndirectBrInst::growOperands() {
3314 unsigned e = getNumOperands();
3315 unsigned NumOps = e*2;
3317 ReservedSpace = NumOps;
3318 Use *NewOps = allocHungoffUses(NumOps);
3319 Use *OldOps = OperandList;
3320 for (unsigned i = 0; i != e; ++i)
3321 NewOps[i] = OldOps[i];
3322 OperandList = NewOps;
3323 Use::zap(OldOps, OldOps + e, true);
3326 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
3327 Instruction *InsertBefore)
3328 : TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
3329 0, 0, InsertBefore) {
3330 init(Address, NumCases);
3333 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
3334 BasicBlock *InsertAtEnd)
3335 : TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
3336 0, 0, InsertAtEnd) {
3337 init(Address, NumCases);
3340 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
3341 : TerminatorInst(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
3342 allocHungoffUses(IBI.getNumOperands()),
3343 IBI.getNumOperands()) {
3344 Use *OL = OperandList, *InOL = IBI.OperandList;
3345 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
3347 SubclassOptionalData = IBI.SubclassOptionalData;
3350 IndirectBrInst::~IndirectBrInst() {
3354 /// addDestination - Add a destination.
3356 void IndirectBrInst::addDestination(BasicBlock *DestBB) {
3357 unsigned OpNo = NumOperands;
3358 if (OpNo+1 > ReservedSpace)
3359 growOperands(); // Get more space!
3360 // Initialize some new operands.
3361 assert(OpNo < ReservedSpace && "Growing didn't work!");
3362 NumOperands = OpNo+1;
3363 OperandList[OpNo] = DestBB;
3366 /// removeDestination - This method removes the specified successor from the
3367 /// indirectbr instruction.
3368 void IndirectBrInst::removeDestination(unsigned idx) {
3369 assert(idx < getNumOperands()-1 && "Successor index out of range!");
3371 unsigned NumOps = getNumOperands();
3372 Use *OL = OperandList;
3374 // Replace this value with the last one.
3375 OL[idx+1] = OL[NumOps-1];
3377 // Nuke the last value.
3378 OL[NumOps-1].set(0);
3379 NumOperands = NumOps-1;
3382 BasicBlock *IndirectBrInst::getSuccessorV(unsigned idx) const {
3383 return getSuccessor(idx);
3385 unsigned IndirectBrInst::getNumSuccessorsV() const {
3386 return getNumSuccessors();
3388 void IndirectBrInst::setSuccessorV(unsigned idx, BasicBlock *B) {
3389 setSuccessor(idx, B);
3392 //===----------------------------------------------------------------------===//
3393 // clone_impl() implementations
3394 //===----------------------------------------------------------------------===//
3396 // Define these methods here so vtables don't get emitted into every translation
3397 // unit that uses these classes.
3399 GetElementPtrInst *GetElementPtrInst::clone_impl() const {
3400 return new (getNumOperands()) GetElementPtrInst(*this);
3403 BinaryOperator *BinaryOperator::clone_impl() const {
3404 return Create(getOpcode(), Op<0>(), Op<1>());
3407 FCmpInst* FCmpInst::clone_impl() const {
3408 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
3411 ICmpInst* ICmpInst::clone_impl() const {
3412 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
3415 ExtractValueInst *ExtractValueInst::clone_impl() const {
3416 return new ExtractValueInst(*this);
3419 InsertValueInst *InsertValueInst::clone_impl() const {
3420 return new InsertValueInst(*this);
3423 AllocaInst *AllocaInst::clone_impl() const {
3424 return new AllocaInst(getAllocatedType(),
3425 (Value*)getOperand(0),
3429 LoadInst *LoadInst::clone_impl() const {
3430 return new LoadInst(getOperand(0), Twine(), isVolatile(),
3431 getAlignment(), getOrdering(), getSynchScope());
3434 StoreInst *StoreInst::clone_impl() const {
3435 return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
3436 getAlignment(), getOrdering(), getSynchScope());
3440 AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
3441 AtomicCmpXchgInst *Result =
3442 new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
3443 getOrdering(), getSynchScope());
3444 Result->setVolatile(isVolatile());
3448 AtomicRMWInst *AtomicRMWInst::clone_impl() const {
3449 AtomicRMWInst *Result =
3450 new AtomicRMWInst(getOperation(),getOperand(0), getOperand(1),
3451 getOrdering(), getSynchScope());
3452 Result->setVolatile(isVolatile());
3456 FenceInst *FenceInst::clone_impl() const {
3457 return new FenceInst(getContext(), getOrdering(), getSynchScope());
3460 TruncInst *TruncInst::clone_impl() const {
3461 return new TruncInst(getOperand(0), getType());
3464 ZExtInst *ZExtInst::clone_impl() const {
3465 return new ZExtInst(getOperand(0), getType());
3468 SExtInst *SExtInst::clone_impl() const {
3469 return new SExtInst(getOperand(0), getType());
3472 FPTruncInst *FPTruncInst::clone_impl() const {
3473 return new FPTruncInst(getOperand(0), getType());
3476 FPExtInst *FPExtInst::clone_impl() const {
3477 return new FPExtInst(getOperand(0), getType());
3480 UIToFPInst *UIToFPInst::clone_impl() const {
3481 return new UIToFPInst(getOperand(0), getType());
3484 SIToFPInst *SIToFPInst::clone_impl() const {
3485 return new SIToFPInst(getOperand(0), getType());
3488 FPToUIInst *FPToUIInst::clone_impl() const {
3489 return new FPToUIInst(getOperand(0), getType());
3492 FPToSIInst *FPToSIInst::clone_impl() const {
3493 return new FPToSIInst(getOperand(0), getType());
3496 PtrToIntInst *PtrToIntInst::clone_impl() const {
3497 return new PtrToIntInst(getOperand(0), getType());
3500 IntToPtrInst *IntToPtrInst::clone_impl() const {
3501 return new IntToPtrInst(getOperand(0), getType());
3504 BitCastInst *BitCastInst::clone_impl() const {
3505 return new BitCastInst(getOperand(0), getType());
3508 CallInst *CallInst::clone_impl() const {
3509 return new(getNumOperands()) CallInst(*this);
3512 SelectInst *SelectInst::clone_impl() const {
3513 return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
3516 VAArgInst *VAArgInst::clone_impl() const {
3517 return new VAArgInst(getOperand(0), getType());
3520 ExtractElementInst *ExtractElementInst::clone_impl() const {
3521 return ExtractElementInst::Create(getOperand(0), getOperand(1));
3524 InsertElementInst *InsertElementInst::clone_impl() const {
3525 return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
3528 ShuffleVectorInst *ShuffleVectorInst::clone_impl() const {
3529 return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
3532 PHINode *PHINode::clone_impl() const {
3533 return new PHINode(*this);
3536 LandingPadInst *LandingPadInst::clone_impl() const {
3537 return new LandingPadInst(*this);
3540 ReturnInst *ReturnInst::clone_impl() const {
3541 return new(getNumOperands()) ReturnInst(*this);
3544 BranchInst *BranchInst::clone_impl() const {
3545 return new(getNumOperands()) BranchInst(*this);
3548 SwitchInst *SwitchInst::clone_impl() const {
3549 return new SwitchInst(*this);
3552 IndirectBrInst *IndirectBrInst::clone_impl() const {
3553 return new IndirectBrInst(*this);
3557 InvokeInst *InvokeInst::clone_impl() const {
3558 return new(getNumOperands()) InvokeInst(*this);
3561 ResumeInst *ResumeInst::clone_impl() const {
3562 return new(1) ResumeInst(*this);
3565 UnreachableInst *UnreachableInst::clone_impl() const {
3566 LLVMContext &Context = getContext();
3567 return new UnreachableInst(Context);