1 //===-- Execution.cpp - Implement code to simulate the program ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the actual instruction interpreter.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "interpreter"
15 #include "Interpreter.h"
16 #include "llvm/Constants.h"
17 #include "llvm/DerivedTypes.h"
18 #include "llvm/Instructions.h"
19 #include "llvm/CodeGen/IntrinsicLowering.h"
20 #include "llvm/Support/GetElementPtrTypeIterator.h"
21 #include "llvm/ADT/APInt.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/MathExtras.h"
// Counter surfaced via -stats: how many instructions the interpreter ran.
28 STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
// File-local handle to the active interpreter; used by the free-function
// helpers below that need member access (e.g. visitGetElementPtrInst calls
// TheEE->executeGEPOperation).
29 static Interpreter *TheEE = 0;
31 //===----------------------------------------------------------------------===//
32 //                     Various Helper Functions
33 //===----------------------------------------------------------------------===//
// initializeAPInt - If Ty is an integer type, attach a frame-allocated APInt
// of the matching bit width to GV so wide results have backing storage.
// Non-integer types are left untouched.  (Presumably only the >64-bit paths
// actually read APIntVal — the narrow cases use the inline IntNVal fields;
// confirm against the full source.)
35 inline void initializeAPInt(GenericValue &GV, const Type* Ty,
36 ExecutionContext &SF) {
37 if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty))
38 GV.APIntVal = SF.getAPInt(ITy->getBitWidth());
41 static inline uint64_t doSignExtension(uint64_t Val, const IntegerType* ITy) {
42 // Determine if the value is signed or not
43 bool isSigned = (Val & (1 << (ITy->getBitWidth()-1))) != 0;
44 // If its signed, extend the sign bits
46 Val |= ~ITy->getBitMask();
50 static inline void maskToBitWidth(GenericValue& GV, unsigned BitWidth) {
51 uint64_t BitMask = ~(uint64_t)(0ull) >> (64-BitWidth);
53 GV.Int8Val &= BitMask;
54 else if (BitWidth <= 16)
55 GV.Int16Val &= BitMask;
56 else if (BitWidth <= 32)
57 GV.Int32Val &= BitMask;
58 else if (BitWidth <= 64)
59 GV.Int64Val &= BitMask;
61 assert(GV.APIntVal && "Unallocated GV.APIntVal");
62 *(GV.APIntVal) &= APInt::getAllOnesValue(BitWidth);
// SetValue - Record Val as the current value of V in stack frame SF.
// (Body not visible here; presumably stores into SF's value map — TODO
// confirm against the full source.)
66 static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
// One-time interpreter setup.  (Body not visible here; presumably at least
// publishes `this` through the file-static TheEE used below — confirm.)
70 void Interpreter::initializeExecutionEngine() {
74 //===----------------------------------------------------------------------===//
75 //                    Binary Instruction Implementations
76 //===----------------------------------------------------------------------===//
// The helpers below are stamped out with macros so one body serves every
// operator.  IMPLEMENT_BINARY_OPERATOR emits a single float/double switch
// case.  IMPLEMENT_INTEGER_BINOP emits the IntegerTyID case, dispatching on
// the operand bit width: widths <= 64 use the inline IntNVal fields and are
// re-masked to the LLVM width via maskToBitWidth; wider values go through
// the APIntVal path.  The SIGNED/UNSIGNED variants additionally cast the
// operands to the matching signed/unsigned host type before applying OP,
// and name the APInt member function (APOP) used for the wide case.
78 #define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
79 case Type::TY##TyID: Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; break
81 #define IMPLEMENT_INTEGER_BINOP(OP, TY) \
82 case Type::IntegerTyID: { \
83 unsigned BitWidth = cast<IntegerType>(TY)->getBitWidth(); \
85 Dest.Int1Val = Src1.Int1Val OP Src2.Int1Val; \
86 maskToBitWidth(Dest, BitWidth); \
87 } else if (BitWidth <= 8) {\
88 Dest.Int8Val = Src1.Int8Val OP Src2.Int8Val; \
89 maskToBitWidth(Dest, BitWidth); \
90 } else if (BitWidth <= 16) {\
91 Dest.Int16Val = Src1.Int16Val OP Src2.Int16Val; \
92 maskToBitWidth(Dest, BitWidth); \
93 } else if (BitWidth <= 32) {\
94 Dest.Int32Val = Src1.Int32Val OP Src2.Int32Val; \
95 maskToBitWidth(Dest, BitWidth); \
96 } else if (BitWidth <= 64) {\
97 Dest.Int64Val = Src1.Int64Val OP Src2.Int64Val; \
98 maskToBitWidth(Dest, BitWidth); \
100 *(Dest.APIntVal) = *(Src1.APIntVal) OP *(Src2.APIntVal); \
104 #define IMPLEMENT_SIGNED_BINOP(OP, TY, APOP) \
105 if (const IntegerType *ITy = dyn_cast<IntegerType>(TY)) { \
106 unsigned BitWidth = ITy->getBitWidth(); \
107 if (BitWidth <= 8) { \
108 Dest.Int8Val = ((int8_t)Src1.Int8Val) OP ((int8_t)Src2.Int8Val); \
109 maskToBitWidth(Dest, BitWidth); \
110 } else if (BitWidth <= 16) { \
111 Dest.Int16Val = ((int16_t)Src1.Int16Val) OP ((int16_t)Src2.Int16Val); \
112 maskToBitWidth(Dest, BitWidth); \
113 } else if (BitWidth <= 32) { \
114 Dest.Int32Val = ((int32_t)Src1.Int32Val) OP ((int32_t)Src2.Int32Val); \
115 maskToBitWidth(Dest, BitWidth); \
116 } else if (BitWidth <= 64) { \
117 Dest.Int64Val = ((int64_t)Src1.Int64Val) OP ((int64_t)Src2.Int64Val); \
118 maskToBitWidth(Dest, BitWidth); \
120 *(Dest.APIntVal) = Src1.APIntVal->APOP(*(Src2.APIntVal)); \
122 cerr << "Unhandled type for " #OP " operator: " << *Ty << "\n"; \
126 #define IMPLEMENT_UNSIGNED_BINOP(OP, TY, APOP) \
127 if (const IntegerType *ITy = dyn_cast<IntegerType>(TY)) { \
128 unsigned BitWidth = ITy->getBitWidth(); \
129 if (BitWidth <= 8) {\
130 Dest.Int8Val = ((uint8_t)Src1.Int8Val) OP ((uint8_t)Src2.Int8Val); \
131 maskToBitWidth(Dest, BitWidth); \
132 } else if (BitWidth <= 16) {\
133 Dest.Int16Val = ((uint16_t)Src1.Int16Val) OP ((uint16_t)Src2.Int16Val); \
134 maskToBitWidth(Dest, BitWidth); \
135 } else if (BitWidth <= 32) {\
136 Dest.Int32Val = ((uint32_t)Src1.Int32Val) OP ((uint32_t)Src2.Int32Val); \
137 maskToBitWidth(Dest, BitWidth); \
138 } else if (BitWidth <= 64) {\
139 Dest.Int64Val = ((uint64_t)Src1.Int64Val) OP ((uint64_t)Src2.Int64Val); \
140 maskToBitWidth(Dest, BitWidth); \
142 *(Dest.APIntVal) = Src1.APIntVal->APOP(*(Src2.APIntVal)); \
144 cerr << "Unhandled type for " #OP " operator: " << *Ty << "\n"; \
// executeXXXInst - One helper per LLVM binary opcode.  Each dispatches on
// the operand type Ty (both operands share it) and writes the result into
// Dest, which the caller has already prepared via initializeAPInt.
148 static void executeAddInst(GenericValue &Dest, GenericValue Src1,
149 GenericValue Src2, const Type *Ty) {
150 switch (Ty->getTypeID()) {
151 IMPLEMENT_INTEGER_BINOP(+, Ty);
152 IMPLEMENT_BINARY_OPERATOR(+, Float);
153 IMPLEMENT_BINARY_OPERATOR(+, Double);
155 cerr << "Unhandled type for Add instruction: " << *Ty << "\n";
160 static void executeSubInst(GenericValue &Dest, GenericValue Src1,
161 GenericValue Src2, const Type *Ty) {
162 switch (Ty->getTypeID()) {
163 IMPLEMENT_INTEGER_BINOP(-, Ty);
164 IMPLEMENT_BINARY_OPERATOR(-, Float);
165 IMPLEMENT_BINARY_OPERATOR(-, Double);
167 cerr << "Unhandled type for Sub instruction: " << *Ty << "\n";
172 static void executeMulInst(GenericValue &Dest, GenericValue Src1,
173 GenericValue Src2, const Type *Ty) {
174 switch (Ty->getTypeID()) {
175 IMPLEMENT_INTEGER_BINOP(*, Ty);
176 IMPLEMENT_BINARY_OPERATOR(*, Float);
177 IMPLEMENT_BINARY_OPERATOR(*, Double);
179 cerr << "Unhandled type for Mul instruction: " << *Ty << "\n";
// The div/rem pairs pick the signed or unsigned macro; the third macro
// argument names the APInt method used for widths over 64 bits.
184 static void executeUDivInst(GenericValue &Dest, GenericValue Src1,
185 GenericValue Src2, const Type *Ty) {
186 IMPLEMENT_UNSIGNED_BINOP(/,Ty,udiv)
189 static void executeSDivInst(GenericValue &Dest, GenericValue Src1,
190 GenericValue Src2, const Type *Ty) {
191 IMPLEMENT_SIGNED_BINOP(/,Ty,sdiv)
194 static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
195 GenericValue Src2, const Type *Ty) {
196 switch (Ty->getTypeID()) {
197 IMPLEMENT_BINARY_OPERATOR(/, Float);
198 IMPLEMENT_BINARY_OPERATOR(/, Double);
200 cerr << "Unhandled type for FDiv instruction: " << *Ty << "\n";
205 static void executeURemInst(GenericValue &Dest, GenericValue Src1,
206 GenericValue Src2, const Type *Ty) {
207 IMPLEMENT_UNSIGNED_BINOP(%,Ty,urem)
210 static void executeSRemInst(GenericValue &Dest, GenericValue Src1,
211 GenericValue Src2, const Type *Ty) {
212 IMPLEMENT_SIGNED_BINOP(%,Ty,srem)
// Floating-point remainder has no C operator; use fmod for both widths.
215 static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
216 GenericValue Src2, const Type *Ty) {
217 switch (Ty->getTypeID()) {
218 case Type::FloatTyID:
219 Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
221 case Type::DoubleTyID:
222 Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
225 cerr << "Unhandled type for Rem instruction: " << *Ty << "\n";
// Bitwise ops are sign-agnostic; the unsigned macro is used for all three.
230 static void executeAndInst(GenericValue &Dest, GenericValue Src1,
231 GenericValue Src2, const Type *Ty) {
232 IMPLEMENT_UNSIGNED_BINOP(&,Ty,And)
235 static void executeOrInst(GenericValue &Dest, GenericValue Src1,
236 GenericValue Src2, const Type *Ty) {
237 IMPLEMENT_UNSIGNED_BINOP(|,Ty,Or)
240 static void executeXorInst(GenericValue &Dest, GenericValue Src1,
241 GenericValue Src2, const Type *Ty) {
242 IMPLEMENT_UNSIGNED_BINOP(^,Ty,Xor)
// Integer comparison macros.  The SIGNED variant sign-extends both operands
// to int64_t via doSignExtension before comparing; the UNSIGNED variant
// compares in the matching unsigned host type.  Widths over 64 bits call the
// APInt predicate named by APOP.  The i1 result lands in Dest.Int1Val.
// NOTE(review): IMPLEMENT_UNSIGNED_ICMP masks Dest with the *operand* bit
// width after writing the one-bit result, which looks unnecessary (and wrong
// if it touches a union member other than Int1Val) — confirm.
245 #define IMPLEMENT_SIGNED_ICMP(OP, TY, APOP) \
246 case Type::IntegerTyID: { \
247 const IntegerType* ITy = cast<IntegerType>(TY); \
248 unsigned BitWidth = ITy->getBitWidth(); \
249 int64_t LHS = 0, RHS = 0; \
250 if (BitWidth <= 8) { \
251 LHS = int64_t(doSignExtension(uint64_t(Src1.Int8Val), ITy)); \
252 RHS = int64_t(doSignExtension(uint64_t(Src2.Int8Val), ITy)); \
253 Dest.Int1Val = LHS OP RHS; \
254 } else if (BitWidth <= 16) { \
255 LHS = int64_t(doSignExtension(uint64_t(Src1.Int16Val), ITy)); \
256 RHS = int64_t(doSignExtension(uint64_t(Src2.Int16Val), ITy)); \
257 Dest.Int1Val = LHS OP RHS; \
258 } else if (BitWidth <= 32) { \
259 LHS = int64_t(doSignExtension(uint64_t(Src1.Int32Val), ITy)); \
260 RHS = int64_t(doSignExtension(uint64_t(Src2.Int32Val), ITy)); \
261 Dest.Int1Val = LHS OP RHS; \
262 } else if (BitWidth <= 64) { \
263 LHS = int64_t(doSignExtension(uint64_t(Src1.Int64Val), ITy)); \
264 RHS = int64_t(doSignExtension(uint64_t(Src2.Int64Val), ITy)); \
265 Dest.Int1Val = LHS OP RHS; \
267 Dest.Int1Val = Src1.APIntVal->APOP(*(Src2.APIntVal)); \
272 #define IMPLEMENT_UNSIGNED_ICMP(OP, TY, APOP) \
273 case Type::IntegerTyID: { \
274 unsigned BitWidth = cast<IntegerType>(TY)->getBitWidth(); \
275 if (BitWidth == 1) { \
276 Dest.Int1Val = ((uint8_t)Src1.Int1Val) OP ((uint8_t)Src2.Int1Val); \
277 maskToBitWidth(Dest, BitWidth); \
278 } else if (BitWidth <= 8) { \
279 Dest.Int1Val = ((uint8_t)Src1.Int8Val) OP ((uint8_t)Src2.Int8Val); \
280 maskToBitWidth(Dest, BitWidth); \
281 } else if (BitWidth <= 16) { \
282 Dest.Int1Val = ((uint16_t)Src1.Int16Val) OP ((uint16_t)Src2.Int16Val); \
283 maskToBitWidth(Dest, BitWidth); \
284 } else if (BitWidth <= 32) { \
285 Dest.Int1Val = ((uint32_t)Src1.Int32Val) OP ((uint32_t)Src2.Int32Val); \
286 maskToBitWidth(Dest, BitWidth); \
287 } else if (BitWidth <= 64) { \
288 Dest.Int1Val = ((uint64_t)Src1.Int64Val) OP ((uint64_t)Src2.Int64Val); \
289 maskToBitWidth(Dest, BitWidth); \
291 Dest.Int1Val = Src1.APIntVal->APOP(*(Src2.APIntVal)); \
296 // Handle pointers specially because they must be compared with only as much
297 // width as the host has. We _do not_ want to be comparing 64 bit values when
298 // running on a 32-bit target, otherwise the upper 32 bits might mess up
299 // comparisons if they contain garbage.
300 #define IMPLEMENT_POINTER_ICMP(OP) \
301 case Type::PointerTyID: \
302 Dest.Int1Val = (void*)(intptr_t)Src1.PointerVal OP \
303 (void*)(intptr_t)Src2.PointerVal; break
// executeICMP_XX - One helper per icmp predicate; each switches on the
// operand type and returns the i1 result in a GenericValue.
305 static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
308 switch (Ty->getTypeID()) {
309 IMPLEMENT_UNSIGNED_ICMP(==, Ty, eq);
310 IMPLEMENT_POINTER_ICMP(==);
312 cerr << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
318 static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
321 switch (Ty->getTypeID()) {
322 IMPLEMENT_UNSIGNED_ICMP(!=, Ty, ne);
323 IMPLEMENT_POINTER_ICMP(!=);
325 cerr << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
331 static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
334 switch (Ty->getTypeID()) {
335 IMPLEMENT_UNSIGNED_ICMP(<, Ty, ult);
336 IMPLEMENT_POINTER_ICMP(<);
338 cerr << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
344 static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
347 switch (Ty->getTypeID()) {
348 IMPLEMENT_SIGNED_ICMP(<, Ty, slt);
349 IMPLEMENT_POINTER_ICMP(<);
351 cerr << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
357 static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
360 switch (Ty->getTypeID()) {
361 IMPLEMENT_UNSIGNED_ICMP(>, Ty, ugt);
362 IMPLEMENT_POINTER_ICMP(>);
364 cerr << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
370 static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
373 switch (Ty->getTypeID()) {
374 IMPLEMENT_SIGNED_ICMP(>, Ty, sgt);
375 IMPLEMENT_POINTER_ICMP(>);
377 cerr << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
383 static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
386 switch (Ty->getTypeID()) {
387 IMPLEMENT_UNSIGNED_ICMP(<=, Ty, ule);
388 IMPLEMENT_POINTER_ICMP(<=);
390 cerr << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
396 static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
399 switch (Ty->getTypeID()) {
400 IMPLEMENT_SIGNED_ICMP(<=, Ty, sle);
401 IMPLEMENT_POINTER_ICMP(<=);
403 cerr << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
409 static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
412 switch (Ty->getTypeID()) {
413 IMPLEMENT_UNSIGNED_ICMP(>=, Ty, uge);
414 IMPLEMENT_POINTER_ICMP(>=);
416 cerr << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
422 static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
425 switch (Ty->getTypeID()) {
426 IMPLEMENT_SIGNED_ICMP(>=, Ty, sge);
427 IMPLEMENT_POINTER_ICMP(>=);
429 cerr << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
// visitICmpInst - Evaluate both operands in the current stack frame and
// dispatch on the instruction's predicate to the executeICMP_* helper;
// the result R presumably gets stored back via SetValue (tail not visible
// here — confirm against the full source).
435 void Interpreter::visitICmpInst(ICmpInst &I) {
436 ExecutionContext &SF = ECStack.back();
437 const Type *Ty = I.getOperand(0)->getType();
438 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
439 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
440 GenericValue R; // Result
442 switch (I.getPredicate()) {
443 case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
444 case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
445 case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
446 case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
447 case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
448 case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
449 case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
450 case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
451 case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
452 case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
454 cerr << "Don't know how to handle this ICmp predicate!\n-->" << I;
// Floating-point comparison helpers.  The O* (ordered) predicates map
// directly to the C comparison operator via IMPLEMENT_FCMP; the U*
// (unordered) predicates first return true if either operand is NaN
// (IMPLEMENT_UNORDERED, using the x != x NaN test) and otherwise defer to
// the ordered form.  ORD/UNO test solely for NaN-ness of the operands.
// NOTE(review): executeFCMP_ONE uses the C `!=` operator, which yields true
// when an operand is NaN — ordered-NE should be false on NaN.  Confirm
// against the full source / later fixes.
461 #define IMPLEMENT_FCMP(OP, TY) \
462 case Type::TY##TyID: Dest.Int1Val = Src1.TY##Val OP Src2.TY##Val; break
464 static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
467 switch (Ty->getTypeID()) {
468 IMPLEMENT_FCMP(==, Float);
469 IMPLEMENT_FCMP(==, Double);
471 cerr << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
477 static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
480 switch (Ty->getTypeID()) {
481 IMPLEMENT_FCMP(!=, Float);
482 IMPLEMENT_FCMP(!=, Double);
485 cerr << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
491 static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
494 switch (Ty->getTypeID()) {
495 IMPLEMENT_FCMP(<=, Float);
496 IMPLEMENT_FCMP(<=, Double);
498 cerr << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
504 static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
507 switch (Ty->getTypeID()) {
508 IMPLEMENT_FCMP(>=, Float);
509 IMPLEMENT_FCMP(>=, Double);
511 cerr << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
517 static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
520 switch (Ty->getTypeID()) {
521 IMPLEMENT_FCMP(<, Float);
522 IMPLEMENT_FCMP(<, Double);
524 cerr << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
530 static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
533 switch (Ty->getTypeID()) {
534 IMPLEMENT_FCMP(>, Float);
535 IMPLEMENT_FCMP(>, Double);
537 cerr << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
543 #define IMPLEMENT_UNORDERED(TY, X,Y) \
544 if (TY == Type::FloatTy) \
545 if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
546 Dest.Int1Val = true; \
549 else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
550 Dest.Int1Val = true; \
555 static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
558 IMPLEMENT_UNORDERED(Ty, Src1, Src2)
559 return executeFCMP_OEQ(Src1, Src2, Ty);
562 static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
565 IMPLEMENT_UNORDERED(Ty, Src1, Src2)
566 return executeFCMP_ONE(Src1, Src2, Ty);
569 static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
572 IMPLEMENT_UNORDERED(Ty, Src1, Src2)
573 return executeFCMP_OLE(Src1, Src2, Ty);
576 static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
579 IMPLEMENT_UNORDERED(Ty, Src1, Src2)
580 return executeFCMP_OGE(Src1, Src2, Ty);
583 static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
586 IMPLEMENT_UNORDERED(Ty, Src1, Src2)
587 return executeFCMP_OLT(Src1, Src2, Ty);
590 static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
593 IMPLEMENT_UNORDERED(Ty, Src1, Src2)
594 return executeFCMP_OGT(Src1, Src2, Ty);
// ORD: true iff neither operand is NaN (x == x fails only for NaN).
597 static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
600 if (Ty == Type::FloatTy)
601 Dest.Int1Val = (Src1.FloatVal == Src1.FloatVal &&
602 Src2.FloatVal == Src2.FloatVal);
604 Dest.Int1Val = (Src1.DoubleVal == Src1.DoubleVal &&
605 Src2.DoubleVal == Src2.DoubleVal);
// UNO: true iff at least one operand is NaN.
609 static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
612 if (Ty == Type::FloatTy)
613 Dest.Int1Val = (Src1.FloatVal != Src1.FloatVal ||
614 Src2.FloatVal != Src2.FloatVal);
616 Dest.Int1Val = (Src1.DoubleVal != Src1.DoubleVal ||
617 Src2.DoubleVal != Src2.DoubleVal);
// visitFCmpInst - Evaluate both operands and dispatch on the fcmp predicate.
// FALSE/TRUE are constant-folded inline; everything else routes through the
// executeFCMP_* helpers above.
621 void Interpreter::visitFCmpInst(FCmpInst &I) {
622 ExecutionContext &SF = ECStack.back();
623 const Type *Ty = I.getOperand(0)->getType();
624 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
625 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
626 GenericValue R; // Result
628 switch (I.getPredicate()) {
629 case FCmpInst::FCMP_FALSE: R.Int1Val = false; break;
630 case FCmpInst::FCMP_TRUE: R.Int1Val = true; break;
631 case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
632 case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
633 case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break;
634 case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break;
635 case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break;
636 case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break;
637 case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break;
638 case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break;
639 case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break;
640 case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break;
641 case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break;
642 case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break;
643 case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
644 case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
646 cerr << "Don't know how to handle this FCmp predicate!\n-->" << I;
// executeCmpInst - Shared dispatcher for callers that hold a raw predicate
// number (covering both icmp and fcmp predicate ranges); routes to the
// matching executeICMP_*/executeFCMP_* helper above.
653 static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
654 GenericValue Src2, const Type *Ty) {
657 case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
658 case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
659 case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
660 case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
661 case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
662 case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
663 case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
664 case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
665 case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
666 case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
667 case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
668 case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
669 case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
670 case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
671 case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
672 case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
673 case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
674 case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
675 case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
676 case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
677 case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
678 case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
679 case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
680 case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
681 case FCmpInst::FCMP_FALSE: {
683 Result.Int1Val = false;
686 case FCmpInst::FCMP_TRUE: {
688 Result.Int1Val = true;
692 cerr << "Unhandled Cmp predicate\n";
// visitBinaryOperator - Evaluate both operands, pre-size the result's APInt
// storage for wide integer types (initializeAPInt), then dispatch on the
// opcode to the executeXXXInst helpers above.
697 void Interpreter::visitBinaryOperator(BinaryOperator &I) {
698 ExecutionContext &SF = ECStack.back();
699 const Type *Ty = I.getOperand(0)->getType();
700 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
701 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
702 GenericValue R; // Result
703 initializeAPInt(R, Ty, SF);
705 switch (I.getOpcode()) {
706 case Instruction::Add: executeAddInst (R, Src1, Src2, Ty); break;
707 case Instruction::Sub: executeSubInst (R, Src1, Src2, Ty); break;
708 case Instruction::Mul: executeMulInst (R, Src1, Src2, Ty); break;
709 case Instruction::UDiv: executeUDivInst (R, Src1, Src2, Ty); break;
710 case Instruction::SDiv: executeSDivInst (R, Src1, Src2, Ty); break;
711 case Instruction::FDiv: executeFDivInst (R, Src1, Src2, Ty); break;
712 case Instruction::URem: executeURemInst (R, Src1, Src2, Ty); break;
713 case Instruction::SRem: executeSRemInst (R, Src1, Src2, Ty); break;
714 case Instruction::FRem: executeFRemInst (R, Src1, Src2, Ty); break;
715 case Instruction::And: executeAndInst (R, Src1, Src2, Ty); break;
716 case Instruction::Or: executeOrInst (R, Src1, Src2, Ty); break;
717 case Instruction::Xor: executeXorInst (R, Src1, Src2, Ty); break;
719 cerr << "Don't know how to handle this binary operator!\n-->" << I;
// executeSelectInst - LLVM select: Src1 is the i1 condition; returns Src2
// when true, Src3 when false.
726 static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
728 return Src1.Int1Val ? Src2 : Src3;
// visitSelectInst - Evaluate condition and both arms, pre-size R's APInt
// storage from the true-arm's type, and select.
731 void Interpreter::visitSelectInst(SelectInst &I) {
732 ExecutionContext &SF = ECStack.back();
733 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
734 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
735 GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
737 initializeAPInt(R, I.getOperand(1)->getType(), SF);
738 R = executeSelectInst(Src1, Src2, Src3);
743 //===----------------------------------------------------------------------===//
744 //                     Terminator Instruction Implementations
745 //===----------------------------------------------------------------------===//
// exitCalled - Handle a call to exit(): discard the live stack frames, run
// the registered atexit handlers, then terminate with GV as the exit value.
747 void Interpreter::exitCalled(GenericValue GV) {
748 // runAtExitHandlers() assumes there are no stack frames, but
749 // if exit() was called, then it had a stack frame. Blow away
750 // the stack before interpreting atexit handlers.
752 runAtExitHandlers ();
756 /// Pop the last stack frame off of ECStack and then copy the result
757 /// back into the result variable if we are not returning void. The
758 /// result variable may be the ExitValue, or the Value of the calling
759 /// CallInst if there was a previous stack frame. This method may
760 /// invalidate any ECStack iterators you have. This method also takes
761 /// care of switching to the normal destination BB, if we are returning
764 void Interpreter::popStackAndReturnValueToCaller (const Type *RetTy,
765 GenericValue Result) {
766 // Pop the current stack frame.
769 if (ECStack.empty()) { // Finished main. Put result into exit code...
770 if (RetTy && RetTy->isInteger()) { // Nonvoid return type?
771 ExitValue = Result; // Capture the exit value of the program
773 memset(&ExitValue, 0, sizeof(ExitValue));
776 // If we have a previous stack frame, and we have a previous call,
777 // fill in the return value...
778 ExecutionContext &CallingSF = ECStack.back();
779 if (Instruction *I = CallingSF.Caller.getInstruction()) {
780 if (CallingSF.Caller.getType() != Type::VoidTy) // Save result...
781 SetValue(I, Result, CallingSF);
782 if (InvokeInst *II = dyn_cast<InvokeInst> (I))
783 SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
784 CallingSF.Caller = CallSite(); // We returned from the call...
// visitReturnInst - Capture the return value (if any) and pop the frame,
// handing the value to the caller via popStackAndReturnValueToCaller.
789 void Interpreter::visitReturnInst(ReturnInst &I) {
790 ExecutionContext &SF = ECStack.back();
791 const Type *RetTy = Type::VoidTy;
794 // Save away the return value... (if we are not 'ret void')
795 if (I.getNumOperands()) {
796 RetTy = I.getReturnValue()->getType();
797 Result = getOperandValue(I.getReturnValue(), SF);
800 popStackAndReturnValueToCaller(RetTy, Result);
// visitUnwindInst - Pop frames until one whose pending call is an invoke is
// found, then transfer control to that invoke's unwind (exception) block.
803 void Interpreter::visitUnwindInst(UnwindInst &I) {
808 if (ECStack.empty ())
810 Inst = ECStack.back ().Caller.getInstruction ();
811 } while (!(Inst && isa<InvokeInst> (Inst)));
813 // Return from invoke
814 ExecutionContext &InvokingSF = ECStack.back ();
815 InvokingSF.Caller = CallSite ();
817 // Go to exceptional destination BB of invoke instruction
818 SwitchToNewBasicBlock(cast<InvokeInst>(Inst)->getUnwindDest(), InvokingSF);
// visitUnreachableInst - Executing 'unreachable' is a hard program error.
821 void Interpreter::visitUnreachableInst(UnreachableInst &I) {
822 cerr << "ERROR: Program executed an 'unreachable' instruction!\n";
// visitBranchInst - Pick successor 0 unconditionally, or successor 1 when
// the condition evaluates false, then jump via SwitchToNewBasicBlock.
826 void Interpreter::visitBranchInst(BranchInst &I) {
827 ExecutionContext &SF = ECStack.back();
830 Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
831 if (!I.isUnconditional()) {
832 Value *Cond = I.getCondition();
833 if (getOperandValue(Cond, SF).Int1Val == 0) // If false cond...
834 Dest = I.getSuccessor(1);
836 SwitchToNewBasicBlock(Dest, SF);
// visitSwitchInst - Compare the condition against each case value (operands
// come in value/block pairs starting at index 2) using icmp-eq; fall back to
// the default destination when nothing matches.
839 void Interpreter::visitSwitchInst(SwitchInst &I) {
840 ExecutionContext &SF = ECStack.back();
841 GenericValue CondVal = getOperandValue(I.getOperand(0), SF);
842 const Type *ElTy = I.getOperand(0)->getType();
844 // Check to see if any of the cases match...
845 BasicBlock *Dest = 0;
846 for (unsigned i = 2, e = I.getNumOperands(); i != e; i += 2)
847 if (executeICMP_EQ(CondVal,
848 getOperandValue(I.getOperand(i), SF), ElTy).Int1Val) {
849 Dest = cast<BasicBlock>(I.getOperand(i+1));
853 if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
854 SwitchToNewBasicBlock(Dest, SF);
857 // SwitchToNewBasicBlock - This method is used to jump to a new basic block.
858 // This function handles the actual updating of block and instruction iterators
859 // as well as execution of all of the PHI nodes in the destination block.
861 // This method does this because all of the PHI nodes must be executed
862 // atomically, reading their inputs before any of the results are updated. Not
863 // doing this can cause problems if the PHI nodes depend on other PHI nodes for
864 // their inputs. If the input PHI node is updated before it is read, incorrect
865 // results can happen. Thus we use a two phase approach.
867 void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
868 BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
869 SF.CurBB = Dest; // Update CurBB to branch destination
870 SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...
872 if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do
874 // Loop over all of the PHI nodes in the current block, reading their inputs.
875 std::vector<GenericValue> ResultValues;
// Phase 1: snapshot every PHI's incoming value for the edge from PrevBB.
877 for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
878 // Search for the value corresponding to this previous bb...
879 int i = PN->getBasicBlockIndex(PrevBB);
880 assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
881 Value *IncomingValue = PN->getIncomingValue(i);
883 // Save the incoming value for this PHI node...
884 ResultValues.push_back(getOperandValue(IncomingValue, SF));
887 // Now loop over all of the PHI nodes setting their values...
888 SF.CurInst = SF.CurBB->begin();
// Phase 2: commit the snapshotted values, in the same PHI order.
889 for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
890 PHINode *PN = cast<PHINode>(SF.CurInst);
891 SetValue(PN, ResultValues[i], SF);
895 //===----------------------------------------------------------------------===//
896 //                     Memory Instruction Implementations
897 //===----------------------------------------------------------------------===//
// visitAllocationInst - Implements both alloca and malloc: heap-allocate
// NumElements * sizeof(element) bytes; alloca allocations are additionally
// registered with the frame so they are freed when the frame pops.
// NOTE(review): malloc failure is only caught by an assert, and the size
// multiplication can overflow — consider checked allocation.
899 void Interpreter::visitAllocationInst(AllocationInst &I) {
900 ExecutionContext &SF = ECStack.back();
902 const Type *Ty = I.getType()->getElementType(); // Type to be allocated
904 // Get the number of elements being allocated by the array...
905 unsigned NumElements = getOperandValue(I.getOperand(0), SF).Int32Val;
907 // Allocate enough memory to hold the type...
908 void *Memory = malloc(NumElements * (size_t)TD.getTypeSize(Ty));
910 GenericValue Result = PTOGV(Memory);
911 assert(Result.PointerVal != 0 && "Null pointer returned by malloc!");
912 SetValue(&I, Result, SF);
914 if (I.getOpcode() == Instruction::Alloca)
915 ECStack.back().Allocas.add(Memory);
// visitFreeInst - Implements the free instruction by releasing the pointer.
918 void Interpreter::visitFreeInst(FreeInst &I) {
919 ExecutionContext &SF = ECStack.back();
920 assert(isa<PointerType>(I.getOperand(0)->getType()) && "Freeing nonptr?");
921 GenericValue Value = getOperandValue(I.getOperand(0), SF);
922 // TODO: Check to make sure memory is allocated
923 free(GVTOP(Value)); // Free memory
926 // getElementOffset - The workhorse for getelementptr.
// Walks the index list [I, E): struct indices add the field offset from the
// target's StructLayout; sequential (array/pointer) indices add
// index * element-size.  The accumulated byte total is added to the base
// pointer's value to form the result.
928 GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
930 ExecutionContext &SF) {
931 assert(isa<PointerType>(Ptr->getType()) &&
932 "Cannot getElementOffset of a nonpointer type!");
936 for (; I != E; ++I) {
937 if (const StructType *STy = dyn_cast<StructType>(*I)) {
938 const StructLayout *SLO = TD.getStructLayout(STy);
940 const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
941 unsigned Index = unsigned(CPU->getZExtValue());
943 Total += (PointerTy)SLO->getElementOffset(Index);
945 const SequentialType *ST = cast<SequentialType>(*I);
946 // Get the index number for the array... which must be long type...
947 GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
// Only 32- and 64-bit indices are accepted; anything else asserts below.
951 cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
953 Idx = (int64_t)(int32_t)IdxGV.Int32Val;
954 else if (BitWidth == 64)
955 Idx = (int64_t)IdxGV.Int64Val;
957 assert(0 && "Invalid index type for getelementptr");
958 Total += PointerTy(TD.getTypeSize(ST->getElementType())*Idx);
963 Result.PointerVal = getOperandValue(Ptr, SF).PointerVal + Total;
// visitGetElementPtrInst - Thin wrapper: routes through the file-static
// TheEE so the free helper can reach interpreter members.
967 void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
968 ExecutionContext &SF = ECStack.back();
969 SetValue(&I, TheEE->executeGEPOperation(I.getPointerOperand(),
970 gep_type_begin(I), gep_type_end(I), SF), SF);
// visitLoadInst - Evaluate the pointer operand, pre-size the result's APInt
// storage for wide integer loads, and read the value from memory.
973 void Interpreter::visitLoadInst(LoadInst &I) {
974 ExecutionContext &SF = ECStack.back();
975 GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
976 GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
978 initializeAPInt(Result, I.getType(), SF);
979 LoadValueFromMemory(Result, Ptr, I.getType());
980 SetValue(&I, Result, SF);
// visitStoreInst - Evaluate the value and pointer operands and write the
// value into memory, sized by the value operand's type.
983 void Interpreter::visitStoreInst(StoreInst &I) {
984 ExecutionContext &SF = ECStack.back();
985 GenericValue Val = getOperandValue(I.getOperand(0), SF);
986 GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
987 StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
988 I.getOperand(0)->getType());
991 //===----------------------------------------------------------------------===//
992 //                 Miscellaneous Instruction Implementations
993 //===----------------------------------------------------------------------===//
// visitCallSite - Common handler for call and invoke.  Intrinsics that the
// interpreter understands (va_start/va_end/va_copy) are executed directly;
// unknown intrinsics are lowered to plain LLVM via IntrinsicLowering and
// re-executed.  Ordinary calls evaluate the arguments and transfer control
// with callFunction.
995 void Interpreter::visitCallSite(CallSite CS) {
996 ExecutionContext &SF = ECStack.back();
998 // Check to see if this is an intrinsic function call...
999 if (Function *F = CS.getCalledFunction())
1000 if (F->isDeclaration ())
1001 switch (F->getIntrinsicID()) {
1002 case Intrinsic::not_intrinsic:
// va_start: a va_list here is a (frame index, vararg index) pair pointing
// at the first variadic argument of the current frame.
1004 case Intrinsic::vastart: { // va_start
1005 GenericValue ArgIndex;
1006 ArgIndex.UIntPairVal.first = ECStack.size() - 1;
1007 ArgIndex.UIntPairVal.second = 0;
1008 SetValue(CS.getInstruction(), ArgIndex, SF);
1011 case Intrinsic::vaend: // va_end is a noop for the interpreter
1013 case Intrinsic::vacopy: // va_copy: dest = src
1014 SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
1017 // If it is an unknown intrinsic function, use the intrinsic lowering
1018 // class to transform it into hopefully tasty LLVM code.
// Prev/Parent bracket the call position so CurInst can be re-aimed at the
// code the lowering just inserted (restore logic not visible here).
1020 Instruction *Prev = CS.getInstruction()->getPrev();
1021 BasicBlock *Parent = CS.getInstruction()->getParent();
1022 IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));
1024 // Restore the CurInst pointer to the first instruction newly inserted, if
1027 SF.CurInst = Parent->begin();
1036 std::vector<GenericValue> ArgVals;
1037 const unsigned NumArgs = SF.Caller.arg_size();
1038 ArgVals.reserve(NumArgs);
1039 for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
1040 e = SF.Caller.arg_end(); i != e; ++i) {
1042 ArgVals.push_back(getOperandValue(V, SF));
1043 // Promote all integral types whose size is < sizeof(int) into ints. We do
1044 // this by zero or sign extending the value as appropriate according to the
1046 const Type *Ty = V->getType();
1047 if (Ty->isInteger()) {
1048 if (Ty->getPrimitiveSizeInBits() == 1)
1049 ArgVals.back().Int32Val = ArgVals.back().Int1Val;
1050 else if (Ty->getPrimitiveSizeInBits() <= 8)
1051 ArgVals.back().Int32Val = ArgVals.back().Int8Val;
1052 else if (Ty->getPrimitiveSizeInBits() <= 16)
1053 ArgVals.back().Int32Val = ArgVals.back().Int16Val;
1057 // To handle indirect calls, we must get the pointer value from the argument
1058 // and treat it as a function pointer.
1059 GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
1060 callFunction((Function*)GVTOP(SRC), ArgVals);
// executeShlInst - Dest = Src1 << Src2 for integer types. Widths <= 64 bits
// use the native union fields of GenericValue and are re-masked to the type's
// exact bit width afterwards; wider integers go through APInt::shl with the
// shift amount taken as the zero-extended value of Src2.
1063 static void executeShlInst(GenericValue &Dest, GenericValue Src1,
1064 GenericValue Src2, const Type *Ty) {
1065 if (const IntegerType *ITy = cast<IntegerType>(Ty)) {
1066 unsigned BitWidth = ITy->getBitWidth();
1067 if (BitWidth <= 8) {
1068 Dest.Int8Val = ((uint8_t)Src1.Int8Val) << ((uint32_t)Src2.Int8Val);
1069 maskToBitWidth(Dest, BitWidth);
1070 } else if (BitWidth <= 16) {
1071 Dest.Int16Val = ((uint16_t)Src1.Int16Val) << ((uint32_t)Src2.Int16Val);
1072 maskToBitWidth(Dest, BitWidth);
1073 } else if (BitWidth <= 32) {
1074 Dest.Int32Val = ((uint32_t)Src1.Int32Val) << ((uint32_t)Src2.Int32Val);
1075 maskToBitWidth(Dest, BitWidth);
1076 } else if (BitWidth <= 64) {
1077 Dest.Int64Val = ((uint64_t)Src1.Int64Val) << ((uint32_t)Src2.Int64Val);
1078 maskToBitWidth(Dest, BitWidth);
1080 *(Dest.APIntVal) = Src1.APIntVal->shl(Src2.APIntVal->getZExtValue());
1083 cerr << "Unhandled type for Shl instruction: " << *Ty << "\n";
// executeLShrInst - Logical (zero-filling) right shift: Dest = Src1 >> Src2.
// The unsigned casts make the built-in '>>' behave as a logical shift;
// results are re-masked to the type's bit width. Widths > 64 bits use
// APInt::lshr.
1088 static void executeLShrInst(GenericValue &Dest, GenericValue Src1,
1089 GenericValue Src2, const Type *Ty) {
1090 if (const IntegerType *ITy = cast<IntegerType>(Ty)) {
1091 unsigned BitWidth = ITy->getBitWidth();
1092 if (BitWidth <= 8) {
1093 Dest.Int8Val = ((uint8_t)Src1.Int8Val) >> ((uint32_t)Src2.Int8Val);
1094 maskToBitWidth(Dest, BitWidth);
1095 } else if (BitWidth <= 16) {
1096 Dest.Int16Val = ((uint16_t)Src1.Int16Val) >> ((uint32_t)Src2.Int16Val);
1097 maskToBitWidth(Dest, BitWidth);
1098 } else if (BitWidth <= 32) {
1099 Dest.Int32Val = ((uint32_t)Src1.Int32Val) >> ((uint32_t)Src2.Int32Val);
1100 maskToBitWidth(Dest, BitWidth);
1101 } else if (BitWidth <= 64) {
1102 Dest.Int64Val = ((uint64_t)Src1.Int64Val) >> ((uint32_t)Src2.Int64Val);
1103 maskToBitWidth(Dest, BitWidth);
1105 *(Dest.APIntVal) = Src1.APIntVal->lshr(Src2.APIntVal->getZExtValue());
1108 cerr << "Unhandled type for LShr instruction: " << *Ty << "\n";
1113 static void executeAShrInst(GenericValue &Dest, GenericValue Src1,
1114 GenericValue Src2, const Type *Ty) {
1115 if (const IntegerType *ITy = cast<IntegerType>(Ty)) {
1116 unsigned BitWidth = ITy->getBitWidth();
1117 if (BitWidth <= 8) {
1118 Dest.Int8Val = ((int8_t)Src1.Int8Val) >> ((int32_t)Src2.Int8Val);
1119 maskToBitWidth(Dest, BitWidth);
1120 } else if (BitWidth <= 16) {
1121 Dest.Int16Val = ((int16_t)Src1.Int16Val) >> ((int32_t)Src2.Int8Val);
1122 maskToBitWidth(Dest, BitWidth);
1123 } else if (BitWidth <= 32) {
1124 Dest.Int32Val = ((int32_t)Src1.Int32Val) >> ((int32_t)Src2.Int8Val);
1125 maskToBitWidth(Dest, BitWidth);
1126 } else if (BitWidth <= 64) {
1127 Dest.Int64Val = ((int64_t)Src1.Int64Val) >> ((int32_t)Src2.Int8Val);
1128 maskToBitWidth(Dest, BitWidth);
1130 *(Dest.APIntVal) = Src1.APIntVal->ashr(Src2.APIntVal->getZExtValue());
1133 cerr << "Unhandled type for AShr instruction: " << *Ty << "\n";
// visitShl - Evaluate both operands, perform the shift-left via
// executeShlInst, and bind the (APInt-initialized) result.
1138 void Interpreter::visitShl(BinaryOperator &I) {
1139 ExecutionContext &SF = ECStack.back();
1140 const Type *Ty = I.getOperand(0)->getType();
1141 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1142 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1144 initializeAPInt(Dest, Ty, SF);
1145 executeShlInst (Dest, Src1, Src2, Ty);
1146 SetValue(&I, Dest, SF);
// visitLShr - Evaluate both operands, perform the logical right shift via
// executeLShrInst, and bind the (APInt-initialized) result.
1149 void Interpreter::visitLShr(BinaryOperator &I) {
1150 ExecutionContext &SF = ECStack.back();
1151 const Type *Ty = I.getOperand(0)->getType();
1152 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1153 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1155 initializeAPInt(Dest, Ty, SF);
1156 executeLShrInst (Dest, Src1, Src2, Ty);
1157 SetValue(&I, Dest, SF);
// visitAShr - Evaluate both operands, perform the arithmetic right shift via
// executeAShrInst, and bind the (APInt-initialized) result.
1160 void Interpreter::visitAShr(BinaryOperator &I) {
1161 ExecutionContext &SF = ECStack.back();
1162 const Type *Ty = I.getOperand(0)->getType();
1163 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1164 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1166 initializeAPInt(Dest, Ty, SF);
1167 executeAShrInst (Dest, Src1, Src2, Ty);
1168 SetValue(&I, Dest, SF);
// INTEGER_ASSIGN - Store VAL into the field of the GenericValue DEST that
// corresponds to an integer type of BITWIDTH bits (BITWIDTH <= 64), first
// masking VAL down to BITWIDTH bits so no stale high-order bits survive.
//
// Fix: the body previously referenced a variable named `Dest` directly
// instead of the DEST macro parameter; it only worked because every call
// site in this file happens to pass a variable literally named Dest. The
// do/while(0) wrapper makes the expansion a single statement that is safe
// in unbraced if/else contexts.
#define INTEGER_ASSIGN(DEST, BITWIDTH, VAL)                         \
  do {                                                              \
    uint64_t Mask = ~(uint64_t)(0ull) >> (64-BITWIDTH);             \
    if (BITWIDTH == 1) {                                            \
      (DEST).Int1Val  = (bool)     (VAL & Mask);                    \
    } else if (BITWIDTH <= 8) {                                     \
      (DEST).Int8Val  = (uint8_t)  (VAL & Mask);                    \
    } else if (BITWIDTH <= 16) {                                    \
      (DEST).Int16Val = (uint16_t) (VAL & Mask);                    \
    } else if (BITWIDTH <= 32) {                                    \
      (DEST).Int32Val = (uint32_t) (VAL & Mask);                    \
    } else {                                                        \
      (DEST).Int64Val = (uint64_t) (VAL & Mask);                    \
    }                                                               \
  } while (0)
// executeTruncInst - Integer truncate: narrow SrcVal to DstTy's bit width.
// Destinations wider than 64 bits use APInt::trunc; otherwise the source is
// masked to the destination width and stored in the matching native field
// via INTEGER_ASSIGN.
1186 GenericValue Interpreter::executeTruncInst(Value *SrcVal, const Type *DstTy,
1187 ExecutionContext &SF) {
1188 const Type *SrcTy = SrcVal->getType();
1189 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1190 const IntegerType *DITy = cast<IntegerType>(DstTy);
1191 const IntegerType *SITy = cast<IntegerType>(SrcTy);
1192 unsigned DBitWidth = DITy->getBitWidth();
1193 unsigned SBitWidth = SITy->getBitWidth();
1194 assert(SBitWidth > DBitWidth && "Invalid truncate");
1196 if (DBitWidth > 64) {
1197 // Both values are APInt, just use the APInt trunc
1198 initializeAPInt(Dest, DstTy, SF);
1199 *(Dest.APIntVal) = Src.APIntVal->trunc(DBitWidth);
// Destination fits in 64 bits: select low DBitWidth bits of the source.
1203 uint64_t MaskedVal = 0;
1204 uint64_t Mask = (1ULL << DBitWidth) - 1;
1206 // Mask the source value to its actual bit width. This ensures that any
1207 // high order bits are cleared.
1209 MaskedVal = Src.Int8Val & Mask;
1210 else if (SBitWidth <= 16)
1211 MaskedVal = Src.Int16Val & Mask;
1212 else if (SBitWidth <= 32)
1213 MaskedVal = Src.Int32Val & Mask;
1214 else if (SBitWidth <= 64)
1215 MaskedVal = Src.Int64Val & Mask;
1217 MaskedVal = Src.APIntVal->trunc(DBitWidth).getZExtValue();
1219 INTEGER_ASSIGN(Dest, DBitWidth, MaskedVal);
// executeSExtInst - Integer sign extend: widen SrcVal to DstTy's bit width,
// replicating the source's sign bit. Three regimes: source > 64 bits (pure
// APInt sext), destination > 64 bits (build an APInt from the normalized
// 64-bit value and sext it), and both <= 64 bits (doSignExtension on a
// 64-bit normalization, then INTEGER_ASSIGN).
1223 GenericValue Interpreter::executeSExtInst(Value *SrcVal, const Type *DstTy,
1224 ExecutionContext &SF) {
1225 const Type *SrcTy = SrcVal->getType();
1226 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1227 const IntegerType *DITy = cast<IntegerType>(DstTy);
1228 const IntegerType *SITy = cast<IntegerType>(SrcTy);
1229 unsigned DBitWidth = DITy->getBitWidth();
1230 unsigned SBitWidth = SITy->getBitWidth();
1231 assert(SBitWidth < DBitWidth && "Invalid sign extend");
1233 if (SBitWidth > 64) {
1234 // Both values are APInt, just use the APInt::sext method;
1235 initializeAPInt(Dest, DstTy, SF);
1236 *(Dest.APIntVal) = Src.APIntVal->sext(DBitWidth);
1240 // Normalize to a 64-bit value.
1241 uint64_t Normalized = 0;
1243 Normalized = Src.Int8Val;
1244 else if (SBitWidth <= 16)
1245 Normalized = Src.Int16Val;
1246 else if (SBitWidth <= 32)
1247 Normalized = Src.Int32Val;
1249 Normalized = Src.Int64Val;
1251 if (DBitWidth > 64) {
1252 // Destination is an APint, construct it and return
1253 initializeAPInt(Dest, DstTy, SF);
1254 *(Dest.APIntVal) = APInt(SBitWidth, Normalized).sext(DBitWidth);
// Both fit in 64 bits: smear the source's sign bit across the high bits.
1258 Normalized = doSignExtension(Normalized, SITy);
1260 // Now that we have a sign extended value, assign it to the destination
1261 INTEGER_ASSIGN(Dest, DBitWidth, Normalized);
// executeZExtInst - Integer zero extend: widen SrcVal to DstTy's bit width,
// filling the new high bits with zero. Mirrors executeSExtInst's three
// regimes but uses APInt::zext and plain unsigned widening.
// NOTE(review): the assert message below says "Invalid sign extend" — it
// looks copy-pasted from executeSExtInst; confirm and fix the string.
1265 GenericValue Interpreter::executeZExtInst(Value *SrcVal, const Type *DstTy,
1266 ExecutionContext &SF) {
1267 const Type *SrcTy = SrcVal->getType();
1268 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1269 const IntegerType *DITy = cast<IntegerType>(DstTy);
1270 const IntegerType *SITy = cast<IntegerType>(SrcTy);
1271 unsigned DBitWidth = DITy->getBitWidth();
1272 unsigned SBitWidth = SITy->getBitWidth();
1273 assert(SBitWidth < DBitWidth && "Invalid sign extend");
1275 if (SBitWidth > 64) {
1276 // Both values are APInt, just use the APInt::zext method.
1277 initializeAPInt(Dest, DstTy, SF);
1278 *(Dest.APIntVal) = Src.APIntVal->zext(DBitWidth);
1282 uint64_t Extended = 0;
1284 // For zero extension from bool, keep only the low bit of the source.
1285 Extended = (uint64_t) (Src.Int1Val & 1);
1286 else if (SBitWidth <= 8)
1287 Extended = (uint64_t) (uint8_t)Src.Int8Val;
1288 else if (SBitWidth <= 16)
1289 Extended = (uint64_t) (uint16_t)Src.Int16Val;
1290 else if (SBitWidth <= 32)
1291 Extended = (uint64_t) (uint32_t)Src.Int32Val;
1293 Extended = (uint64_t) Src.Int64Val;
1295 if (DBitWidth > 64) {
1296 // Destination is an APint, construct it and return
1297 initializeAPInt(Dest, DstTy, SF);
1298 *(Dest.APIntVal) = APInt(SBitWidth, Extended).zext(DBitWidth);
1302 // Now that we have a zero extended value, assign it to the destination
1303 INTEGER_ASSIGN(Dest, DBitWidth, Extended);
// executeFPTruncInst - Floating-point truncate: only double -> float is
// supported, implemented as a plain C cast.
1307 GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, const Type *DstTy,
1308 ExecutionContext &SF) {
1309 const Type *SrcTy = SrcVal->getType();
1310 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1311 assert(SrcTy == Type::DoubleTy && DstTy == Type::FloatTy &&
1312 "Invalid FPTrunc instruction");
1313 Dest.FloatVal = (float) Src.DoubleVal;
// executeFPExtInst - Floating-point extend: only float -> double is
// supported, implemented as a plain C cast.
// NOTE(review): the assert message says "Invalid FPTrunc instruction" —
// looks copy-pasted from executeFPTruncInst; should read FPExt.
1317 GenericValue Interpreter::executeFPExtInst(Value *SrcVal, const Type *DstTy,
1318 ExecutionContext &SF) {
1319 const Type *SrcTy = SrcVal->getType();
1320 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1321 assert(SrcTy == Type::FloatTy && DstTy == Type::DoubleTy &&
1322 "Invalid FPTrunc instruction");
1323 Dest.DoubleVal = (double) Src.FloatVal;
// executeFPToUIInst - Convert a float/double to an unsigned integer of
// DstTy's width. Destinations wider than 64 bits round through
// APIntOps::RoundFloatToAPInt/RoundDoubleToAPInt; otherwise a C cast to
// uint64_t is masked down by INTEGER_ASSIGN.
1327 GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, const Type *DstTy,
1328 ExecutionContext &SF) {
1329 const Type *SrcTy = SrcVal->getType();
1330 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1331 const IntegerType *DITy = cast<IntegerType>(DstTy);
1332 unsigned DBitWidth = DITy->getBitWidth();
1333 assert(SrcTy->isFloatingPoint() && "Invalid FPToUI instruction");
1335 if (DBitWidth > 64) {
1336 initializeAPInt(Dest, DITy, SF);
1337 if (SrcTy->getTypeID() == Type::FloatTyID)
1338 *(Dest.APIntVal) = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
1340 *(Dest.APIntVal) = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
1344 uint64_t Converted = 0;
1345 if (SrcTy->getTypeID() == Type::FloatTyID)
1346 Converted = (uint64_t) Src.FloatVal;
1348 Converted = (uint64_t) Src.DoubleVal;
1350 INTEGER_ASSIGN(Dest, DBitWidth, Converted);
// executeFPToSIInst - Convert a float/double to a signed integer of DstTy's
// width. Same structure as executeFPToUIInst but converts through int64_t
// so negative values keep their two's-complement bit pattern.
1354 GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, const Type *DstTy,
1355 ExecutionContext &SF) {
1356 const Type *SrcTy = SrcVal->getType();
1357 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1358 const IntegerType *DITy = cast<IntegerType>(DstTy);
1359 unsigned DBitWidth = DITy->getBitWidth();
1360 assert(SrcTy->isFloatingPoint() && "Invalid FPToSI instruction");
1362 if (DBitWidth > 64) {
1363 initializeAPInt(Dest, DITy, SF);
1364 if (SrcTy->getTypeID() == Type::FloatTyID)
1365 *(Dest.APIntVal) = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
1367 *(Dest.APIntVal) = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
1371 int64_t Converted = 0;
1372 if (SrcTy->getTypeID() == Type::FloatTyID)
1373 Converted = (int64_t) Src.FloatVal;
1375 Converted = (int64_t) Src.DoubleVal;
1377 INTEGER_ASSIGN(Dest, DBitWidth, Converted);
// executeUIToFPInst - Convert an unsigned integer to float/double. Sources
// wider than 64 bits round through APIntOps::RoundAPIntToFloat/Double;
// otherwise the value is widened to uint64_t and C-cast to the FP type.
1381 GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, const Type *DstTy,
1382 ExecutionContext &SF) {
1383 const Type *SrcTy = SrcVal->getType();
1384 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1385 const IntegerType *SITy = cast<IntegerType>(SrcTy);
1386 unsigned SBitWidth = SITy->getBitWidth();
1387 assert(DstTy->isFloatingPoint() && "Invalid UIToFP instruction");
1389 if (SBitWidth > 64) {
1390 if (DstTy->getTypeID() == Type::FloatTyID)
1391 Dest.FloatVal = APIntOps::RoundAPIntToFloat(*(Src.APIntVal));
1393 Dest.DoubleVal = APIntOps::RoundAPIntToDouble(*(Src.APIntVal));
1397 uint64_t Converted = 0;
1399 Converted = (uint64_t) Src.Int1Val;
1400 else if (SBitWidth <= 8)
1401 Converted = (uint64_t) Src.Int8Val;
1402 else if (SBitWidth <= 16)
1403 Converted = (uint64_t) Src.Int16Val;
1404 else if (SBitWidth <= 32)
1405 Converted = (uint64_t) Src.Int32Val;
1407 Converted = (uint64_t) Src.Int64Val;
1409 if (DstTy->getTypeID() == Type::FloatTyID)
1410 Dest.FloatVal = (float) Converted;
1412 Dest.DoubleVal = (double) Converted;
// executeSIToFPInst - Convert a signed integer to float/double. Sources
// wider than 64 bits round through APIntOps::RoundSignedAPIntToFloat/Double;
// otherwise the value is sign-extended into an int64_t (note the i1 case:
// 0LL - Int1Val maps true to -1) and C-cast to the FP type.
1416 GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, const Type *DstTy,
1417 ExecutionContext &SF) {
1418 const Type *SrcTy = SrcVal->getType();
1419 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1420 const IntegerType *SITy = cast<IntegerType>(SrcTy);
1421 unsigned SBitWidth = SITy->getBitWidth();
1422 assert(DstTy->isFloatingPoint() && "Invalid SIToFP instruction");
1424 if (SBitWidth > 64) {
1425 if (DstTy->getTypeID() == Type::FloatTyID)
1426 Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(*(Src.APIntVal));
1428 Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(*(Src.APIntVal));
1432 int64_t Converted = 0;
1434 Converted = 0LL - Src.Int1Val;
1435 else if (SBitWidth <= 8)
1436 Converted = (int64_t) (int8_t)Src.Int8Val;
1437 else if (SBitWidth <= 16)
1438 Converted = (int64_t) (int16_t)Src.Int16Val;
1439 else if (SBitWidth <= 32)
1440 Converted = (int64_t) (int32_t)Src.Int32Val;
1442 Converted = (int64_t) Src.Int64Val;
1444 if (DstTy->getTypeID() == Type::FloatTyID)
1445 Dest.FloatVal = (float) Converted;
1447 Dest.DoubleVal = (double) Converted;
// executePtrToIntInst - Reinterpret a pointer as an integer of DstTy's
// width via intptr_t; wide destinations store it into an APInt, narrow ones
// are masked by INTEGER_ASSIGN.
1451 GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, const Type *DstTy,
1452 ExecutionContext &SF) {
1453 const Type *SrcTy = SrcVal->getType();
1454 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1455 const IntegerType *DITy = cast<IntegerType>(DstTy);
1456 unsigned DBitWidth = DITy->getBitWidth();
1457 assert(isa<PointerType>(SrcTy) && "Invalid PtrToInt instruction");
1459 if (DBitWidth > 64) {
1460 initializeAPInt(Dest, DstTy, SF);
1461 *(Dest.APIntVal) = (intptr_t) Src.PointerVal;
1464 INTEGER_ASSIGN(Dest, DBitWidth, (intptr_t) Src.PointerVal);
// executeIntToPtrInst - Reinterpret an integer as a pointer. The source is
// widened (or, for > 64-bit integers, truncated via APInt) to a uint64_t
// and cast to PointerTy.
// NOTE(review): the assert message says "Invalid PtrToInt instruction" —
// looks copy-pasted from executePtrToIntInst; should read IntToPtr.
1468 GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
1469 ExecutionContext &SF) {
1470 const Type *SrcTy = SrcVal->getType();
1471 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1472 const IntegerType *SITy = cast<IntegerType>(SrcTy);
1473 unsigned SBitWidth = SITy->getBitWidth();
1474 assert(isa<PointerType>(DstTy) && "Invalid PtrToInt instruction");
1476 uint64_t Converted = 0;
1478 Converted = (uint64_t) Src.Int1Val;
1479 else if (SBitWidth <= 8)
1480 Converted = (uint64_t) Src.Int8Val;
1481 else if (SBitWidth <= 16)
1482 Converted = (uint64_t) Src.Int16Val;
1483 else if (SBitWidth <= 32)
1484 Converted = (uint64_t) Src.Int32Val;
1485 else if (SBitWidth <= 64)
1486 Converted = (uint64_t) Src.Int64Val;
1488 Converted = (uint64_t) Src.APIntVal->trunc(64).getZExtValue();
1490 Dest.PointerVal = (PointerTy) Converted;
// executeBitCastInst - Reinterpret a value as DstTy without changing its
// bits: pointer<->pointer copies the pointer; int<->float/double go through
// FloatToBits/BitsToFloat (and the double equivalents); int->int of equal
// width copies the matching union field (or the APInt for > 64 bits).
1494 GenericValue Interpreter::executeBitCastInst(Value *SrcVal, const Type *DstTy,
1495 ExecutionContext &SF) {
1497 const Type *SrcTy = SrcVal->getType();
1498 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1499 if (isa<PointerType>(DstTy)) {
1500 assert(isa<PointerType>(SrcTy) && "Invalid BitCast");
1501 Dest.PointerVal = Src.PointerVal;
1502 } else if (DstTy->isInteger()) {
1503 const IntegerType *DITy = cast<IntegerType>(DstTy);
1504 unsigned DBitWidth = DITy->getBitWidth();
1505 if (SrcTy == Type::FloatTy) {
1506 Dest.Int32Val = FloatToBits(Src.FloatVal);
1507 } else if (SrcTy == Type::DoubleTy) {
1508 Dest.Int64Val = DoubleToBits(Src.DoubleVal);
1509 } else if (SrcTy->isInteger()) {
// Same-width integer bitcast: copy the field that matches the width.
1510 const IntegerType *SITy = cast<IntegerType>(SrcTy);
1511 unsigned SBitWidth = SITy->getBitWidth();
1512 assert(SBitWidth == DBitWidth && "Invalid BitCast");
1513 if (SBitWidth == 1) {
1514 Dest.Int1Val = Src.Int1Val;
1515 maskToBitWidth(Dest, DBitWidth);
1516 } else if (SBitWidth <= 8) {
1517 Dest.Int8Val = Src.Int8Val;
1518 maskToBitWidth(Dest, DBitWidth);
1519 } else if (SBitWidth <= 16) {
1520 Dest.Int16Val = Src.Int16Val;
1521 maskToBitWidth(Dest, DBitWidth);
1522 } else if (SBitWidth <= 32) {
1523 Dest.Int32Val = Src.Int32Val;
1524 maskToBitWidth(Dest, DBitWidth);
1525 } else if (SBitWidth <= 64) {
1526 Dest.Int64Val = Src.Int64Val;
1527 maskToBitWidth(Dest, DBitWidth);
1529 *(Dest.APIntVal) = *(Src.APIntVal);
1532 assert(0 && "Invalid BitCast");
1533 } else if (DstTy == Type::FloatTy) {
1534 if (SrcTy->isInteger())
1535 Dest.FloatVal = BitsToFloat(Src.Int32Val);
1537 Dest.FloatVal = Src.FloatVal;
1538 } else if (DstTy == Type::DoubleTy) {
1539 if (SrcTy->isInteger())
1540 Dest.DoubleVal = BitsToDouble(Src.Int64Val);
1542 Dest.DoubleVal = Src.DoubleVal;
1544 assert(0 && "Invalid Bitcast");
// visitTruncInst - Dispatch the trunc instruction to executeTruncInst.
1549 void Interpreter::visitTruncInst(TruncInst &I) {
1550 ExecutionContext &SF = ECStack.back();
1551 SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
// visitSExtInst - Dispatch the sext instruction to executeSExtInst.
1554 void Interpreter::visitSExtInst(SExtInst &I) {
1555 ExecutionContext &SF = ECStack.back();
1556 SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
// visitZExtInst - Dispatch the zext instruction to executeZExtInst.
1559 void Interpreter::visitZExtInst(ZExtInst &I) {
1560 ExecutionContext &SF = ECStack.back();
1561 SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
// visitFPTruncInst - Dispatch the fptrunc instruction to executeFPTruncInst.
1564 void Interpreter::visitFPTruncInst(FPTruncInst &I) {
1565 ExecutionContext &SF = ECStack.back();
1566 SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
// visitFPExtInst - Dispatch the fpext instruction to executeFPExtInst.
1569 void Interpreter::visitFPExtInst(FPExtInst &I) {
1570 ExecutionContext &SF = ECStack.back();
1571 SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
// visitUIToFPInst - Dispatch the uitofp instruction to executeUIToFPInst.
1574 void Interpreter::visitUIToFPInst(UIToFPInst &I) {
1575 ExecutionContext &SF = ECStack.back();
1576 SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
// visitSIToFPInst - Dispatch the sitofp instruction to executeSIToFPInst.
1579 void Interpreter::visitSIToFPInst(SIToFPInst &I) {
1580 ExecutionContext &SF = ECStack.back();
1581 SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
// visitFPToUIInst - Dispatch the fptoui instruction to executeFPToUIInst.
1584 void Interpreter::visitFPToUIInst(FPToUIInst &I) {
1585 ExecutionContext &SF = ECStack.back();
1586 SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
// visitFPToSIInst - Dispatch the fptosi instruction to executeFPToSIInst.
1589 void Interpreter::visitFPToSIInst(FPToSIInst &I) {
1590 ExecutionContext &SF = ECStack.back();
1591 SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
// visitPtrToIntInst - Dispatch the ptrtoint instruction to executePtrToIntInst.
1594 void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
1595 ExecutionContext &SF = ECStack.back();
1596 SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
// visitIntToPtrInst - Dispatch the inttoptr instruction to executeIntToPtrInst.
1599 void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
1600 ExecutionContext &SF = ECStack.back();
1601 SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
// visitBitCastInst - Dispatch the bitcast instruction to executeBitCastInst.
1604 void Interpreter::visitBitCastInst(BitCastInst &I) {
1605 ExecutionContext &SF = ECStack.back();
1606 SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
// IMPLEMENT_VAARG - Expand to a switch case that copies the matching
// GenericValue field for the given first-class type (Pointer/Float/Double).
// (Comments are kept outside the macro: a '//' before the continuation
// backslash would swallow the next line.)
1609 #define IMPLEMENT_VAARG(TY) \
1610 case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
// visitVAArgInst - Fetch the next variadic argument. The valist value is a
// (stack-frame-index, vararg-index) pair created by va_start; the argument
// is copied out of that frame's VarArgs vector into Dest, width-masked for
// integer types, and the local copy of the index is advanced.
// NOTE(review): only the local VAList copy is incremented here — confirm a
// following (elided) line writes it back so successive va_arg calls advance.
1612 void Interpreter::visitVAArgInst(VAArgInst &I) {
1613 ExecutionContext &SF = ECStack.back();
1615 // Get the incoming valist parameter. LLI treats the valist as a
1616 // (ec-stack-depth var-arg-index) pair.
1617 GenericValue VAList = getOperandValue(I.getOperand(0), SF);
1619 GenericValue Src = ECStack[VAList.UIntPairVal.first]
1620 .VarArgs[VAList.UIntPairVal.second];
1621 const Type *Ty = I.getType();
1622 switch (Ty->getTypeID()) {
1623 case Type::IntegerTyID: {
1624 unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
1625 if (BitWidth == 1) {
1626 Dest.Int1Val = Src.Int1Val;
1627 maskToBitWidth(Dest, BitWidth);
1628 } else if (BitWidth <= 8) {
1629 Dest.Int8Val = Src.Int8Val;
1630 maskToBitWidth(Dest, BitWidth);
1631 } else if (BitWidth <= 16) {
1632 Dest.Int16Val = Src.Int16Val;
1633 maskToBitWidth(Dest, BitWidth);
1634 } else if (BitWidth <= 32) {
1635 Dest.Int32Val = Src.Int32Val;
1636 maskToBitWidth(Dest, BitWidth);
1637 } else if (BitWidth <= 64) {
1638 Dest.Int64Val = Src.Int64Val;
1639 maskToBitWidth(Dest, BitWidth);
1641 *(Dest.APIntVal) = *(Src.APIntVal);
1644 IMPLEMENT_VAARG(Pointer);
1645 IMPLEMENT_VAARG(Float);
1646 IMPLEMENT_VAARG(Double);
1648 cerr << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
1652 // Set the Value of this Instruction.
1653 SetValue(&I, Dest, SF);
1655 // Move the pointer to the next vararg.
1656 ++VAList.UIntPairVal.second;
1659 GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
1660 ExecutionContext &SF) {
1661 switch (CE->getOpcode()) {
1662 case Instruction::Trunc:
1663 return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
1664 case Instruction::ZExt:
1665 return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
1666 case Instruction::SExt:
1667 return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
1668 case Instruction::FPTrunc:
1669 return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
1670 case Instruction::FPExt:
1671 return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
1672 case Instruction::UIToFP:
1673 return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
1674 case Instruction::SIToFP:
1675 return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
1676 case Instruction::FPToUI:
1677 return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
1678 case Instruction::FPToSI:
1679 return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
1680 case Instruction::PtrToInt:
1681 return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
1682 case Instruction::IntToPtr:
1683 return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
1684 case Instruction::BitCast:
1685 return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
1686 case Instruction::GetElementPtr:
1687 return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
1688 gep_type_end(CE), SF);
1689 case Instruction::FCmp:
1690 case Instruction::ICmp:
1691 return executeCmpInst(CE->getPredicate(),
1692 getOperandValue(CE->getOperand(0), SF),
1693 getOperandValue(CE->getOperand(1), SF),
1694 CE->getOperand(0)->getType());
1695 case Instruction::Select:
1696 return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
1697 getOperandValue(CE->getOperand(1), SF),
1698 getOperandValue(CE->getOperand(2), SF));
1703 // The cases below here require a GenericValue parameter for the result
1704 // so we initialize one, compute it and then return it.
1706 initializeAPInt(Dest, CE->getType(), SF);
1707 switch (CE->getOpcode()) {
1708 case Instruction::Add:
1709 executeAddInst(Dest, getOperandValue(CE->getOperand(0), SF),
1710 getOperandValue(CE->getOperand(1), SF),
1711 CE->getOperand(0)->getType());
1712 case Instruction::Sub:
1713 executeSubInst(Dest, getOperandValue(CE->getOperand(0), SF),
1714 getOperandValue(CE->getOperand(1), SF),
1715 CE->getOperand(0)->getType());
1716 case Instruction::Mul:
1717 executeMulInst(Dest, getOperandValue(CE->getOperand(0), SF),
1718 getOperandValue(CE->getOperand(1), SF),
1719 CE->getOperand(0)->getType());
1720 case Instruction::SDiv:
1721 executeSDivInst(Dest, getOperandValue(CE->getOperand(0), SF),
1722 getOperandValue(CE->getOperand(1), SF),
1723 CE->getOperand(0)->getType());
1724 case Instruction::UDiv:
1725 executeUDivInst(Dest, getOperandValue(CE->getOperand(0), SF),
1726 getOperandValue(CE->getOperand(1), SF),
1727 CE->getOperand(0)->getType());
1728 case Instruction::FDiv:
1729 executeFDivInst(Dest, getOperandValue(CE->getOperand(0), SF),
1730 getOperandValue(CE->getOperand(1), SF),
1731 CE->getOperand(0)->getType());
1732 case Instruction::URem:
1733 executeURemInst(Dest, getOperandValue(CE->getOperand(0), SF),
1734 getOperandValue(CE->getOperand(1), SF),
1735 CE->getOperand(0)->getType());
1736 case Instruction::SRem:
1737 executeSRemInst(Dest, getOperandValue(CE->getOperand(0), SF),
1738 getOperandValue(CE->getOperand(1), SF),
1739 CE->getOperand(0)->getType());
1740 case Instruction::FRem:
1741 executeFRemInst(Dest, getOperandValue(CE->getOperand(0), SF),
1742 getOperandValue(CE->getOperand(1), SF),
1743 CE->getOperand(0)->getType());
1744 case Instruction::And:
1745 executeAndInst(Dest, getOperandValue(CE->getOperand(0), SF),
1746 getOperandValue(CE->getOperand(1), SF),
1747 CE->getOperand(0)->getType());
1748 case Instruction::Or:
1749 executeOrInst(Dest, getOperandValue(CE->getOperand(0), SF),
1750 getOperandValue(CE->getOperand(1), SF),
1751 CE->getOperand(0)->getType());
1752 case Instruction::Xor:
1753 executeXorInst(Dest, getOperandValue(CE->getOperand(0), SF),
1754 getOperandValue(CE->getOperand(1), SF),
1755 CE->getOperand(0)->getType());
1756 case Instruction::Shl:
1757 executeShlInst(Dest, getOperandValue(CE->getOperand(0), SF),
1758 getOperandValue(CE->getOperand(1), SF),
1759 CE->getOperand(0)->getType());
1760 case Instruction::LShr:
1761 executeLShrInst(Dest, getOperandValue(CE->getOperand(0), SF),
1762 getOperandValue(CE->getOperand(1), SF),
1763 CE->getOperand(0)->getType());
1764 case Instruction::AShr:
1765 executeAShrInst(Dest, getOperandValue(CE->getOperand(0), SF),
1766 getOperandValue(CE->getOperand(1), SF),
1767 CE->getOperand(0)->getType());
1769 cerr << "Unhandled ConstantExpr: " << *CE << "\n";
1771 return GenericValue();
// getOperandValue - Produce the runtime GenericValue for operand V:
// constant expressions are evaluated, other constants converted, and
// everything else looked up in the current frame's SSA value map.
// NOTE(review): GlobalValue derives from Constant, so the GlobalValue arm
// appears unreachable — presumably getConstantValue already handles
// globals; confirm before relying on the PTOGV path.
1776 GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
1777 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
1778 return getConstantExprValue(CE, SF);
1779 } else if (Constant *CPV = dyn_cast<Constant>(V)) {
1780 return getConstantValue(CPV);
1781 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1782 return PTOGV(getPointerToGlobal(GV));
1784 return SF.Values[V];
1788 //===----------------------------------------------------------------------===//
1789 // Dispatch and Execution Code
1790 //===----------------------------------------------------------------------===//
1792 //===----------------------------------------------------------------------===//
1793 // callFunction - Execute the specified function...
// callFunction - Push a new stack frame for F and begin executing it.
// External functions (declarations) are delegated to callExternalFunction
// and their result returned to the caller immediately, simulating 'ret'.
// Otherwise the frame's PC is set to the function's first instruction, the
// formal arguments are bound, and any surplus actuals become the frame's
// VarArgs for later va_arg access.
1795 void Interpreter::callFunction(Function *F,
1796 const std::vector<GenericValue> &ArgVals) {
// Sanity: if a caller is recorded in the current top frame, its arity must
// match what we were given.
1797 assert((ECStack.empty() || ECStack.back().Caller.getInstruction() == 0 ||
1798 ECStack.back().Caller.arg_size() == ArgVals.size()) &&
1799 "Incorrect number of arguments passed into function call!");
1800 // Make a new stack frame... and fill it in.
1801 ECStack.push_back(ExecutionContext());
1802 ExecutionContext &StackFrame = ECStack.back();
1803 StackFrame.CurFunction = F;
1805 // Special handling for external functions.
1806 if (F->isDeclaration()) {
1807 GenericValue Result = callExternalFunction (F, ArgVals);
1808 // Simulate a 'ret' instruction of the appropriate type.
1809 popStackAndReturnValueToCaller (F->getReturnType (), Result);
1813 // Get pointers to first LLVM BB & Instruction in function.
1814 StackFrame.CurBB = F->begin();
1815 StackFrame.CurInst = StackFrame.CurBB->begin();
1817 // Run through the function arguments and initialize their values...
1818 assert((ArgVals.size() == F->arg_size() ||
1819 (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
1820 "Invalid number of values passed to function invocation!");
1822 // Handle non-varargs arguments...
1824 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; ++AI, ++i)
1825 SetValue(AI, ArgVals[i], StackFrame);
1827 // Handle varargs arguments...
1828 StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
1831 void Interpreter::run() {
1832 while (!ECStack.empty()) {
1833 // Interpret a single instruction & increment the "PC".
1834 ExecutionContext &SF = ECStack.back(); // Current stack frame
1835 Instruction &I = *SF.CurInst++; // Increment before execute
1837 // Track the number of dynamic instructions executed.
1840 DOUT << "About to interpret: " << I;
1841 visit(I); // Dispatch to one of the visit* methods...