1 //===-- SlotCalculator.cpp - Calculate what slots values land in ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements a useful analysis step to figure out what numbered slots
11 // values in a program will land in (keeping track of per plane information).
13 // This is used when writing a file to disk, either in bytecode or assembly.
15 //===----------------------------------------------------------------------===//
17 #include "llvm/Analysis/SlotCalculator.h"
18 #include "llvm/Constants.h"
19 #include "llvm/DerivedTypes.h"
20 #include "llvm/iOther.h"
21 #include "llvm/Module.h"
22 #include "llvm/SymbolTable.h"
23 #include "llvm/Analysis/ConstantsScanner.h"
24 #include "Support/PostOrderIterator.h"
25 #include "Support/STLExtras.h"
30 #define SC_DEBUG(X) std::cerr << X
// Module-level constructor: pre-seeds the slot table with all primitive types
// so that each primitive type's slot number equals its PrimitiveID, then (in
// lines elided from this listing) processes the module M if one was given.
// NOTE(review): this listing appears to elide interior lines (the baked-in
// line numbers jump, e.g. 37 -> 40), so closing braces / some statements of
// this constructor are not visible here.
35 SlotCalculator::SlotCalculator(const Module *M, bool buildBytecodeInfo) {
36 BuildBytecodeInfo = buildBytecodeInfo;
37 ModuleContainsAllFunctionConstants = false;
40 // Preload table... Make sure that all of the primitive types are in the table
41 // and that their Primitive ID is equal to their slot #
43 SC_DEBUG("Inserting primitive types:\n");
44 for (unsigned i = 0; i < Type::FirstDerivedTyID; ++i) {
45 assert(Type::getPrimitiveType((Type::PrimitiveID)i));
// dontIgnore=true: primitive types must get slots even if normally ignorable.
46 insertValue(Type::getPrimitiveType((Type::PrimitiveID)i), true);
// A null module leaves the calculator holding only the primitive-type slots.
49 if (M == 0) return; // Empty table...
// Function-level constructor: same primitive-type preseeding as the Module
// form, but derives TheModule from the function's parent, processes module-
// level values, and then immediately incorporates the function M so the
// calculator starts out in the "function incorporated" state.
53 SlotCalculator::SlotCalculator(const Function *M, bool buildBytecodeInfo) {
54 BuildBytecodeInfo = buildBytecodeInfo;
55 ModuleContainsAllFunctionConstants = false;
// M may be null; guard with ?: so TheModule is 0 for a detached function.
56 TheModule = M ? M->getParent() : 0;
58 // Preload table... Make sure that all of the primitive types are in the table
59 // and that their Primitive ID is equal to their slot #
61 SC_DEBUG("Inserting primitive types:\n");
62 for (unsigned i = 0; i < Type::FirstDerivedTyID; ++i) {
63 assert(Type::getPrimitiveType((Type::PrimitiveID)i));
64 insertValue(Type::getPrimitiveType((Type::PrimitiveID)i), true);
67 if (TheModule == 0) return; // Empty table...
69 processModule(); // Process module level stuff
70 incorporateFunction(M); // Start out in incorporated state
// getGlobalSlot - Look up V's slot in the global (non-compacted) NodeMap.
// Only legal while a compaction table is active; asserts if V has no entry.
73 unsigned SlotCalculator::getGlobalSlot(const Value *V) const {
74 assert(!CompactionTable.empty() &&
75 "This method can only be used when compaction is enabled!");
// ConstantPointerRefs are not numbered themselves; presumably this recurses
// on the referenced value — the 'if' body is elided in this listing (line 77).
76 if (const ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(V))
78 std::map<const Value*, unsigned>::const_iterator I = NodeMap.find(V);
79 assert(I != NodeMap.end() && "Didn't find global slot entry!");
// getPlane - Return the type plane for plane index 'Plane', honoring the
// compaction table when one is active.  Three cases: (1) no compaction table
// => use the global Table directly; (2) this plane is compactified => return
// the compaction plane; (3) compaction active but this plane was
// decompactified => translate Plane back to the global plane index first.
83 SlotCalculator::TypePlane &SlotCalculator::getPlane(unsigned Plane) {
84 unsigned PIdx = Plane;
85 if (CompactionTable.empty()) { // No compaction table active?
87 } else if (!CompactionTable[Plane].empty()) { // Compaction table active.
88 assert(Plane < CompactionTable.size());
89 return CompactionTable[Plane];
91 // Final case: compaction table active, but this plane is not
92 // compactified. If the type plane is compactified, unmap back to the
93 // global type plane corresponding to "Plane".
94 if (!CompactionTable[Type::TypeTyID].empty()) {
// 'Plane' indexes the compacted type plane here; recover the actual Type
// and (in elided lines) its global slot via NodeMap.
95 const Type *Ty = cast<Type>(CompactionTable[Type::TypeTyID][Plane]);
96 std::map<const Value*, unsigned>::iterator It = NodeMap.find(Ty);
97 assert(It != NodeMap.end() && "Type not in global constant map?");
102 // Okay we are just returning an entry out of the main Table. Make sure the
103 // plane exists and return it.
// Lazily grow Table so callers can ask for planes not yet populated.
104 if (PIdx >= Table.size())
105 Table.resize(PIdx+1);
110 // processModule - Process all of the module level function declarations and
111 // types that are available.
//
// Visible phases in this listing: (1) slot all global variables, then all
// functions; (2) slot global initializer constants; (3) if building bytecode
// info, move constant strings to the front of their planes; (4) optionally
// pre-insert every constant used by every function body; (5) insert module
// symbol-table values; (6) if the type table is large (>= 64 entries), sort
// first-class/primitive types to the front so instruction type fields stay
// small.
113 void SlotCalculator::processModule() {
114 SC_DEBUG("begin processModule!\n");
116 // Add all of the global variables to the value table...
118 for (Module::const_giterator I = TheModule->gbegin(), E = TheModule->gend();
122 // Scavenge the types out of the functions, then add the functions themselves
123 // to the value table...
125 for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
129 // Add all of the module level constants used as initializers
131 for (Module::const_giterator I = TheModule->gbegin(), E = TheModule->gend();
133 if (I->hasInitializer())
134 getOrCreateSlot(I->getInitializer());
136 // Now that all global constants have been added, rearrange constant planes
137 // that contain constant strings so that the strings occur at the start of the
138 // plane, not somewhere in the middle.
140 if (BuildBytecodeInfo) {
141 TypePlane &Types = Table[Type::TypeTyID];
142 for (unsigned plane = 0, e = Table.size(); plane != e; ++plane) {
// Only sbyte/ubyte array planes can contain strings.
143 if (const ArrayType *AT = dyn_cast<ArrayType>(Types[plane]))
144 if (AT->getElementType() == Type::SByteTy ||
145 AT->getElementType() == Type::UByteTy) {
146 TypePlane &Plane = Table[plane];
147 unsigned FirstNonStringID = 0;
148 for (unsigned i = 0, e = Plane.size(); i != e; ++i)
149 if (cast<ConstantArray>(Plane[i])->isString()) {
150 // Check to see if we have to shuffle this string around. If not,
151 // don't do anything.
152 if (i != FirstNonStringID) {
153 // Swap the plane entries....
154 std::swap(Plane[i], Plane[FirstNonStringID]);
156 // Keep the NodeMap up to date.
157 NodeMap[Plane[i]] = i;
158 NodeMap[Plane[FirstNonStringID]] = FirstNonStringID;
166 // If we are emitting a bytecode file, scan all of the functions for their
167 // constants, which allows us to emit more compact modules. This is optional,
168 // and is just used to compactify the constants used by different functions
171 // This functionality is completely optional for the bytecode writer, but
172 // tends to produce smaller bytecode files. This should not be used in the
173 // future by clients that want to, for example, build and emit functions on
174 // the fly. For now, however, it is unconditionally enabled when building
175 // bytecode information.
177 if (BuildBytecodeInfo) {
178 ModuleContainsAllFunctionConstants = true;
180 SC_DEBUG("Inserting function constants:\n");
181 for (Module::const_iterator F = TheModule->begin(), E = TheModule->end();
183 for (const_inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I){
184 for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
185 if (isa<Constant>(I->getOperand(op)))
186 getOrCreateSlot(I->getOperand(op));
187 getOrCreateSlot(I->getType());
// vanext carries an extra type operand that is not a value operand.
188 if (const VANextInst *VAN = dyn_cast<VANextInst>(*I))
189 getOrCreateSlot(VAN->getArgType());
191 processSymbolTableConstants(&F->getSymbolTable());
195 // Insert constants that are named at module level into the slot pool so that
196 // the module symbol table can refer to them...
198 if (BuildBytecodeInfo) {
199 SC_DEBUG("Inserting SymbolTable values:\n");
200 processSymbolTable(&TheModule->getSymbolTable());
203 // Now that we have collected together all of the information relevant to the
204 // module, compactify the type table if it is particularly big and outputting
205 // a bytecode file. The basic problem we run into is that some programs have
206 // a large number of types, which causes the type field to overflow its size,
207 // which causes instructions to explode in size (particularly call
208 // instructions). To avoid this behavior, we "sort" the type table so that
209 // all non-value types are pushed to the end of the type table, giving nice
210 // low numbers to the types that can be used by instructions, thus reducing
211 // the amount of explodage we suffer.
212 if (BuildBytecodeInfo && Table[Type::TypeTyID].size() >= 64) {
213 // Scan through the type table moving value types to the start of the table.
// Held by pointer (not reference) because Table may be resized/swapped below.
214 TypePlane *Types = &Table[Type::TypeTyID];
215 unsigned FirstNonValueTypeID = 0;
216 for (unsigned i = 0, e = Types->size(); i != e; ++i)
217 if (cast<Type>((*Types)[i])->isFirstClassType() ||
218 cast<Type>((*Types)[i])->isPrimitiveType()) {
219 // Check to see if we have to shuffle this type around. If not, don't
221 if (i != FirstNonValueTypeID) {
222 assert(i != Type::TypeTyID && FirstNonValueTypeID != Type::TypeTyID &&
223 "Cannot move around the type plane!");
225 // Swap the type ID's.
226 std::swap((*Types)[i], (*Types)[FirstNonValueTypeID]);
228 // Keep the NodeMap up to date.
229 NodeMap[(*Types)[i]] = i;
230 NodeMap[(*Types)[FirstNonValueTypeID]] = FirstNonValueTypeID;
232 // When we move a type, make sure to move its value plane as needed.
233 if (Table.size() > FirstNonValueTypeID) {
234 if (Table.size() <= i) Table.resize(i+1);
235 std::swap(Table[i], Table[FirstNonValueTypeID]);
// Re-acquire the pointer: the resize above may have invalidated it.
236 Types = &Table[Type::TypeTyID];
239 ++FirstNonValueTypeID;
243 SC_DEBUG("end processModule!\n");
246 // processSymbolTable - Insert all of the values in the specified symbol table
247 // into the values table...
//
// Walks every type plane of the symbol table and slots every named value,
// regardless of kind (contrast with processSymbolTableConstants below).
249 void SlotCalculator::processSymbolTable(const SymbolTable *ST) {
250 for (SymbolTable::const_iterator I = ST->begin(), E = ST->end(); I != E; ++I)
251 for (SymbolTable::type_const_iterator TI = I->second.begin(),
252 TE = I->second.end(); TI != TE; ++TI)
253 getOrCreateSlot(TI->second);
// processSymbolTableConstants - Like processSymbolTable, but only slots the
// entries that are Constants or Types, skipping other named values.  Used so
// that named-but-unreferenced constants still appear in the constant pool.
256 void SlotCalculator::processSymbolTableConstants(const SymbolTable *ST) {
257 for (SymbolTable::const_iterator I = ST->begin(), E = ST->end(); I != E; ++I)
258 for (SymbolTable::type_const_iterator TI = I->second.begin(),
259 TE = I->second.end(); TI != TE; ++TI)
260 if (isa<Constant>(TI->second) || isa<Type>(TI->second))
261 getOrCreateSlot(TI->second);
// incorporateFunction - Add all values local to function F on top of the
// module-level slots.  Records the module-level watermark of each plane in
// ModuleLevel so purgeFunction() can later trim back to it.  Must not be
// called while another function is incorporated (asserted below).
265 void SlotCalculator::incorporateFunction(const Function *F) {
266 assert(ModuleLevel.size() == 0 && "Module already incorporated!");
268 SC_DEBUG("begin processFunction!\n");
270 // If we emitted all of the function constants, build a compaction table.
271 if (BuildBytecodeInfo && ModuleContainsAllFunctionConstants)
272 buildCompactionTable(F);
274 // Update the ModuleLevel entries to be accurate.
// ModuleLevel[i] = size of plane i before any function-local values land in it.
275 ModuleLevel.resize(getNumPlanes());
276 for (unsigned i = 0, e = getNumPlanes(); i != e; ++i)
277 ModuleLevel[i] = getPlane(i).size();
279 // Iterate over function arguments, adding them to the value table...
280 for(Function::const_aiterator I = F->abegin(), E = F->aend(); I != E; ++I)
// Only needed when function constants were NOT all hoisted to module level.
283 if (BuildBytecodeInfo && // Assembly writer does not need this!
284 !ModuleContainsAllFunctionConstants) {
285 // Iterate over all of the instructions in the function, looking for
286 // constant values that are referenced. Add these to the value pools
287 // before any nonconstant values. This will be turned into the constant
288 // pool for the bytecode writer.
291 // Emit all of the constants that are being used by the instructions in
293 for_each(constant_begin(F), constant_end(F),
294 bind_obj(this, &SlotCalculator::getOrCreateSlot));
296 // If there is a symbol table, it is possible that the user has names for
297 // constants that are not being used. In this case, we will have problems
298 // if we don't emit the constants now, because otherwise we will get
299 // symbol table references to constants not in the output. Scan for these
302 processSymbolTableConstants(&F->getSymbolTable());
305 SC_DEBUG("Inserting Instructions:\n");
307 // Add all of the instructions to the type planes...
308 for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
310 for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E; ++I) {
// vanext's type operand needs a slot even though it is not a value operand.
312 if (const VANextInst *VAN = dyn_cast<VANextInst>(I))
313 getOrCreateSlot(VAN->getArgType());
317 // If we are building a compaction table, prune out planes that do not benefit
318 // from being compactified.
319 if (!CompactionTable.empty())
320 pruneCompactionTable();
322 SC_DEBUG("end processFunction!\n");
// purgeFunction - Undo incorporateFunction(): drop every function-local value,
// shrinking each pre-existing plane back to its recorded module-level size and
// deleting any planes the function introduced.  Also clears the compaction
// state.  GlobalValues must never be popped here (asserted below), since they
// belong to the module, not the function.
325 void SlotCalculator::purgeFunction() {
326 assert(ModuleLevel.size() != 0 && "Module not incorporated!");
327 unsigned NumModuleTypes = ModuleLevel.size();
329 SC_DEBUG("begin purgeFunction!\n");
331 // First, free the compaction map if used.
332 CompactionNodeMap.clear();
334 // Next, remove values from existing type planes
335 for (unsigned i = 0; i != NumModuleTypes; ++i) {
336 // Size of plane before function came
337 unsigned ModuleLev = getModuleLevel(i);
338 assert(int(ModuleLev) >= 0 && "BAD!");
340 TypePlane &Plane = getPlane(i);
342 assert(ModuleLev <= Plane.size() && "module levels higher than elements?");
// Pop function-local values from the tail of the plane, unmapping each one.
343 while (Plane.size() != ModuleLev) {
344 assert(!isa<GlobalValue>(Plane.back()) &&
345 "Functions cannot define globals!");
346 NodeMap.erase(Plane.back()); // Erase from nodemap
347 Plane.pop_back(); // Shrink plane
351 // We don't need this state anymore, free it up.
354 // Finally, remove any type planes defined by the function...
355 if (!CompactionTable.empty()) {
356 CompactionTable.clear();
// Planes beyond NumModuleTypes exist only because of this function; nuke them.
358 while (Table.size() > NumModuleTypes) {
359 TypePlane &Plane = Table.back();
360 SC_DEBUG("Removing Plane " << (Table.size()-1) << " of size "
361 << Plane.size() << "\n");
362 while (Plane.size()) {
363 assert(!isa<GlobalValue>(Plane.back()) &&
364 "Functions cannot define globals!");
365 NodeMap.erase(Plane.back()); // Erase from nodemap
366 Plane.pop_back(); // Shrink plane
369 Table.pop_back(); // Nuke the plane, we don't like it.
373 SC_DEBUG("end purgeFunction!\n");
// hasNullValue - True if the type plane with primitive ID 'TyID' should begin
// with an implicit null (zero-initializer) entry.  Label, Type, and Void
// planes have no meaningful null value, so they are excluded.
376 static inline bool hasNullValue(unsigned TyID) {
377 return TyID != Type::LabelTyID && TyID != Type::TypeTyID &&
378 TyID != Type::VoidTyID;
381 /// getOrCreateCompactionTableSlot - This method is used to build up the initial
382 /// approximation of the compaction table.
///
/// Returns V's slot in its compaction plane, inserting it (and, for non-null
/// values, an implicit leading null entry) if it is not already present.
383 unsigned SlotCalculator::getOrCreateCompactionTableSlot(const Value *V) {
// ConstantPointerRefs are never slotted directly; the handling for this case
// (line 385) is elided in this listing — presumably it recurses on the
// referenced value, matching getSlot/getOrCreateSlot below.
384 if (const ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(V))
// lower_bound + manual equality check: a single lookup that doubles as the
// insertion probe for the "already exists" fast path.
386 std::map<const Value*, unsigned>::iterator I =
387 CompactionNodeMap.lower_bound(V);
388 if (I != CompactionNodeMap.end() && I->first == V)
389 return I->second; // Already exists?
391 // Make sure the type is in the table.
393 if (!CompactionTable[Type::TypeTyID].empty())
394 Ty = getOrCreateCompactionTableSlot(V->getType());
395 else // If the type plane was decompactified, use the global plane ID
396 Ty = getSlot(V->getType());
397 if (CompactionTable.size() <= Ty)
398 CompactionTable.resize(Ty+1);
400 assert(!isa<Type>(V) || ModuleLevel.empty());
402 TypePlane &TyPlane = CompactionTable[Ty];
404 // Make sure to insert the null entry if the thing we are inserting is not a
// ... null value itself; the null constant always occupies slot 0.
406 if (TyPlane.empty() && hasNullValue(V->getType()->getPrimitiveID())) {
407 Value *ZeroInitializer = Constant::getNullValue(V->getType());
408 if (V != ZeroInitializer) {
409 TyPlane.push_back(ZeroInitializer);
410 CompactionNodeMap[ZeroInitializer] = 0;
414 unsigned SlotNo = TyPlane.size();
415 TyPlane.push_back(V);
416 CompactionNodeMap.insert(std::make_pair(V, SlotNo));
421 /// buildCompactionTable - Since all of the function constants and types are
422 /// stored in the module-level constant table, we don't need to emit a function
423 /// constant table. Also due to this, the indices for various constants and
424 /// types might be very large in large programs. In order to avoid blowing up
425 /// the size of instructions in the bytecode encoding, we build a compaction
426 /// table, which defines a mapping from function-local identifiers to global
///
/// Visible steps: seed primitive types; slot argument types; slot every type,
/// constant, and global referenced by instructions; slot symbol-table
/// constants/types/globals; guarantee each used plane has at least a null
/// entry; then, if fewer than 64 first-class types exist, undo (decompactify)
/// the type plane since compacting it would not save space.
428 void SlotCalculator::buildCompactionTable(const Function *F) {
429 assert(CompactionNodeMap.empty() && "Compaction table already built!");
430 // First step, insert the primitive types.
431 CompactionTable.resize(Type::TypeTyID+1);
432 for (unsigned i = 0; i != Type::FirstDerivedTyID; ++i) {
433 const Type *PrimTy = Type::getPrimitiveType((Type::PrimitiveID)i);
434 CompactionTable[Type::TypeTyID].push_back(PrimTy);
// Primitive type i always lands at compacted slot i, mirroring the global map.
435 CompactionNodeMap[PrimTy] = i;
438 // Next, include any types used by function arguments.
439 for (Function::const_aiterator I = F->abegin(), E = F->aend(); I != E; ++I)
440 getOrCreateCompactionTableSlot(I->getType());
442 // Next, find all of the types and values that are referred to by the
443 // instructions in the program.
444 for (const_inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
445 getOrCreateCompactionTableSlot(I->getType());
446 for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
447 if (isa<Constant>(I->getOperand(op)) ||
448 isa<GlobalValue>(I->getOperand(op)))
449 getOrCreateCompactionTableSlot(I->getOperand(op));
450 if (const VANextInst *VAN = dyn_cast<VANextInst>(*I))
451 getOrCreateCompactionTableSlot(VAN->getArgType());
// Named constants/types/globals in the function symbol table also need slots,
// even when no instruction references them.
454 const SymbolTable &ST = F->getSymbolTable();
455 for (SymbolTable::const_iterator I = ST.begin(), E = ST.end(); I != E; ++I)
456 for (SymbolTable::type_const_iterator TI = I->second.begin(),
457 TE = I->second.end(); TI != TE; ++TI)
458 if (isa<Constant>(TI->second) || isa<Type>(TI->second) ||
459 isa<GlobalValue>(TI->second))
460 getOrCreateCompactionTableSlot(TI->second);
462 // Now that we have all of the values in the table, and know what types are
463 // referenced, make sure that there is at least the zero initializer in any
464 // used type plane. Since the type was used, we will be emitting instructions
465 // to the plane even if there are no constants in it.
466 CompactionTable.resize(CompactionTable[Type::TypeTyID].size());
467 for (unsigned i = 0, e = CompactionTable.size(); i != e; ++i)
468 if (CompactionTable[i].empty() && i != Type::VoidTyID &&
469 i != Type::LabelTyID) {
470 const Type *Ty = cast<Type>(CompactionTable[Type::TypeTyID][i]);
471 getOrCreateCompactionTableSlot(Constant::getNullValue(Ty));
474 // Okay, now at this point, we have a legal compaction table. Since we want
475 // to emit the smallest possible binaries, do not compactify the type plane if
476 // it will not save us anything. Because we have not yet incorporated the
477 // function body itself yet, we don't know whether or not it's a good idea to
478 // compactify other planes. We will defer this decision until later.
479 TypePlane &GlobalTypes = Table[Type::TypeTyID];
481 // All of the values types will be scrunched to the start of the types plane
482 // of the global table. Figure out just how many there are.
// Relies on processModule() having sorted first-class types to the front.
483 assert(!GlobalTypes.empty() && "No global types???");
484 unsigned NumFCTypes = GlobalTypes.size()-1;
485 while (!cast<Type>(GlobalTypes[NumFCTypes])->isFirstClassType())
488 // If there are fewer that 64 types, no instructions will be exploded due to
489 // the size of the type operands. Thus there is no need to compactify types.
490 // Also, if the compaction table contains most of the entries in the global
491 // table, there really is no reason to compactify either.
492 if (NumFCTypes < 64) {
493 // Decompactifying types is tricky, because we have to move type planes all
494 // over the place. At least we don't need to worry about updating the
495 // CompactionNodeMap for non-types though.
496 std::vector<TypePlane> TmpCompactionTable;
497 std::swap(CompactionTable, TmpCompactionTable);
499 std::swap(Types, TmpCompactionTable[Type::TypeTyID]);
501 // Move each plane back over to the uncompactified plane
// Walk the compacted type list backwards, re-homing each plane at the type's
// global slot index.
502 while (!Types.empty()) {
503 const Type *Ty = cast<Type>(Types.back());
505 CompactionNodeMap.erase(Ty); // Decompactify type!
507 if (Ty != Type::TypeTy) {
508 // Find the global slot number for this type.
509 int TySlot = getSlot(Ty);
510 assert(TySlot != -1 && "Type doesn't exist in global table?");
512 // Now we know where to put the compaction table plane.
513 if (CompactionTable.size() <= unsigned(TySlot))
514 CompactionTable.resize(TySlot+1);
515 // Move the plane back into the compaction table.
516 std::swap(CompactionTable[TySlot], TmpCompactionTable[Types.size()]);
518 // And remove the empty plane we just moved in.
519 TmpCompactionTable.pop_back();
526 /// pruneCompactionTable - Once the entire function being processed has been
527 /// incorporated into the current compaction table, look over the compaction
528 /// table and check to see if there are any values whose compaction will not
529 /// save us any space in the bytecode file. If compactifying these values
530 /// serves no purpose, then we might as well not even emit the compactification
531 /// information to the bytecode file, saving a bit more space.
533 /// Note that the type plane has already been compactified if possible.
///
/// Heuristic: a plane is pruned (merged back to the global table) when every
/// index it would produce stays below 64 — i.e. small enough to encode
/// cheaply without a compaction mapping.
535 void SlotCalculator::pruneCompactionTable() {
536 TypePlane &TyPlane = CompactionTable[Type::TypeTyID];
537 for (unsigned ctp = 0, e = CompactionTable.size(); ctp != e; ++ctp)
538 if (ctp != Type::TypeTyID && !CompactionTable[ctp].empty()) {
539 TypePlane &CPlane = CompactionTable[ctp];
540 unsigned GlobalSlot = ctp;
// If types are compacted, 'ctp' is a compacted index; translate it to the
// global type plane index before touching Table.
541 if (!TyPlane.empty())
542 GlobalSlot = getGlobalSlot(TyPlane[ctp]);
544 if (GlobalSlot >= Table.size())
545 Table.resize(GlobalSlot+1);
546 TypePlane &GPlane = Table[GlobalSlot];
548 unsigned ModLevel = getModuleLevel(ctp);
// Entries past ModLevel are function-local; those below are module values.
549 unsigned NumFunctionObjs = CPlane.size()-ModLevel;
551 // If the maximum index required if all entries in this plane were merged
552 // into the global plane is less than 64, go ahead and eliminate the
554 bool PrunePlane = GPlane.size() + NumFunctionObjs < 64;
556 // If there are no function-local values defined, and the maximum
557 // referenced global entry is less than 64, we don't need to compactify.
558 if (!PrunePlane && NumFunctionObjs == 0) {
560 for (unsigned i = 0; i != ModLevel; ++i) {
561 unsigned Idx = NodeMap[CPlane[i]];
562 if (Idx > MaxIdx) MaxIdx = Idx;
564 PrunePlane = MaxIdx < 64;
567 // Ok, finally, if we decided to prune this plane out of the compaction
571 std::swap(OldPlane, CPlane);
573 // Loop over the function local objects, relocating them to the global
575 for (unsigned i = ModLevel, e = OldPlane.size(); i != e; ++i) {
576 const Value *V = OldPlane[i];
577 CompactionNodeMap.erase(V);
578 assert(NodeMap.count(V) == 0 && "Value already in table??");
582 // For compactified global values, just remove them from the compaction
584 for (unsigned i = 0; i != ModLevel; ++i)
585 CompactionNodeMap.erase(OldPlane[i]);
587 // Update the new modulelevel for this plane.
588 assert(ctp < ModuleLevel.size() && "Cannot set modulelevel!");
589 ModuleLevel[ctp] = GPlane.size()-NumFunctionObjs;
590 assert((int)ModuleLevel[ctp] >= 0 && "Bad computation!");
// getSlot - Return V's slot number, or -1 if it has none.  When compaction is
// active, the compaction map is consulted first; values absent from it fall
// through to the global NodeMap (non-compactified planes).
596 int SlotCalculator::getSlot(const Value *V) const {
597 // If there is a CompactionTable active...
598 if (!CompactionNodeMap.empty()) {
599 std::map<const Value*, unsigned>::const_iterator I =
600 CompactionNodeMap.find(V);
601 if (I != CompactionNodeMap.end())
602 return (int)I->second;
603 // Otherwise, if it's not in the compaction table, it must be in a
604 // non-compactified plane.
607 std::map<const Value*, unsigned>::const_iterator I = NodeMap.find(V);
608 if (I != NodeMap.end())
609 return (int)I->second;
611 // Do not number ConstantPointerRef's at all. They are an abomination.
// Transparently forward to the referenced value instead.
612 if (const ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(V))
613 return getSlot(CPR->getValue());
// getOrCreateSlot - Return V's slot, inserting it (and, for constants, its
// operands first) if necessary.  Returns -1 for void-typed values, which are
// never numbered.
619 int SlotCalculator::getOrCreateSlot(const Value *V) {
620 if (V->getType() == Type::VoidTy) return -1;
622 int SlotNo = getSlot(V); // Check to see if it's already in!
623 if (SlotNo != -1) return SlotNo;
625 // Do not number ConstantPointerRef's at all. They are an abomination.
626 if (const ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(V))
627 return getOrCreateSlot(CPR->getValue());
629 if (!isa<GlobalValue>(V)) // Initializers for globals are handled explicitly
630 if (const Constant *C = dyn_cast<Constant>(V)) {
631 assert(CompactionNodeMap.empty() &&
632 "All needed constants should be in the compaction map already!");
634 // If we are emitting a bytecode file, do not index the characters that
635 // make up constant strings. We emit constant strings as special
636 // entities that don't require their individual characters to be emitted.
637 if (!BuildBytecodeInfo || !isa<ConstantArray>(C) ||
638 !cast<ConstantArray>(C)->isString()) {
639 // This makes sure that if a constant has uses (for example an array of
640 // const ints), that they are inserted also.
// Recursively slot each operand before the constant itself (loop body elided
// in this listing).
642 for (User::const_op_iterator I = C->op_begin(), E = C->op_end();
646 assert(ModuleLevel.empty() &&
647 "How can a constant string be directly accessed in a function?");
648 // Otherwise, if we are emitting a bytecode file and this IS a string,
// ... record it for special string emission instead of slotting its chars.
650 if (!C->isNullValue())
651 ConstantStrings.push_back(cast<ConstantArray>(C));
655 return insertValue(V);
// insertValue - Insert D into the table, returning its new slot (or -1 if the
// value is deliberately not numbered).  dontIgnore=true forces insertion of
// values that would normally be skipped (used for primitive types).  Routes
// to the compaction map when one is active, and for Types also inserts all
// contained subtypes.
659 int SlotCalculator::insertValue(const Value *D, bool dontIgnore) {
660 assert(D && "Can't insert a null value!");
661 assert(getSlot(D) == -1 && "Value is already in the table!");
663 // If we are building a compaction map, and if this plane is being compacted,
664 // insert the value into the compaction map, not into the global map.
665 if (!CompactionNodeMap.empty()) {
666 if (D->getType() == Type::VoidTy) return -1; // Do not insert void values
667 assert(!isa<Type>(D) && !isa<Constant>(D) && !isa<GlobalValue>(D) &&
668 "Types, constants, and globals should be in global SymTab!");
670 int Plane = getSlot(D->getType());
671 assert(Plane != -1 && CompactionTable.size() > (unsigned)Plane &&
672 "Didn't find value type!");
// Only divert into the compaction map if this plane is actually compacted.
673 if (!CompactionTable[Plane].empty())
674 return getOrCreateCompactionTableSlot(D);
677 // If this node does not contribute to a plane, or if the node has a
678 // name and we don't want names, then ignore the silly node... Note that types
679 // do need slot numbers so that we can keep track of where other values land.
681 if (!dontIgnore) // Don't ignore nonignorables!
682 if (D->getType() == Type::VoidTy || // Ignore void type nodes
683 (!BuildBytecodeInfo && // Ignore named and constants
684 (D->hasName() || isa<Constant>(D)) && !isa<Type>(D))) {
685 SC_DEBUG("ignored value " << *D << "\n");
686 return -1; // We do need types unconditionally though
689 // If it's a type, make sure that all subtypes of the type are included...
690 if (const Type *TheTy = dyn_cast<Type>(D)) {
692 // Insert the current type before any subtypes. This is important because
693 // recursive types elements are inserted in a bottom up order. Changing
694 // this here can break things. For example:
696 // global { \2 * } { { \2 }* null }
698 int ResultSlot = doInsertValue(TheTy);
699 SC_DEBUG(" Inserted type: " << TheTy->getDescription() << " slot=" <<
702 // Loop over any contained types in the definition... in post
// ... order, so nested types get slots before they are needed.
705 for (po_iterator<const Type*> I = po_begin(TheTy), E = po_end(TheTy);
708 const Type *SubTy = *I;
709 // If we haven't seen this sub type before, add it to our type table!
710 if (getSlot(SubTy) == -1) {
711 SC_DEBUG(" Inserting subtype: " << SubTy->getDescription() << "\n");
712 int Slot = doInsertValue(SubTy);
713 SC_DEBUG(" Inserted subtype: " << SubTy->getDescription() <<
714 " slot=" << Slot << "\n");
721 // Okay, everything is happy, actually insert the silly value now...
722 return doInsertValue(D);
725 // doInsertValue - This is a small helper function to be called only
// ... by insertValue (after all ignore/compaction checks have been done).
// Computes the type plane index for D, lazily creates the plane (prepending
// the implicit null entry when building bytecode info), appends D, and
// records the slot in NodeMap.
728 int SlotCalculator::doInsertValue(const Value *D) {
729 const Type *Typ = D->getType();
732 // Used for debugging DefSlot=-1 assertion...
733 //if (Typ == Type::TypeTy)
734 // cerr << "Inserting type '" << cast<Type>(D)->getDescription() << "'!\n";
// Derived types have no fixed PrimitiveID plane: their plane index is the
// slot of the type itself, inserting the type first if it is new.
736 if (Typ->isDerivedType()) {
738 if (CompactionTable.empty())
739 ValSlot = getSlot(Typ);
741 ValSlot = getGlobalSlot(Typ);
742 if (ValSlot == -1) { // Have we already entered this type?
743 // Nope, this is the first we have seen the type, process it.
744 ValSlot = insertValue(Typ, true);
745 assert(ValSlot != -1 && "ProcessType returned -1 for a type?");
747 Ty = (unsigned)ValSlot;
749 Ty = Typ->getPrimitiveID();
752 if (Table.size() <= Ty) // Make sure we have the type plane allocated...
753 Table.resize(Ty+1, TypePlane());
755 // If this is the first value to get inserted into the type plane, make sure
756 // to insert the implicit null value...
757 if (Table[Ty].empty() && BuildBytecodeInfo && hasNullValue(Ty)) {
758 Value *ZeroInitializer = Constant::getNullValue(Typ);
760 // If we are pushing zeroinit, it will be handled below.
761 if (D != ZeroInitializer) {
762 Table[Ty].push_back(ZeroInitializer);
763 NodeMap[ZeroInitializer] = 0;
767 // Insert node into table and NodeMap...
768 unsigned DestSlot = NodeMap[D] = Table[Ty].size();
769 Table[Ty].push_back(D);
771 SC_DEBUG(" Inserting value [" << Ty << "] = " << D << " slot=" <<
773 // G = Global, C = Constant, T = Type, F = Function, o = other
774 SC_DEBUG((isa<GlobalVariable>(D) ? "G" : (isa<Constant>(D) ? "C" :
775 (isa<Type>(D) ? "T" : (isa<Function>(D) ? "F" : "o")))));
777 return (int)DestSlot;