#include "llvm/System/Disassembler.h"
#include "llvm/System/Memory.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
-#include <set>
+#ifndef NDEBUG
+#include <iomanip>
+#endif
using namespace llvm;
STATISTIC(NumBytes, "Number of bytes of machine code compiled");
/// corresponds to.
std::map<void*, Function*> StubToFunctionMap;
- /// GlobalToLazyPtrMap - Keep track of the lazy pointer created for a
+ /// GlobalToNonLazyPtrMap - Keep track of the non-lazy pointer created for a
/// particular GlobalVariable so that we can reuse them if necessary.
- std::map<GlobalValue*, void*> GlobalToLazyPtrMap;
+ std::map<GlobalValue*, void*> GlobalToNonLazyPtrMap;
public:
std::map<Function*, void*>& getFunctionToStubMap(const MutexGuard& locked) {
}
std::map<GlobalValue*, void*>&
- getGlobalToLazyPtrMap(const MutexGuard& locked) {
+ getGlobalToNonLazyPtrMap(const MutexGuard& locked) {
assert(locked.holds(TheJIT->lock));
- return GlobalToLazyPtrMap;
+ return GlobalToNonLazyPtrMap;
}
};
/// specified address, created lazily on demand.
void *getExternalFunctionStub(void *FnAddr);
- /// getGlobalValueLazyPtr - Return a lazy pointer containing the specified
- /// GV address.
- void *getGlobalValueLazyPtr(GlobalValue *V, void *GVAddress);
+ /// getGlobalValueNonLazyPtr - Return a non-lazy pointer containing the
+ /// specified GV address.
+ void *getGlobalValueNonLazyPtr(GlobalValue *V, void *GVAddress);
/// AddCallbackAtLocation - If the target is capable of rewriting an
/// instruction without the use of a stub, record the location of the use so
return Stub;
}
-/// getGlobalValueLazyPtr - Return a lazy pointer containing the specified
+/// getGlobalValueNonLazyPtr - Return a non-lazy pointer containing the specified
/// GV address.
-void *JITResolver::getGlobalValueLazyPtr(GlobalValue *GV, void *GVAddress) {
+void *JITResolver::getGlobalValueNonLazyPtr(GlobalValue *GV, void *GVAddress) {
MutexGuard locked(TheJIT->lock);
// If we already have a stub for this global variable, recycle it.
- void *&LazyPtr = state.getGlobalToLazyPtrMap(locked)[GV];
- if (LazyPtr) return LazyPtr;
+ void *&NonLazyPtr = state.getGlobalToNonLazyPtrMap(locked)[GV];
+ if (NonLazyPtr) return NonLazyPtr;
- // Otherwise, codegen a new lazy pointer.
+ // Otherwise, codegen a new non-lazy pointer.
- LazyPtr = TheJIT->getJITInfo().emitGlobalValueLazyPtr(GV, GVAddress,
- *TheJIT->getCodeEmitter());
+ NonLazyPtr = TheJIT->getJITInfo().emitGlobalValueNonLazyPtr(GV, GVAddress,
+ *TheJIT->getCodeEmitter());
- DOUT << "JIT: Stub emitted at [" << LazyPtr << "] for GV '"
+ DOUT << "JIT: Stub emitted at [" << NonLazyPtr << "] for GV '"
<< GV->getName() << "'\n";
- return LazyPtr;
+ return NonLazyPtr;
}
/// getExternalFunctionStub - Return a stub for the function at the
if (!idx) {
idx = ++nextGOTIndex;
revGOTMap[addr] = idx;
- DOUT << "Adding GOT entry " << idx << " for addr " << addr << "\n";
+ DOUT << "JIT: Adding GOT entry " << idx << " for addr [" << addr << "]\n";
}
return idx;
}
MachineModuleInfo* MMI;
// GVSet - a set to keep track of which globals have been seen
- std::set<const GlobalVariable*> GVSet;
+ SmallPtrSet<const GlobalVariable*, 8> GVSet;
public:
JITEmitter(JIT &jit, JITMemoryManager *JMM) : Resolver(jit) {
unsigned Alignment = 1);
virtual void* finishFunctionStub(const GlobalValue *F);
+ /// allocateSpace - Reserves space in the current block if any, or
+ /// allocates a new one of the given size.
+ virtual void *allocateSpace(intptr_t Size, unsigned Alignment);
+
virtual void addRelocation(const MachineRelocation &MR) {
Relocations.push_back(MR);
}
if (MBBLocations.size() <= (unsigned)MBB->getNumber())
MBBLocations.resize((MBB->getNumber()+1)*2);
MBBLocations[MBB->getNumber()] = getCurrentPCValue();
+ DOUT << "JIT: Emitting BB" << MBB->getNumber() << " at ["
+ << (void*) getCurrentPCValue() << "]\n";
}
virtual intptr_t getConstantPoolEntryAddress(unsigned Entry) const;
if (ExceptionHandling) DE->setModuleInfo(Info);
}
+ void setMemoryExecutable(void) {
+ MemMgr->setMemoryExecutable();
+ }
+
private:
void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub);
- void *getPointerToGVLazyPtr(GlobalValue *V, void *Reference,
- bool NoNeedStub);
+ void *getPointerToGVNonLazyPtr(GlobalValue *V, void *Reference,
+ bool NoNeedStub);
unsigned addSizeOfGlobal(const GlobalVariable *GV, unsigned Size);
unsigned addSizeOfGlobalsInConstantVal(const Constant *C, unsigned Size);
unsigned addSizeOfGlobalsInInitializer(const Constant *Init, unsigned Size);
return Resolver.getFunctionStub(F);
}
-void *JITEmitter::getPointerToGVLazyPtr(GlobalValue *V, void *Reference,
+void *JITEmitter::getPointerToGVNonLazyPtr(GlobalValue *V, void *Reference,
bool DoesntNeedStub) {
// Make sure GV is emitted first.
// FIXME: For now, if the GV is an external function we force the JIT to
- // compile it so the lazy pointer will contain the fully resolved address.
+ // compile it so the non-lazy pointer will contain the fully resolved address.
void *GVAddress = getPointerToGlobal(V, Reference, true);
- return Resolver.getGlobalValueLazyPtr(V, GVAddress);
+ return Resolver.getGlobalValueNonLazyPtr(V, GVAddress);
}
static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP) {
size_t GVSize = (size_t)TheJIT->getTargetData()->getABITypeSize(ElTy);
size_t GVAlign =
(size_t)TheJIT->getTargetData()->getPreferredAlignment(GV);
- DOUT << "Adding in size " << GVSize << " alignment " << GVAlign;
+ DOUT << "JIT: Adding in size " << GVSize << " alignment " << GVAlign;
DEBUG(GV->dump());
// Assume code section ends with worst possible alignment, so first
// variable needs maximal padding.
if (C->getType()->getTypeID() == Type::PointerTyID)
if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(C))
- if (GVSet.insert(GV).second)
+ if (GVSet.insert(GV))
Size = addSizeOfGlobal(GV, Size);
return Size;
unsigned NumOps = Desc.getNumOperands();
for (unsigned CurOp = 0; CurOp < NumOps; CurOp++) {
const MachineOperand &MO = MI.getOperand(CurOp);
- if (MO.isGlobalAddress()) {
+ if (MO.isGlobal()) {
GlobalValue* V = MO.getGlobal();
const GlobalVariable *GV = dyn_cast<const GlobalVariable>(V);
if (!GV)
// assuming the addresses of the new globals in this module
// start at 0 (or something) and adjusting them after codegen
// complete. Another possibility is to grab a marker bit in GV.
- if (GVSet.insert(GV).second)
+ if (GVSet.insert(GV))
// A variable as yet unseen. Add in its size.
Size = addSizeOfGlobal(GV, Size);
}
}
}
}
- DOUT << "About to look through initializers\n";
+ DOUT << "JIT: About to look through initializers\n";
// Look for more globals that are referenced only from initializers.
// GVSet.end is computed each time because the set can grow as we go.
- for (std::set<const GlobalVariable *>::iterator I = GVSet.begin();
+ for (SmallPtrSet<const GlobalVariable *, 8>::iterator I = GVSet.begin();
I != GVSet.end(); I++) {
const GlobalVariable* GV = *I;
if (GV->hasInitializer())
}
void JITEmitter::startFunction(MachineFunction &F) {
+ DOUT << "JIT: Starting CodeGen of Function "
+ << F.getFunction()->getName() << "\n";
+
uintptr_t ActualSize = 0;
+ // Set the memory writable, if it's not already
+ MemMgr->setMemoryWritable();
if (MemMgr->NeedsExactSize()) {
- DOUT << "ExactSize\n";
+ DOUT << "JIT: ExactSize\n";
const TargetInstrInfo* TII = F.getTarget().getInstrInfo();
MachineJumpTableInfo *MJTI = F.getJumpTableInfo();
MachineConstantPool *MCP = F.getConstantPool();
// Add the function size
ActualSize += TII->GetFunctionSizeInBytes(F);
- DOUT << "ActualSize before globals " << ActualSize << "\n";
+ DOUT << "JIT: ActualSize before globals " << ActualSize << "\n";
// Add the size of the globals that will be allocated after this function.
// These are all the ones referenced from this function that were not
// previously allocated.
ActualSize += GetSizeOfGlobalsInBytes(F);
- DOUT << "ActualSize after globals " << ActualSize << "\n";
+ DOUT << "JIT: ActualSize after globals " << ActualSize << "\n";
}
BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
// Resolve the relocations to concrete pointers.
for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
MachineRelocation &MR = Relocations[i];
- void *ResultPtr;
- if (MR.isString()) {
- ResultPtr = TheJIT->getPointerToNamedFunction(MR.getString());
-
- // If the target REALLY wants a stub for this function, emit it now.
- if (!MR.doesntNeedStub())
- ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
- } else if (MR.isGlobalValue()) {
- ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
- BufferBegin+MR.getMachineCodeOffset(),
- MR.doesntNeedStub());
- } else if (MR.isGlobalValueLazyPtr()) {
- ResultPtr = getPointerToGVLazyPtr(MR.getGlobalValue(),
+ void *ResultPtr = 0;
+ if (!MR.letTargetResolve()) {
+ if (MR.isString()) {
+ ResultPtr = TheJIT->getPointerToNamedFunction(MR.getString());
+
+ // If the target REALLY wants a stub for this function, emit it now.
+ if (!MR.doesntNeedStub())
+ ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
+ } else if (MR.isGlobalValue()) {
+ ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
+ BufferBegin+MR.getMachineCodeOffset(),
+ MR.doesntNeedStub());
+ } else if (MR.isGlobalValueNonLazyPtr()) {
+ ResultPtr = getPointerToGVNonLazyPtr(MR.getGlobalValue(),
BufferBegin+MR.getMachineCodeOffset(),
MR.doesntNeedStub());
- } else if (MR.isBasicBlock()) {
- ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock());
- } else if (MR.isConstantPoolIndex()) {
- ResultPtr=(void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
- } else {
- assert(MR.isJumpTableIndex());
- ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
- }
+ } else if (MR.isBasicBlock()) {
+ ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock());
+ } else if (MR.isConstantPoolIndex()) {
+ ResultPtr = (void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
+ } else {
+ assert(MR.isJumpTableIndex());
+ ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
+ }
- MR.setResultPointer(ResultPtr);
+ MR.setResultPointer(ResultPtr);
+ }
// if we are managing the GOT and the relocation wants an index,
// give it one
unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr);
MR.setGOTIndex(idx);
if (((void**)MemMgr->getGOTBase())[idx] != ResultPtr) {
- DOUT << "GOT was out of date for " << ResultPtr
+ DOUT << "JIT: GOT was out of date for " << ResultPtr
<< " pointing at " << ((void**)MemMgr->getGOTBase())[idx]
<< "\n";
((void**)MemMgr->getGOTBase())[idx] = ResultPtr;
Relocations.size(), MemMgr->getGOTBase());
}
- unsigned char *FnEnd = CurBufferPtr;
-
- MemMgr->endFunctionBody(F.getFunction(), BufferBegin, FnEnd);
- NumBytes += FnEnd-FnStart;
-
// Update the GOT entry for F to point to the new code.
if (MemMgr->isManagingGOT()) {
unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin);
if (((void**)MemMgr->getGOTBase())[idx] != (void*)BufferBegin) {
- DOUT << "GOT was out of date for " << (void*)BufferBegin
+ DOUT << "JIT: GOT was out of date for " << (void*)BufferBegin
<< " pointing at " << ((void**)MemMgr->getGOTBase())[idx] << "\n";
((void**)MemMgr->getGOTBase())[idx] = (void*)BufferBegin;
}
}
+ unsigned char *FnEnd = CurBufferPtr;
+
+ MemMgr->endFunctionBody(F.getFunction(), BufferBegin, FnEnd);
+ BufferBegin = CurBufferPtr = 0;
+ NumBytes += FnEnd-FnStart;
+
// Invalidate the icache if necessary.
sys::Memory::InvalidateInstructionCache(FnStart, FnEnd-FnStart);
Relocations.clear();
// Mark code region readable and executable if it's not so already.
- sys::Memory::SetRXPrivilege(FnStart, FnEnd-FnStart);
+ MemMgr->setMemoryExecutable();
#ifndef NDEBUG
{
- DOUT << std::hex;
- int i;
- unsigned char* q = FnStart;
- for (i=1; q!=FnEnd; q++, i++) {
- if (i%8==1)
- DOUT << "0x" << (long)q << ": ";
- DOUT<< (unsigned short)*q << " ";
- if (i%8==0)
- DOUT<<"\n";
- }
- DOUT << std::dec;
- if (sys::hasDisassembler())
- DOUT << "Disassembled code:\n"
- << sys::disassembleBuffer(FnStart, FnEnd-FnStart, (uintptr_t)FnStart);
+ DOUT << "JIT: Disassembled code:\n";
+ if (sys::hasDisassembler())
+ DOUT << sys::disassembleBuffer(FnStart, FnEnd-FnStart, (uintptr_t)FnStart);
+ else {
+ DOUT << std::hex;
+ int i;
+ unsigned char* q = FnStart;
+ for (i=1; q!=FnEnd; q++, i++) {
+ if (i%8==1)
+ DOUT << "JIT: 0x" << (long)q << ": ";
+ DOUT<< std::setw(2) << std::setfill('0') << (unsigned short)*q << " ";
+ if (i%8==0)
+ DOUT << '\n';
+ }
+ DOUT << std::dec;
+ DOUT<< '\n';
+ }
}
#endif
if (ExceptionHandling) {
return false;
}
+void* JITEmitter::allocateSpace(intptr_t Size, unsigned Alignment) {
+ if (BufferBegin)
+ return MachineCodeEmitter::allocateSpace(Size, Alignment);
+
+ // Create a new memory block if there is no active one.
+ // Care must be taken so that BufferBegin is invalidated when a
+ // block is trimmed.
+ BufferBegin = CurBufferPtr = MemMgr->allocateSpace(Size, Alignment);
+ BufferEnd = BufferBegin+Size;
+ return CurBufferPtr;
+}
+
void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
+ if (TheJIT->getJITInfo().hasCustomConstantPool())
+ return;
+
const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
if (Constants.empty()) return;
}
void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
+ if (TheJIT->getJITInfo().hasCustomJumpTables())
+ return;
+
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
if (JT.empty()) return;
}
void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
+ if (TheJIT->getJITInfo().hasCustomJumpTables())
+ return;
+
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
if (JT.empty() || JumpTableBase == 0) return;
void *JITEmitter::finishFunctionStub(const GlobalValue* F) {
NumBytes += getCurrentPCOffset();
+
+ // Invalidate the icache if necessary.
+ sys::Memory::InvalidateInstructionCache(BufferBegin, NumBytes);
+
std::swap(SavedBufferBegin, BufferBegin);
BufferEnd = SavedBufferEnd;
CurBufferPtr = SavedCurBufferPtr;