AArch64: don't mask off shift bits when processing JIT relocations.
[oota-llvm.git] / lib / ExecutionEngine / RuntimeDyld / RuntimeDyldELF.cpp
index c5bad8e41640188674ec1798e82efbf7308e8314..7c1551033ee688472cba51c0d3c3009656744bdc 100644 (file)
@@ -151,6 +151,14 @@ void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
 
 namespace llvm {
 
+StringRef RuntimeDyldELF::getEHFrameSection() {
+  for (int i = 0, e = Sections.size(); i != e; ++i) {
+    if (Sections[i].Name == ".eh_frame")
+      return StringRef((const char*)Sections[i].Address, Sections[i].Size);
+  }
+  return StringRef();
+}
+
 ObjectImage *RuntimeDyldELF::createObjectImage(ObjectBuffer *Buffer) {
   if (Buffer->getBufferSize() < ELF::EI_NIDENT)
     llvm_unreachable("Unexpected ELF object size");
@@ -269,12 +277,115 @@ void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
   }
 }
 
+void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
+                                              uint64_t Offset,
+                                              uint64_t Value,
+                                              uint32_t Type,
+                                              int64_t Addend) {
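+  // Section.Address points at the copy of the section in this process's
+  // memory, which is where the fixup bytes are written; FinalAddress is the
+  // address the section will have in the target process, i.e. the "P" term
+  // of the ELF relocation calculations below.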
+  uint32_t *TargetPtr = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+  uint64_t FinalAddress = Section.LoadAddress + Offset;
+
+  DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
+               << format("%llx", Section.Address + Offset)
+               << " FinalAddress: 0x" << format("%llx",FinalAddress)
+               << " Value: 0x" << format("%llx",Value)
+               << " Type: 0x" << format("%x",Type)
+               << " Addend: 0x" << format("%llx",Addend)
+               << "\n");
+
+  switch (Type) {
+  default:
+    llvm_unreachable("Relocation type not implemented yet!");
+    break;
+  case ELF::R_AARCH64_ABS64: {
+    uint64_t *TargetPtr = reinterpret_cast<uint64_t*>(Section.Address + Offset);
+    *TargetPtr = Value + Addend;
+    break;
+  }
+  case ELF::R_AARCH64_PREL32: {
+    uint64_t Result = Value + Addend - FinalAddress;
+    assert(static_cast<int64_t>(Result) >= INT32_MIN && 
+           static_cast<int64_t>(Result) <= UINT32_MAX);
+    *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
+    break;
+  }
+  case ELF::R_AARCH64_CALL26: // fallthrough
+  case ELF::R_AARCH64_JUMP26: {
+    // Operation: S+A-P. Set the Call or B immediate value to bits 27:2
+    // (mask 0x0ffffffc) of the result.
+    uint64_t BranchImm = Value + Addend - FinalAddress;
+
+    // "Check that -2^27 <= result < 2^27".
+    assert(-(1LL << 27) <= static_cast<int64_t>(BranchImm) && 
+           static_cast<int64_t>(BranchImm) < (1LL << 27));
+
+    // AArch64 code is emitted with .rela relocations. The data already in any
+    // bits affected by the relocation on entry is garbage.
+    *TargetPtr &= 0xfc000000U;
+    // Immediate goes in bits 25:0 of B and BL.
+    *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
+    break;
+  }
+  case ELF::R_AARCH64_MOVW_UABS_G3: {
+    uint64_t Result = Value + Addend;
+
+    // AArch64 code is emitted with .rela relocations. The data already in any
+    // bits affected by the relocation on entry is garbage.
+    *TargetPtr &= 0xffe0001fU;
+    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+    *TargetPtr |= Result >> (48 - 5);
+    // Shift must be "lsl #48", in bits 22:21
+    assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation");
+    break;
+  }
+  case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
+    uint64_t Result = Value + Addend;
+
+    // AArch64 code is emitted with .rela relocations. The data already in any
+    // bits affected by the relocation on entry is garbage.
+    *TargetPtr &= 0xffe0001fU;
+    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+    *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
+    // Shift must be "lsl #32", in bits 22:21
+    assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
+    break;
+  }
+  case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
+    uint64_t Result = Value + Addend;
+
+    // AArch64 code is emitted with .rela relocations. The data already in any
+    // bits affected by the relocation on entry is garbage.
+    *TargetPtr &= 0xffe0001fU;
+    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+    *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
+    // Shift must be "lsl #16", in bits 22:21
+    assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
+    break;
+  }
+  case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
+    uint64_t Result = Value + Addend;
+
+    // AArch64 code is emitted with .rela relocations. The data already in any
+    // bits affected by the relocation on entry is garbage.
+    *TargetPtr &= 0xffe0001fU;
+    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+    *TargetPtr |= ((Result & 0xffffU) << 5);
+    // Shift must be "lsl #0", in bits 22:21.
+    assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
+    break;
+  }
+  }
+}
+
 void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
                                           uint64_t Offset,
                                           uint32_t Value,
                                           uint32_t Type,
                                           int32_t Addend) {
   // TODO: Add Thumb relocations.
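+  // Placeholder points into the original object image (ObjAddress) rather
+  // than the loaded section, so the implicit addend encoded there is still
+  // intact even if this relocation has already been applied to *TargetPtr.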
+  uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress +
+                                                      Offset);
   uint32_t* TargetPtr = (uint32_t*)(Section.Address + Offset);
   uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
   Value += Addend;
@@ -293,44 +404,51 @@ void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
 
   // Write a 32bit value to relocation address, taking into account the
   // implicit addend encoded in the target.
-  case ELF::R_ARM_TARGET1 :
-  case ELF::R_ARM_ABS32 :
-    *TargetPtr += Value;
+  case ELF::R_ARM_TARGET1:
+  case ELF::R_ARM_ABS32:
+    *TargetPtr = *Placeholder + Value;
     break;
-
   // Write first 16 bit of 32 bit value to the mov instruction.
   // Last 4 bit should be shifted.
-  case ELF::R_ARM_MOVW_ABS_NC :
+  case ELF::R_ARM_MOVW_ABS_NC:
     // We are not expecting any other addend in the relocation address.
     // Using 0x000F0FFF because MOVW has its 16 bit immediate split into 2
     // non-contiguous fields.
-    assert((*TargetPtr & 0x000F0FFF) == 0);
+    assert((*Placeholder & 0x000F0FFF) == 0);
     Value = Value & 0xFFFF;
-    *TargetPtr |= Value & 0xFFF;
+    *TargetPtr = *Placeholder | (Value & 0xFFF);
     *TargetPtr |= ((Value >> 12) & 0xF) << 16;
     break;
-
   // Write last 16 bit of 32 bit value to the mov instruction.
   // Last 4 bit should be shifted.
-  case ELF::R_ARM_MOVT_ABS :
+  case ELF::R_ARM_MOVT_ABS:
     // We are not expecting any other addend in the relocation address.
     // Use 0x000F0FFF for the same reason as R_ARM_MOVW_ABS_NC.
-    assert((*TargetPtr & 0x000F0FFF) == 0);
+    assert((*Placeholder & 0x000F0FFF) == 0);
+
     Value = (Value >> 16) & 0xFFFF;
-    *TargetPtr |= Value & 0xFFF;
+    *TargetPtr = *Placeholder | (Value & 0xFFF);
     *TargetPtr |= ((Value >> 12) & 0xF) << 16;
     break;
-
   // Write 24 bit relative value to the branch instruction.
   case ELF::R_ARM_PC24 :    // Fall through.
   case ELF::R_ARM_CALL :    // Fall through.
-  case ELF::R_ARM_JUMP24 :
+  case ELF::R_ARM_JUMP24: {
     int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
     RelValue = (RelValue & 0x03FFFFFC) >> 2;
+    assert((*TargetPtr & 0xFFFFFF) == 0xFFFFFE);
     *TargetPtr &= 0xFF000000;
     *TargetPtr |= RelValue;
     break;
   }
+  case ELF::R_ARM_PRIVATE_0:
+    // This relocation is reserved by the ARM ELF ABI for internal use. We
+    // appropriate it here to act as an R_ARM_ABS32 without any addend for use
+    // in the stubs created during JIT (which can't put an addend into the
+    // original object file).
+    *TargetPtr = Value;
+    break;
+  }
 }
 
 void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
@@ -338,6 +456,8 @@ void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
                                            uint32_t Value,
                                            uint32_t Type,
                                            int32_t Addend) {
+  uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress +
+                                                      Offset);
   uint32_t* TargetPtr = (uint32_t*)(Section.Address + Offset);
   Value += Addend;
 
@@ -355,19 +475,30 @@ void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
     llvm_unreachable("Not implemented relocation type!");
     break;
   case ELF::R_MIPS_32:
-    *TargetPtr = Value + (*TargetPtr);
+    *TargetPtr = Value + (*Placeholder);
     break;
   case ELF::R_MIPS_26:
-    *TargetPtr = ((*TargetPtr) & 0xfc000000) | (( Value & 0x0fffffff) >> 2);
+    *TargetPtr = ((*Placeholder) & 0xfc000000) | (( Value & 0x0fffffff) >> 2);
     break;
   case ELF::R_MIPS_HI16:
     // Get the higher 16-bits. Also add 1 if bit 15 is 1.
-    Value += ((*TargetPtr) & 0x0000ffff) << 16;
+    Value += ((*Placeholder) & 0x0000ffff) << 16;
+    *TargetPtr = ((*Placeholder) & 0xffff0000) |
+                 (((Value + 0x8000) >> 16) & 0xffff);
+    break;
+  case ELF::R_MIPS_LO16:
+    Value += ((*Placeholder) & 0x0000ffff);
+    *TargetPtr = ((*Placeholder) & 0xffff0000) | (Value & 0xffff);
+    break;
+  case ELF::R_MIPS_UNUSED1:
+    // Similar to ELF::R_ARM_PRIVATE_0, R_MIPS_UNUSED1 and R_MIPS_UNUSED2
+    // are used for internal JIT purposes. These relocations are similar to
+    // R_MIPS_HI16 and R_MIPS_LO16, but they do not take any addend into
+    // account.
     *TargetPtr = ((*TargetPtr) & 0xffff0000) |
                  (((Value + 0x8000) >> 16) & 0xffff);
     break;
-   case ELF::R_MIPS_LO16:
-    Value += ((*TargetPtr) & 0x0000ffff);
+  case ELF::R_MIPS_UNUSED2:
     *TargetPtr = ((*TargetPtr) & 0xffff0000) | (Value & 0xffff);
     break;
    }
@@ -412,9 +543,13 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
   error_code err;
   for (section_iterator si = Obj.begin_sections(),
      se = Obj.end_sections(); si != se; si.increment(err)) {
-    StringRef SectionName;
-    check(si->getName(SectionName));
-    if (SectionName != ".opd")
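+    // Instead of checking each section's own name, look for relocation
+    // sections (typically .rela.opd) and check the name of the section
+    // their relocations apply to; getRelocatedSection() returns
+    // end_sections() for anything that is not a relocation section.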
+    section_iterator RelSecI = si->getRelocatedSection();
+    if (RelSecI == Obj.end_sections())
+      continue;
+
+    StringRef RelSectionName;
+    check(RelSecI->getName(RelSectionName));
+    if (RelSectionName != ".opd")
       continue;
 
     for (relocation_iterator i = si->begin_relocations(),
@@ -430,12 +565,11 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
         continue;
       }
 
-      SymbolRef TargetSymbol;
       uint64_t TargetSymbolOffset;
-      int64_t TargetAdditionalInfo;
-      check(i->getSymbol(TargetSymbol));
+      symbol_iterator TargetSymbol = i->getSymbol();
       check(i->getOffset(TargetSymbolOffset));
-      check(i->getAdditionalInfo(TargetAdditionalInfo));
+      int64_t Addend;
+      check(getELFRelocationAddend(*i, Addend));
 
       i = i.increment(err);
       if (i == e)
@@ -455,9 +589,9 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
         continue;
 
       section_iterator tsi(Obj.end_sections());
-      check(TargetSymbol.getSection(tsi));
+      check(TargetSymbol->getSection(tsi));
       Rel.SectionID = findOrEmitSection(Obj, (*tsi), true, LocalSections);
-      Rel.Addend = (intptr_t)TargetAdditionalInfo;
+      Rel.Addend = (intptr_t)Addend;
       return;
     }
   }
@@ -541,6 +675,11 @@ void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
       llvm_unreachable("Relocation R_PPC64_REL32 overflow");
     writeInt32BE(LocalAddress, delta);
   } break;
+  case ELF::R_PPC64_REL64: {
+    uint64_t FinalAddress = (Section.LoadAddress + Offset);
+    uint64_t Delta = Value - FinalAddress + Addend;
+    writeInt64BE(LocalAddress, Delta);
+  } break;
   case ELF::R_PPC64_ADDR64 :
     writeInt64BE(LocalAddress, Value + Addend);
     break;
@@ -560,6 +699,42 @@ void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
   }
 }
 
+void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
+                                              uint64_t Offset,
+                                              uint64_t Value,
+                                              uint32_t Type,
+                                              int64_t Addend) {
+  uint8_t *LocalAddress = Section.Address + Offset;
+  switch (Type) {
+  default:
+    llvm_unreachable("Relocation type not implemented yet!");
+    break;
+  case ELF::R_390_PC16DBL:
+  case ELF::R_390_PLT16DBL: {
+    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
+    assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
+    writeInt16BE(LocalAddress, Delta / 2);
+    break;
+  }
+  case ELF::R_390_PC32DBL:
+  case ELF::R_390_PLT32DBL: {
+    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
+    assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
+    writeInt32BE(LocalAddress, Delta / 2);
+    break;
+  }
+  case ELF::R_390_PC32: {
+    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
+    assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
+    writeInt32BE(LocalAddress, Delta);
+    break;
+  }
+  case ELF::R_390_64:
+    writeInt64BE(LocalAddress, Value + Addend);
+    break;
+  }
+}
+
 void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
                                       uint64_t Value) {
   const SectionEntry &Section = Sections[RE.SectionID];
@@ -580,6 +755,9 @@ void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
                          (uint32_t)(Value & 0xffffffffL), Type,
                          (uint32_t)(Addend & 0xffffffffL));
     break;
+  case Triple::aarch64:
+    resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
+    break;
   case Triple::arm:    // Fall through.
   case Triple::thumb:
     resolveARMRelocation(Section, Offset,
@@ -595,6 +773,9 @@ void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
   case Triple::ppc64:
     resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
     break;
+  case Triple::systemz:
+    resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
+    break;
   default: llvm_unreachable("Unsupported CPU type!");
   }
 }
@@ -608,29 +789,33 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
   uint64_t RelType;
   Check(RelI.getType(RelType));
   int64_t Addend;
-  Check(RelI.getAdditionalInfo(Addend));
-  SymbolRef Symbol;
-  Check(RelI.getSymbol(Symbol));
+  Check(getELFRelocationAddend(RelI, Addend));
+  symbol_iterator Symbol = RelI.getSymbol();
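+  // An ELF relocation is not required to reference a symbol, in which case
+  // getSymbol() returns end_symbols(); every use of Symbol below is guarded.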
 
   // Obtain the symbol name which is referenced in the relocation
   StringRef TargetName;
-  Symbol.getName(TargetName);
+  if (Symbol != Obj.end_symbols())
+    Symbol->getName(TargetName);
   DEBUG(dbgs() << "\t\tRelType: " << RelType
                << " Addend: " << Addend
                << " TargetName: " << TargetName
                << "\n");
   RelocationValueRef Value;
   // First search for the symbol in the local symbol table
-  SymbolTableMap::const_iterator lsi = Symbols.find(TargetName.data());
-  SymbolRef::Type SymType;
-  Symbol.getType(SymType);
+  SymbolTableMap::const_iterator lsi = Symbols.end();
+  SymbolRef::Type SymType = SymbolRef::ST_Unknown;
+  if (Symbol != Obj.end_symbols()) {
+    lsi = Symbols.find(TargetName.data());
+    Symbol->getType(SymType);
+  }
   if (lsi != Symbols.end()) {
     Value.SectionID = lsi->second.first;
     Value.Addend = lsi->second.second + Addend;
   } else {
     // Search for the symbol in the global symbol table
-    SymbolTableMap::const_iterator gsi =
-        GlobalSymbolTable.find(TargetName.data());
+    SymbolTableMap::const_iterator gsi = GlobalSymbolTable.end();
+    if (Symbol != Obj.end_symbols())
+      gsi = GlobalSymbolTable.find(TargetName.data());
     if (gsi != GlobalSymbolTable.end()) {
       Value.SectionID = gsi->second.first;
       Value.Addend = gsi->second.second + Addend;
@@ -641,7 +826,7 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
          // and can be changed by other developers. Maybe the best way is to
          // add a new symbol type ST_Section to SymbolRef and use it.
           section_iterator si(Obj.end_sections());
-          Symbol.getSection(si);
+          Symbol->getSection(si);
           if (si == Obj.end_sections())
             llvm_unreachable("Symbol section not found, bad object file format!");
           DEBUG(dbgs() << "\t\tThis is section symbol\n");
@@ -672,7 +857,56 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
   DEBUG(dbgs() << "\t\tSectionID: " << SectionID
                << " Offset: " << Offset
                << "\n");
-  if (Arch == Triple::arm &&
+  if (Arch == Triple::aarch64 &&
+      (RelType == ELF::R_AARCH64_CALL26 ||
+       RelType == ELF::R_AARCH64_JUMP26)) {
+    // This is an AArch64 branch relocation, need to use a stub function.
+    DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
+    SectionEntry &Section = Sections[SectionID];
+
+    // Look for an existing stub.
+    StubMap::const_iterator i = Stubs.find(Value);
+    if (i != Stubs.end()) {
+      resolveRelocation(Section, Offset,
+                        (uint64_t)Section.Address + i->second, RelType, 0);
+      DEBUG(dbgs() << " Stub function found\n");
+    } else {
+      // Create a new stub function.
+      DEBUG(dbgs() << " Create a new stub function\n");
+      Stubs[Value] = Section.StubOffset;
+      uint8_t *StubTargetAddr = createStubFunction(Section.Address +
+                                                   Section.StubOffset);
+
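+      // The stub created above is expected to be a movz/movk sequence that
+      // materializes the full 64-bit target address in a scratch register
+      // and then branches to it. The four relocations below fill in the
+      // 16-bit immediate of each of those instructions, from bits 63:48 of
+      // the target down to bits 15:0.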
+      RelocationEntry REmovz_g3(SectionID,
+                                StubTargetAddr - Section.Address,
+                                ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
+      RelocationEntry REmovk_g2(SectionID,
+                                StubTargetAddr - Section.Address + 4,
+                                ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
+      RelocationEntry REmovk_g1(SectionID,
+                                StubTargetAddr - Section.Address + 8,
+                                ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
+      RelocationEntry REmovk_g0(SectionID,
+                                StubTargetAddr - Section.Address + 12,
+                                ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);
+
+      if (Value.SymbolName) {
+        addRelocationForSymbol(REmovz_g3, Value.SymbolName);
+        addRelocationForSymbol(REmovk_g2, Value.SymbolName);
+        addRelocationForSymbol(REmovk_g1, Value.SymbolName);
+        addRelocationForSymbol(REmovk_g0, Value.SymbolName);
+      } else {
+        addRelocationForSection(REmovz_g3, Value.SectionID);
+        addRelocationForSection(REmovk_g2, Value.SectionID);
+        addRelocationForSection(REmovk_g1, Value.SectionID);
+        addRelocationForSection(REmovk_g0, Value.SectionID);
+      }
+      resolveRelocation(Section, Offset,
+                        (uint64_t)Section.Address + Section.StubOffset,
+                        RelType, 0);
+      Section.StubOffset += getMaxStubSize();
+    }
+  } else if (Arch == Triple::arm &&
       (RelType == ELF::R_ARM_PC24 ||
        RelType == ELF::R_ARM_CALL ||
        RelType == ELF::R_ARM_JUMP24)) {
@@ -693,7 +927,7 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
       uint8_t *StubTargetAddr = createStubFunction(Section.Address +
                                                    Section.StubOffset);
       RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
-                         ELF::R_ARM_ABS32, Value.Addend);
+                         ELF::R_ARM_PRIVATE_0, Value.Addend);
       if (Value.SymbolName)
         addRelocationForSymbol(RE, Value.SymbolName);
       else
@@ -733,10 +967,10 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
       // Creating Hi and Lo relocations for the filled stub instructions.
       RelocationEntry REHi(SectionID,
                            StubTargetAddr - Section.Address,
-                           ELF::R_MIPS_HI16, Value.Addend);
+                           ELF::R_MIPS_UNUSED1, Value.Addend);
       RelocationEntry RELo(SectionID,
                            StubTargetAddr - Section.Address + 4,
-                           ELF::R_MIPS_LO16, Value.Addend);
+                           ELF::R_MIPS_UNUSED2, Value.Addend);
 
       if (Value.SymbolName) {
         addRelocationForSymbol(REHi, Value.SymbolName);
@@ -839,6 +1073,53 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
       else
         addRelocationForSection(RE, Value.SectionID);
     }
+  } else if (Arch == Triple::systemz &&
+             (RelType == ELF::R_390_PLT32DBL ||
+              RelType == ELF::R_390_GOTENT)) {
+    // Create function stubs for both PLT and GOT references, regardless of
+    // whether the GOT reference is to data or code.  The stub contains the
+    // full address of the symbol, as needed by GOT references, and the
+    // executable part only adds an overhead of 8 bytes.
+    //
+    // We could try to conserve space by allocating the code and data
+    // parts of the stub separately.  However, as things stand, we allocate
+    // a stub for every relocation, so using a GOT in JIT code should be
+    // no less space efficient than using an explicit constant pool.
+    DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
+    SectionEntry &Section = Sections[SectionID];
+
+    // Look for an existing stub.
+    StubMap::const_iterator i = Stubs.find(Value);
+    uintptr_t StubAddress;
+    if (i != Stubs.end()) {
+      StubAddress = uintptr_t(Section.Address) + i->second;
+      DEBUG(dbgs() << " Stub function found\n");
+    } else {
+      // Create a new stub function.
+      DEBUG(dbgs() << " Create a new stub function\n");
+
+      uintptr_t BaseAddress = uintptr_t(Section.Address);
+      uintptr_t StubAlignment = getStubAlignment();
+      StubAddress = (BaseAddress + Section.StubOffset +
+                     StubAlignment - 1) & -StubAlignment;
+      unsigned StubOffset = StubAddress - BaseAddress;
+
+      Stubs[Value] = StubOffset;
+      createStubFunction((uint8_t *)StubAddress);
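+      // The executable part of the stub occupies its first 8 bytes; the
+      // 64-bit target address it loads is stored immediately after it, so
+      // the R_390_64 relocation is applied at StubOffset + 8. Value.Addend
+      // already contains the relocation's own addend, which is applied at
+      // the referencing site instead, hence the subtraction below.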
+      RelocationEntry RE(SectionID, StubOffset + 8,
+                         ELF::R_390_64, Value.Addend - Addend);
+      if (Value.SymbolName)
+        addRelocationForSymbol(RE, Value.SymbolName);
+      else
+        addRelocationForSection(RE, Value.SectionID);
+      Section.StubOffset = StubOffset + getMaxStubSize();
+    }
+
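+    // Resolve a GOT-style reference against the address slot inside the
+    // stub (in effect a one-entry GOT); a PLT-style reference branches to
+    // the executable part of the stub itself.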
+    if (RelType == ELF::R_390_GOTENT)
+      resolveRelocation(Section, Offset, StubAddress + 8,
+                        ELF::R_390_PC32DBL, Addend);
+    else
+      resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
   } else {
     RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
     if (Value.SymbolName)