+ }
+}
+
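+// Resolve a single AArch64 relocation by patching the instruction or data at
+// Section.Address + Offset. In the ELF ABI notation used in the comments
+// below, Value is S (the symbol's address) and Addend is A.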
+void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
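+ // Section.Address is where the section lives in the JIT's own memory, so
+ // that is where the fixup is written; Section.LoadAddress is where it will
+ // reside in the target, so FinalAddress is the P term (the place) in the
+ // PC-relative calculations below.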
+ uint32_t *TargetPtr = reinterpret_cast<uint32_t *>(Section.Address + Offset);
+ uint64_t FinalAddress = Section.LoadAddress + Offset;
+
+ DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.Address + Offset)
+ << " FinalAddress: 0x" << format("%llx", FinalAddress)
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
+ << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ case ELF::R_AARCH64_ABS64: {
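+ // Operation: S + A. This is a 64-bit data relocation, so reinterpret the
+ // fixup location as a 64-bit slot and store the whole value.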
+ uint64_t *TargetPtr =
+ reinterpret_cast<uint64_t *>(Section.Address + Offset);
+ *TargetPtr = Value + Addend;
+ break;
+ }
+ case ELF::R_AARCH64_PREL32: {
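+ // Operation: S + A - P. The AArch64 ELF ABI requires -2^31 <= X < 2^32,
+ // which the assert below checks before the low 32 bits are stored.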
+ uint64_t Result = Value + Addend - FinalAddress;
+ assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+ static_cast<int64_t>(Result) <= UINT32_MAX);
+ *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
+ break;
+ }
+ case ELF::R_AARCH64_CALL26: // fallthrough
+ case ELF::R_AARCH64_JUMP26: {
+ // Operation: S+A-P. The B/BL imm26 field takes bits 27:2 of the result.
+ uint64_t BranchImm = Value + Addend - FinalAddress;
+
+ // "Check that -2^27 <= result < 2^27".
+ assert(-(1LL << 27) <= static_cast<int64_t>(BranchImm) &&
+ static_cast<int64_t>(BranchImm) < (1LL << 27));
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xfc000000U;
+ // Immediate goes in bits 25:0 of B and BL.
+ *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
+ break;
+ }
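+ // Each of the following R_AARCH64_MOVW_UABS_G* relocations fills the 16-bit
+ // immediate (bits 20:5) of one MOVZ/MOVK instruction in a sequence that
+ // materializes a 64-bit constant: G<n> takes bits 16*n+15:16*n of S+A, and
+ // each assert verifies that the instruction was emitted with the matching
+ // "lsl #16*n" shift.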
+ case ELF::R_AARCH64_MOVW_UABS_G3: {
+ uint64_t Result = Value + Addend;
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xffe0001fU;
+ // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+ *TargetPtr |= ((Result & 0xffff000000000000ULL) >> (48 - 5));
+ // Shift must be "lsl #48", in bits 22:21
+ assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation");
+ break;
+ }
+ case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
+ uint64_t Result = Value + Addend;
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xffe0001fU;
+ // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+ *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
+ // Shift must be "lsl #32", in bits 22:21
+ assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
+ break;
+ }
+ case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
+ uint64_t Result = Value + Addend;
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xffe0001fU;
+ // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+ *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
+ // Shift must be "lsl #16", in bits 22:2
+ assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
+ break;
+ }
+ case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
+ uint64_t Result = Value + Addend;
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xffe0001fU;
+ // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+ *TargetPtr |= ((Result & 0xffffU) << 5);
+ // Shift must be "lsl #0", in bits 22:21.
+ assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
+ break;
+ }
+ case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
+ // Operation: Page(S+A) - Page(P)
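+ // where Page(expr) clears the low 12 bits of expr (the 4KB page base).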
+ uint64_t Result =
+ ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
+
+ // Check that -2^32 <= X < 2^32
+ assert(static_cast<int64_t>(Result) >= -(1LL << 32) &&
+ static_cast<int64_t>(Result) < (1LL << 32) &&
+ "overflow check failed for relocation");
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0x9f00001fU;
+ // Immediate goes in bits 30:29 + 23:5 of the ADRP instruction, taken
+ // from bits 32:12 of X.
+ *TargetPtr |= ((Result & 0x3000U) << (29 - 12));
+ *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
+ break;
+ }
+ case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
+ // Operation: S + A
+ uint64_t Result = Value + Addend;
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xffc003ffU;
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:2 of X
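+ // (bits 1:0 are not encoded because the imm12 offset is scaled by the
+ // 4-byte access size).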
+ *TargetPtr |= ((Result & 0xffc) << (10 - 2));
+ break;
+ }
+ case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
+ // Operation: S + A
+ uint64_t Result = Value + Addend;
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xffc003ffU;
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:3 of X
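+ // (bits 2:0 are not encoded because the imm12 offset is scaled by the
+ // 8-byte access size).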
+ *TargetPtr |= ((Result & 0xff8) << (10 - 3));
+ break;
+ }