//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H

#include "../RuntimeDyldMachO.h"
#include "llvm/Support/Endian.h"

#define DEBUG_TYPE "dyld"

namespace llvm {

class RuntimeDyldMachOAArch64
    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
public:

  typedef uint64_t TargetPtrT;

  RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM)
      : RuntimeDyldMachOCRTPBase(MM) {}

  unsigned getMaxStubSize() override { return 8; }

  unsigned getStubAlignment() override { return 8; }

  /// Extract the addend encoded in the instruction / memory location.
  int64_t decodeAddend(const RelocationEntry &RE) const {
    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    unsigned NumBytes = 1 << RE.Size;
    int64_t Addend = 0;
    // Verify that the relocation has the correct size and alignment.
    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
      else
        Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      // Verify that the relocation points to the expected branch instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Get the 26 bit addend encoded in the branch instruction and sign-extend
      // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
      // (<< 2).
      Addend = (*p & 0x03FFFFFF) << 2;
      Addend = SignExtend64(Addend, 28);
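      // For example, the instruction word 0x17FFFFFF (an unconditional branch
      // to PC - 4) has imm26 = 0x03FFFFFF; shifted left by two and
      // sign-extended from 28 bits this decodes to an addend of -4.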
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Get the 21 bit addend encoded in the adrp instruction and sign-extend
      // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
      // therefore implicit (<< 12).
      Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12;
      Addend = SignExtend64(Addend, 33);
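      // For example, the adrp instruction word 0x90000030 (immhi = 1,
      // immlo = 0) carries a delta of 4 pages and decodes to an addend of
      // 0x4000.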
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      (void)p;
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Get the 12 bit addend encoded in the instruction.
      Addend = (*p & 0x003FFC00) >> 10;

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        if (ImplicitShift == 0) {
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000)
            ImplicitShift = 4;
        }
      }
      // Compensate for implicit shift.
      Addend <<= ImplicitShift;
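      // For example, the instruction word 0xF9400820 (ldr x0, [x1, #16])
      // encodes imm12 = 2 with an implicit shift of 3 (64-bit load), so the
      // decoded addend is 16.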
      break;
    }
    }
    return Addend;
  }

  /// Encode the addend into the instruction / memory location.
  void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
                    MachO::RelocationInfoType RelType, int64_t Addend) const {
    // Verify that the relocation has the correct alignment.
    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
      else
        *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      // Verify that the relocation points to the expected branch instruction.
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Verify addend value.
      assert((Addend & 0x3) == 0 && "Branch target is not aligned");
      assert(isInt<28>(Addend) && "Branch target is out of range.");

      // Encode the addend as 26 bit immediate in the branch instruction.
      *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
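      // For example, an addend of -4 written into a plain branch (opcode bits
      // 0x14000000) yields the instruction word 0x17FFFFFF.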
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Check that the addend fits into 21 bits (+ 12 lower bits).
      assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
      assert(isInt<33>(Addend) && "Invalid page reloc value.");

      // Encode the addend into the instruction.
      uint32_t ImmLoValue = (uint32_t)(Addend << 17) & 0x60000000;
      uint32_t ImmHiValue = (uint32_t)(Addend >> 9) & 0x00FFFFE0;
      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
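      // For example, an addend of 0x1000 (one page) gives ImmLoValue =
      // 0x20000000 and ImmHiValue = 0, i.e. immlo = 1 and immhi = 0 in the
      // adrp instruction.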
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
      (void)p;
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction and verify alignment.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        switch (ImplicitShift) {
        case 0:
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000) {
            ImplicitShift = 4;
            assert(((Addend & 0xF) == 0) &&
                   "128-bit LDR/STR not 16-byte aligned.");
          }
          break;
        case 1:
          assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
          break;
        case 2:
          assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
          break;
        case 3:
          assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
          break;
        }
      }
      // Compensate for implicit shift.
      Addend >>= ImplicitShift;
      assert(isUInt<12>(Addend) && "Addend cannot be encoded.");

      // Encode the addend into the instruction.
      *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
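      // For example, writing an addend of 16 into a 64-bit ldr (implicit
      // shift 3) stores imm12 = 2, turning 0xF9400020 (ldr x0, [x1]) into
      // 0xF9400820 (ldr x0, [x1, #16]).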
      break;
    }
    }
  }

  relocation_iterator
  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
                       ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
                       const SymbolTableMap &Symbols, StubMap &Stubs) override {
    const MachOObjectFile &Obj =
        static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
    MachO::any_relocation_info RelInfo =
        Obj.getRelocation(RelI->getRawDataRefImpl());

    assert(!Obj.isRelocationScattered(RelInfo) &&
           "Scattered relocations not supported.");

    // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
    // addend for the following relocation. If found: (1) store the associated
    // addend, (2) consume the next relocation, and (3) use the stored addend
    // in place of the addend encoded in the instruction.
    int64_t ExplicitAddend = 0;
    if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
      assert(!Obj.getPlainRelocationExternal(RelInfo));
      assert(!Obj.getAnyRelocationPCRel(RelInfo));
      assert(Obj.getAnyRelocationLength(RelInfo) == 2);
      int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
      // Sign-extend the 24-bit to 64-bit.
      ExplicitAddend = SignExtend64(RawAddend, 24);
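      // For example, a raw 24-bit value of 0xFFFFFF sign-extends to -1.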
      ++RelI;
      RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
    }

    RelocationEntry RE(getRelocationEntry(SectionID, ObjImg, RelI));
    RE.Addend = decodeAddend(RE);
    RelocationValueRef Value(
        getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));

    assert((ExplicitAddend == 0 || RE.Addend == 0) &&
           "Relocation has ARM64_RELOC_ADDEND and embedded addend in the "
           "instruction.");
    if (ExplicitAddend) {
      RE.Addend = ExplicitAddend;
      Value.Offset = ExplicitAddend;
    }

    bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
    if (!IsExtern && RE.IsPCRel)
      makeValueAddendPCRel(Value, ObjImg, RelI, 1 << RE.Size);

    RE.Addend = Value.Offset;

    if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
        RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
      processGOTRelocation(RE, Value, Stubs);
    else {
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }

    return ++RelI;
  }

  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
    DEBUG(dumpRelocationToResolve(RE, Value));

    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    MachO::RelocationInfoType RelType =
        static_cast<MachO::RelocationInfoType>(RE.RelType);

    switch (RelType) {
    default:
      llvm_unreachable("Invalid relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
      // There is no alignment guarantee for the target address, so the value
      // is written through the unaligned-safe little-endian helpers in
      // encodeAddend.
      if (RE.Size < 2)
        llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");

      encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
      break;
    }
    case MachO::ARM64_RELOC_BRANCH26: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
      // Check if branch is in range.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      int64_t PCRelVal = Value - FinalAddress + RE.Addend;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
      // Adjust for PC-relative relocation and offset.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      int64_t PCRelVal =
          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
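      // For example, with Value + RE.Addend = 0x100012345 and FinalAddress =
      // 0x100001000, the page delta is 0x100012000 - 0x100001000 = 0x11000.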
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
    case MachO::ARM64_RELOC_PAGEOFF12: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF12 not supported");
      // Add the offset from the symbol.
      Value += RE.Addend;
      // Mask out the page address and only use the lower 12 bits.
      Value &= 0xFFF;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
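      // For example, Value = 0x100012340 with RE.Addend = 8 leaves the low 12
      // bits 0x348, which encodeAddend then scales down by the implicit shift
      // of the load / store instruction.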
      break;
    }
    case MachO::ARM64_RELOC_SUBTRACTOR:
    case MachO::ARM64_RELOC_POINTER_TO_GOT:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
      llvm_unreachable("Relocation type not yet implemented!");
    case MachO::ARM64_RELOC_ADDEND:
      llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
                       "processRelocationRef!");
    }
  }

  void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
                       const SectionRef &Section) {}

private:
  void processGOTRelocation(const RelocationEntry &RE,
                            RelocationValueRef &Value, StubMap &Stubs) {
    assert(RE.Size == 2);
    SectionEntry &Section = Sections[RE.SectionID];
    StubMap::const_iterator i = Stubs.find(Value);
    uintptr_t Addr;
    if (i != Stubs.end())
      Addr = reinterpret_cast<uintptr_t>(Section.Address) + i->second;
    else {
      // FIXME: There must be a better way to do this than to check and fix the
      // alignment every time!!!
      uintptr_t BaseAddress = uintptr_t(Section.Address);
      uintptr_t StubAlignment = getStubAlignment();
      uintptr_t StubAddress =
          (BaseAddress + Section.StubOffset + StubAlignment - 1) &
          -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;
      Stubs[Value] = StubOffset;
      assert(((StubAddress % getStubAlignment()) == 0) &&
             "GOT entry not aligned");
      RelocationEntry GOTRE(RE.SectionID, StubOffset,
                            MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
                            /*IsPCRel=*/false, /*Size=*/3);
      if (Value.SymbolName)
        addRelocationForSymbol(GOTRE, Value.SymbolName);
      else
        addRelocationForSection(GOTRE, Value.SectionID);
      Section.StubOffset = StubOffset + getMaxStubSize();
      Addr = StubAddress;
    }
    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, /*Addend=*/0,
                             RE.IsPCRel, RE.Size);
    resolveRelocation(TargetRE, static_cast<uint64_t>(Addr));