//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_RUNTIMEDYLDMACHOAARCH64_H

#include "../RuntimeDyldMachO.h"

#define DEBUG_TYPE "dyld"
19 class RuntimeDyldMachOAArch64
20 : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
22 RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM)
23 : RuntimeDyldMachOCRTPBase(MM) {}
25 unsigned getMaxStubSize() override { return 8; }
27 unsigned getStubAlignment() override { return 8; }
29 /// Extract the addend encoded in the instruction / memory location.
30 int64_t decodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
31 uint32_t RelType) const {
33 // Verify that the relocation has the correct size and alignment.
36 llvm_unreachable("Unsupported relocation type!");
37 case MachO::ARM64_RELOC_UNSIGNED:
38 assert((NumBytes >= 4 && NumBytes <= 8) && "Invalid relocation size.");
40 case MachO::ARM64_RELOC_BRANCH26:
41 case MachO::ARM64_RELOC_PAGE21:
42 case MachO::ARM64_RELOC_PAGEOFF12:
43 case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
44 case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
45 assert(NumBytes == 4 && "Invalid relocation size.");
46 assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
47 "Instruction address is not aligned to 4 bytes.");
53 llvm_unreachable("Unsupported relocation type!");
54 case MachO::ARM64_RELOC_UNSIGNED:
55 // This could be an unaligned memory location - use memcpy.
56 memcpy(&Addend, LocalAddress, NumBytes);
58 case MachO::ARM64_RELOC_BRANCH26: {
59 // Verify that the relocation points to the expected branch instruction.
60 uint32_t *p = (uint32_t *)LocalAddress;
61 assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");
63 // Get the 26 bit addend encoded in the branch instruction and sign-extend
64 // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
66 Addend = (*p & 0x03FFFFFF) << 2;
67 Addend = SignExtend64(Addend, 28);
70 case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
71 case MachO::ARM64_RELOC_PAGE21: {
72 // Verify that the relocation points to the expected adrp instruction.
73 uint32_t *p = (uint32_t *)LocalAddress;
74 assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
76 // Get the 21 bit addend encoded in the adrp instruction and sign-extend
77 // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
78 // therefore implicit (<< 12).
79 Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12;
80 Addend = SignExtend64(Addend, 33);
83 case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
84 // Verify that the relocation points to one of the expected load / store
86 uint32_t *p = (uint32_t *)LocalAddress;
87 assert((*p & 0x3B000000) == 0x39000000 &&
88 "Only expected load / store instructions.");
90 case MachO::ARM64_RELOC_PAGEOFF12: {
91 // Verify that the relocation points to one of the expected load / store
92 // or add / sub instructions.
93 uint32_t *p = (uint32_t *)LocalAddress;
94 assert((((*p & 0x3B000000) == 0x39000000) ||
95 ((*p & 0x11C00000) == 0x11000000) ) &&
96 "Expected load / store or add/sub instruction.");
98 // Get the 12 bit addend encoded in the instruction.
99 Addend = (*p & 0x003FFC00) >> 10;
101 // Check which instruction we are decoding to obtain the implicit shift
102 // factor of the instruction.
103 int ImplicitShift = 0;
104 if ((*p & 0x3B000000) == 0x39000000) { // << load / store
105 // For load / store instructions the size is encoded in bits 31:30.
106 ImplicitShift = ((*p >> 30) & 0x3);
107 if (ImplicitShift == 0) {
108 // Check if this a vector op to get the correct shift value.
109 if ((*p & 0x04800000) == 0x04800000)
113 // Compensate for implicit shift.
114 Addend <<= ImplicitShift;
121 /// Extract the addend encoded in the instruction.
122 void encodeAddend(uint8_t *LocalAddress, uint32_t RelType,
123 int64_t Addend) const {
124 // Verify that the relocation has the correct alignment.
127 llvm_unreachable("Unsupported relocation type!");
128 case MachO::ARM64_RELOC_UNSIGNED:
129 llvm_unreachable("Invalid relocation type for instruction.");
130 case MachO::ARM64_RELOC_BRANCH26:
131 case MachO::ARM64_RELOC_PAGE21:
132 case MachO::ARM64_RELOC_PAGEOFF12:
133 case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
134 case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
135 assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
136 "Instruction address is not aligned to 4 bytes.");
142 llvm_unreachable("Unsupported relocation type!");
143 case MachO::ARM64_RELOC_BRANCH26: {
144 // Verify that the relocation points to the expected branch instruction.
145 uint32_t *p = (uint32_t *)LocalAddress;
146 assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");
148 // Verify addend value.
149 assert((Addend & 0x3) == 0 && "Branch target is not aligned");
150 assert(isInt<28>(Addend) && "Branch target is out of range.");
152 // Encode the addend as 26 bit immediate in the branch instruction.
153 *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
156 case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
157 case MachO::ARM64_RELOC_PAGE21: {
158 // Verify that the relocation points to the expected adrp instruction.
159 uint32_t *p = (uint32_t *)LocalAddress;
160 assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
162 // Check that the addend fits into 21 bits (+ 12 lower bits).
163 assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
164 assert(isInt<33>(Addend) && "Invalid page reloc value.");
166 // Encode the addend into the instruction.
167 uint32_t ImmLoValue = (uint32_t)(Addend << 17) & 0x60000000;
168 uint32_t ImmHiValue = (uint32_t)(Addend >> 9) & 0x00FFFFE0;
169 *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
172 case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
173 // Verify that the relocation points to one of the expected load / store
175 uint32_t *p = (uint32_t *)LocalAddress;
176 assert((*p & 0x3B000000) == 0x39000000 &&
177 "Only expected load / store instructions.");
179 case MachO::ARM64_RELOC_PAGEOFF12: {
180 // Verify that the relocation points to one of the expected load / store
181 // or add / sub instructions.
182 uint32_t *p = (uint32_t *)LocalAddress;
183 assert((((*p & 0x3B000000) == 0x39000000) ||
184 ((*p & 0x11C00000) == 0x11000000) ) &&
185 "Expected load / store or add/sub instruction.");
187 // Check which instruction we are decoding to obtain the implicit shift
188 // factor of the instruction and verify alignment.
189 int ImplicitShift = 0;
190 if ((*p & 0x3B000000) == 0x39000000) { // << load / store
191 // For load / store instructions the size is encoded in bits 31:30.
192 ImplicitShift = ((*p >> 30) & 0x3);
193 switch (ImplicitShift) {
195 // Check if this a vector op to get the correct shift value.
196 if ((*p & 0x04800000) == 0x04800000) {
198 assert(((Addend & 0xF) == 0) &&
199 "128-bit LDR/STR not 16-byte aligned.");
203 assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
206 assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
209 assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
213 // Compensate for implicit shift.
214 Addend >>= ImplicitShift;
215 assert(isUInt<12>(Addend) && "Addend cannot be encoded.");
217 // Encode the addend into the instruction.
218 *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
225 processRelocationRef(unsigned SectionID, relocation_iterator RelI,
226 ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
227 const SymbolTableMap &Symbols, StubMap &Stubs) override {
228 const MachOObjectFile &Obj =
229 static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
230 MachO::any_relocation_info RelInfo =
231 Obj.getRelocation(RelI->getRawDataRefImpl());
233 assert(!Obj.isRelocationScattered(RelInfo) && "");
235 // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
236 // addend for the following relocation. If found: (1) store the associated
237 // addend, (2) consume the next relocation, and (3) use the stored addend to
238 // override the addend.
239 int64_t ExplicitAddend = 0;
240 if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
241 assert(!Obj.getPlainRelocationExternal(RelInfo));
242 assert(!Obj.getAnyRelocationPCRel(RelInfo));
243 assert(Obj.getAnyRelocationLength(RelInfo) == 2);
244 int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
245 // Sign-extend the 24-bit to 64-bit.
246 ExplicitAddend = SignExtend64(RawAddend, 24);
248 RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
251 RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
252 RelocationValueRef Value(
253 getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
255 assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "\
256 "ARM64_RELOC_ADDEND and embedded addend in the instruction.");
257 if (ExplicitAddend) {
258 RE.Addend = ExplicitAddend;
259 Value.Addend = ExplicitAddend;
262 bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
263 if (!IsExtern && RE.IsPCRel)
264 makeValueAddendPCRel(Value, ObjImg, RelI);
266 RE.Addend = Value.Addend;
268 if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
269 RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
270 processGOTRelocation(RE, Value, Stubs);
272 if (Value.SymbolName)
273 addRelocationForSymbol(RE, Value.SymbolName);
275 addRelocationForSection(RE, Value.SectionID);
281 void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
282 DEBUG(dumpRelocationToResolve(RE, Value));
284 const SectionEntry &Section = Sections[RE.SectionID];
285 uint8_t *LocalAddress = Section.Address + RE.Offset;
287 switch (RE.RelType) {
289 llvm_unreachable("Invalid relocation type!");
290 case MachO::ARM64_RELOC_UNSIGNED: {
291 assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
292 // Mask in the target value a byte at a time (we don't have an alignment
293 // guarantee for the target address, so this is safest).
295 llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
297 writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
300 case MachO::ARM64_RELOC_BRANCH26: {
301 assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
302 // Check if branch is in range.
303 uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
304 int64_t PCRelVal = Value - FinalAddress + RE.Addend;
305 encodeAddend(LocalAddress, RE.RelType, PCRelVal);
308 case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
309 case MachO::ARM64_RELOC_PAGE21: {
310 assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
311 // Adjust for PC-relative relocation and offset.
312 uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
314 ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
315 encodeAddend(LocalAddress, RE.RelType, PCRelVal);
318 case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
319 case MachO::ARM64_RELOC_PAGEOFF12: {
320 assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
321 // Add the offset from the symbol.
323 // Mask out the page address and only use the lower 12 bits.
325 encodeAddend(LocalAddress, RE.RelType, Value);
328 case MachO::ARM64_RELOC_SUBTRACTOR:
329 case MachO::ARM64_RELOC_POINTER_TO_GOT:
330 case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
331 case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
332 llvm_unreachable("Relocation type not yet implemented!");
333 case MachO::ARM64_RELOC_ADDEND:
334 llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
335 "processRelocationRef!");
339 void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
340 const SectionRef &Section) {}
343 void processGOTRelocation(const RelocationEntry &RE,
344 RelocationValueRef &Value, StubMap &Stubs) {
345 assert(RE.Size == 2);
346 SectionEntry &Section = Sections[RE.SectionID];
347 StubMap::const_iterator i = Stubs.find(Value);
349 if (i != Stubs.end())
350 Addr = Section.Address + i->second;
352 // FIXME: There must be a better way to do this then to check and fix the
353 // alignment every time!!!
354 uintptr_t BaseAddress = uintptr_t(Section.Address);
355 uintptr_t StubAlignment = getStubAlignment();
356 uintptr_t StubAddress =
357 (BaseAddress + Section.StubOffset + StubAlignment - 1) &
359 unsigned StubOffset = StubAddress - BaseAddress;
360 Stubs[Value] = StubOffset;
361 assert(((StubAddress % getStubAlignment()) == 0) &&
362 "GOT entry not aligned");
363 RelocationEntry GOTRE(RE.SectionID, StubOffset,
364 MachO::ARM64_RELOC_UNSIGNED, Value.Addend,
365 /*IsPCRel=*/false, /*Size=*/3);
366 if (Value.SymbolName)
367 addRelocationForSymbol(GOTRE, Value.SymbolName);
369 addRelocationForSection(GOTRE, Value.SectionID);
370 Section.StubOffset = StubOffset + getMaxStubSize();
371 Addr = (uint8_t *)StubAddress;
373 RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, /*Addend=*/0,
374 RE.IsPCRel, RE.Size);
375 resolveRelocation(TargetRE, (uint64_t)Addr);
#endif // LLVM_RUNTIMEDYLDMACHOAARCH64_H