1 //===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #ifndef LLVM_RUNTIMEDYLDMACHOAARCH64_H
11 #define LLVM_RUNTIMEDYLDMACHOAARCH64_H
13 #include "../RuntimeDyldMachO.h"
15 #define DEBUG_TYPE "dyld"
// RuntimeDyld implementation for MachO objects targeting AArch64 (ARM64).
// CRTP: the shared MachO base calls back into the architecture-specific
// hooks (decodeAddend, processRelocationRef, resolveRelocation, ...) below.
class RuntimeDyldMachOAArch64
    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
  // Construct with the memory manager that owns section allocations.
  RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM)
      : RuntimeDyldMachOCRTPBase(MM) {}
  // A stub (GOT entry) is a single 8-byte pointer slot...
  unsigned getMaxStubSize() override { return 8; }
  // ...aligned to its own 8-byte size.
  unsigned getStubAlignment() override { return 8; }
  /// Extract the addend encoded in the instruction / memory location.
  ///
  /// \param LocalAddress in-memory address of the relocation target.
  /// \param NumBytes     width in bytes of the relocated field.
  /// \param RelType      the MachO::ARM64_RELOC_* kind being decoded.
  /// \returns the decoded (sign-extended where applicable) addend.
  int64_t decodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
                       uint32_t RelType) const {
    // Verify that the relocation has the correct size and alignment.
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // Plain data fixup: 4 or 8 bytes, no alignment requirement.
      assert((NumBytes >= 4 && NumBytes <= 8) && "Invalid relocation size.");
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      // Instruction fixups patch a single 32-bit, 4-byte-aligned
      // AArch64 instruction word.
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location - use memcpy.
      memcpy(&Addend, LocalAddress, NumBytes);
    case MachO::ARM64_RELOC_BRANCH26: {
      // Verify that the relocation points to the expected branch instruction.
      uint32_t *p = (uint32_t *)LocalAddress;
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");
      // Get the 26 bit addend encoded in the branch instruction and sign-extend
      // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
      // (<< 2).
      Addend = (*p & 0x03FFFFFF) << 2;
      Addend = SignExtend64(Addend, 28);
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      uint32_t *p = (uint32_t *)LocalAddress;
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
      // Get the 21 bit addend encoded in the adrp instruction and sign-extend
      // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
      // therefore implicit (<< 12).
      Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12;
      Addend = SignExtend64(Addend, 33);
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      uint32_t *p = (uint32_t *)LocalAddress;
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      uint32_t *p = (uint32_t *)LocalAddress;
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000) ) &&
             "Expected load / store or add/sub instruction.");
      // Get the 12 bit addend encoded in the instruction.
      Addend = (*p & 0x003FFC00) >> 10;
      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        if (ImplicitShift == 0) {
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000)
      // Compensate for implicit shift.
      Addend <<= ImplicitShift;
  /// Process one MachO ARM64 relocation: fold a preceding ARM64_RELOC_ADDEND
  /// into the relocation that follows it, record the relocation for later
  /// resolution, and route GOT_LOAD relocations through processGOTRelocation.
  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
                       ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
                       const SymbolTableMap &Symbols, StubMap &Stubs) override {
    const MachOObjectFile &Obj =
        static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
    MachO::any_relocation_info RelInfo =
        Obj.getRelocation(RelI->getRawDataRefImpl());
    // NOTE(review): assert message is empty — consider naming the invariant.
    assert(!Obj.isRelocationScattered(RelInfo) && "");
    // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
    // addend for the following relocation. If found: (1) store the associated
    // addend, (2) consume the next relocation, and (3) use the stored addend to
    // override the addend.
    int64_t ExplicitAddend = 0;
    if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
      // Fixed encoding of ARM64_RELOC_ADDEND: non-external, non-PC-relative,
      // length 2.
      assert(!Obj.getPlainRelocationExternal(RelInfo));
      assert(!Obj.getAnyRelocationPCRel(RelInfo));
      assert(Obj.getAnyRelocationLength(RelInfo) == 2);
      // The addend value is carried in the symbol-number field.
      int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
      // Sign-extend the 24-bit to 64-bit.
      ExplicitAddend = SignExtend64(RawAddend, 24);
      RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
    RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
    RelocationValueRef Value(
        getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
    // An explicit ARM64_RELOC_ADDEND and an instruction-embedded addend are
    // mutually exclusive.
    assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "\
           "ARM64_RELOC_ADDEND and embedded addend in the instruction.");
    if (ExplicitAddend) {
      RE.Addend = ExplicitAddend;
      Value.Addend = ExplicitAddend;
    bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
    if (!IsExtern && RE.IsPCRel)
      makeValueAddendPCRel(Value, ObjImg, RelI);
    RE.Addend = Value.Addend;
    // GOT loads are redirected through a GOT entry created on demand.
    if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
        RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
      processGOTRelocation(RE, Value, Stubs);
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
        addRelocationForSection(RE, Value.SectionID);
  /// Apply relocation \p RE in place: patch the target instruction or data
  /// word at Section.Address + RE.Offset with \p Value (plus RE.Addend).
  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
    DEBUG(dumpRelocationToResolve(RE, Value));
    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    switch (RE.RelType) {
      llvm_unreachable("Invalid relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
      // Mask in the target value a byte at a time (we don't have an alignment
      // guarantee for the target address, so this is safest).
        llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
      // RE.Size is the log2 byte count, so 1 << RE.Size bytes are written.
      writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
    case MachO::ARM64_RELOC_BRANCH26: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
      // Mask the value into the target address. We know instructions are
      // 32-bit aligned, so we can do it all at once.
      uint32_t *p = (uint32_t *)LocalAddress;
      // Check if the addend is encoded in the instruction.
      uint32_t EncodedAddend = *p & 0x03FFFFFF;
      if (EncodedAddend != 0) {
          llvm_unreachable("branch26 instruction has embedded addend.");
          llvm_unreachable("branch26 instruction has embedded addend and"
                           "ARM64_RELOC_ADDEND.");
      // Check if branch is in range.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      uint64_t PCRelVal = Value - FinalAddress + RE.Addend;
      assert(isInt<26>(PCRelVal) && "Branch target out of range!");
      // Insert the value into the instruction (imm26 field, word-scaled).
      *p = (*p & 0xFC000000) | ((uint32_t)(PCRelVal >> 2) & 0x03FFFFFF);
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
      // Mask the value into the target address. We know instructions are
      // 32-bit aligned, so we can do it all at once.
      uint32_t *p = (uint32_t *)LocalAddress;
      // Check if the addend is encoded in the instruction (adrp immlo:immhi).
      uint32_t EncodedAddend =
          ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
      if (EncodedAddend != 0) {
          llvm_unreachable("adrp instruction has embedded addend.");
          llvm_unreachable("adrp instruction has embedded addend and"
                           "ARM64_RELOC_ADDEND.");
      // Adjust for PC-relative relocation and offset: distance between the
      // 4 KiB page of the target and the 4 KiB page of the fixup site.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
      // Check that the value fits into 21 bits (+ 12 lower bits).
      assert(isInt<33>(PCRelVal) && "Invalid page reloc value!");
      // Insert the value into the instruction (immlo in bits 30:29,
      // immhi in bits 23:5).
      uint32_t ImmLoValue = (uint32_t)(PCRelVal << 17) & 0x60000000;
      uint32_t ImmHiValue = (uint32_t)(PCRelVal >> 9) & 0x00FFFFE0;
      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
    case MachO::ARM64_RELOC_PAGEOFF12: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
      // Mask the value into the target address. We know instructions are
      // 32-bit aligned, so we can do it all at once.
      uint32_t *p = (uint32_t *)LocalAddress;
      // Check if the addend is encoded in the instruction (imm12 field).
      uint32_t EncodedAddend = *p & 0x003FFC00;
      if (EncodedAddend != 0) {
          llvm_unreachable("adrp instruction has embedded addend.");
          llvm_unreachable("adrp instruction has embedded addend and"
                           "ARM64_RELOC_ADDEND.");
      // Add the offset from the symbol.
      // Mask out the page address and only use the lower 12 bits.
      // Check which instruction we are updating to obtain the implicit shift
      // factor from LDR/STR instructions.
      if (*p & 0x08000000) {
        uint32_t ImplicitShift = ((*p >> 30) & 0x3);
        switch (ImplicitShift) {
          // Check if this is a vector op.
          if ((*p & 0x04800000) == 0x04800000) {
            assert(((Value & 0xF) == 0) &&
                   "128-bit LDR/STR not 16-byte aligned.");
          assert(((Value & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
          assert(((Value & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
          assert(((Value & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
        // Compensate for implicit shift.
        Value >>= ImplicitShift;
      // Insert the value into the instruction's imm12 field.
      *p = (*p & 0xFFC003FF) | ((uint32_t)(Value << 10) & 0x003FFC00);
    case MachO::ARM64_RELOC_SUBTRACTOR:
    case MachO::ARM64_RELOC_POINTER_TO_GOT:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
      llvm_unreachable("Relocation type not implemented yet!");
    case MachO::ARM64_RELOC_ADDEND:
      // NOTE(review): typo "handeled" in the message below ("handled").
      llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
                       "processRelocationRef!");
  /// No AArch64-specific section finalization is required; intentionally a
  /// no-op.
  void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
                       const SectionRef &Section) {}
  /// Get-or-create a GOT entry for \p Value, then resolve \p RE against the
  /// entry's address. The GOT slot itself is filled later via a pointer-sized
  /// ARM64_RELOC_UNSIGNED relocation recorded here.
  void processGOTRelocation(const RelocationEntry &RE,
                            RelocationValueRef &Value, StubMap &Stubs) {
    assert(RE.Size == 2);
    SectionEntry &Section = Sections[RE.SectionID];
    StubMap::const_iterator i = Stubs.find(Value);
    // Reuse the existing GOT entry if one was already created for this value.
    if (i != Stubs.end())
      Addr = Section.Address + i->second;
      // FIXME: There must be a better way to do this than to check and fix the
      // alignment every time!!!
      uintptr_t BaseAddress = uintptr_t(Section.Address);
      uintptr_t StubAlignment = getStubAlignment();
      // Round the next stub slot up to the stub alignment.
      uintptr_t StubAddress =
          (BaseAddress + Section.StubOffset + StubAlignment - 1) &
      unsigned StubOffset = StubAddress - BaseAddress;
      Stubs[Value] = StubOffset;
      assert(((StubAddress % getStubAlignment()) == 0) &&
             "GOT entry not aligned");
      // Record a relocation that writes the target's 8-byte address
      // (Size=3 -> 1 << 3 bytes) into the new GOT slot.
      RelocationEntry GOTRE(RE.SectionID, StubOffset,
                            MachO::ARM64_RELOC_UNSIGNED, Value.Addend,
                            /*IsPCRel=*/false, /*Size=*/3);
      if (Value.SymbolName)
        addRelocationForSymbol(GOTRE, Value.SymbolName);
        addRelocationForSection(GOTRE, Value.SectionID);
      Section.StubOffset = StubOffset + getMaxStubSize();
      Addr = (uint8_t *)StubAddress;
    // Resolve the original relocation against the GOT entry with a zero
    // addend — the addend was applied to the GOT slot contents instead.
    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, /*Addend=*/0,
                             RE.IsPCRel, RE.Size);
    resolveRelocation(TargetRE, (uint64_t)Addr);
347 #endif // LLVM_RUNTIMEDYLDMACHOAARCH64_H