//===-- LowerBitSets.cpp - Bitset lowering pass ---------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass lowers bitset metadata and calls to the llvm.bitset.test intrinsic.
// See http://llvm.org/docs/LangRef.html#bitsets for more information.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/IPO/LowerBitSets.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
#define DEBUG_TYPE "lowerbitsets"

STATISTIC(ByteArraySizeBits, "Byte array size in bits");
STATISTIC(ByteArraySizeBytes, "Byte array size in bytes");
STATISTIC(NumByteArraysCreated, "Number of byte arrays created");
STATISTIC(NumBitSetCallsLowered, "Number of bitset calls lowered");
STATISTIC(NumBitSetDisjointSets, "Number of disjoint sets of bitsets");

static cl::opt<bool> AvoidReuse(
    "lowerbitsets-avoid-reuse",
    cl::desc("Try to avoid reuse of byte array addresses using aliases"),
    cl::Hidden, cl::init(true));
bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
  if (Offset < ByteOffset)
    return false;

  if ((Offset - ByteOffset) % (uint64_t(1) << AlignLog2) != 0)
    return false;

  uint64_t BitOffset = (Offset - ByteOffset) >> AlignLog2;
  if (BitOffset >= BitSize)
    return false;

  return Bits.count(BitOffset);
}
bool BitSetInfo::containsValue(
    const DataLayout &DL,
    const DenseMap<GlobalObject *, uint64_t> &GlobalLayout, Value *V,
    uint64_t COffset) const {
  if (auto GV = dyn_cast<GlobalObject>(V)) {
    auto I = GlobalLayout.find(GV);
    if (I == GlobalLayout.end())
      return false;
    return containsGlobalOffset(I->second + COffset);
  }

  if (auto GEP = dyn_cast<GEPOperator>(V)) {
    APInt APOffset(DL.getPointerSizeInBits(0), 0);
    bool Result = GEP->accumulateConstantOffset(DL, APOffset);
    if (!Result)
      return false;
    COffset += APOffset.getZExtValue();
    return containsValue(DL, GlobalLayout, GEP->getPointerOperand(), COffset);
  }

  if (auto Op = dyn_cast<Operator>(V)) {
    if (Op->getOpcode() == Instruction::BitCast)
      return containsValue(DL, GlobalLayout, Op->getOperand(0), COffset);

    if (Op->getOpcode() == Instruction::Select)
      return containsValue(DL, GlobalLayout, Op->getOperand(1), COffset) &&
             containsValue(DL, GlobalLayout, Op->getOperand(2), COffset);
  }

  return false;
}
void BitSetInfo::print(raw_ostream &OS) const {
  OS << "offset " << ByteOffset << " size " << BitSize << " align "
     << (uint64_t(1) << AlignLog2) << "\n";

  if (isAllOnes()) {
    OS << "all-ones bitset\n";
    return;
  }

  OS << "{ ";
  for (uint64_t B : Bits)
    OS << B << ' ';
  OS << "}\n";
}
BitSetInfo BitSetBuilder::build() {
  if (Min > Max)
    Min = 0;

  // Normalize each offset against the minimum observed offset, and compute
  // the bitwise OR of each of the offsets. The number of trailing zeros
  // in the mask gives us the log2 of the alignment of all offsets, which
  // allows us to compress the bitset by only storing one bit per aligned
  // address.
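  // For example (illustrative values), offsets {0, 16, 24} produce
  // Mask == 0b11000, so AlignLog2 == 3 and the bitset stores bits
  // {0, 2, 3} with a BitSize of 4, rather than one bit per byte.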
  uint64_t Mask = 0;
  for (uint64_t &Offset : Offsets) {
    Offset -= Min;
    Mask |= Offset;
  }

  BitSetInfo BSI;
  BSI.ByteOffset = Min;

  BSI.AlignLog2 = 0;
  if (Mask != 0)
    BSI.AlignLog2 = countTrailingZeros(Mask, ZB_Undefined);

  // Build the compressed bitset while normalizing the offsets against the
  // computed alignment.
  BSI.BitSize = ((Max - Min) >> BSI.AlignLog2) + 1;
  for (uint64_t Offset : Offsets) {
    Offset >>= BSI.AlignLog2;
    BSI.Bits.insert(Offset);
  }

  return BSI;
}
void GlobalLayoutBuilder::addFragment(const std::set<uint64_t> &F) {
  // Create a new fragment to hold the layout for F.
  Fragments.emplace_back();
  std::vector<uint64_t> &Fragment = Fragments.back();
  uint64_t FragmentIndex = Fragments.size() - 1;

  for (auto ObjIndex : F) {
    uint64_t OldFragmentIndex = FragmentMap[ObjIndex];
    if (OldFragmentIndex == 0) {
      // We haven't seen this object index before, so just add it to the
      // current fragment.
      Fragment.push_back(ObjIndex);
    } else {
      // This index belongs to an existing fragment. Copy the elements of the
      // old fragment into this one and clear the old fragment. We don't update
      // the fragment map just yet; this ensures that any further references to
      // indices from the old fragment in this fragment do not insert any more
      // indices.
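      // For example (illustrative indices), if addFragment({0, 1}) has
      // already created the fragment [0, 1], a later addFragment({1, 2})
      // copies [0, 1] into the new fragment before appending 2, giving
      // [0, 1, 2].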
      std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex];
      Fragment.insert(Fragment.end(), OldFragment.begin(), OldFragment.end());
      OldFragment.clear();
    }
  }

  // Update the fragment map to point our object indices to this fragment.
  for (uint64_t ObjIndex : Fragment)
    FragmentMap[ObjIndex] = FragmentIndex;
}
void ByteArrayBuilder::allocate(const std::set<uint64_t> &Bits,
                                uint64_t BitSize, uint64_t &AllocByteOffset,
                                uint8_t &AllocMask) {
  // Find the smallest current allocation.
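  // Each byte supplies eight independent bit positions, so up to eight
  // bit sets can share a single byte array, one bit plane each; picking
  // the least-allocated plane keeps the overall array short.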
  unsigned Bit = 0;
  for (unsigned I = 1; I != BitsPerByte; ++I)
    if (BitAllocs[I] < BitAllocs[Bit])
      Bit = I;

  AllocByteOffset = BitAllocs[Bit];

  // Add our size to it.
  unsigned ReqSize = AllocByteOffset + BitSize;
  BitAllocs[Bit] = ReqSize;
  if (Bytes.size() < ReqSize)
    Bytes.resize(ReqSize);

  // Set our bits.
  AllocMask = 1 << Bit;
  for (uint64_t B : Bits)
    Bytes[AllocByteOffset + B] |= AllocMask;
}
namespace {

struct ByteArrayInfo {
  std::set<uint64_t> Bits;
  uint64_t BitSize;
  GlobalVariable *ByteArray;
  Constant *Mask;
};
struct LowerBitSets : public ModulePass {
  static char ID;
  LowerBitSets() : ModulePass(ID) {
    initializeLowerBitSetsPass(*PassRegistry::getPassRegistry());
  }

  Module *M;
  bool LinkerSubsectionsViaSymbols;
  Triple::ArchType Arch;
  Triple::ObjectFormatType ObjectFormat;
  IntegerType *Int1Ty;
  IntegerType *Int8Ty;
  IntegerType *Int32Ty;
  Type *Int32PtrTy;
  IntegerType *Int64Ty;
  IntegerType *IntPtrTy;

  // The llvm.bitsets named metadata.
  NamedMDNode *BitSetNM;

  // Mapping from bitset identifiers to the call sites that test them.
  DenseMap<Metadata *, std::vector<CallInst *>> BitSetTestCallSites;

  std::vector<ByteArrayInfo> ByteArrayInfos;
  BitSetInfo
  buildBitSet(Metadata *BitSet,
              const DenseMap<GlobalObject *, uint64_t> &GlobalLayout);
  ByteArrayInfo *createByteArray(BitSetInfo &BSI);
  void allocateByteArrays();
  Value *createBitSetTest(IRBuilder<> &B, BitSetInfo &BSI, ByteArrayInfo *&BAI,
                          Value *BitOffset);
  void lowerBitSetCalls(ArrayRef<Metadata *> BitSets,
                        Constant *CombinedGlobalAddr,
                        const DenseMap<GlobalObject *, uint64_t> &GlobalLayout);
  Value *
  lowerBitSetCall(CallInst *CI, BitSetInfo &BSI, ByteArrayInfo *&BAI,
                  Constant *CombinedGlobal,
                  const DenseMap<GlobalObject *, uint64_t> &GlobalLayout);
  void buildBitSetsFromGlobalVariables(ArrayRef<Metadata *> BitSets,
                                       ArrayRef<GlobalVariable *> Globals);
  unsigned getJumpTableEntrySize();
  Type *getJumpTableEntryType();
  Constant *createJumpTableEntry(GlobalObject *Src, Function *Dest,
                                 unsigned Distance);
  void verifyBitSetMDNode(MDNode *Op);
  void buildBitSetsFromFunctions(ArrayRef<Metadata *> BitSets,
                                 ArrayRef<Function *> Functions);
  void buildBitSetsFromDisjointSet(ArrayRef<Metadata *> BitSets,
                                   ArrayRef<GlobalObject *> Globals);
  bool buildBitSets();
  bool eraseBitSetMetadata();

  bool doInitialization(Module &M) override;
  bool runOnModule(Module &M) override;
};

} // end anonymous namespace
INITIALIZE_PASS_BEGIN(LowerBitSets, "lowerbitsets",
                      "Lower bitset metadata", false, false)
INITIALIZE_PASS_END(LowerBitSets, "lowerbitsets",
                    "Lower bitset metadata", false, false)
char LowerBitSets::ID = 0;

ModulePass *llvm::createLowerBitSetsPass() { return new LowerBitSets; }
bool LowerBitSets::doInitialization(Module &Mod) {
  M = &Mod;
  const DataLayout &DL = Mod.getDataLayout();

  Triple TargetTriple(M->getTargetTriple());
  LinkerSubsectionsViaSymbols = TargetTriple.isMacOSX();
  Arch = TargetTriple.getArch();
  ObjectFormat = TargetTriple.getObjectFormat();

  Int1Ty = Type::getInt1Ty(M->getContext());
  Int8Ty = Type::getInt8Ty(M->getContext());
  Int32Ty = Type::getInt32Ty(M->getContext());
  Int32PtrTy = PointerType::getUnqual(Int32Ty);
  Int64Ty = Type::getInt64Ty(M->getContext());
  IntPtrTy = DL.getIntPtrType(M->getContext(), 0);

  BitSetNM = M->getNamedMetadata("llvm.bitsets");

  BitSetTestCallSites.clear();

  return false;
}
/// Build a bit set for BitSet using the object layouts in
/// GlobalLayout.
BitSetInfo LowerBitSets::buildBitSet(
    Metadata *BitSet,
    const DenseMap<GlobalObject *, uint64_t> &GlobalLayout) {
  BitSetBuilder BSB;

  // Compute the byte offset of each element of this bitset.
  if (BitSetNM) {
    for (MDNode *Op : BitSetNM->operands()) {
      if (Op->getOperand(0) != BitSet || !Op->getOperand(1))
        continue;
      Constant *OpConst =
          cast<ConstantAsMetadata>(Op->getOperand(1))->getValue();
      if (auto GA = dyn_cast<GlobalAlias>(OpConst))
        OpConst = GA->getAliasee();
      auto OpGlobal = dyn_cast<GlobalObject>(OpConst);
      if (!OpGlobal)
        continue;
      uint64_t Offset =
          cast<ConstantInt>(cast<ConstantAsMetadata>(Op->getOperand(2))
                                ->getValue())->getZExtValue();

      Offset += GlobalLayout.find(OpGlobal)->second;

      BSB.addOffset(Offset);
    }
  }

  return BSB.build();
}
/// Build a test that bit BitOffset mod sizeof(Bits)*8 is set in
/// Bits. This pattern matches the bt instruction on x86.
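/// For example, for a 32-bit Bits value the emitted IR computes
/// (Bits & (1 << (BitOffset & 31))) != 0, which instruction selection
/// can fold into a single register-indexed bt.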
static Value *createMaskedBitTest(IRBuilder<> &B, Value *Bits,
                                  Value *BitOffset) {
  auto BitsType = cast<IntegerType>(Bits->getType());
  unsigned BitWidth = BitsType->getBitWidth();

  BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType);
  Value *BitIndex =
      B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1));
  Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex);
  Value *MaskedBits = B.CreateAnd(Bits, BitMask);
  return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0));
}
ByteArrayInfo *LowerBitSets::createByteArray(BitSetInfo &BSI) {
  // Create globals to stand in for byte arrays and masks. These never actually
  // get initialized; we RAUW and erase them later in allocateByteArrays() once
  // we know the offset and mask to use.
  auto ByteArrayGlobal = new GlobalVariable(
      *M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr);
  auto MaskGlobal = new GlobalVariable(
      *M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr);

  ByteArrayInfos.emplace_back();
  ByteArrayInfo *BAI = &ByteArrayInfos.back();

  BAI->Bits = BSI.Bits;
  BAI->BitSize = BSI.BitSize;
  BAI->ByteArray = ByteArrayGlobal;
  BAI->Mask = ConstantExpr::getPtrToInt(MaskGlobal, Int8Ty);
  return BAI;
}
void LowerBitSets::allocateByteArrays() {
  std::stable_sort(ByteArrayInfos.begin(), ByteArrayInfos.end(),
                   [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
                     return BAI1.BitSize > BAI2.BitSize;
                   });

  std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());

  ByteArrayBuilder BAB;
  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    uint8_t Mask;
    BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);

    BAI->Mask->replaceAllUsesWith(ConstantInt::get(Int8Ty, Mask));
    cast<GlobalVariable>(BAI->Mask->getOperand(0))->eraseFromParent();
  }

  Constant *ByteArrayConst = ConstantDataArray::get(M->getContext(), BAB.Bytes);
  auto ByteArray =
      new GlobalVariable(*M, ByteArrayConst->getType(), /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, ByteArrayConst);

  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    Constant *Idxs[] = {ConstantInt::get(IntPtrTy, 0),
                        ConstantInt::get(IntPtrTy, ByteArrayOffsets[I])};
    Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(
        ByteArrayConst->getType(), ByteArray, Idxs);

    // Create an alias instead of RAUW'ing the gep directly. On x86 this ensures
    // that the pc-relative displacement is folded into the lea instead of the
    // test instruction getting another displacement.
    if (LinkerSubsectionsViaSymbols) {
      BAI->ByteArray->replaceAllUsesWith(GEP);
    } else {
      GlobalAlias *Alias = GlobalAlias::create(
          Int8Ty, 0, GlobalValue::PrivateLinkage, "bits", GEP, M);
      BAI->ByteArray->replaceAllUsesWith(Alias);
    }
    BAI->ByteArray->eraseFromParent();
  }

  ByteArraySizeBits = BAB.BitAllocs[0] + BAB.BitAllocs[1] + BAB.BitAllocs[2] +
                      BAB.BitAllocs[3] + BAB.BitAllocs[4] + BAB.BitAllocs[5] +
                      BAB.BitAllocs[6] + BAB.BitAllocs[7];
  ByteArraySizeBytes = BAB.Bytes.size();
}
/// Build a test that bit BitOffset is set in BSI, where BAI points to the
/// byte array (if any) containing the bits in BSI.
Value *LowerBitSets::createBitSetTest(IRBuilder<> &B, BitSetInfo &BSI,
                                      ByteArrayInfo *&BAI, Value *BitOffset) {
  if (BSI.BitSize <= 64) {
    // If the bit set is sufficiently small, we can avoid a load by bit testing
    // a constant.
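    // For example (illustrative values), a bitset with BitSize 4 and
    // Bits {0, 2, 3} is encoded as the constant 0b1101, so the test
    // needs no byte array at all.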
    IntegerType *BitsTy;
    if (BSI.BitSize <= 32)
      BitsTy = Int32Ty;
    else
      BitsTy = Int64Ty;

    uint64_t Bits = 0;
    for (auto Bit : BSI.Bits)
      Bits |= uint64_t(1) << Bit;
    Constant *BitsConst = ConstantInt::get(BitsTy, Bits);
    return createMaskedBitTest(B, BitsConst, BitOffset);
  }
  if (!BAI) {
    ++NumByteArraysCreated;
    BAI = createByteArray(BSI);
  }

  Constant *ByteArray = BAI->ByteArray;
  Type *Ty = BAI->ByteArray->getValueType();
  if (!LinkerSubsectionsViaSymbols && AvoidReuse) {
    // Each use of the byte array uses a different alias. This makes the
    // backend less likely to reuse previously computed byte array addresses,
    // improving the security of the CFI mechanism based on this pass.
    ByteArray = GlobalAlias::create(BAI->ByteArray->getValueType(), 0,
                                    GlobalValue::PrivateLinkage, "bits_use",
                                    ByteArray, M);
  }

  Value *ByteAddr = B.CreateGEP(Ty, ByteArray, BitOffset);
  Value *Byte = B.CreateLoad(ByteAddr);

  Value *ByteAndMask = B.CreateAnd(Byte, BAI->Mask);
  return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
}
/// Lower a llvm.bitset.test call to its implementation. Returns the value to
/// replace the call with.
Value *LowerBitSets::lowerBitSetCall(
    CallInst *CI, BitSetInfo &BSI, ByteArrayInfo *&BAI,
    Constant *CombinedGlobalIntAddr,
    const DenseMap<GlobalObject *, uint64_t> &GlobalLayout) {
  Value *Ptr = CI->getArgOperand(0);
  const DataLayout &DL = M->getDataLayout();

  if (BSI.containsValue(DL, GlobalLayout, Ptr))
    return ConstantInt::getTrue(M->getContext());

  Constant *OffsetedGlobalAsInt = ConstantExpr::getAdd(
      CombinedGlobalIntAddr, ConstantInt::get(IntPtrTy, BSI.ByteOffset));

  BasicBlock *InitialBB = CI->getParent();

  IRBuilder<> B(CI);

  Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);

  if (BSI.isSingleOffset())
    return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);

  Value *PtrOffset = B.CreateSub(PtrAsInt, OffsetedGlobalAsInt);

  Value *BitOffset;
  if (BSI.AlignLog2 == 0) {
    BitOffset = PtrOffset;
  } else {
    // We need to check that the offset both falls within our range and is
    // suitably aligned. We can check both properties at the same time by
    // performing a right rotate by log2(alignment) followed by an integer
    // comparison against the bitset size. The rotate will move the lower
    // order bits that need to be zero into the higher order bits of the
    // result, causing the comparison to fail if they are nonzero. The rotate
    // also conveniently gives us a bit offset to use during the load from
    // the bitset.
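    // For example (illustrative values), with AlignLog2 == 3 on a
    // 64-bit target, PtrOffset == 24 rotates to a bit offset of 3,
    // while the misaligned PtrOffset == 25 rotates its low bit up to
    // bit 61, so the unsigned comparison against BitSize fails.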
    Value *OffsetSHR =
        B.CreateLShr(PtrOffset, ConstantInt::get(IntPtrTy, BSI.AlignLog2));
    Value *OffsetSHL = B.CreateShl(
        PtrOffset,
        ConstantInt::get(IntPtrTy, DL.getPointerSizeInBits(0) - BSI.AlignLog2));
    BitOffset = B.CreateOr(OffsetSHR, OffsetSHL);
  }

  Constant *BitSizeConst = ConstantInt::get(IntPtrTy, BSI.BitSize);
  Value *OffsetInRange = B.CreateICmpULT(BitOffset, BitSizeConst);

  // If the bit set is all ones, testing against it is unnecessary.
  if (BSI.isAllOnes())
    return OffsetInRange;

  TerminatorInst *Term = SplitBlockAndInsertIfThen(OffsetInRange, CI, false);
  IRBuilder<> ThenB(Term);

  // Now that we know that the offset is in range and aligned, load the
  // appropriate bit from the bitset.
  Value *Bit = createBitSetTest(ThenB, BSI, BAI, BitOffset);

  // The value we want is 0 if we came directly from the initial block
  // (having failed the range or alignment checks), or the loaded bit if
  // we came from the block in which we loaded it.
  B.SetInsertPoint(CI);
  PHINode *P = B.CreatePHI(Int1Ty, 2);
  P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB);
  P->addIncoming(Bit, ThenB.GetInsertBlock());
  return P;
}
/// Given a disjoint set of bitsets and globals, lay out the globals, build the
/// bit sets and lower the llvm.bitset.test calls.
void LowerBitSets::buildBitSetsFromGlobalVariables(
    ArrayRef<Metadata *> BitSets, ArrayRef<GlobalVariable *> Globals) {
  // Build a new global with the combined contents of the referenced globals.
  // This global is a struct whose even-indexed elements contain the original
  // contents of the referenced globals and whose odd-indexed elements contain
  // any padding required to align the next element to the next power of 2.
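  // For example (illustrative sizes), combining a 6-byte global with a
  // 10-byte global yields roughly { [6 x i8], [2 x i8], [10 x i8] },
  // placing the second global at offset 8.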
  std::vector<Constant *> GlobalInits;
  const DataLayout &DL = M->getDataLayout();
  for (GlobalVariable *G : Globals) {
    GlobalInits.push_back(G->getInitializer());
    uint64_t InitSize = DL.getTypeAllocSize(G->getInitializer()->getType());

    // Compute the amount of padding required.
    uint64_t Padding = NextPowerOf2(InitSize - 1) - InitSize;

    // Experimentally, capping the padding at 128 bytes gives a good
    // data/instruction overhead tradeoff.
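    // For example (illustrative sizes), a 12-byte global gets 4 bytes
    // of padding (to 16), while a 300-byte global would need 212 bytes
    // to reach 512, so it is instead padded by 84 bytes to the next
    // multiple of 128 (384).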
    if (Padding > 128)
      Padding = RoundUpToAlignment(InitSize, 128) - InitSize;

    GlobalInits.push_back(
        ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
  }
  if (!GlobalInits.empty())
    GlobalInits.pop_back();
  Constant *NewInit = ConstantStruct::getAnon(M->getContext(), GlobalInits);
  auto CombinedGlobal =
      new GlobalVariable(*M, NewInit->getType(), /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, NewInit);

  const StructLayout *CombinedGlobalLayout =
      DL.getStructLayout(cast<StructType>(NewInit->getType()));

  // Compute the offsets of the original globals within the new global.
  DenseMap<GlobalObject *, uint64_t> GlobalLayout;
  for (unsigned I = 0; I != Globals.size(); ++I)
    // Multiply by 2 to account for padding elements.
    GlobalLayout[Globals[I]] = CombinedGlobalLayout->getElementOffset(I * 2);
  lowerBitSetCalls(BitSets, CombinedGlobal, GlobalLayout);

  // Build aliases pointing to offsets into the combined global for each
  // global from which we built the combined global, and replace references
  // to the original globals with references to the aliases.
  for (unsigned I = 0; I != Globals.size(); ++I) {
    // Multiply by 2 to account for padding elements.
    Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0),
                                      ConstantInt::get(Int32Ty, I * 2)};
    Constant *CombinedGlobalElemPtr = ConstantExpr::getGetElementPtr(
        NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs);
    if (LinkerSubsectionsViaSymbols) {
      Globals[I]->replaceAllUsesWith(CombinedGlobalElemPtr);
    } else {
      GlobalAlias *GAlias = GlobalAlias::create(
          Globals[I]->getType()->getElementType(),
          Globals[I]->getType()->getAddressSpace(), Globals[I]->getLinkage(),
          "", CombinedGlobalElemPtr, M);
      GAlias->setVisibility(Globals[I]->getVisibility());
      GAlias->takeName(Globals[I]);
      Globals[I]->replaceAllUsesWith(GAlias);
    }
    Globals[I]->eraseFromParent();
  }
}
void LowerBitSets::lowerBitSetCalls(
    ArrayRef<Metadata *> BitSets, Constant *CombinedGlobalAddr,
    const DenseMap<GlobalObject *, uint64_t> &GlobalLayout) {
  Constant *CombinedGlobalIntAddr =
      ConstantExpr::getPtrToInt(CombinedGlobalAddr, IntPtrTy);

  // For each bitset in this disjoint set...
  for (Metadata *BS : BitSets) {
    // Build the bitset.
    BitSetInfo BSI = buildBitSet(BS, GlobalLayout);
    DEBUG({
      if (auto BSS = dyn_cast<MDString>(BS))
        dbgs() << BSS->getString() << ": ";
      else
        dbgs() << "<unnamed>: ";
      BSI.print(dbgs());
    });

    ByteArrayInfo *BAI = nullptr;

    // Lower each call to llvm.bitset.test for this bitset.
    for (CallInst *CI : BitSetTestCallSites[BS]) {
      ++NumBitSetCallsLowered;
      Value *Lowered =
          lowerBitSetCall(CI, BSI, BAI, CombinedGlobalIntAddr, GlobalLayout);
      CI->replaceAllUsesWith(Lowered);
      CI->eraseFromParent();
    }
  }
}
void LowerBitSets::verifyBitSetMDNode(MDNode *Op) {
  if (Op->getNumOperands() != 3)
    report_fatal_error(
        "All operands of llvm.bitsets metadata must have 3 elements");
  if (!Op->getOperand(1))
    return;

  auto OpConstMD = dyn_cast<ConstantAsMetadata>(Op->getOperand(1));
  if (!OpConstMD)
    report_fatal_error("Bit set element must be a constant");
  auto OpGlobal = dyn_cast<GlobalObject>(OpConstMD->getValue());
  if (!OpGlobal)
    return;

  if (OpGlobal->isThreadLocal())
    report_fatal_error("Bit set element may not be thread-local");
  if (OpGlobal->hasSection())
    report_fatal_error("Bit set element may not have an explicit section");

  if (isa<GlobalVariable>(OpGlobal) && OpGlobal->isDeclarationForLinker())
    report_fatal_error("Bit set global var element must be a definition");

  auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Op->getOperand(2));
  if (!OffsetConstMD)
    report_fatal_error("Bit set element offset must be a constant");
  auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue());
  if (!OffsetInt)
    report_fatal_error("Bit set element offset must be an integer constant");
}
static const unsigned kX86JumpTableEntrySize = 8;

unsigned LowerBitSets::getJumpTableEntrySize() {
  if (Arch != Triple::x86 && Arch != Triple::x86_64)
    report_fatal_error("Unsupported architecture for jump tables");

  return kX86JumpTableEntrySize;
}
// Create a constant representing a jump table entry for the target. This
// consists of an instruction sequence containing a relative branch to Dest.
// The constant will be laid out at address Src+(Len*Distance) where Len is
// the target-specific jump table entry size.
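// On x86 and x86-64 each 8-byte entry is encoded as "e9 <rel32> cc cc cc":
// a jmp with a 32-bit pc-relative displacement padded with int3. The "+ 5"
// in the displacement computation below accounts for the rel32 being
// relative to the end of the 5-byte jmp instruction.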
Constant *LowerBitSets::createJumpTableEntry(GlobalObject *Src, Function *Dest,
                                             unsigned Distance) {
  if (Arch != Triple::x86 && Arch != Triple::x86_64)
    report_fatal_error("Unsupported architecture for jump tables");

  const unsigned kJmpPCRel32Code = 0xe9;
  const unsigned kInt3Code = 0xcc;

  ConstantInt *Jmp = ConstantInt::get(Int8Ty, kJmpPCRel32Code);

  // Build a constant representing the displacement between the constant's
  // address and Dest. This will resolve to a PC32 relocation referring to Dest.
  Constant *DestInt = ConstantExpr::getPtrToInt(Dest, IntPtrTy);
  Constant *SrcInt = ConstantExpr::getPtrToInt(Src, IntPtrTy);
  Constant *Disp = ConstantExpr::getSub(DestInt, SrcInt);
  ConstantInt *DispOffset =
      ConstantInt::get(IntPtrTy, Distance * kX86JumpTableEntrySize + 5);
  Constant *OffsetedDisp = ConstantExpr::getSub(Disp, DispOffset);
  OffsetedDisp = ConstantExpr::getTrunc(OffsetedDisp, Int32Ty);

  ConstantInt *Int3 = ConstantInt::get(Int8Ty, kInt3Code);

  Constant *Fields[] = {
      Jmp, OffsetedDisp, Int3, Int3, Int3,
  };
  return ConstantStruct::getAnon(Fields, /*Packed=*/true);
}
Type *LowerBitSets::getJumpTableEntryType() {
  if (Arch != Triple::x86 && Arch != Triple::x86_64)
    report_fatal_error("Unsupported architecture for jump tables");

  return StructType::get(M->getContext(),
                         {Int8Ty, Int32Ty, Int8Ty, Int8Ty, Int8Ty},
                         /*Packed=*/true);
}
/// Given a disjoint set of bitsets and functions, build a jump table for the
/// functions, build the bit sets and lower the llvm.bitset.test calls.
void LowerBitSets::buildBitSetsFromFunctions(ArrayRef<Metadata *> BitSets,
                                             ArrayRef<Function *> Functions) {
  // Unlike the global bitset builder, the function bitset builder cannot
  // re-arrange functions in a particular order and base its calculations on the
  // layout of the functions' entry points, as we have no idea how large a
  // particular function will end up being (the size could even depend on what
  // this pass does!). Instead, we build a jump table, which is a block of code
  // consisting of one branch instruction for each of the functions in the bit
  // set that branches to the target function, and redirect any taken function
  // addresses to the corresponding jump table entry. In the object file's
  // symbol table, the symbols for the target functions also refer to the jump
  // table entries, so that addresses taken outside the module will pass any
  // verification done inside the module.
  //
  // In more concrete terms, suppose we have three functions f, g, h which are
  // members of a single bitset, and a function foo that returns their
  // addresses:
  //
  // f:
  // mov 0, %eax
  // ret
  //
  // g:
  // mov 1, %eax
  // ret
  //
  // h:
  // mov 2, %eax
  // ret
  //
  // foo:
  // mov f, %eax
  // mov g, %edx
  // mov h, %ecx
  // ret
  //
  // To create a jump table for these functions, we instruct the LLVM code
  // generator to output a jump table in the .text section. This is done by
  // representing the instructions in the jump table as an LLVM constant and
  // placing them in a global variable in the .text section. The end result will
  // (conceptually) look like this:
  //
  // f:
  // jmp .Ltmp0 ; 5 bytes
  // int3       ; 1 byte
  // int3       ; 1 byte
  // int3       ; 1 byte
  //
  // g:
  // jmp .Ltmp1 ; 5 bytes
  // int3       ; 1 byte
  // int3       ; 1 byte
  // int3       ; 1 byte
  //
  // h:
  // jmp .Ltmp2 ; 5 bytes
  // int3       ; 1 byte
  // int3       ; 1 byte
  // int3       ; 1 byte
  //
  // .Ltmp0:
  // mov 0, %eax
  // ret
  //
  // .Ltmp1:
  // mov 1, %eax
  // ret
  //
  // .Ltmp2:
  // mov 2, %eax
  // ret
  //
  // foo:
  // mov f, %eax
  // mov g, %edx
  // mov h, %ecx
  // ret
  //
  // Because the addresses of f, g, h are evenly spaced at a power of 2, in the
  // normal case the check can be carried out using the same kind of simple
  // arithmetic that we normally use for globals.
  assert(!Functions.empty());

  // Build a simple layout based on the regular layout of jump tables.
  DenseMap<GlobalObject *, uint64_t> GlobalLayout;
  unsigned EntrySize = getJumpTableEntrySize();
  for (unsigned I = 0; I != Functions.size(); ++I)
    GlobalLayout[Functions[I]] = I * EntrySize;

  // Create a constant to hold the jump table.
  ArrayType *JumpTableType =
      ArrayType::get(getJumpTableEntryType(), Functions.size());
  auto JumpTable = new GlobalVariable(*M, JumpTableType,
                                      /*isConstant=*/true,
                                      GlobalValue::PrivateLinkage, nullptr);
  JumpTable->setSection(ObjectFormat == Triple::MachO
                            ? "__TEXT,__text,regular,pure_instructions"
                            : ".text");
  lowerBitSetCalls(BitSets, JumpTable, GlobalLayout);
  // Build aliases pointing to offsets into the jump table, and replace
  // references to the original functions with references to the aliases.
  for (unsigned I = 0; I != Functions.size(); ++I) {
    Constant *CombinedGlobalElemPtr = ConstantExpr::getBitCast(
        ConstantExpr::getGetElementPtr(
            JumpTableType, JumpTable,
            ArrayRef<Constant *>{ConstantInt::get(IntPtrTy, 0),
                                 ConstantInt::get(IntPtrTy, I)}),
        Functions[I]->getType());
    if (LinkerSubsectionsViaSymbols || Functions[I]->isDeclarationForLinker()) {
      Functions[I]->replaceAllUsesWith(CombinedGlobalElemPtr);
    } else {
      GlobalAlias *GAlias = GlobalAlias::create(
          Functions[I]->getType()->getElementType(),
          Functions[I]->getType()->getAddressSpace(),
          Functions[I]->getLinkage(), "", CombinedGlobalElemPtr, M);
      GAlias->setVisibility(Functions[I]->getVisibility());
      GAlias->takeName(Functions[I]);
      Functions[I]->replaceAllUsesWith(GAlias);
    }
    if (!Functions[I]->isDeclarationForLinker())
      Functions[I]->setLinkage(GlobalValue::PrivateLinkage);
  }

  // Build and set the jump table's initializer.
  std::vector<Constant *> JumpTableEntries;
  for (unsigned I = 0; I != Functions.size(); ++I)
    JumpTableEntries.push_back(
        createJumpTableEntry(JumpTable, Functions[I], I));
  JumpTable->setInitializer(
      ConstantArray::get(JumpTableType, JumpTableEntries));
}
void LowerBitSets::buildBitSetsFromDisjointSet(
    ArrayRef<Metadata *> BitSets, ArrayRef<GlobalObject *> Globals) {
  llvm::DenseMap<Metadata *, uint64_t> BitSetIndices;
  llvm::DenseMap<GlobalObject *, uint64_t> GlobalIndices;
  for (unsigned I = 0; I != BitSets.size(); ++I)
    BitSetIndices[BitSets[I]] = I;
  for (unsigned I = 0; I != Globals.size(); ++I)
    GlobalIndices[Globals[I]] = I;

  // For each bitset, build a set of indices that refer to globals referenced by
  // the bitset.
  std::vector<std::set<uint64_t>> BitSetMembers(BitSets.size());
  if (BitSetNM) {
    for (MDNode *Op : BitSetNM->operands()) {
      // Op = { bitset name, global, offset }
      if (!Op->getOperand(1))
        continue;
      auto I = BitSetIndices.find(Op->getOperand(0));
      if (I == BitSetIndices.end())
        continue;

      auto OpGlobal = dyn_cast<GlobalObject>(
          cast<ConstantAsMetadata>(Op->getOperand(1))->getValue());
      if (!OpGlobal)
        continue;
      BitSetMembers[I->second].insert(GlobalIndices[OpGlobal]);
    }
  }
  // Order the sets of indices by size. The GlobalLayoutBuilder works best
  // when given small index sets first.
  std::stable_sort(
      BitSetMembers.begin(), BitSetMembers.end(),
      [](const std::set<uint64_t> &O1, const std::set<uint64_t> &O2) {
        return O1.size() < O2.size();
      });

  // Create a GlobalLayoutBuilder and provide it with index sets as layout
  // fragments. The GlobalLayoutBuilder tries to lay out members of fragments as
  // close together as possible.
  GlobalLayoutBuilder GLB(Globals.size());
  for (auto &&MemSet : BitSetMembers)
    GLB.addFragment(MemSet);
  // Build the bitsets from this disjoint set.
  if (Globals.empty() || isa<GlobalVariable>(Globals[0])) {
    // Build a vector of global variables with the computed layout.
    std::vector<GlobalVariable *> OrderedGVs(Globals.size());
    auto OGI = OrderedGVs.begin();
    for (auto &&F : GLB.Fragments) {
      for (auto &&Offset : F) {
        auto GV = dyn_cast<GlobalVariable>(Globals[Offset]);
        if (!GV)
          report_fatal_error(
              "Bit set may not contain both global variables and functions");
        *OGI++ = GV;
      }
    }

    buildBitSetsFromGlobalVariables(BitSets, OrderedGVs);
  } else {
    // Build a vector of functions with the computed layout.
    std::vector<Function *> OrderedFns(Globals.size());
    auto OFI = OrderedFns.begin();
    for (auto &&F : GLB.Fragments) {
      for (auto &&Offset : F) {
        auto Fn = dyn_cast<Function>(Globals[Offset]);
        if (!Fn)
          report_fatal_error(
              "Bit set may not contain both global variables and functions");
        *OFI++ = Fn;
      }
    }

    buildBitSetsFromFunctions(BitSets, OrderedFns);
  }
}
/// Lower all bit sets in this module.
bool LowerBitSets::buildBitSets() {
  Function *BitSetTestFunc =
      M->getFunction(Intrinsic::getName(Intrinsic::bitset_test));
  if (!BitSetTestFunc)
    return false;

  // Equivalence class set containing bitsets and the globals they reference.
  // This is used to partition the set of bitsets in the module into disjoint
  // sets.
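  // For example (illustrative names), if bitset A contains {f, g} and
  // bitset B contains {g, h}, then A, B, f, g and h all fall into a
  // single disjoint set that must be laid out and lowered together.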
  typedef EquivalenceClasses<PointerUnion<GlobalObject *, Metadata *>>
      GlobalClassesTy;
  GlobalClassesTy GlobalClasses;

  // Verify the bitset metadata and build a mapping from bitset identifiers to
  // their last observed index in BitSetNM. This will be used later to
  // deterministically order the list of bitset identifiers.
  llvm::DenseMap<Metadata *, unsigned> BitSetIdIndices;
  if (BitSetNM) {
    for (unsigned I = 0, E = BitSetNM->getNumOperands(); I != E; ++I) {
      MDNode *Op = BitSetNM->getOperand(I);
      verifyBitSetMDNode(Op);
      BitSetIdIndices[Op->getOperand(0)] = I;
    }
  }
  for (const Use &U : BitSetTestFunc->uses()) {
    auto CI = cast<CallInst>(U.getUser());

    auto BitSetMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
    if (!BitSetMDVal)
      report_fatal_error(
          "Second argument of llvm.bitset.test must be metadata");
    auto BitSet = BitSetMDVal->getMetadata();

    // Add the call site to the list of call sites for this bit set. We also use
    // BitSetTestCallSites to keep track of whether we have seen this bit set
    // before. If we have, we don't need to re-add the referenced globals to the
    // equivalence class.
    std::pair<DenseMap<Metadata *, std::vector<CallInst *>>::iterator,
              bool> Ins =
        BitSetTestCallSites.insert(
            std::make_pair(BitSet, std::vector<CallInst *>()));
    Ins.first->second.push_back(CI);
    if (!Ins.second)
      continue;

    // Add the bitset to the equivalence class.
    GlobalClassesTy::iterator GCI = GlobalClasses.insert(BitSet);
    GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI);

    if (!BitSetNM)
      continue;

    // Add the referenced globals to the bitset's equivalence class.
    for (MDNode *Op : BitSetNM->operands()) {
      if (Op->getOperand(0) != BitSet || !Op->getOperand(1))
        continue;

      auto OpGlobal = dyn_cast<GlobalObject>(
          cast<ConstantAsMetadata>(Op->getOperand(1))->getValue());
      if (!OpGlobal)
        continue;

      CurSet = GlobalClasses.unionSets(
          CurSet, GlobalClasses.findLeader(GlobalClasses.insert(OpGlobal)));
    }
  }
  if (GlobalClasses.empty())
    return false;

  // Build a list of disjoint sets ordered by their maximum BitSetNM index
  // for determinism.
  std::vector<std::pair<GlobalClassesTy::iterator, unsigned>> Sets;
  for (GlobalClassesTy::iterator I = GlobalClasses.begin(),
                                 E = GlobalClasses.end();
       I != E; ++I) {
    if (!I->isLeader()) continue;
    ++NumBitSetDisjointSets;

    unsigned MaxIndex = 0;
    for (GlobalClassesTy::member_iterator MI = GlobalClasses.member_begin(I);
         MI != GlobalClasses.member_end(); ++MI) {
      if ((*MI).is<Metadata *>())
        MaxIndex = std::max(MaxIndex, BitSetIdIndices[MI->get<Metadata *>()]);
    }
    Sets.emplace_back(I, MaxIndex);
  }
  std::sort(Sets.begin(), Sets.end(),
            [](const std::pair<GlobalClassesTy::iterator, unsigned> &S1,
               const std::pair<GlobalClassesTy::iterator, unsigned> &S2) {
              return S1.second < S2.second;
            });
1016 for (const auto &S : Sets) {
1017 // Build the list of bitsets in this disjoint set.
1018 std::vector<Metadata *> BitSets;
1019 std::vector<GlobalObject *> Globals;
1020 for (GlobalClassesTy::member_iterator MI =
1021 GlobalClasses.member_begin(S.first);
1022 MI != GlobalClasses.member_end(); ++MI) {
1023 if ((*MI).is<Metadata *>())
1024 BitSets.push_back(MI->get<Metadata *>());
1026 Globals.push_back(MI->get<GlobalObject *>());
1029 // Order bitsets by BitSetNM index for determinism. This ordering is stable
1030 // as there is a one-to-one mapping between metadata and indices.
1031 std::sort(BitSets.begin(), BitSets.end(), [&](Metadata *M1, Metadata *M2) {
1032 return BitSetIdIndices[M1] < BitSetIdIndices[M2];
1035 // Lower the bitsets in this disjoint set.
1036 buildBitSetsFromDisjointSet(BitSets, Globals);
1039 allocateByteArrays();
bool LowerBitSets::eraseBitSetMetadata() {
  if (!BitSetNM)
    return false;

  M->eraseNamedMetadata(BitSetNM);
  return true;
}
bool LowerBitSets::runOnModule(Module &M) {
  bool Changed = buildBitSets();
  Changed |= eraseBitSetMetadata();
  return Changed;
}