Instruction::CastOps CastOp = Instruction::BitCast;
switch (DepVal->getType()->getTypeID()) {
case Type::IntegerTyID: {
- CastOp = Instruction::SExt;
+ assert(TargetIntegerType->getTypeID() == Type::IntegerTyID);
+ auto* FromType = dyn_cast<IntegerType>(DepVal->getType());
+ auto* ToType = dyn_cast<IntegerType>(TargetIntegerType);
+ assert(FromType && ToType);
+ // Widen with ZExt, narrow with Trunc (the taint is a value pattern, so
+ // sign extension is not wanted here).
+ // NOTE(review): equal bit widths also take the ZExt branch — confirm
+ // createCast()/IRBuilder folds a same-type cast into a no-op rather than
+ // emitting an invalid zext.
+ if (FromType->getBitWidth() <= ToType->getBitWidth()) {
+ CastOp = Instruction::ZExt;
+ } else {
+ CastOp = Instruction::Trunc;
+ }
break;
}
case Type::FloatTyID:
BBI++;
while (true) {
for (; BBI != BE; BBI++) {
- auto* Inst = dyn_cast<Instruction>(&*BBI);
- if (Inst == nullptr) {
- continue;
- }
- if (Inst->getOpcode() == Instruction::Store) {
+ Instruction* Inst = &*BBI;
+ // An AArch64 store-exclusive intrinsic writes memory like a store, so
+ // return it as the "first store" the caller is scanning for.
+ IntrinsicInst* II = dyn_cast<IntrinsicInst>(&*BBI);
+ if (II && II->getIntrinsicID() == Intrinsic::aarch64_stlxr) {
+ return II;
+ } else if (Inst->getOpcode() == Instruction::Store) {
return Inst;
} else if (Inst->getOpcode() == Instruction::Br) {
auto* BrInst = dyn_cast<BranchInst>(Inst);
}
}
-// XXX-comment: Returns whether the code has been changed.
-bool taintMonotonicLoads(const SmallVector<LoadInst*, 1>& MonotonicLoadInsts) {
- bool Changed = false;
- for (auto* LI : MonotonicLoadInsts) {
- SmallVector<BasicBlock*, 2> ChainedBB;
- auto* FirstInst = findFirstStoreCondBranchInst(LI, &ChainedBB);
- if (FirstInst == nullptr) {
- // We don't seem to be able to taint a following store/conditional branch
- // instruction. Simply make it acquire.
- DEBUG(dbgs() << "[RelaxedLoad]: Transformed to acquire load\n"
- << *LI << "\n");
- LI->setOrdering(Acquire);
- Changed = true;
+// XXX-update: Find the next node of the last relaxed load from 'FromInst' to
+// 'ToInst'. If none, return 'ToInst'.
+Instruction* findLastLoadNext(Instruction* FromInst, Instruction* ToInst) {
+ if (FromInst == ToInst) {
+ return ToInst;
+ }
+ Instruction* LastLoad = ToInst;
+ auto* BB = FromInst->getParent();
+ auto BE = BB->end();
+ auto BBI = BasicBlock::iterator(FromInst);
+ BBI++;
for (; BBI != BE && &*BBI != ToInst; BBI++) {
auto* LI = dyn_cast<LoadInst>(&*BBI);
if (LI == nullptr || !LI->isAtomic() || LI->getOrdering() != Monotonic) {
continue;
}
- // Taint 'FirstInst', which could be a store or a condition branch
- // instruction.
- if (FirstInst->getOpcode() == Instruction::Store) {
- Changed |= taintStoreAddress(dyn_cast<StoreInst>(FirstInst), LI);
- } else if (FirstInst->getOpcode() == Instruction::Br) {
- Changed |= taintConditionalBranch(dyn_cast<BranchInst>(FirstInst), LI);
- } else {
- assert(false && "findFirstStoreCondBranchInst() should return a "
- "store/condition branch instruction");
- }
+ // Remember the instruction immediately after the most recent monotonic
+ // load seen so far; if no such load exists, 'LastLoad' stays 'ToInst'.
+ LastLoad = LI;
+ LastLoad = LastLoad->getNextNode();
}
- return Changed;
+ return LastLoad;
}
// Inserts a fake conditional branch right after the instruction 'SplitInst',
Type* TargetIntegerType =
IntegerType::get(UsageInst->getContext(),
BB->getModule()->getDataLayout().getPointerSizeInBits());
- if (UsageInst->getType() == TargetIntegerType) {
- AndTarget = UsageInst;
- } else {
- IRBuilder<true, NoFolder> Builder(InsertPoint);
- AndTarget = createCast(Builder, UsageInst, TargetIntegerType);
- }
// Check whether InsertPoint is a added fake conditional branch.
BranchInst* BI = nullptr;
// Now we have a previously added fake cond branch.
auto* Op00 = Op0->getOperand(0);
IRBuilder<true, NoFolder> Builder(CmpInst);
+ // CreateAnd below requires both operands to have the same type, so cast
+ // the usage value to Op00's type (not the pointer-sized integer type).
+ if (Op00->getType() == UsageInst->getType()) {
+ AndTarget = UsageInst;
+ } else {
+ AndTarget = createCast(Builder, UsageInst, Op00->getType());
+ }
AndTarget = Builder.CreateAnd(Op00, AndTarget);
auto* AndZero = dyn_cast<Instruction>(Builder.CreateAnd(
AndTarget, Constant::getNullValue(AndTarget->getType())));
}
IRBuilder<true, NoFolder> Builder(InsertPoint);
+ // Any integer-typed value can be and-ed with a null constant of its own
+ // type, so only non-integer values need the cast to the pointer-sized
+ // integer type. NOTE(review): this accepts integers of any width — confirm
+ // downstream users do not assume 'AndTarget' is pointer-sized.
+ if (IntegerType::classof(UsageInst->getType())) {
+ AndTarget = UsageInst;
+ } else {
+ AndTarget = createCast(Builder, UsageInst, TargetIntegerType);
+ }
auto* AndZero = dyn_cast<Instruction>(
Builder.CreateAnd(AndTarget, Constant::getNullValue(AndTarget->getType())));
auto* FakeCondition = dyn_cast<Instruction>(Builder.CreateICmp(
continue;
}
} else {
- dbgs() << "FirstInst=" << *FirstInst << "\n";
- assert(false && "findFirstStoreCondBranchInst() should return a "
- "store/condition branch instruction");
+ // An AArch64 store-exclusive intrinsic is also a legitimate result of
+ // findFirstStoreCondBranchInst(); only report/assert on anything else.
+ IntrinsicInst* II = dyn_cast<IntrinsicInst>(FirstInst);
+ if (!II || II->getIntrinsicID() != Intrinsic::aarch64_stlxr) {
+ dbgs() << "FirstInst=" << *FirstInst << "\n";
+ assert(false && "findFirstStoreCondBranchInst() should return a "
+ "store/condition branch instruction");
+ }
}
}
- // We really need to process the relaxed load now.
- StoreInst* SI = nullptr;;
- if (FirstInst && (SI = dyn_cast<StoreInst>(FirstInst))) {
+ // We really need to process the relaxed load now. Note that if the next
+ // instruction is a RMW, it will be transformed into a control block, so we
+ // can safely only taint upcoming store instructions.
+ StoreInst* SI = nullptr;
+ IntrinsicInst* II = nullptr;
+ if (FirstInst) {
+ SI = dyn_cast<StoreInst>(FirstInst);
+ II = dyn_cast<IntrinsicInst>(FirstInst);
+ }
+ if (FirstInst && SI) {
// For immediately coming stores, taint the address of the store.
- if (SI->getParent() == LI->getParent() || DT->dominates(LI, SI)) {
-   TaintRelaxedLoads(LI, SI);
+ if (FirstInst->getParent() == LI->getParent() ||
+     DT->dominates(LI, FirstInst)) {
+ // FIX(review): was 'Changed != taintStoreAddress(SI, LI);' — a no-op
+ // comparison whose result was discarded (-Wunused-comparison). Use '|='
+ // to accumulate the flag, matching the sibling branch below.
+ Changed |= taintStoreAddress(SI, LI);
Changed = true;
} else {
auto* Inst =
LI->setOrdering(Acquire);
Changed = true;
} else {
- TaintRelaxedLoads(Inst, SI);
- Changed = true;
+ Changed |= taintStoreAddress(SI, Inst);
}
}
} else {
switch (I->getOpcode()) {
case Instruction::Load: {
auto* LI = dyn_cast<LoadInst>(&*I);
+ // Skip monotonic loads that are already followed by an acquiring RMW —
+ // those are handled elsewhere instead of being tainted here.
+ // NOTE(review): getHasSubsequentAcqlRMW() is a project-local LoadInst
+ // extension; confirm its semantics match this use.
+ if (LI->getOrdering() == Monotonic &&
+ !LI->getHasSubsequentAcqlRMW()) {
- if (LI->getOrdering() == Monotonic) {
MonotonicLoadInsts.insert(LI);
}
break;