// NOTE(review): this file is a unified-diff fragment ('-'/'+' prefixed lines)
// of an LLVM pass, not plain C++. Annotations below describe the '+'
// (post-patch) state of each hunk; all original bytes are left untouched.
//
// Hunk 1: choose the cast opcode for converting DepVal's type to
// TargetIntegerType. Post-patch, integer-to-integer conversions pick
// ZExt (widening) or Trunc (narrowing) instead of the old unconditional SExt.
Instruction::CastOps CastOp = Instruction::BitCast;
switch (DepVal->getType()->getTypeID()) {
case Type::IntegerTyID: {
- CastOp = Instruction::SExt;
+ assert(TargetIntegerType->getTypeID() == Type::IntegerTyID);
+ auto* FromType = dyn_cast<IntegerType>(DepVal->getType());
+ auto* ToType = dyn_cast<IntegerType>(TargetIntegerType);
+ assert(FromType && ToType);
// NOTE(review): the '<=' below routes *equal* bit widths to ZExt; LLVM's IR
// verifier rejects a zext to the same width, so presumably callers only
// reach this with differing widths (or BitCast should be kept for the
// equal case) — TODO confirm against the callers of this snippet.
+ if (FromType->getBitWidth() <= ToType->getBitWidth()) {
+ CastOp = Instruction::ZExt;
+ } else {
+ CastOp = Instruction::Trunc;
+ }
break;
}
case Type::FloatTyID:
// NOTE(review): hunk boundary — the lines below belong to a different
// function (a forward scan for the first store/branch), not to the
// FloatTyID case; the diff fragment is truncated here.
BBI++;
while (true) {
for (; BBI != BE; BBI++) {
- auto* Inst = dyn_cast<Instruction>(&*BBI);
- if (Inst == nullptr) {
- continue;
- }
- if (Inst->getOpcode() == Instruction::Store) {
// Post-patch: an aarch64 store-exclusive intrinsic (stlxr) now also
// terminates the scan, in addition to plain stores.
+ Instruction* Inst = &*BBI;
+ IntrinsicInst* II = dyn_cast<IntrinsicInst>(&*BBI);
+ if (II && II->getIntrinsicID() == Intrinsic::aarch64_stlxr) {
+ return II;
+ } else if (Inst->getOpcode() == Instruction::Store) {
return Inst;
} else if (Inst->getOpcode() == Instruction::Br) {
// NOTE(review): 'BrInst' is assigned but never used in the visible
// span — either dead, or the hunk is cut before its use; verify.
auto* BrInst = dyn_cast<BranchInst>(Inst);
}
}
// NOTE(review): this hunk deletes the old taintMonotonicLoads() and adds
// findLastLoadNext(). Annotations below refer to the '+' (post-patch) code.
-// XXX-comment: Returns whether the code has been changed.
-bool taintMonotonicLoads(const SmallVector<LoadInst*, 1>& MonotonicLoadInsts) {
- bool Changed = false;
- for (auto* LI : MonotonicLoadInsts) {
- SmallVector<BasicBlock*, 2> ChainedBB;
- auto* FirstInst = findFirstStoreCondBranchInst(LI, &ChainedBB);
- if (FirstInst == nullptr) {
- // We don't seem to be able to taint a following store/conditional branch
- // instruction. Simply make it acquire.
- DEBUG(dbgs() << "[RelaxedLoad]: Transformed to acquire load\n"
- << *LI << "\n");
- LI->setOrdering(Acquire);
- Changed = true;
+// XXX-update: Find the next node of the last relaxed load from 'FromInst' to
+// 'ToInst'. If none, return 'ToInst'.
+Instruction* findLastLoadNext(Instruction* FromInst, Instruction* ToInst) {
// Degenerate range: nothing strictly between FromInst and ToInst.
+ if (FromInst == ToInst) {
+ return ToInst;
+ }
+ Instruction* LastLoad = ToInst;
// Scan forward within FromInst's block, starting just after FromInst and
// stopping at the block end or at ToInst, whichever comes first.
// NOTE(review): assumes ToInst is in the same basic block as FromInst (or
// unreachable from it, in which case the scan ends at the terminator) —
// TODO confirm with callers.
+ auto* BB = FromInst->getParent();
+ auto BE = BB->end();
+ auto BBI = BasicBlock::iterator(FromInst);
+ BBI++;
+ for (; BBI != BE && &*BBI != ToInst; BBI++) {
// Only monotonic (relaxed) atomic loads are of interest.
auto* LI = dyn_cast<LoadInst>(&*BBI);
if (LI == nullptr || !LI->isAtomic() || LI->getOrdering() != Monotonic) {
continue;
}
- // Taint 'FirstInst', which could be a store or a condition branch
- // instruction.
- if (FirstInst->getOpcode() == Instruction::Store) {
- Changed |= taintStoreAddress(dyn_cast<StoreInst>(FirstInst), LI);
- } else if (FirstInst->getOpcode() == Instruction::Br) {
- Changed |= taintConditionalBranch(dyn_cast<BranchInst>(FirstInst), LI);
- } else {
- assert(false && "findFirstStoreCondBranchInst() should return a "
- "store/condition branch instruction");
- }
// NOTE(review): the two added statements below could be folded into
// 'LastLoad = LI->getNextNode();' — the intermediate assignment is
// redundant. getNextNode() is non-null here because a load is never the
// block terminator.
+ LastLoad = LI;
+ LastLoad = LastLoad->getNextNode();
}
- return Changed;
+ return LastLoad;
}
// Inserts a fake conditional branch right after the instruction 'SplitInst',
// NOTE(review): stray closing brace below — hunk boundary between two
// functions; the comment above belongs to a definition not visible here.
}
// Returns true if the code is changed, and false otherwise.
// NOTE(review): post-patch, TaintRelaxedLoads gains an explicit insertion
// point; passing nullptr falls back to the old behavior (insert right after
// UsageInst). The hunk is truncated mid-statement at its end.
-void TaintRelaxedLoads(Instruction* UsageInst) {
+void TaintRelaxedLoads(Instruction* UsageInst, Instruction* InsertPoint) {
// For better performance, we can add a "AND X 0" instruction before the
// condition.
auto* BB = UsageInst->getParent();
- auto* InsertPoint = UsageInst->getNextNode();
- IRBuilder<true, NoFolder> Builder(InsertPoint);
+ if (InsertPoint == nullptr) {
// NOTE(review): assumes UsageInst is not the block terminator, so
// getNextNode() is non-null — TODO confirm at call sites.
+ InsertPoint = UsageInst->getNextNode();
+ }
+ // Insert instructions after PHI nodes.
// NOTE(review): 'dyn_cast' is used purely as a predicate here;
// 'isa<PHINode>(InsertPoint)' is the idiomatic LLVM form. A PHI is never
// the last instruction of a block, so the loop cannot walk off the end.
+ while (dyn_cast<PHINode>(InsertPoint)) {
+ InsertPoint = InsertPoint->getNextNode();
+ }
// First thing is to cast 'UsageInst' to an integer type if necessary.
Value* AndTarget = nullptr;
// Pointer-sized integer type used for casts of non-integer usage values.
+ Type* TargetIntegerType =
+ IntegerType::get(UsageInst->getContext(),
+ BB->getModule()->getDataLayout().getPointerSizeInBits());
+
+ // Check whether InsertPoint is a added fake conditional branch.
+ BranchInst* BI = nullptr;
// NOTE(review): assignment-in-condition below is deliberate — BI is reused
// inside the body.
+ if ((BI = dyn_cast<BranchInst>(InsertPoint)) && BI->isConditional()) {
+ auto* Cond = dyn_cast<Instruction>(BI->getOperand(0));
+ if (Cond && Cond->getOpcode() == Instruction::ICmp) {
+ auto* CmpInst = dyn_cast<ICmpInst>(Cond);
+ auto* Op0 = dyn_cast<Instruction>(Cond->getOperand(0));
+ auto* Op1 = dyn_cast<ConstantInt>(Cond->getOperand(1));
+ // %tmp = And X, 0
+ // %cmp = ICMP_NE %tmp, 0
+ // Br %cmp
+ // =>
+ // %tmp1 = And X, NewTaintedVal
+ // %tmp2 = And %tmp1, 0
+ // %cmp = ICMP_NE %tmp2, 0
+ // Br %cmp
// If a previously inserted fake branch is found, fold the new tainted
// value into its existing and-with-zero condition instead of emitting a
// second fake branch.
+ if (CmpInst && CmpInst->getPredicate() == CmpInst::ICMP_NE && Op0 &&
+ Op0->getOpcode() == Instruction::And && Op1 && Op1->isZero()) {
+ auto* Op01 = dyn_cast<ConstantInt>(Op0->getOperand(1));
+ if (Op01 && Op01->isZero()) {
+ // Now we have a previously added fake cond branch.
+ auto* Op00 = Op0->getOperand(0);
+ IRBuilder<true, NoFolder> Builder(CmpInst);
+ if (Op00->getType() == UsageInst->getType()) {
+ AndTarget = UsageInst;
+ } else {
+ AndTarget = createCast(Builder, UsageInst, Op00->getType());
+ }
+ AndTarget = Builder.CreateAnd(Op00, AndTarget);
+ auto* AndZero = dyn_cast<Instruction>(Builder.CreateAnd(
+ AndTarget, Constant::getNullValue(AndTarget->getType())));
+ CmpInst->setOperand(0, AndZero);
+ return;
+ }
+ }
+ }
+ }
+
+ IRBuilder<true, NoFolder> Builder(InsertPoint);
if (IntegerType::classof(UsageInst->getType())) {
AndTarget = UsageInst;
} else {
- Type* TargetIntegerType = IntegerType::get(
- UsageInst->getContext(),
- BB->getModule()->getDataLayout().getPointerSizeInBits());
AndTarget = createCast(Builder, UsageInst, TargetIntegerType);
}
-
auto* AndZero = dyn_cast<Instruction>(
Builder.CreateAnd(AndTarget, Constant::getNullValue(AndTarget->getType())));
// NOTE(review): hunk truncated below — the CreateICmp call is cut off
// mid-expression; the remainder of this function is not visible here.
auto* FakeCondition = dyn_cast<Instruction>(Builder.CreateICmp(
// XXX-comment: Returns whether the code has been changed.
// NOTE(review): several disjoint hunks of this function are spliced together
// below (braces do not balance in the visible span, e.g. the
// 'if (FirstInst != nullptr) { continue; } } else {' splice) — annotations
// refer to the '+' (post-patch) state only.
bool AddFakeConditionalBranchAfterMonotonicLoads(
- const SmallVector<LoadInst*, 1>& MonotonicLoadInsts, DominatorTree* DT) {
+ SmallSet<LoadInst*, 1>& MonotonicLoadInsts, DominatorTree* DT) {
bool Changed = false;
// Post-patch: the worklist is consumed destructively (pop from a SmallSet)
// instead of iterated, presumably because processing can add/remove loads.
- for (auto* LI : MonotonicLoadInsts) {
+ while (!MonotonicLoadInsts.empty()) {
+ auto* LI = *MonotonicLoadInsts.begin();
+ MonotonicLoadInsts.erase(LI);
SmallVector<BasicBlock*, 2> ChainedBB;
auto* FirstInst = findFirstStoreCondBranchInst(LI, &ChainedBB);
if (FirstInst != nullptr) {
continue;
}
} else {
// Post-patch: an aarch64_stlxr intrinsic is now an acceptable result of
// findFirstStoreCondBranchInst(), so it is excluded from the assert.
+ IntrinsicInst* II = dyn_cast<IntrinsicInst>(FirstInst);
+ if (!II || II->getIntrinsicID() != Intrinsic::aarch64_stlxr) {
+ dbgs() << "FirstInst=" << *FirstInst << "\n";
+ assert(false && "findFirstStoreCondBranchInst() should return a "
+ "store/condition branch instruction");
+ }
}
}
- // We really need to process the relaxed load now.
- StoreInst* SI = nullptr;;
- if (FirstInst && (SI = dyn_cast<StoreInst>(FirstInst))) {
+ // We really need to process the relaxed load now. Note that if the next
+ // instruction is a RMW, it will be transformed into a control block, so we
+ // can safely only taint upcoming store instructions.
+ StoreInst* SI = nullptr;
// NOTE(review): 'II' below is assigned but not used in the visible span —
// possibly used in a part of the hunk that is cut off; verify.
+ IntrinsicInst* II = nullptr;
+ if (FirstInst) {
+ SI = dyn_cast<StoreInst>(FirstInst);
+ II = dyn_cast<IntrinsicInst>(FirstInst);
+ }
+ if (FirstInst && SI) {
// For immediately coming stores, taint the address of the store.
+ if (FirstInst->getParent() == LI->getParent() ||
+ DT->dominates(LI, FirstInst)) {
// BUG(review): 'Changed != taintStoreAddress(SI, LI);' computes an
// inequality and discards it — the removed line above used '|=', so this
// is almost certainly a typo for 'Changed |= taintStoreAddress(SI, LI);'.
// The unconditional 'Changed = true;' on the next line masks the lost
// result (and over-reports changes when tainting did nothing).
- if (SI->getParent() == LI->getParent() || DT->dominates(LI, SI)) {
- Changed |= taintStoreAddress(SI, LI);
+ Changed != taintStoreAddress(SI, LI);
+ Changed = true;
} else {
auto* Inst =
findMostRecentDependenceUsage(LI, FirstInst, &ChainedBB, DT);
} else {
// No upcoming branch
if (!FirstInst) {
// Post-patch: TaintRelaxedLoads now takes an explicit insert point
// (nullptr = right after the load; FirstInst = before the branch).
- TaintRelaxedLoads(LI);
+ TaintRelaxedLoads(LI, nullptr);
Changed = true;
} else {
// For immediately coming branch, directly add a fake branch.
if (FirstInst->getParent() == LI->getParent() ||
DT->dominates(LI, FirstInst)) {
- TaintRelaxedLoads(LI);
+ TaintRelaxedLoads(LI, FirstInst);
Changed = true;
} else {
auto* Inst =
findMostRecentDependenceUsage(LI, FirstInst, &ChainedBB, DT);
if (Inst) {
- TaintRelaxedLoads(Inst);
+ TaintRelaxedLoads(Inst, FirstInst);
} else {
// Fallback: cannot find a dependence usage to taint; upgrade the
// load's ordering to acquire instead.
LI->setOrdering(Acquire);
}
// XXX-comment: Delay dealing with relaxed loads in this function to avoid
// further changes done by other passes (e.g., SimplifyCFG).
// Collect all the relaxed loads.
// NOTE(review): fragment of an enclosing function whose header is outside
// this view; the for/switch below are not closed in the visible span.
// Post-patch the worklist becomes a SmallSet (matching the destructive
// consumption in AddFakeConditionalBranchAfterMonotonicLoads).
- SmallVector<LoadInst*, 1> MonotonicLoadInsts;
+ SmallSet<LoadInst*, 1> MonotonicLoadInsts;
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
if (I->isAtomic()) {
switch (I->getOpcode()) {
case Instruction::Load: {
auto* LI = dyn_cast<LoadInst>(&*I);
// NOTE(review): 'getHasSubsequentAcqlRMW()' is not an upstream LLVM
// LoadInst API — presumably added by this fork to skip relaxed loads
// already followed by an acquiring RMW; verify against the fork's
// Instructions.h.
- if (LI->getOrdering() == Monotonic) {
- MonotonicLoadInsts.push_back(LI);
+ if (LI->getOrdering() == Monotonic &&
+ !LI->getHasSubsequentAcqlRMW()) {
+ MonotonicLoadInsts.insert(LI);
}
break;
}