break;
}
case Instruction::PHI: {
- for (int i = 0; i < I->getNumOperands(); i++) {
+ for (unsigned i = 0; i < I->getNumOperands(); i++) {
auto* op = I->getOperand(i);
if (DepSet->count(op) == 0) {
recursivelyFindDependence(DepSet, I->getOperand(i),
// Is it a cast from an integer to a pointer type?
Instruction* OrAddress = nullptr;
Instruction* AndDep = nullptr;
- Instruction* CastToInt = nullptr;
- Value* ActualAddress = nullptr;
Constant* ZeroConst = nullptr;
const Instruction* CastToPtr = dyn_cast<Instruction>(CurrentAddress);
// store i32 1, i32* %5, align 4
bool taintStoreAddress(StoreInst* SI, Value* DepVal) {
// Set the insertion point right after the 'DepVal'.
- Instruction* Inst = nullptr;
IRBuilder<true, NoFolder> Builder(SI);
BasicBlock* BB = SI->getParent();
Value* Address = SI->getPointerOperand();
OldDep, createCast(Builder, DepVal, TargetIntegerType));
}
- auto* NewDepInst = dyn_cast<Instruction>(NewDep);
-
// Use the new AND instruction as the dependence
AndDep->setOperand(0, NewDep);
return true;
Value* PtrToIntCast = Builder.CreatePtrToInt(Address, TargetIntegerType);
Value* AndDepVal =
Builder.CreateAnd(CastDepToInt, ConstantInt::get(TargetIntegerType, 0));
- auto AndInst = dyn_cast<Instruction>(AndDepVal);
// XXX-comment: The original IR InstCombiner would change our 'and' instruction
// to a select, and then the back end would optimize the condition out. We
// attach a flag to instructions and set it here to inform the InstCombiner not
// to do so.
if (!FirstInst) {
return true;
}
- auto* BB = Inst->getParent();
auto BBI = Inst->getIterator();
BBI++;
while (true) {
continue;
}
- // We really need to process the relaxed load now.
+ // We really need to process the relaxed load now. First see if we can delay
+ // the tainting.
+ if (FirstInst) {
+ auto* FirstInstBBTerm = FirstInst->getParent()->getTerminator();
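+ // Walk forward from 'FirstInst' while the tainting can be delayed past the
+ // current instruction, stopping at the block terminator.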
+ while (FirstInst != FirstInstBBTerm) {
+ if (!CanDelayTainting(LI, FirstInst)) {
+ break;
+ }
+ FirstInst = FirstInst->getNextNode();
+ }
+ }
+
StoreInst* SI = nullptr;
IntrinsicInst* II = nullptr;
if (FirstInst) {
return CurrentAddress;
}
- Value* ActualAddress = nullptr;
auto* CastToInt = dyn_cast<Instruction>(OrAddress->getOperand(1));
if (CastToInt && CastToInt->getOpcode() == Instruction::PtrToInt) {
continue;
}
// Check condition B.
- Value* Cond = nullptr;
if (OrigAddress == CurrSI->getPointerOperand() ||
OrigAddress != UntaintedAddress || CurrSIDepCond == nullptr ||
!dependenceSetInclusion(CurrSI->getValueOperand(), CurrSIDepCond)) {
return dependenceSetInclusion(SI, Dep);
}
-// If 'LI' is a relaxed load, and it is immediately followed by a atomic
-// read-modify-write that has acq_rel parameter, we don't have to do anything
-// since the rmw serves as a natural barrier.
-void MarkRelaxedLoadBeforeAcqrelRMW(LoadInst* LI) {
+bool ValueDependOnValue(Value* Inst, Value* Dep) {
+ return dependenceSetInclusion(Inst, Dep);
+}
+
+// XXX-update: Checks whether the relaxed load 'LI' is followed by instructions
+// that naturally prevent it from being reordered across later stores.
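+// For example (illustrative IR, not taken from this patch), a monotonic load
+// followed by an acq_rel fetch_add needs no tainting, since the RMW already
+// keeps later stores ordered after it:
+//   %r = load atomic i32, i32* %x monotonic, align 4
+//   %old = atomicrmw add i32* %y, i32 1 acq_rel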
+bool HasSubsequentOrderingProtection(LoadInst* LI) {
auto* BB = LI->getParent();
- auto BBI = LI->getIterator();
- for (BBI++; BBI != BB->end(); BBI++) {
- Instruction* CurInst = &*BBI;
- if (!CurInst) {
- return;
+ auto* Term = BB->getTerminator();
+ for (auto Iter = BasicBlock::iterator(LI->getNextNode());
+ Iter != BB->end();) {
+ // Advance 'Iter' up front so that resetting it to a successor block's
+ // begin() below doesn't skip that block's first instruction.
+ Instruction* I = &*Iter++;
+
+ // Reaching the end of the block.
+ if (I == Term) {
+ auto* Branch = dyn_cast<BranchInst>(Term);
+ // The last instruction isn't a branch; end the analysis.
+ if (!Branch) {
+ return false;
+ }
+ if (Branch->isConditional()) {
+ if (ValueDependOnValue(Branch, LI)) {
+ // 'LI' is used in the conditional branch.
+ return true;
+ } else {
+ // Reached the end with a conditional branch that doesn't depend on
+ // 'LI'.
+ return false;
+ }
+ } else {
+ // Reached the end with an unconditional branch; keep going into the next
+ // block.
+ BB = BB->getSingleSuccessor();
+ Term = BB->getTerminator();
+ Iter = BB->begin();
+ continue;
+ }
+ }
+
+ // 'I' is a CAS whose expected (compare) value depends on 'LI'; in that case
+ // we don't need to taint 'LI'.
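+ // For example (illustrative IR, not from this patch):
+ // %old = load atomic i32, i32* %x monotonic, align 4
+ // %r = cmpxchg i32* %y, i32 %old, i32 42 acq_rel monotonic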
+ auto* CAS = dyn_cast<AtomicCmpXchgInst>(I);
+ if (CAS) {
+ if (ValueDependOnValue(CAS->getCompareOperand(), LI)) {
+ return true;
+ }
+ }
+
+ // A fetch_* operation (atomic RMW) with acquire-release or stronger semantics.
+ auto* RMW = dyn_cast<AtomicRMWInst>(I);
+ if (RMW) {
+ auto Order = RMW->getOrdering();
+ if (Order == AcquireRelease || Order == SequentiallyConsistent) {
+ return true;
+ }
}
- if (!CurInst->isAtomic()) {
+
+ // A load whose address depends on 'LI' prevents later stores from being
+ // reordered across 'LI'.
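+ // e.g. a load from a pointer that is computed from the value loaded by 'LI'.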
+ auto* LdInst = dyn_cast<LoadInst>(I);
+ if (LdInst) {
+ if (ValueDependOnValue(LdInst->getPointerOperand(), LI)) {
+ return true;
+ }
+ }
+
+ // Other side-effect-free instructions don't affect the ordering; skip them.
+ if (!I->mayHaveSideEffects()) {
continue;
}
- auto* RMW = dyn_cast<AtomicRMWInst>(CurInst);
- if (!RMW) {
- return;
+
+ // A store whose address depends on 'LI' also provides protection.
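+ // e.g. a store to a pointer computed from the result of 'LI'.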
+ auto* SI = dyn_cast<StoreInst>(I);
+ if (SI) {
+ if (ValueDependOnValue(SI->getPointerOperand(), LI)) {
+ return true;
+ }
}
- if (RMW->getOrdering() == AcquireRelease ||
- RMW->getOrdering() == SequentiallyConsistent) {
- LI->setHasSubsequentAcqlRMW(true);
+
+ // The following are store/store-like operations. They don't protect later
+ // stores from being reordered across 'LI', but the analysis can go on if
+ // they naturally can't be reordered across 'LI' themselves.
+ {
+ // Release (or stronger) store.
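+ // e.g. store atomic i32 1, i32* %p release, align 4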
+ if (SI) {
+ auto Order = SI->getOrdering();
+ if (Order == Release || Order == SequentiallyConsistent) {
+ continue;
+ }
+ }
+
+ // Release (or stronger) fetch_*.
+ if (RMW) {
+ auto Order = RMW->getOrdering();
+ if (Order == Release || Order == AcquireRelease ||
+ Order == SequentiallyConsistent) {
+ continue;
+ }
+ }
+
+ // The instruction naturally depends on 'LI'.
+ if (ValueDependOnValue(I, LI)) {
+ continue;
+ }
}
+ // Otherwise, we need to taint 'LI'.
+ // XXX-comment: It may be a good idea to delay the fake conditional branch
+ // down to this instruction.
+ return false;
}
+
+ // Just in case; the loop should never fall through without returning above.
+ return false;
}
+
+// XXX-update: Checks whether the tainting of the relaxed load 'LI' can be
+// delayed past instruction 'I'. This usually means 'I' itself already depends
+// on 'LI', or 'I' is a store/store-like atomic operation with release (or
+// stronger) semantics.
+bool CanDelayTainting(LoadInst* LI, Instruction* I) {
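+ // Never delay the tainting past the end of the basic block.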
+ if (I == I->getParent()->getTerminator()) {
+ return false;
+ }
+
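+ // Instructions without side effects are not reordering hazards, so the
+ // tainting can safely be delayed past them.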
+ if (!I->mayHaveSideEffects()) {
+ return true;
+ }
+
+ // The following are store/store-like operations. They don't protect later
+ // stores from being reordered across 'LI', but the tainting can still be
+ // delayed past them if they naturally can't be reordered across 'LI'
+ // themselves.
+
+ // Release (or stronger) store.
+ auto* SI = dyn_cast<StoreInst>(I);
+ if (SI) {
+ auto Order = SI->getOrdering();
+ if (Order == Release || Order == SequentiallyConsistent) {
+ return true;
+ }
+ }
+
+ // Release (or stronger) fetch_*.
+ auto* RMW = dyn_cast<AtomicRMWInst>(I);
+ if (RMW) {
+ auto Order = RMW->getOrdering();
+ if (Order == Release || Order == AcquireRelease ||
+ Order == SequentiallyConsistent) {
+ return true;
+ }
+ }
+
+ // The instruction naturally depends on 'LI'.
+ if (ValueDependOnValue(I, LI)) {
+ return true;
+ }
+
+ // Otherwise, be conservative and say no!
+ return false;
+}
} // namespace llvm