/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
-/// side-effecting instructions. In practice, this looks through token
-/// factors and non-volatile loads. In order to remain efficient, this only
-/// looks a couple of nodes in, it does not do an exhaustive search.
+/// side-effecting instructions on any chain path. In practice, this looks
+/// through token factors and non-volatile loads. In order to remain efficient,
+/// this only looks a couple of nodes in, it does not do an exhaustive search.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
unsigned Depth) const {
if (*this == Dest) return true;
if (Depth == 0) return false;
// If this is a token factor, all inputs to the TF happen in parallel. If any
- // of the operands of the TF reach dest, then we can do the xform.
+ // of the operands of the TF does not reach dest, then we cannot do the xform.
if (getOpcode() == ISD::TokenFactor) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
- return true;
- return false;
+ if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
+ return false;
+ return true;
}
// Loads don't have side effects, look through them.
--- /dev/null
+; RUN: llc < %s -combiner-alias-analysis -march=x86-64 | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin10.4"
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+
+; Regression test for SDValue::reachesChainWithoutSideEffects on TokenFactor
+; nodes: two loads from a stack buffer are followed by a memcpy that clobbers
+; the whole buffer, then the loaded bytes are stored back.  The stores must
+; NOT be folded away as redundant — the intervening memcpy aliases %b and %c,
+; so the chain from each store to its load crosses a side effect.  Requiring
+; ALL TokenFactor operands to reach the dest (not just one) preserves them.
+define fastcc i32 @cli_magic_scandesc(i8* %in) nounwind ssp {
+entry:
+  %a = alloca [64 x i8]
+  ; %b and %c address bytes 0 and 30 of the buffer the memcpy overwrites.
+  %b = getelementptr inbounds [64 x i8]* %a, i64 0, i32 0
+  %c = getelementptr inbounds [64 x i8]* %a, i64 0, i32 30
+  %d = load i8* %b, align 8
+  %e = load i8* %c, align 8
+  %f = bitcast [64 x i8]* %a to i8*
+  ; Clobbers the entire 64-byte buffer, including the two bytes just loaded.
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %f, i8* %in, i64 64, i32 8, i1 false) nounwind
+  ; These stores restore the pre-memcpy bytes and must survive DAG combine
+  ; (checked by the movb CHECK lines below).
+  store i8 %d, i8* %b, align 8
+  store i8 %e, i8* %c, align 8
+  ret i32 0
+}
+
+; CHECK: movq ___stack_chk_guard@GOTPCREL(%rip), %rax
+; CHECK: movb (%rsp), %dl
+; CHECK-NEXT: movb 30(%rsp), %sil
+; CHECK: movb %dl, (%rsp)
+; CHECK-NEXT: movb %sil, 30(%rsp)
+; CHECK: callq ___stack_chk_fail