From fc1604ec7274e9b724ff9b2512c288a691167427 Mon Sep 17 00:00:00 2001
From: Arnold Schwaighofer
Date: Wed, 16 Oct 2013 17:52:40 +0000
Subject: [PATCH] SLPVectorizer: Don't vectorize volatile memory operations

radar://15231682

Reapply r192799;
    http://lab.llvm.org:8011/builders/lldb-x86_64-debian-clang/builds/8226
showed that the bot is still broken even with this change backed out.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192820 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Transforms/Vectorize/SLPVectorizer.cpp    | 11 +++--
 test/Transforms/SLPVectorizer/X86/simplebb.ll | 43 +++++++++++++++++++
 2 files changed, 51 insertions(+), 3 deletions(-)

diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index af1c0e74236..4d82bc428db 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -786,13 +786,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value*> VL, unsigned Depth) {
     }
     case Instruction::Load: {
       // Check if the loads are consecutive or if we need to swizzle them.
-      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
-        if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
+      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
+        LoadInst *L = cast<LoadInst>(VL[i]);
+        if (!L->isSimple() || !isConsecutiveAccess(VL[i], VL[i + 1])) {
           newTreeEntry(VL, false);
           DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
           return;
         }
-
+      }
       newTreeEntry(VL, true);
       DEBUG(dbgs() << "SLP: added a vector of loads.\n");
       return;
@@ -1911,6 +1912,10 @@ unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
     if (!SI)
       continue;
 
+    // Don't touch volatile stores.
+    if (!SI->isSimple())
+      continue;
+
     // Check that the pointer points to scalars.
     Type *Ty = SI->getValueOperand()->getType();
     if (Ty->isAggregateType() || Ty->isVectorTy())
diff --git a/test/Transforms/SLPVectorizer/X86/simplebb.ll b/test/Transforms/SLPVectorizer/X86/simplebb.ll
index 0b76bec07bd..7d682e5e467 100644
--- a/test/Transforms/SLPVectorizer/X86/simplebb.ll
+++ b/test/Transforms/SLPVectorizer/X86/simplebb.ll
@@ -44,3 +44,46 @@ entry:
   store double %mul5, double* %arrayidx5, align 8
   ret void
 }
+
+; Don't vectorize volatile loads.
+; CHECK: test_volatile_load
+; CHECK-NOT: load <2 x double>
+; CHECK: store <2 x double>
+; CHECK: ret
+define void @test_volatile_load(double* %a, double* %b, double* %c) {
+entry:
+  %i0 = load volatile double* %a, align 8
+  %i1 = load volatile double* %b, align 8
+  %mul = fmul double %i0, %i1
+  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %i3 = load double* %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %i4 = load double* %arrayidx4, align 8
+  %mul5 = fmul double %i3, %i4
+  store double %mul, double* %c, align 8
+  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  store double %mul5, double* %arrayidx5, align 8
+  ret void
+}
+
+; Don't vectorize volatile stores.
+; CHECK: test_volatile_store
+; CHECK-NOT: store <2 x double>
+; CHECK: ret
+define void @test_volatile_store(double* %a, double* %b, double* %c) {
+entry:
+  %i0 = load double* %a, align 8
+  %i1 = load double* %b, align 8
+  %mul = fmul double %i0, %i1
+  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %i3 = load double* %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %i4 = load double* %arrayidx4, align 8
+  %mul5 = fmul double %i3, %i4
+  store volatile double %mul, double* %c, align 8
+  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  store volatile double %mul5, double* %arrayidx5, align 8
+  ret void
+}
+
+
-- 
2.34.1
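
For readers outside the LLVM tree: both hunks hinge on LoadInst::isSimple() and
StoreInst::isSimple(), which return true only for accesses that are neither
volatile nor atomic. Below is a minimal sketch of that guard as a standalone
predicate; isVectorizableMemOp is a hypothetical name for illustration, not
part of the patch, and it assumes LLVM's headers are available.

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper, not part of the patch: true if I is a load or store
// the SLP vectorizer may widen under the rule this change enforces.
static bool isVectorizableMemOp(const Instruction *I) {
  // isSimple() is false for volatile or atomic accesses; those must stay
  // scalar, since widening them would drop ordering and side-effect
  // guarantees the IR promises.
  if (const LoadInst *L = dyn_cast<LoadInst>(I))
    return L->isSimple();
  if (const StoreInst *S = dyn_cast<StoreInst>(I))
    return S->isSimple();
  return false; // not a memory access handled here
}

This is why the fix lands in two places: buildTree_rec rejects non-simple
loads when it tries to form a vector of loads, and collectStores skips
non-simple stores before they ever become vectorization seeds.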