ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
if (CopySize == 0 || M->isVolatile()) return false;
+ // If the source and destination of the memcpy are the same, then zap it.
+ if (M->getSource() == M->getDest()) {
+ MD->removeInstruction(M);
+ M->eraseFromParent();
+ return false;
+ }
+
+
// The are two possible optimizations we can do for memcpy:
// a) memcpy-memcpy xform which exposes redundance for DSE.
// b) call-memcpy xform for return slot optimization.
return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
- bool changed = performCallSlotOptzn(M, M->getDest(), M->getSource(),
- CopySize->getZExtValue(), C);
- if (changed) M->eraseFromParent();
- return changed;
+ if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
+ CopySize->getZExtValue(), C)) {
+ M->eraseFromParent();
+ return true;
+ }
}
return false;
}
; CHECK: store i8 4
; CHECK: call void @test5a(%struct.S* byval align 16 %y)
}
+
+;; Noop memcpy should be zapped.
+define void @test6(i8 *%P) {
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %P, i64 8, i32 4, i1 false)
+ ret void
+; CHECK: @test6
+; CHECK-NEXT: ret void
+}
+