if (VT.bitsGT(LVT))
VT = LVT;
}
+
+ // If we're optimizing for size, and there is a limit, bump the maximum number
+ // of operations inserted down to 4. This is a wild guess that approximates
+ // the size of a call to memcpy or memset (3 arguments + call).
+ if (Limit != ~0U) {
+ const Function *F = DAG.getMachineFunction().getFunction();
+ if (F->hasFnAttr(Attribute::OptimizeForSize))
+ Limit = 4;
+ }
unsigned NumMemOps = 0;
while (Size != 0) {
; CHECK: memcpy
}
+; Large constant memcpy's should lower to a call when optimizing for size.
+; PR6623
+; NOTE(review): per the lowering change in this commit, optsize caps the
+; inline-op limit at 4, so a 64-byte copy exceeds the budget and remains a
+; libcall — hence the test only checks that "memcpy" appears.
+define void @test3(i8* nocapture %A, i8* nocapture %B) nounwind optsize noredzone {
+entry:
+  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false)
+  ret void
+; CHECK: test3:
+; CHECK: memcpy
+}
+
+; Large constant memcpy's should be inlined when not optimizing for size.
+; NOTE(review): without optsize the new Limit = 4 cap does not apply, so the
+; 64-byte copy is expanded inline; the CHECK lines below expect a sequence of
+; 8-byte movq instructions (12 are checked — presumably load/store pairs;
+; confirm the exact expected count against the x86 lowering).
+define void @test4(i8* nocapture %A, i8* nocapture %B) nounwind noredzone {
+entry:
+  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false)
+  ret void
+; CHECK: test4:
+; CHECK: movq
+; CHECK: movq
+; CHECK: movq
+; CHECK: movq
+; CHECK: movq
+; CHECK: movq
+; CHECK: movq
+; CHECK: movq
+; CHECK: movq
+; CHECK: movq
+; CHECK: movq
+; CHECK: movq
+}
+