; RUN: llc < %s -march=nvptx -mcpu=sm_35 | FileCheck %s

; Verify that the NVPTXLowerAggrCopies pass works as expected - calls to
; llvm.mem* intrinsics get lowered to loops.

; Old-style (pre-LLVM 7) 5-arg intrinsic signatures: explicit i32 alignment
; operand rather than an align attribute on the pointer arguments.
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1
define i8* @memcpy_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false)
  ret i8* %dst

; Expect a byte-wise copy loop: one-byte load/store, a counter increment,
; a compare against the trip count, and a conditional back-branch.
; CHECK-LABEL: .visible .func (.param .b32 func_retval0) memcpy_caller
; CHECK: LBB[[LABEL:[_0-9]+]]:
; CHECK:      ld.u8 %rs[[REG:[0-9]+]]
; CHECK:      st.u8 [%r{{[0-9]+}}], %rs[[REG]]
; CHECK:      add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
; CHECK-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; CHECK-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
}
define i8* @memcpy_volatile_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 true)
  ret i8* %dst

; Same copy loop as above, but the volatile flag (last i1 operand = true)
; must propagate to volatile PTX load/store instructions.
; CHECK-LABEL: .visible .func (.param .b32 func_retval0) memcpy_volatile_caller
; CHECK: LBB[[LABEL:[_0-9]+]]:
; CHECK:      ld.volatile.u8 %rs[[REG:[0-9]+]]
; CHECK:      st.volatile.u8 [%r{{[0-9]+}}], %rs[[REG]]
; CHECK:      add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
; CHECK-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; CHECK-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
}
define i8* @memset_caller(i8* %dst, i32 %c, i64 %n) #0 {
entry:
  %0 = trunc i32 %c to i8
  tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i32 1, i1 false)
  ret i8* %dst

; Expect a store loop: the fill byte is loaded from the parameter once
; (outside the loop), then stored one byte per iteration.
; CHECK-LABEL: .visible .func (.param .b32 func_retval0) memset_caller(
; CHECK:      ld.param.u8 %rs[[REG:[0-9]+]]
; CHECK:      LBB[[LABEL:[_0-9]+]]:
; CHECK:      st.u8 [%r{{[0-9]+}}], %rs[[REG]]
; CHECK:      add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
; CHECK-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
; CHECK-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
}