; RUN: opt < %s -sroa -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
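; Note: in this datalayout, pointers and f64 have a 64-bit (8-byte) ABI
; alignment, which several of the alignment expectations below rely on.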

declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
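; The operands of this old-style memcpy intrinsic are (dest, src, length,
; alignment, isvolatile); the i32 alignment argument is what these tests vary.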

define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
; CHECK-LABEL: @test1(
; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 0
; CHECK: %[[a0:.*]] = load i8* %[[gep_a0]], align 16
; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 1
; CHECK: %[[a1:.*]] = load i8* %[[gep_a1]], align 1
; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 0
; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 1
; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1

entry:
  %alloca = alloca { i8, i8 }, align 16
  %gep_a = getelementptr { i8, i8 }* %a, i32 0, i32 0
  %gep_alloca = getelementptr { i8, i8 }* %alloca, i32 0, i32 0
  %gep_b = getelementptr { i8, i8 }* %b, i32 0, i32 0

  store i8 420, i8* %gep_alloca, align 16

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_alloca, i8* %gep_a, i32 2, i32 16, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_b, i8* %gep_alloca, i32 2, i32 16, i1 false)
  ret void
}

define void @test2() {
; CHECK-LABEL: @test2(
; CHECK: load i8* %{{.*}}
; CHECK: store i8 42, i8* %{{.*}}

entry:
  %a = alloca { i8, i8, i8, i8 }, align 2
  %gep1 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 1
  %cast1 = bitcast i8* %gep1 to i16*
  store volatile i16 0, i16* %cast1
  %gep2 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 2
  %result = load i8* %gep2
  store i8 42, i8* %gep2
  ret void
}

define void @PR13920(<2 x i64>* %a, i16* %b) {
; Test that alignments on memcpy intrinsics get propagated to loads and stores.
; CHECK-LABEL: @PR13920(
; CHECK: load <2 x i64>* %a, align 2
; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2

entry:
  %aa = alloca <2 x i64>, align 16
  %aptr = bitcast <2 x i64>* %a to i8*
  %aaptr = bitcast <2 x i64>* %aa to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %aaptr, i8* %aptr, i32 16, i32 2, i1 false)
  %bptr = bitcast i16* %b to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %bptr, i8* %aaptr, i32 16, i32 2, i1 false)
  ret void
}

define void @test3(i8* %x) {
; Test that when we promote an alloca to a type with lower ABI alignment, we
; provide the needed explicit alignment that code using the alloca may be
; expecting. However, also check that any offset within an alloca can in turn
; reduce the alignment.
; CHECK-LABEL: @test3(
; CHECK: alloca [22 x i8], align 8
; CHECK: alloca [18 x i8], align 2

entry:
  %a = alloca { i8*, i8*, i8* }
  %b = alloca { i8*, i8*, i8* }
  %a_raw = bitcast { i8*, i8*, i8* }* %a to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a_raw, i8* %x, i32 22, i32 8, i1 false)
  %b_raw = bitcast { i8*, i8*, i8* }* %b to i8*
  %b_gep = getelementptr i8* %b_raw, i32 6
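  ; The copy lands at offset 6 inside an 8-byte-aligned alloca, so only 2-byte
  ; alignment can be guaranteed for it (hence the "align 2" expected above).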
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_gep, i8* %x, i32 18, i32 2, i1 false)
  ret void
}

define void @test5() {
; Test that we preserve underaligned loads and stores when splitting.
; CHECK-LABEL: @test5(
; CHECK: alloca [9 x i8]
; CHECK: alloca [9 x i8]
; CHECK: store volatile double 0.0{{.*}}, double* %{{.*}}, align 1
; CHECK: load i16* %{{.*}}, align 1
; CHECK: load double* %{{.*}}, align 1
; CHECK: store volatile double %{{.*}}, double* %{{.*}}, align 1
; CHECK: load i16* %{{.*}}, align 1

entry:
  %a = alloca [18 x i8]
  %raw1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  store volatile double 0.0, double* %ptr1, align 1
  %weird_gep1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 7
  %weird_cast1 = bitcast i8* %weird_gep1 to i16*
  %weird_load1 = load i16* %weird_cast1, align 1

  %raw2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 9
  %ptr2 = bitcast i8* %raw2 to double*
  %d1 = load double* %ptr1, align 1
  store volatile double %d1, double* %ptr2, align 1
  %weird_gep2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 16
  %weird_cast2 = bitcast i8* %weird_gep2 to i16*
  %weird_load2 = load i16* %weird_cast2, align 1
  ret void
}

define void @test6() {
; Test that we promote alignment when the underlying alloca switches to one
; that innately provides it.
; CHECK-LABEL: @test6(
; CHECK: alloca double
; CHECK: alloca double
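; The [16 x i8] alloca splits into two double allocas whose ABI alignment of 8
; (from the datalayout above) already provides what the align-1 accesses need.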

entry:
  %a = alloca [16 x i8]
  %raw1 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  store volatile double 0.0, double* %ptr1, align 1

  %raw2 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 8
  %ptr2 = bitcast i8* %raw2 to double*
  %val = load double* %ptr1, align 1
  store volatile double %val, double* %ptr2, align 1
  ret void
}

define void @test7(i8* %out) {
; Test that we properly compute the destination alignment when rewriting
; memcpys as direct loads or stores.
; CHECK-LABEL: @test7(

entry:
  %a = alloca [16 x i8]
  %raw1 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  %raw2 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 8
  %ptr2 = bitcast i8* %raw2 to double*
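  ; An alignment argument of 0 on these memcpys means no alignment is known,
  ; so the rewritten loads and stores below only get "align 1".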
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %raw1, i8* %out, i32 16, i32 0, i1 false)
; CHECK: %[[val2:.*]] = load double* %{{.*}}, align 1
; CHECK: %[[val1:.*]] = load double* %{{.*}}, align 1

  %val1 = load double* %ptr2, align 1
  %val2 = load double* %ptr1, align 1

  store double %val1, double* %ptr1, align 1
  store double %val2, double* %ptr2, align 1

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %out, i8* %raw1, i32 16, i32 0, i1 false)
; CHECK: store double %[[val1]], double* %{{.*}}, align 1
; CHECK: store double %[[val2]], double* %{{.*}}, align 1

  ret void
}