1 ; "PLAIN" - No optimizations. This tests the target-independent
3 ; RUN: opt -S -o - < %s | FileCheck --check-prefix=PLAIN %s
5 ; "OPT" - Optimizations but no targetdata. This tests target-independent
6 ; folding in the optimizers.
7 ; RUN: opt -S -o - -instcombine -globalopt < %s | FileCheck --check-prefix=OPT %s
9 ; "TO" - Optimizations and targetdata. This tests target-dependent
10 ; folding in the optimizers.
11 ; RUN: opt -S -o - -instcombine -globalopt -default-data-layout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64" < %s | FileCheck --check-prefix=TO %s
13 ; "SCEV" - ScalarEvolution but no targetdata.
14 ; RUN: opt -analyze -scalar-evolution < %s | FileCheck --check-prefix=SCEV %s
17 ; The automatic constant folder in opt does not have targetdata access, so
18 ; it can't fold gep arithmetic, in general. However, the constant folder run
19 ; from instcombine and global opt can use targetdata.
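; For reference (not a checked line), a typical case is @G8 below: without a
; datalayout the folders must keep
;   getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
; as a symbolic constant expression, while under the 64-bit datalayout of the
; TO run it folds all the way down to a plain null pointer.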

; PLAIN: @G8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
; PLAIN: @G1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
; PLAIN: @F8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
; PLAIN: @F1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
; PLAIN: @H8 = global i8* getelementptr (i8* null, i32 -1)
; PLAIN: @H1 = global i1* getelementptr (i1* null, i32 -1)
; OPT: @G8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
; OPT: @G1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
; OPT: @F8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
; OPT: @F1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
; OPT: @H8 = global i8* getelementptr (i8* null, i32 -1)
; OPT: @H1 = global i1* getelementptr (i1* null, i32 -1)
; TO: @G8 = global i8* null
; TO: @G1 = global i1* null
; TO: @F8 = global i8* inttoptr (i64 -1 to i8*)
; TO: @F1 = global i1* inttoptr (i64 -1 to i1*)
; TO: @H8 = global i8* inttoptr (i64 -1 to i8*)
; TO: @H1 = global i1* inttoptr (i64 -1 to i1*)

@G8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
@G1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
@F8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
@F1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
@H8 = global i8* getelementptr (i8* inttoptr (i32 0 to i8*), i32 -1)
@H1 = global i1* getelementptr (i1* inttoptr (i32 0 to i1*), i32 -1)

; The target-independent folder should be able to do some clever
; simplifications on sizeof, alignof, and offsetof expressions. The
; target-dependent folder should fold these down to constants.
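; As a rough guide to the idioms used below (illustrative only, not checked by
; any prefix; T, S, and N are placeholders):
;   sizeof(T)      is written  ptrtoint (T* getelementptr (T* null, i32 1) to i64)
;   alignof(T)     is written  ptrtoint (T* getelementptr ({ i1, T }* null, i64 0, i32 1) to i64)
;   offsetof(S, N) is written  ptrtoint (getelementptr (S* null, i64 0, i32 N) to i64)
; which is why the SCEV checks further down print sizeof, alignof, and offsetof.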

; PLAIN: @a = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
; PLAIN: @b = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
; PLAIN: @c = constant i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
; PLAIN: @d = constant i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
; PLAIN: @e = constant i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
; PLAIN: @f = constant i64 1
; PLAIN: @g = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
; PLAIN: @h = constant i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
; PLAIN: @i = constant i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
; OPT: @a = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
; OPT: @b = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
; OPT: @c = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
; OPT: @d = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
; OPT: @e = constant i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
; OPT: @f = constant i64 1
; OPT: @g = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
; OPT: @h = constant i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
; OPT: @i = constant i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
; TO: @a = constant i64 18480
; TO: @b = constant i64 8
; TO: @c = constant i64 16
; TO: @d = constant i64 88
; TO: @e = constant i64 16
; TO: @f = constant i64 1
; TO: @g = constant i64 8
; TO: @h = constant i64 8
; TO: @i = constant i64 8

@a = constant i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5))
@b = constant i64 ptrtoint ([13 x double]* getelementptr ({i1, [13 x double]}* null, i64 0, i32 1) to i64)
@c = constant i64 ptrtoint (double* getelementptr ({double, double, double, double}* null, i64 0, i32 2) to i64)
@d = constant i64 ptrtoint (double* getelementptr ([13 x double]* null, i64 0, i32 11) to i64)
@e = constant i64 ptrtoint (double* getelementptr ({double, float, double, double}* null, i64 0, i32 2) to i64)
@f = constant i64 ptrtoint (<{ i16, i128 }>* getelementptr ({i1, <{ i16, i128 }>}* null, i64 0, i32 1) to i64)
@g = constant i64 ptrtoint ({double, double}* getelementptr ({i1, {double, double}}* null, i64 0, i32 1) to i64)
@h = constant i64 ptrtoint (double** getelementptr (double** null, i64 1) to i64)
@i = constant i64 ptrtoint (double** getelementptr ({i1, double*}* null, i64 0, i32 1) to i64)

; The target-dependent folder should cast GEP indices to pointer-sized integers.
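; For instance (illustrative, not a checked line), under the 64-bit datalayout
; an i32 1 index on an i64* base is evaluated at pointer width, so
;   getelementptr (i64* null, i32 1)
; becomes the byte offset 8, i.e. inttoptr (i64 8 to i64*), as the TO checks
; below expect.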

; PLAIN: @M = constant i64* getelementptr (i64* null, i32 1)
; PLAIN: @N = constant i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
; PLAIN: @O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
; OPT: @M = constant i64* getelementptr (i64* null, i32 1)
; OPT: @N = constant i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
; OPT: @O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
; TO: @M = constant i64* inttoptr (i64 8 to i64*)
; TO: @N = constant i64* inttoptr (i64 8 to i64*)
; TO: @O = constant i64* inttoptr (i64 8 to i64*)

@M = constant i64* getelementptr (i64* null, i32 1)
@N = constant i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
@O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)

; Fold GEP of a GEP. Very simple cases are folded without targetdata.
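; As a sketch of the simple case (see @Y below): a gep of a gep with a single
; outer index can be combined by adding the indices, so
;   getelementptr inbounds ([3 x { i32, i32 }]* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 1), i64 1)
; folds to
;   getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 2)
; even without targetdata.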

; PLAIN: @Y = global [3 x { i32, i32 }]* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 2)
; PLAIN: @Z = global i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
; OPT: @Y = global [3 x { i32, i32 }]* getelementptr ([3 x { i32, i32 }]* @ext, i64 2)
; OPT: @Z = global i32* getelementptr (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
; TO: @Y = global [3 x { i32, i32 }]* getelementptr ([3 x { i32, i32 }]* @ext, i64 2)
; TO: @Z = global i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)

@ext = external global [3 x { i32, i32 }]
@Y = global [3 x { i32, i32 }]* getelementptr inbounds ([3 x { i32, i32 }]* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 1), i64 1)
@Z = global i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)

; Duplicate all of the above as function return values rather than
; global initializers.
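; Each function below wraps its constant expression in a no-op bitcast to the
; same type, roughly of this shape (schematic only; @example is a placeholder
; name, the real functions follow):
;   define i8* @example() nounwind {
;     %t = bitcast i8* <constant expression> to i8*
;     ret i8* %t
;   }
; so that the folders see the expression as an instruction operand rather than
; as a global initializer.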

; PLAIN: define i8* @goo8() #0 {
; PLAIN: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
; PLAIN: define i1* @goo1() #0 {
; PLAIN: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
; PLAIN: define i8* @foo8() #0 {
; PLAIN: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
; PLAIN: define i1* @foo1() #0 {
; PLAIN: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
; PLAIN: define i8* @hoo8() #0 {
; PLAIN: %t = bitcast i8* getelementptr (i8* null, i32 -1) to i8*
; PLAIN: define i1* @hoo1() #0 {
; PLAIN: %t = bitcast i1* getelementptr (i1* null, i32 -1) to i1*

; OPT: define i8* @goo8() #0 {
; OPT: ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
; OPT: define i1* @goo1() #0 {
; OPT: ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
; OPT: define i8* @foo8() #0 {
; OPT: ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
; OPT: define i1* @foo1() #0 {
; OPT: ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
; OPT: define i8* @hoo8() #0 {
; OPT: ret i8* getelementptr (i8* null, i32 -1)
; OPT: define i1* @hoo1() #0 {
; OPT: ret i1* getelementptr (i1* null, i32 -1)

; TO: define i8* @goo8() #0 {
; TO: define i1* @goo1() #0 {
; TO: define i8* @foo8() #0 {
; TO: ret i8* inttoptr (i64 -1 to i8*)
; TO: define i1* @foo1() #0 {
; TO: ret i1* inttoptr (i64 -1 to i1*)
; TO: define i8* @hoo8() #0 {
; TO: ret i8* inttoptr (i64 -1 to i8*)
; TO: define i1* @hoo1() #0 {
; TO: ret i1* inttoptr (i64 -1 to i1*)

; SCEV: Classifying expressions for: @goo8
; SCEV: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
; SCEV: --> ((-1 * sizeof(i8)) + inttoptr (i32 1 to i8*))
; SCEV: Classifying expressions for: @goo1
; SCEV: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
; SCEV: --> ((-1 * sizeof(i1)) + inttoptr (i32 1 to i1*))
; SCEV: Classifying expressions for: @foo8
; SCEV: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
; SCEV: --> ((-2 * sizeof(i8)) + inttoptr (i32 1 to i8*))
; SCEV: Classifying expressions for: @foo1
; SCEV: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
; SCEV: --> ((-2 * sizeof(i1)) + inttoptr (i32 1 to i1*))
; SCEV: Classifying expressions for: @hoo8
; SCEV: --> (-1 * sizeof(i8))
; SCEV: Classifying expressions for: @hoo1
; SCEV: --> (-1 * sizeof(i1))

define i8* @goo8() nounwind {
  %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
  ret i8* %t
}

define i1* @goo1() nounwind {
  %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
  ret i1* %t
}

define i8* @foo8() nounwind {
  %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
  ret i8* %t
}

define i1* @foo1() nounwind {
  %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
  ret i1* %t
}

define i8* @hoo8() nounwind {
  %t = bitcast i8* getelementptr (i8* inttoptr (i32 0 to i8*), i32 -1) to i8*
  ret i8* %t
}

define i1* @hoo1() nounwind {
  %t = bitcast i1* getelementptr (i1* inttoptr (i32 0 to i1*), i32 -1) to i1*
  ret i1* %t
}

; PLAIN: define i64 @fa() #0 {
; PLAIN: %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310) to i64
; PLAIN: define i64 @fb() #0 {
; PLAIN: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
; PLAIN: define i64 @fc() #0 {
; PLAIN: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2) to i64
; PLAIN: define i64 @fd() #0 {
; PLAIN: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11) to i64
; PLAIN: define i64 @fe() #0 {
; PLAIN: %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64
; PLAIN: define i64 @ff() #0 {
; PLAIN: %t = bitcast i64 1 to i64
; PLAIN: define i64 @fg() #0 {
; PLAIN: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
; PLAIN: define i64 @fh() #0 {
; PLAIN: %t = bitcast i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64) to i64
; PLAIN: define i64 @fi() #0 {
; PLAIN: %t = bitcast i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64) to i64

; OPT: define i64 @fa() #0 {
; OPT: ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
; OPT: define i64 @fb() #0 {
; OPT: ret i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
; OPT: define i64 @fc() #0 {
; OPT: ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
; OPT: define i64 @fd() #0 {
; OPT: ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
; OPT: define i64 @fe() #0 {
; OPT: ret i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
; OPT: define i64 @ff() #0 {
; OPT: define i64 @fg() #0 {
; OPT: ret i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
; OPT: define i64 @fh() #0 {
; OPT: ret i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
; OPT: define i64 @fi() #0 {
; OPT: ret i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)

; TO: define i64 @fa() #0 {
; TO: define i64 @fb() #0 {
; TO: define i64 @fc() #0 {
; TO: define i64 @fd() #0 {
; TO: define i64 @fe() #0 {
; TO: define i64 @ff() #0 {
; TO: define i64 @fg() #0 {
; TO: define i64 @fh() #0 {
; TO: define i64 @fi() #0 {

; SCEV: Classifying expressions for: @fa
; SCEV: %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310) to i64
; SCEV: --> (2310 * sizeof(double))
; SCEV: Classifying expressions for: @fb
; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
; SCEV: --> alignof(double)
; SCEV: Classifying expressions for: @fc
; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2) to i64
; SCEV: --> (2 * sizeof(double))
; SCEV: Classifying expressions for: @fd
; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11) to i64
; SCEV: --> (11 * sizeof(double))
; SCEV: Classifying expressions for: @fe
; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64
; SCEV: --> offsetof({ double, float, double, double }, 2)
; SCEV: Classifying expressions for: @ff
; SCEV: %t = bitcast i64 1 to i64
; SCEV: Classifying expressions for: @fg
; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
; SCEV: --> alignof(double)
; SCEV: Classifying expressions for: @fh
; SCEV: %t = bitcast i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64) to i64
; SCEV: --> sizeof(i1*)
; SCEV: Classifying expressions for: @fi
; SCEV: %t = bitcast i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64) to i64
; SCEV: --> alignof(i1*)

define i64 @fa() nounwind {
  %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64
  ret i64 %t
}

define i64 @fb() nounwind {
  %t = bitcast i64 ptrtoint ([13 x double]* getelementptr ({i1, [13 x double]}* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}

define i64 @fc() nounwind {
  %t = bitcast i64 ptrtoint (double* getelementptr ({double, double, double, double}* null, i64 0, i32 2) to i64) to i64
  ret i64 %t
}

define i64 @fd() nounwind {
  %t = bitcast i64 ptrtoint (double* getelementptr ([13 x double]* null, i64 0, i32 11) to i64) to i64
  ret i64 %t
}

define i64 @fe() nounwind {
  %t = bitcast i64 ptrtoint (double* getelementptr ({double, float, double, double}* null, i64 0, i32 2) to i64) to i64
  ret i64 %t
}

define i64 @ff() nounwind {
  %t = bitcast i64 ptrtoint (<{ i16, i128 }>* getelementptr ({i1, <{ i16, i128 }>}* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}

define i64 @fg() nounwind {
  %t = bitcast i64 ptrtoint ({double, double}* getelementptr ({i1, {double, double}}* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}

define i64 @fh() nounwind {
  %t = bitcast i64 ptrtoint (double** getelementptr (double** null, i32 1) to i64) to i64
  ret i64 %t
}

define i64 @fi() nounwind {
  %t = bitcast i64 ptrtoint (double** getelementptr ({i1, double*}* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}

; PLAIN: define i64* @fM() #0 {
; PLAIN: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
; PLAIN: define i64* @fN() #0 {
; PLAIN: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
; PLAIN: define i64* @fO() #0 {
; PLAIN: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*

; OPT: define i64* @fM() #0 {
; OPT: ret i64* getelementptr (i64* null, i32 1)
; OPT: define i64* @fN() #0 {
; OPT: ret i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
; OPT: define i64* @fO() #0 {
; OPT: ret i64* getelementptr ([2 x i64]* null, i32 0, i32 1)

; TO: define i64* @fM() #0 {
; TO: ret i64* inttoptr (i64 8 to i64*)
; TO: define i64* @fN() #0 {
; TO: ret i64* inttoptr (i64 8 to i64*)
; TO: define i64* @fO() #0 {
; TO: ret i64* inttoptr (i64 8 to i64*)

; SCEV: Classifying expressions for: @fM
; SCEV: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
; SCEV: --> sizeof(i64)
; SCEV: Classifying expressions for: @fN
; SCEV: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
; SCEV: --> sizeof(i64)
; SCEV: Classifying expressions for: @fO
; SCEV: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
; SCEV: --> sizeof(i64)

define i64* @fM() nounwind {
  %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
  ret i64* %t
}

define i64* @fN() nounwind {
  %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
  ret i64* %t
}

define i64* @fO() nounwind {
  %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
  ret i64* %t
}

; PLAIN: define i32* @fZ() #0 {
; PLAIN: %t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
; OPT: define i32* @fZ() #0 {
; OPT: ret i32* getelementptr (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
; TO: define i32* @fZ() #0 {
; TO: ret i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; SCEV: Classifying expressions for: @fZ
; SCEV: %t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
; SCEV: --> ((3 * sizeof(i32)) + @ext)

define i32* @fZ() nounwind {
  %t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
  ret i32* %t
}

; PR15262 - Check GEP folding with casts between address spaces.
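; Roughly, and mirroring the OPT checks below: a gep through an addrspacecast
; cannot be folded into the addrspace(12) global, so
;   getelementptr (i8* addrspacecast ([4 x i8] addrspace(12)* @p12 to i8*), i32 2)
; is kept symbolic, while the same-address-space bitcast case folds into an
; inbounds gep directly on @p0.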

@p0 = global [4 x i8] zeroinitializer, align 1
@p12 = addrspace(12) global [4 x i8] zeroinitializer, align 1

define i8* @different_addrspace() nounwind noinline {
; OPT: different_addrspace
  %p = getelementptr inbounds i8* addrspacecast ([4 x i8] addrspace(12)* @p12 to i8*), i32 2
  ret i8* %p
; OPT: ret i8* getelementptr (i8* addrspacecast ([4 x i8] addrspace(12)* @p12 to i8*), i32 2)
}

define i8* @same_addrspace() nounwind noinline {
; OPT: same_addrspace
  %p = getelementptr inbounds i8* bitcast ([4 x i8]* @p0 to i8*), i32 2
  ret i8* %p
; OPT: ret i8* getelementptr inbounds ([4 x i8]* @p0, i32 0, i32 2)
}

; CHECK: attributes #0 = { nounwind }