; 8-bit atomic load, unordered. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i8 @atomic8_load_unordered(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8, i8* %a unordered, align 1
  ret i8 %0
}
; CHECK: atomic8_load_unordered
; 8-bit atomic load, monotonic. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i8 @atomic8_load_monotonic(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8, i8* %a monotonic, align 1
  ret i8 %0
}
; CHECK: atomic8_load_monotonic
; 8-bit atomic load, acquire. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i8 @atomic8_load_acquire(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8, i8* %a acquire, align 1
  ret i8 %0
}
; CHECK: atomic8_load_acquire
; 8-bit atomic load, seq_cst. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i8 @atomic8_load_seq_cst(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8, i8* %a seq_cst, align 1
  ret i8 %0
}
; CHECK: atomic8_load_seq_cst
; 16-bit atomic load, unordered. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16, i16* %a unordered, align 2
  ret i16 %0
}
; CHECK: atomic16_load_unordered
; 16-bit atomic load, monotonic. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i16 @atomic16_load_monotonic(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16, i16* %a monotonic, align 2
  ret i16 %0
}
; CHECK: atomic16_load_monotonic
; 16-bit atomic load, acquire. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i16 @atomic16_load_acquire(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16, i16* %a acquire, align 2
  ret i16 %0
}
; CHECK: atomic16_load_acquire
; 16-bit atomic load, seq_cst. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i16 @atomic16_load_seq_cst(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16, i16* %a seq_cst, align 2
  ret i16 %0
}
; CHECK: atomic16_load_seq_cst
; 32-bit atomic load, unordered. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32, i32* %a unordered, align 4
  ret i32 %0
}
; CHECK: atomic32_load_unordered
; 32-bit atomic load, monotonic. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i32 @atomic32_load_monotonic(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32, i32* %a monotonic, align 4
  ret i32 %0
}
; CHECK: atomic32_load_monotonic
; 32-bit atomic load, acquire. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32, i32* %a acquire, align 4
  ret i32 %0
}
; CHECK: atomic32_load_acquire
; 32-bit atomic load, seq_cst. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32, i32* %a seq_cst, align 4
  ret i32 %0
}
; CHECK: atomic32_load_seq_cst
; 64-bit atomic load, unordered. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64, i64* %a unordered, align 8
  ret i64 %0
}
; CHECK: atomic64_load_unordered
; 64-bit atomic load, monotonic. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i64 @atomic64_load_monotonic(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64, i64* %a monotonic, align 8
  ret i64 %0
}
; CHECK: atomic64_load_monotonic
; 64-bit atomic load, acquire. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64, i64* %a acquire, align 8
  ret i64 %0
}
; CHECK: atomic64_load_acquire
; 64-bit atomic load, seq_cst. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64, i64* %a seq_cst, align 8
  ret i64 %0
}
; CHECK: atomic64_load_seq_cst
; 128-bit atomic load, unordered. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128, i128* %a unordered, align 16
  ret i128 %0
}
; CHECK: atomic128_load_unordered
; 128-bit atomic load, monotonic. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128, i128* %a monotonic, align 16
  ret i128 %0
}
; CHECK: atomic128_load_monotonic
; 128-bit atomic load, acquire. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128, i128* %a acquire, align 16
  ret i128 %0
}
; CHECK: atomic128_load_acquire
; 128-bit atomic load, seq_cst. Fixed: removed leftover diff markers and the
; stale pre-3.7 untyped-pointer load line; kept the explicit-type load syntax.
define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128, i128* %a seq_cst, align 16
  ret i128 %0
}
; CHECK: atomic128_load_seq_cst