; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck %s
; Note: This test is testing that the lowering for atomics matches what we
; currently emit for non-atomics + the atomic restriction. The presence of
; particular lowering detail in these tests should not be read as requiring
; that detail for correctness unless it's related to the atomicity itself.
; (Specifically, there were reviewer questions about the lowering for halfs
; and their calling convention which remain unresolved.)
; Unordered atomic half store: the value is narrowed via the
; __gnu_f2h_ieee libcall, then written with a plain 16-bit mov
; (naturally aligned stores up to 8 bytes are atomic on x86-64).
define void @store_half(half* %fptr, half %v) {
; CHECK-LABEL: @store_half
; CHECK: movq %rdi, %rbx
; CHECK: callq __gnu_f2h_ieee
; CHECK: movw %ax, (%rbx)
  store atomic half %v, half* %fptr unordered, align 2
  ret void
}
; Unordered atomic float store: move out of the XMM register and use a
; plain 32-bit mov, which is atomic at 4-byte alignment on x86-64.
define void @store_float(float* %fptr, float %v) {
; CHECK-LABEL: @store_float
; CHECK: movd %xmm0, %eax
; CHECK: movl %eax, (%rdi)
  store atomic float %v, float* %fptr unordered, align 4
  ret void
}
; Unordered atomic double store: move out of the XMM register and use a
; plain 64-bit mov, which is atomic at 8-byte alignment on x86-64.
define void @store_double(double* %fptr, double %v) {
; CHECK-LABEL: @store_double
; CHECK: movd %xmm0, %rax
; CHECK: movq %rax, (%rdi)
  store atomic double %v, double* %fptr unordered, align 8
  ret void
}
; Unordered atomic fp128 store: 16-byte accesses have no single atomic
; instruction under this configuration, so it lowers to a libcall.
define void @store_fp128(fp128* %fptr, fp128 %v) {
; CHECK-LABEL: @store_fp128
; CHECK: callq __sync_lock_test_and_set_16
  store atomic fp128 %v, fp128* %fptr unordered, align 16
  ret void
}
; Unordered atomic half load: a plain 16-bit load is atomic at 2-byte
; alignment; the widening to float goes through the __gnu_h2f_ieee
; libcall (tail-called here).
define half @load_half(half* %fptr) {
; CHECK-LABEL: @load_half
; CHECK: movw (%rdi), %ax
; CHECK: movzwl %ax, %edi
; CHECK: jmp __gnu_h2f_ieee
  %v = load atomic half, half* %fptr unordered, align 2
  ret half %v
}
; Unordered atomic float load: a plain 32-bit load is atomic at 4-byte
; alignment; the result is moved into an XMM register for return.
define float @load_float(float* %fptr) {
; CHECK-LABEL: @load_float
; CHECK: movl (%rdi), %eax
; CHECK: movd %eax, %xmm0
  %v = load atomic float, float* %fptr unordered, align 4
  ret float %v
}
; Unordered atomic double load: a plain 64-bit load is atomic at 8-byte
; alignment; the result is moved into an XMM register for return.
define double @load_double(double* %fptr) {
; CHECK-LABEL: @load_double
; CHECK: movq (%rdi), %rax
; CHECK: movd %rax, %xmm0
  %v = load atomic double, double* %fptr unordered, align 8
  ret double %v
}
; Unordered atomic fp128 load: like the 16-byte store, this lowers to a
; __sync_* libcall (a CAS used purely as an atomic read).
define fp128 @load_fp128(fp128* %fptr) {
; CHECK-LABEL: @load_fp128
; CHECK: callq __sync_val_compare_and_swap_16
  %v = load atomic fp128, fp128* %fptr unordered, align 16
  ret fp128 %v
}
; Sanity check the seq_cst lowering since that's the
; interesting one from an ordering perspective on x86.
; seq_cst float store: an implicitly-locked xchg provides the required
; full-fence semantics (stronger than a plain mov + no fence).
define void @store_float_seq_cst(float* %fptr, float %v) {
; CHECK-LABEL: @store_float_seq_cst
; CHECK: movd %xmm0, %eax
; CHECK: xchgl %eax, (%rdi)
  store atomic float %v, float* %fptr seq_cst, align 4
  ret void
}
; seq_cst double store: same pattern as the float case, using the
; 64-bit implicitly-locked xchg.
define void @store_double_seq_cst(double* %fptr, double %v) {
; CHECK-LABEL: @store_double_seq_cst
; CHECK: movd %xmm0, %rax
; CHECK: xchgq %rax, (%rdi)
  store atomic double %v, double* %fptr seq_cst, align 8
  ret void
}
; seq_cst float load: on x86 loads already have acquire semantics, so
; this lowers exactly like the unordered case — no extra fence needed.
define float @load_float_seq_cst(float* %fptr) {
; CHECK-LABEL: @load_float_seq_cst
; CHECK: movl (%rdi), %eax
; CHECK: movd %eax, %xmm0
  %v = load atomic float, float* %fptr seq_cst, align 4
  ret float %v
}
; seq_cst double load: same as the unordered double load — a plain
; 8-byte mov suffices on x86.
define double @load_double_seq_cst(double* %fptr) {
; CHECK-LABEL: @load_double_seq_cst
; CHECK: movq (%rdi), %rax
; CHECK: movd %rax, %xmm0
  %v = load atomic double, double* %fptr seq_cst, align 8
  ret double %v
}