From 169b5ed46e969ad2b04efeac54bd24a316c14e05 Mon Sep 17 00:00:00 2001
From: Dale Johannesen
Date: Fri, 22 Aug 2008 22:39:21 +0000
Subject: [PATCH] Test all currently supported atomic builtins on x86-{32,64}.

These just test that they go through the backend.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55208 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/Atomics-32.ll |  818 +++++++++++++++++++++++++
 test/CodeGen/X86/Atomics-64.ll | 1015 ++++++++++++++++++++++++++++++++
 2 files changed, 1833 insertions(+)
 create mode 100644 test/CodeGen/X86/Atomics-32.ll
 create mode 100644 test/CodeGen/X86/Atomics-64.ll

diff --git a/test/CodeGen/X86/Atomics-32.ll b/test/CodeGen/X86/Atomics-32.ll
new file mode 100644
index 00000000000..66db810cce5
--- /dev/null
+++ b/test/CodeGen/X86/Atomics-32.ll
@@ -0,0 +1,818 @@
+; RUN: llvm-as < %s | llc -march=x86
+;; Note the 64-bit variants are not supported yet (in 32-bit mode).
+; ModuleID = 'Atomics.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+@sc = common global i8 0 ; [#uses=52]
+@uc = common global i8 0 ; [#uses=100]
+@ss = common global i16 0 ; [#uses=15]
+@us = common global i16 0 ; [#uses=15]
+@si = common global i32 0 ; [#uses=15]
+@ui = common global i32 0 ; [#uses=23]
+@sl = common global i32 0 ; [#uses=15]
+@ul = common global i32 0 ; [#uses=15]
+
+define void @test_op_ignore() nounwind {
+entry:
+ call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; :0 [#uses=0]
+ call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; :1 [#uses=0]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :2 [#uses=1]
+ call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; :3 [#uses=0]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :4 [#uses=1]
+ call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; :5 [#uses=0]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :6 [#uses=1]
+ call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; :7 [#uses=0]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :8 [#uses=1]
+ call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; :9 [#uses=0]
+ bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :10 [#uses=1]
+ call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 1 ) ; :11 [#uses=0]
+ bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :12 [#uses=1]
+ call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 1 ) ; :13 [#uses=0]
+ call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; :14 [#uses=0]
+ call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; :15 [#uses=0]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :16 [#uses=1]
+ call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 ) ; :17 [#uses=0]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :18 [#uses=1]
+ call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 ) ; :19 [#uses=0]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :20 [#uses=1]
+ call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 ) ; :21 [#uses=0]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :22 [#uses=1]
+ call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 ) ; :23 [#uses=0]
+ bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :24 [#uses=1]
+ call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; :25 [#uses=0]
+ bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :26 [#uses=1]
+ call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; :27 [#uses=0]
+ call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; :28
[#uses=0] + call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; :29 [#uses=0] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :30 [#uses=1] + call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 ) ; :31 [#uses=0] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :32 [#uses=1] + call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 ) ; :33 [#uses=0] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :34 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 ) ; :35 [#uses=0] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :36 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 ) ; :37 [#uses=0] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :38 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 1 ) ; :39 [#uses=0] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :40 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 1 ) ; :41 [#uses=0] + call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; :42 [#uses=0] + call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; :43 [#uses=0] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :44 [#uses=1] + call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 ) ; :45 [#uses=0] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :46 [#uses=1] + call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 ) ; :47 [#uses=0] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :48 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 ) ; :49 [#uses=0] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :50 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 ) ; :51 [#uses=0] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :52 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 1 ) ; :53 [#uses=0] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :54 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 1 ) ; :55 [#uses=0] + call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; :56 [#uses=0] + call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; :57 [#uses=0] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :58 [#uses=1] + call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 ) ; :59 [#uses=0] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :60 [#uses=1] + call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 ) ; :61 [#uses=0] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :62 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 ) ; :63 [#uses=0] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :64 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 ) ; :65 [#uses=0] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :66 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 1 ) ; :67 [#uses=0] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :68 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 1 ) ; :69 [#uses=0] + call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; :70 [#uses=0] + call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; :71 [#uses=0] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :72 [#uses=1] + call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 ) ; :73 [#uses=0] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :74 [#uses=1] + call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 ) ; :75 [#uses=0] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :76 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 ) ; :77 [#uses=0] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :78 
[#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 ) ; :79 [#uses=0] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :80 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 1 ) ; :81 [#uses=0] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :82 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 1 ) ; :83 [#uses=0] + br label %return + +return: ; preds = %entry + ret void +} + +declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind + +declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind + +declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind + +declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind + +declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind + +declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind + +define void @test_fetch_and_op() nounwind { +entry: + call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; :0 [#uses=1] + store i8 %0, i8* @sc, align 1 + call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; :1 [#uses=1] + store i8 %1, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :2 [#uses=1] + call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; :3 [#uses=1] + store i16 %3, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :4 [#uses=1] + call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; :5 [#uses=1] + store i16 %5, i16* @us, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :6 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; :7 [#uses=1] + store i32 %7, i32* @si, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :8 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; :9 [#uses=1] + store i32 %9, i32* @ui, align 4 + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :10 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 11 ) ; :11 [#uses=1] + store i32 %11, i32* @sl, align 4 + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :12 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 11 ) ; :13 [#uses=1] + store i32 %13, i32* @ul, align 4 + call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; :14 [#uses=1] + store i8 %14, i8* @sc, align 1 + call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; :15 [#uses=1] + store i8 %15, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :16 [#uses=1] + call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 ) ; :17 [#uses=1] + store i16 %17, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :18 [#uses=1] + call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 ) ; :19 [#uses=1] + store i16 %19, i16* @us, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :20 
[#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 ) ; :21 [#uses=1] + store i32 %21, i32* @si, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :22 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 ) ; :23 [#uses=1] + store i32 %23, i32* @ui, align 4 + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :24 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; :25 [#uses=1] + store i32 %25, i32* @sl, align 4 + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :26 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; :27 [#uses=1] + store i32 %27, i32* @ul, align 4 + call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; :28 [#uses=1] + store i8 %28, i8* @sc, align 1 + call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; :29 [#uses=1] + store i8 %29, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :30 [#uses=1] + call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 ) ; :31 [#uses=1] + store i16 %31, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :32 [#uses=1] + call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 ) ; :33 [#uses=1] + store i16 %33, i16* @us, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :34 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 ) ; :35 [#uses=1] + store i32 %35, i32* @si, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :36 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 ) ; :37 [#uses=1] + store i32 %37, i32* @ui, align 4 + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :38 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 11 ) ; :39 [#uses=1] + store i32 %39, i32* @sl, align 4 + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :40 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 11 ) ; :41 [#uses=1] + store i32 %41, i32* @ul, align 4 + call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; :42 [#uses=1] + store i8 %42, i8* @sc, align 1 + call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; :43 [#uses=1] + store i8 %43, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :44 [#uses=1] + call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 ) ; :45 [#uses=1] + store i16 %45, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :46 [#uses=1] + call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 ) ; :47 [#uses=1] + store i16 %47, i16* @us, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :48 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 ) ; :49 [#uses=1] + store i32 %49, i32* @si, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :50 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 ) ; :51 [#uses=1] + store i32 %51, i32* @ui, align 4 + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :52 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 11 ) ; :53 [#uses=1] + store i32 %53, i32* @sl, align 4 + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :54 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 11 ) ; :55 [#uses=1] + store i32 %55, i32* @ul, align 4 + call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; :56 [#uses=1] + store i8 %56, i8* @sc, align 1 + call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; :57 [#uses=1] + store i8 %57, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :58 [#uses=1] + call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 ) 
; :59 [#uses=1] + store i16 %59, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :60 [#uses=1] + call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 ) ; :61 [#uses=1] + store i16 %61, i16* @us, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :62 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 ) ; :63 [#uses=1] + store i32 %63, i32* @si, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :64 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 ) ; :65 [#uses=1] + store i32 %65, i32* @ui, align 4 + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :66 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 11 ) ; :67 [#uses=1] + store i32 %67, i32* @sl, align 4 + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :68 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 11 ) ; :69 [#uses=1] + store i32 %69, i32* @ul, align 4 + call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; :70 [#uses=1] + store i8 %70, i8* @sc, align 1 + call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; :71 [#uses=1] + store i8 %71, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :72 [#uses=1] + call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 ) ; :73 [#uses=1] + store i16 %73, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :74 [#uses=1] + call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 ) ; :75 [#uses=1] + store i16 %75, i16* @us, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :76 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 ) ; :77 [#uses=1] + store i32 %77, i32* @si, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :78 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 ) ; :79 [#uses=1] + store i32 %79, i32* @ui, align 4 + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :80 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 11 ) ; :81 [#uses=1] + store i32 %81, i32* @sl, align 4 + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :82 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 11 ) ; :83 [#uses=1] + store i32 %83, i32* @ul, align 4 + br label %return + +return: ; preds = %entry + ret void +} + +define void @test_op_and_fetch() nounwind { +entry: + load i8* @uc, align 1 ; :0 [#uses=1] + zext i8 %0 to i32 ; :1 [#uses=1] + trunc i32 %1 to i8 ; :2 [#uses=2] + call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %2 ) ; :3 [#uses=1] + add i8 %3, %2 ; :4 [#uses=1] + store i8 %4, i8* @sc, align 1 + load i8* @uc, align 1 ; :5 [#uses=1] + zext i8 %5 to i32 ; :6 [#uses=1] + trunc i32 %6 to i8 ; :7 [#uses=2] + call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %7 ) ; :8 [#uses=1] + add i8 %8, %7 ; :9 [#uses=1] + store i8 %9, i8* @uc, align 1 + load i8* @uc, align 1 ; :10 [#uses=1] + zext i8 %10 to i32 ; :11 [#uses=1] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :12 [#uses=1] + trunc i32 %11 to i16 ; :13 [#uses=2] + call i16 @llvm.atomic.load.add.i16.p0i16( i16* %12, i16 %13 ) ; :14 [#uses=1] + add i16 %14, %13 ; :15 [#uses=1] + store i16 %15, i16* @ss, align 2 + load i8* @uc, align 1 ; :16 [#uses=1] + zext i8 %16 to i32 ; :17 [#uses=1] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :18 [#uses=1] + trunc i32 %17 to i16 ; :19 [#uses=2] + call i16 @llvm.atomic.load.add.i16.p0i16( i16* %18, i16 %19 ) ; :20 [#uses=1] + add i16 %20, %19 ; :21 [#uses=1] + store i16 %21, i16* @us, align 2 + load i8* @uc, align 1 ; :22 [#uses=1] + zext i8 %22 
to i32 ; :23 [#uses=2] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :24 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %24, i32 %23 ) ; :25 [#uses=1] + add i32 %25, %23 ; :26 [#uses=1] + store i32 %26, i32* @si, align 4 + load i8* @uc, align 1 ; :27 [#uses=1] + zext i8 %27 to i32 ; :28 [#uses=2] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :29 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %29, i32 %28 ) ; :30 [#uses=1] + add i32 %30, %28 ; :31 [#uses=1] + store i32 %31, i32* @ui, align 4 + load i8* @uc, align 1 ; :32 [#uses=1] + zext i8 %32 to i32 ; :33 [#uses=2] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :34 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %34, i32 %33 ) ; :35 [#uses=1] + add i32 %35, %33 ; :36 [#uses=1] + store i32 %36, i32* @sl, align 4 + load i8* @uc, align 1 ; :37 [#uses=1] + zext i8 %37 to i32 ; :38 [#uses=2] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :39 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %39, i32 %38 ) ; :40 [#uses=1] + add i32 %40, %38 ; :41 [#uses=1] + store i32 %41, i32* @ul, align 4 + load i8* @uc, align 1 ; :42 [#uses=1] + zext i8 %42 to i32 ; :43 [#uses=1] + trunc i32 %43 to i8 ; :44 [#uses=2] + call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %44 ) ; :45 [#uses=1] + sub i8 %45, %44 ; :46 [#uses=1] + store i8 %46, i8* @sc, align 1 + load i8* @uc, align 1 ; :47 [#uses=1] + zext i8 %47 to i32 ; :48 [#uses=1] + trunc i32 %48 to i8 ; :49 [#uses=2] + call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %49 ) ; :50 [#uses=1] + sub i8 %50, %49 ; :51 [#uses=1] + store i8 %51, i8* @uc, align 1 + load i8* @uc, align 1 ; :52 [#uses=1] + zext i8 %52 to i32 ; :53 [#uses=1] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :54 [#uses=1] + trunc i32 %53 to i16 ; :55 [#uses=2] + call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %54, i16 %55 ) ; :56 [#uses=1] + sub i16 %56, %55 ; :57 [#uses=1] + store i16 %57, i16* @ss, align 2 + load i8* @uc, align 1 ; :58 [#uses=1] + zext i8 %58 to i32 ; :59 [#uses=1] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :60 [#uses=1] + trunc i32 %59 to i16 ; :61 [#uses=2] + call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %60, i16 %61 ) ; :62 [#uses=1] + sub i16 %62, %61 ; :63 [#uses=1] + store i16 %63, i16* @us, align 2 + load i8* @uc, align 1 ; :64 [#uses=1] + zext i8 %64 to i32 ; :65 [#uses=2] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :66 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %66, i32 %65 ) ; :67 [#uses=1] + sub i32 %67, %65 ; :68 [#uses=1] + store i32 %68, i32* @si, align 4 + load i8* @uc, align 1 ; :69 [#uses=1] + zext i8 %69 to i32 ; :70 [#uses=2] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :71 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %71, i32 %70 ) ; :72 [#uses=1] + sub i32 %72, %70 ; :73 [#uses=1] + store i32 %73, i32* @ui, align 4 + load i8* @uc, align 1 ; :74 [#uses=1] + zext i8 %74 to i32 ; :75 [#uses=2] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :76 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %76, i32 %75 ) ; :77 [#uses=1] + sub i32 %77, %75 ; :78 [#uses=1] + store i32 %78, i32* @sl, align 4 + load i8* @uc, align 1 ; :79 [#uses=1] + zext i8 %79 to i32 ; :80 [#uses=2] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :81 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %81, i32 %80 ) ; :82 [#uses=1] + sub i32 %82, %80 ; :83 [#uses=1] + store i32 %83, i32* @ul, align 4 + load i8* @uc, align 1 ; :84 [#uses=1] + zext i8 %84 to i32 ; :85 [#uses=1] + trunc i32 %85 to i8 ; :86 
[#uses=2] + call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %86 ) ; :87 [#uses=1] + or i8 %87, %86 ; :88 [#uses=1] + store i8 %88, i8* @sc, align 1 + load i8* @uc, align 1 ; :89 [#uses=1] + zext i8 %89 to i32 ; :90 [#uses=1] + trunc i32 %90 to i8 ; :91 [#uses=2] + call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %91 ) ; :92 [#uses=1] + or i8 %92, %91 ; :93 [#uses=1] + store i8 %93, i8* @uc, align 1 + load i8* @uc, align 1 ; :94 [#uses=1] + zext i8 %94 to i32 ; :95 [#uses=1] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :96 [#uses=1] + trunc i32 %95 to i16 ; :97 [#uses=2] + call i16 @llvm.atomic.load.or.i16.p0i16( i16* %96, i16 %97 ) ; :98 [#uses=1] + or i16 %98, %97 ; :99 [#uses=1] + store i16 %99, i16* @ss, align 2 + load i8* @uc, align 1 ; :100 [#uses=1] + zext i8 %100 to i32 ; :101 [#uses=1] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :102 [#uses=1] + trunc i32 %101 to i16 ; :103 [#uses=2] + call i16 @llvm.atomic.load.or.i16.p0i16( i16* %102, i16 %103 ) ; :104 [#uses=1] + or i16 %104, %103 ; :105 [#uses=1] + store i16 %105, i16* @us, align 2 + load i8* @uc, align 1 ; :106 [#uses=1] + zext i8 %106 to i32 ; :107 [#uses=2] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :108 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %108, i32 %107 ) ; :109 [#uses=1] + or i32 %109, %107 ; :110 [#uses=1] + store i32 %110, i32* @si, align 4 + load i8* @uc, align 1 ; :111 [#uses=1] + zext i8 %111 to i32 ; :112 [#uses=2] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :113 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %113, i32 %112 ) ; :114 [#uses=1] + or i32 %114, %112 ; :115 [#uses=1] + store i32 %115, i32* @ui, align 4 + load i8* @uc, align 1 ; :116 [#uses=1] + zext i8 %116 to i32 ; :117 [#uses=2] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :118 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %118, i32 %117 ) ; :119 [#uses=1] + or i32 %119, %117 ; :120 [#uses=1] + store i32 %120, i32* @sl, align 4 + load i8* @uc, align 1 ; :121 [#uses=1] + zext i8 %121 to i32 ; :122 [#uses=2] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :123 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %123, i32 %122 ) ; :124 [#uses=1] + or i32 %124, %122 ; :125 [#uses=1] + store i32 %125, i32* @ul, align 4 + load i8* @uc, align 1 ; :126 [#uses=1] + zext i8 %126 to i32 ; :127 [#uses=1] + trunc i32 %127 to i8 ; :128 [#uses=2] + call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %128 ) ; :129 [#uses=1] + xor i8 %129, %128 ; :130 [#uses=1] + store i8 %130, i8* @sc, align 1 + load i8* @uc, align 1 ; :131 [#uses=1] + zext i8 %131 to i32 ; :132 [#uses=1] + trunc i32 %132 to i8 ; :133 [#uses=2] + call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %133 ) ; :134 [#uses=1] + xor i8 %134, %133 ; :135 [#uses=1] + store i8 %135, i8* @uc, align 1 + load i8* @uc, align 1 ; :136 [#uses=1] + zext i8 %136 to i32 ; :137 [#uses=1] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :138 [#uses=1] + trunc i32 %137 to i16 ; :139 [#uses=2] + call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %138, i16 %139 ) ; :140 [#uses=1] + xor i16 %140, %139 ; :141 [#uses=1] + store i16 %141, i16* @ss, align 2 + load i8* @uc, align 1 ; :142 [#uses=1] + zext i8 %142 to i32 ; :143 [#uses=1] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :144 [#uses=1] + trunc i32 %143 to i16 ; :145 [#uses=2] + call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %144, i16 %145 ) ; :146 [#uses=1] + xor i16 %146, %145 ; :147 [#uses=1] + store i16 %147, i16* @us, align 2 + load i8* @uc, align 1 ; :148 [#uses=1] + zext i8 %148 to 
i32 ; :149 [#uses=2] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :150 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %150, i32 %149 ) ; :151 [#uses=1] + xor i32 %151, %149 ; :152 [#uses=1] + store i32 %152, i32* @si, align 4 + load i8* @uc, align 1 ; :153 [#uses=1] + zext i8 %153 to i32 ; :154 [#uses=2] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :155 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %155, i32 %154 ) ; :156 [#uses=1] + xor i32 %156, %154 ; :157 [#uses=1] + store i32 %157, i32* @ui, align 4 + load i8* @uc, align 1 ; :158 [#uses=1] + zext i8 %158 to i32 ; :159 [#uses=2] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :160 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %160, i32 %159 ) ; :161 [#uses=1] + xor i32 %161, %159 ; :162 [#uses=1] + store i32 %162, i32* @sl, align 4 + load i8* @uc, align 1 ; :163 [#uses=1] + zext i8 %163 to i32 ; :164 [#uses=2] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :165 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %165, i32 %164 ) ; :166 [#uses=1] + xor i32 %166, %164 ; :167 [#uses=1] + store i32 %167, i32* @ul, align 4 + load i8* @uc, align 1 ; :168 [#uses=1] + zext i8 %168 to i32 ; :169 [#uses=1] + trunc i32 %169 to i8 ; :170 [#uses=2] + call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %170 ) ; :171 [#uses=1] + and i8 %171, %170 ; :172 [#uses=1] + store i8 %172, i8* @sc, align 1 + load i8* @uc, align 1 ; :173 [#uses=1] + zext i8 %173 to i32 ; :174 [#uses=1] + trunc i32 %174 to i8 ; :175 [#uses=2] + call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %175 ) ; :176 [#uses=1] + and i8 %176, %175 ; :177 [#uses=1] + store i8 %177, i8* @uc, align 1 + load i8* @uc, align 1 ; :178 [#uses=1] + zext i8 %178 to i32 ; :179 [#uses=1] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :180 [#uses=1] + trunc i32 %179 to i16 ; :181 [#uses=2] + call i16 @llvm.atomic.load.and.i16.p0i16( i16* %180, i16 %181 ) ; :182 [#uses=1] + and i16 %182, %181 ; :183 [#uses=1] + store i16 %183, i16* @ss, align 2 + load i8* @uc, align 1 ; :184 [#uses=1] + zext i8 %184 to i32 ; :185 [#uses=1] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :186 [#uses=1] + trunc i32 %185 to i16 ; :187 [#uses=2] + call i16 @llvm.atomic.load.and.i16.p0i16( i16* %186, i16 %187 ) ; :188 [#uses=1] + and i16 %188, %187 ; :189 [#uses=1] + store i16 %189, i16* @us, align 2 + load i8* @uc, align 1 ; :190 [#uses=1] + zext i8 %190 to i32 ; :191 [#uses=2] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :192 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %192, i32 %191 ) ; :193 [#uses=1] + and i32 %193, %191 ; :194 [#uses=1] + store i32 %194, i32* @si, align 4 + load i8* @uc, align 1 ; :195 [#uses=1] + zext i8 %195 to i32 ; :196 [#uses=2] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :197 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %197, i32 %196 ) ; :198 [#uses=1] + and i32 %198, %196 ; :199 [#uses=1] + store i32 %199, i32* @ui, align 4 + load i8* @uc, align 1 ; :200 [#uses=1] + zext i8 %200 to i32 ; :201 [#uses=2] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :202 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %202, i32 %201 ) ; :203 [#uses=1] + and i32 %203, %201 ; :204 [#uses=1] + store i32 %204, i32* @sl, align 4 + load i8* @uc, align 1 ; :205 [#uses=1] + zext i8 %205 to i32 ; :206 [#uses=2] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :207 [#uses=1] + call i32 @llvm.atomic.load.and.i32.p0i32( i32* %207, i32 %206 ) ; :208 [#uses=1] + and i32 %208, %206 ; :209 [#uses=1] + store i32 
%209, i32* @ul, align 4 + load i8* @uc, align 1 ; :210 [#uses=1] + zext i8 %210 to i32 ; :211 [#uses=1] + trunc i32 %211 to i8 ; :212 [#uses=2] + call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %212 ) ; :213 [#uses=1] + xor i8 %213, -1 ; :214 [#uses=1] + and i8 %214, %212 ; :215 [#uses=1] + store i8 %215, i8* @sc, align 1 + load i8* @uc, align 1 ; :216 [#uses=1] + zext i8 %216 to i32 ; :217 [#uses=1] + trunc i32 %217 to i8 ; :218 [#uses=2] + call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %218 ) ; :219 [#uses=1] + xor i8 %219, -1 ; :220 [#uses=1] + and i8 %220, %218 ; :221 [#uses=1] + store i8 %221, i8* @uc, align 1 + load i8* @uc, align 1 ; :222 [#uses=1] + zext i8 %222 to i32 ; :223 [#uses=1] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :224 [#uses=1] + trunc i32 %223 to i16 ; :225 [#uses=2] + call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %224, i16 %225 ) ; :226 [#uses=1] + xor i16 %226, -1 ; :227 [#uses=1] + and i16 %227, %225 ; :228 [#uses=1] + store i16 %228, i16* @ss, align 2 + load i8* @uc, align 1 ; :229 [#uses=1] + zext i8 %229 to i32 ; :230 [#uses=1] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :231 [#uses=1] + trunc i32 %230 to i16 ; :232 [#uses=2] + call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %231, i16 %232 ) ; :233 [#uses=1] + xor i16 %233, -1 ; :234 [#uses=1] + and i16 %234, %232 ; :235 [#uses=1] + store i16 %235, i16* @us, align 2 + load i8* @uc, align 1 ; :236 [#uses=1] + zext i8 %236 to i32 ; :237 [#uses=2] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :238 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %238, i32 %237 ) ; :239 [#uses=1] + xor i32 %239, -1 ; :240 [#uses=1] + and i32 %240, %237 ; :241 [#uses=1] + store i32 %241, i32* @si, align 4 + load i8* @uc, align 1 ; :242 [#uses=1] + zext i8 %242 to i32 ; :243 [#uses=2] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :244 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %244, i32 %243 ) ; :245 [#uses=1] + xor i32 %245, -1 ; :246 [#uses=1] + and i32 %246, %243 ; :247 [#uses=1] + store i32 %247, i32* @ui, align 4 + load i8* @uc, align 1 ; :248 [#uses=1] + zext i8 %248 to i32 ; :249 [#uses=2] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :250 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %250, i32 %249 ) ; :251 [#uses=1] + xor i32 %251, -1 ; :252 [#uses=1] + and i32 %252, %249 ; :253 [#uses=1] + store i32 %253, i32* @sl, align 4 + load i8* @uc, align 1 ; :254 [#uses=1] + zext i8 %254 to i32 ; :255 [#uses=2] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :256 [#uses=1] + call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %256, i32 %255 ) ; :257 [#uses=1] + xor i32 %257, -1 ; :258 [#uses=1] + and i32 %258, %255 ; :259 [#uses=1] + store i32 %259, i32* @ul, align 4 + br label %return + +return: ; preds = %entry + ret void +} + +define void @test_compare_and_swap() nounwind { +entry: + load i8* @sc, align 1 ; :0 [#uses=1] + zext i8 %0 to i32 ; :1 [#uses=1] + load i8* @uc, align 1 ; :2 [#uses=1] + zext i8 %2 to i32 ; :3 [#uses=1] + trunc i32 %3 to i8 ; :4 [#uses=1] + trunc i32 %1 to i8 ; :5 [#uses=1] + call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %4, i8 %5 ) ; :6 [#uses=1] + store i8 %6, i8* @sc, align 1 + load i8* @sc, align 1 ; :7 [#uses=1] + zext i8 %7 to i32 ; :8 [#uses=1] + load i8* @uc, align 1 ; :9 [#uses=1] + zext i8 %9 to i32 ; :10 [#uses=1] + trunc i32 %10 to i8 ; :11 [#uses=1] + trunc i32 %8 to i8 ; :12 [#uses=1] + call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %11, i8 %12 ) ; :13 [#uses=1] + store i8 %13, i8* @uc, align 1 + load i8* 
@sc, align 1 ; :14 [#uses=1] + sext i8 %14 to i16 ; :15 [#uses=1] + zext i16 %15 to i32 ; :16 [#uses=1] + load i8* @uc, align 1 ; :17 [#uses=1] + zext i8 %17 to i32 ; :18 [#uses=1] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :19 [#uses=1] + trunc i32 %18 to i16 ; :20 [#uses=1] + trunc i32 %16 to i16 ; :21 [#uses=1] + call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %19, i16 %20, i16 %21 ) ; :22 [#uses=1] + store i16 %22, i16* @ss, align 2 + load i8* @sc, align 1 ; :23 [#uses=1] + sext i8 %23 to i16 ; :24 [#uses=1] + zext i16 %24 to i32 ; :25 [#uses=1] + load i8* @uc, align 1 ; :26 [#uses=1] + zext i8 %26 to i32 ; :27 [#uses=1] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :28 [#uses=1] + trunc i32 %27 to i16 ; :29 [#uses=1] + trunc i32 %25 to i16 ; :30 [#uses=1] + call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %28, i16 %29, i16 %30 ) ; :31 [#uses=1] + store i16 %31, i16* @us, align 2 + load i8* @sc, align 1 ; :32 [#uses=1] + sext i8 %32 to i32 ; :33 [#uses=1] + load i8* @uc, align 1 ; :34 [#uses=1] + zext i8 %34 to i32 ; :35 [#uses=1] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :36 [#uses=1] + call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %36, i32 %35, i32 %33 ) ; :37 [#uses=1] + store i32 %37, i32* @si, align 4 + load i8* @sc, align 1 ; :38 [#uses=1] + sext i8 %38 to i32 ; :39 [#uses=1] + load i8* @uc, align 1 ; :40 [#uses=1] + zext i8 %40 to i32 ; :41 [#uses=1] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :42 [#uses=1] + call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %42, i32 %41, i32 %39 ) ; :43 [#uses=1] + store i32 %43, i32* @ui, align 4 + load i8* @sc, align 1 ; :44 [#uses=1] + sext i8 %44 to i32 ; :45 [#uses=1] + load i8* @uc, align 1 ; :46 [#uses=1] + zext i8 %46 to i32 ; :47 [#uses=1] + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :48 [#uses=1] + call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %48, i32 %47, i32 %45 ) ; :49 [#uses=1] + store i32 %49, i32* @sl, align 4 + load i8* @sc, align 1 ; :50 [#uses=1] + sext i8 %50 to i32 ; :51 [#uses=1] + load i8* @uc, align 1 ; :52 [#uses=1] + zext i8 %52 to i32 ; :53 [#uses=1] + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :54 [#uses=1] + call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %54, i32 %53, i32 %51 ) ; :55 [#uses=1] + store i32 %55, i32* @ul, align 4 + load i8* @sc, align 1 ; :56 [#uses=1] + zext i8 %56 to i32 ; :57 [#uses=1] + load i8* @uc, align 1 ; :58 [#uses=1] + zext i8 %58 to i32 ; :59 [#uses=1] + trunc i32 %59 to i8 ; :60 [#uses=2] + trunc i32 %57 to i8 ; :61 [#uses=1] + call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %60, i8 %61 ) ; :62 [#uses=1] + icmp eq i8 %62, %60 ; :63 [#uses=1] + zext i1 %63 to i8 ; :64 [#uses=1] + zext i8 %64 to i32 ; :65 [#uses=1] + store i32 %65, i32* @ui, align 4 + load i8* @sc, align 1 ; :66 [#uses=1] + zext i8 %66 to i32 ; :67 [#uses=1] + load i8* @uc, align 1 ; :68 [#uses=1] + zext i8 %68 to i32 ; :69 [#uses=1] + trunc i32 %69 to i8 ; :70 [#uses=2] + trunc i32 %67 to i8 ; :71 [#uses=1] + call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %70, i8 %71 ) ; :72 [#uses=1] + icmp eq i8 %72, %70 ; :73 [#uses=1] + zext i1 %73 to i8 ; :74 [#uses=1] + zext i8 %74 to i32 ; :75 [#uses=1] + store i32 %75, i32* @ui, align 4 + load i8* @sc, align 1 ; :76 [#uses=1] + sext i8 %76 to i16 ; :77 [#uses=1] + zext i16 %77 to i32 ; :78 [#uses=1] + load i8* @uc, align 1 ; :79 [#uses=1] + zext i8 %79 to i32 ; :80 [#uses=1] + trunc i32 %80 to i8 ; :81 [#uses=2] + trunc i32 %78 to i8 ; :82 [#uses=1] + call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @ss to i8*), i8 %81, i8 %82 ) ; :83 
[#uses=1] + icmp eq i8 %83, %81 ; :84 [#uses=1] + zext i1 %84 to i8 ; :85 [#uses=1] + zext i8 %85 to i32 ; :86 [#uses=1] + store i32 %86, i32* @ui, align 4 + load i8* @sc, align 1 ; :87 [#uses=1] + sext i8 %87 to i16 ; :88 [#uses=1] + zext i16 %88 to i32 ; :89 [#uses=1] + load i8* @uc, align 1 ; :90 [#uses=1] + zext i8 %90 to i32 ; :91 [#uses=1] + trunc i32 %91 to i8 ; :92 [#uses=2] + trunc i32 %89 to i8 ; :93 [#uses=1] + call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @us to i8*), i8 %92, i8 %93 ) ; :94 [#uses=1] + icmp eq i8 %94, %92 ; :95 [#uses=1] + zext i1 %95 to i8 ; :96 [#uses=1] + zext i8 %96 to i32 ; :97 [#uses=1] + store i32 %97, i32* @ui, align 4 + load i8* @sc, align 1 ; :98 [#uses=1] + sext i8 %98 to i32 ; :99 [#uses=1] + load i8* @uc, align 1 ; :100 [#uses=1] + zext i8 %100 to i32 ; :101 [#uses=1] + trunc i32 %101 to i8 ; :102 [#uses=2] + trunc i32 %99 to i8 ; :103 [#uses=1] + call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @si to i8*), i8 %102, i8 %103 ) ; :104 [#uses=1] + icmp eq i8 %104, %102 ; :105 [#uses=1] + zext i1 %105 to i8 ; :106 [#uses=1] + zext i8 %106 to i32 ; :107 [#uses=1] + store i32 %107, i32* @ui, align 4 + load i8* @sc, align 1 ; :108 [#uses=1] + sext i8 %108 to i32 ; :109 [#uses=1] + load i8* @uc, align 1 ; :110 [#uses=1] + zext i8 %110 to i32 ; :111 [#uses=1] + trunc i32 %111 to i8 ; :112 [#uses=2] + trunc i32 %109 to i8 ; :113 [#uses=1] + call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ui to i8*), i8 %112, i8 %113 ) ; :114 [#uses=1] + icmp eq i8 %114, %112 ; :115 [#uses=1] + zext i1 %115 to i8 ; :116 [#uses=1] + zext i8 %116 to i32 ; :117 [#uses=1] + store i32 %117, i32* @ui, align 4 + load i8* @sc, align 1 ; :118 [#uses=1] + sext i8 %118 to i32 ; :119 [#uses=1] + load i8* @uc, align 1 ; :120 [#uses=1] + zext i8 %120 to i32 ; :121 [#uses=1] + trunc i32 %121 to i8 ; :122 [#uses=2] + trunc i32 %119 to i8 ; :123 [#uses=1] + call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @sl to i8*), i8 %122, i8 %123 ) ; :124 [#uses=1] + icmp eq i8 %124, %122 ; :125 [#uses=1] + zext i1 %125 to i8 ; :126 [#uses=1] + zext i8 %126 to i32 ; :127 [#uses=1] + store i32 %127, i32* @ui, align 4 + load i8* @sc, align 1 ; :128 [#uses=1] + sext i8 %128 to i32 ; :129 [#uses=1] + load i8* @uc, align 1 ; :130 [#uses=1] + zext i8 %130 to i32 ; :131 [#uses=1] + trunc i32 %131 to i8 ; :132 [#uses=2] + trunc i32 %129 to i8 ; :133 [#uses=1] + call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ul to i8*), i8 %132, i8 %133 ) ; :134 [#uses=1] + icmp eq i8 %134, %132 ; :135 [#uses=1] + zext i1 %135 to i8 ; :136 [#uses=1] + zext i8 %136 to i32 ; :137 [#uses=1] + store i32 %137, i32* @ui, align 4 + br label %return + +return: ; preds = %entry + ret void +} + +declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind + +declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind + +declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind + +define void @test_lock() nounwind { +entry: + call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; :0 [#uses=1] + store i8 %0, i8* @sc, align 1 + call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; :1 [#uses=1] + store i8 %1, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :2 [#uses=1] + call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; :3 [#uses=1] + store i16 %3, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :4 [#uses=1] + call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; :5 [#uses=1] + store i16 %5, i16* @us, align 2 + bitcast i8* 
bitcast (i32* @si to i8*) to i32* ; :6 [#uses=1] + call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; :7 [#uses=1] + store i32 %7, i32* @si, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :8 [#uses=1] + call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; :9 [#uses=1] + store i32 %9, i32* @ui, align 4 + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :10 [#uses=1] + call i32 @llvm.atomic.swap.i32.p0i32( i32* %10, i32 1 ) ; :11 [#uses=1] + store i32 %11, i32* @sl, align 4 + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :12 [#uses=1] + call i32 @llvm.atomic.swap.i32.p0i32( i32* %12, i32 1 ) ; :13 [#uses=1] + store i32 %13, i32* @ul, align 4 + call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false ) + volatile store i8 0, i8* @sc, align 1 + volatile store i8 0, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :14 [#uses=1] + volatile store i16 0, i16* %14, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :15 [#uses=1] + volatile store i16 0, i16* %15, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :16 [#uses=1] + volatile store i32 0, i32* %16, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :17 [#uses=1] + volatile store i32 0, i32* %17, align 4 + bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :18 [#uses=1] + volatile store i32 0, i32* %18, align 4 + bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :19 [#uses=1] + volatile store i32 0, i32* %19, align 4 + br label %return + +return: ; preds = %entry + ret void +} + +declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind + +declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind diff --git a/test/CodeGen/X86/Atomics-64.ll b/test/CodeGen/X86/Atomics-64.ll new file mode 100644 index 00000000000..d6e3c651cb2 --- /dev/null +++ b/test/CodeGen/X86/Atomics-64.ll @@ -0,0 +1,1015 @@ +; RUN: llvm-as < %s | llc -march=x86-64 +; ModuleID = 'Atomics.c' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" +target triple = "x86_64-apple-darwin8" +@sc = common global i8 0 ; [#uses=56] +@uc = common global i8 0 ; [#uses=116] +@ss = common global i16 0 ; [#uses=15] +@us = common global i16 0 ; [#uses=15] +@si = common global i32 0 ; [#uses=15] +@ui = common global i32 0 ; [#uses=25] +@sl = common global i64 0 ; [#uses=15] +@ul = common global i64 0 ; [#uses=15] +@sll = common global i64 0 ; [#uses=15] +@ull = common global i64 0 ; [#uses=15] + +define void @test_op_ignore() nounwind { +entry: + call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; :0 [#uses=0] + call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; :1 [#uses=0] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :2 [#uses=1] + call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; :3 [#uses=0] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :4 [#uses=1] + call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; :5 [#uses=0] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :6 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; :7 [#uses=0] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :8 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; :9 [#uses=0] + bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :10 [#uses=1] + call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 1 ) ; :11 [#uses=0] + bitcast i8* bitcast 
(i64* @ul to i8*) to i64* ; :12 [#uses=1] + call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 1 ) ; :13 [#uses=0] + bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :14 [#uses=1] + call i64 @llvm.atomic.load.add.i64.p0i64( i64* %14, i64 1 ) ; :15 [#uses=0] + bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :16 [#uses=1] + call i64 @llvm.atomic.load.add.i64.p0i64( i64* %16, i64 1 ) ; :17 [#uses=0] + call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; :18 [#uses=0] + call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; :19 [#uses=0] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :20 [#uses=1] + call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %20, i16 1 ) ; :21 [#uses=0] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :22 [#uses=1] + call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %22, i16 1 ) ; :23 [#uses=0] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :24 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; :25 [#uses=0] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :26 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; :27 [#uses=0] + bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :28 [#uses=1] + call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %28, i64 1 ) ; :29 [#uses=0] + bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :30 [#uses=1] + call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %30, i64 1 ) ; :31 [#uses=0] + bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :32 [#uses=1] + call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %32, i64 1 ) ; :33 [#uses=0] + bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :34 [#uses=1] + call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %34, i64 1 ) ; :35 [#uses=0] + call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; :36 [#uses=0] + call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; :37 [#uses=0] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :38 [#uses=1] + call i16 @llvm.atomic.load.or.i16.p0i16( i16* %38, i16 1 ) ; :39 [#uses=0] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :40 [#uses=1] + call i16 @llvm.atomic.load.or.i16.p0i16( i16* %40, i16 1 ) ; :41 [#uses=0] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :42 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %42, i32 1 ) ; :43 [#uses=0] + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :44 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %44, i32 1 ) ; :45 [#uses=0] + bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :46 [#uses=1] + call i64 @llvm.atomic.load.or.i64.p0i64( i64* %46, i64 1 ) ; :47 [#uses=0] + bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :48 [#uses=1] + call i64 @llvm.atomic.load.or.i64.p0i64( i64* %48, i64 1 ) ; :49 [#uses=0] + bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :50 [#uses=1] + call i64 @llvm.atomic.load.or.i64.p0i64( i64* %50, i64 1 ) ; :51 [#uses=0] + bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :52 [#uses=1] + call i64 @llvm.atomic.load.or.i64.p0i64( i64* %52, i64 1 ) ; :53 [#uses=0] + call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; :54 [#uses=0] + call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; :55 [#uses=0] + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :56 [#uses=1] + call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %56, i16 1 ) ; :57 [#uses=0] + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :58 [#uses=1] + call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %58, i16 1 ) ; :59 [#uses=0] + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :60 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %60, i32 1 ) ; :61 [#uses=0] + bitcast i8* 
bitcast (i32* @ui to i8*) to i32* ; :62 [#uses=1]
+ call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %62, i32 1 ) ; :63 [#uses=0]
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :64 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %64, i64 1 ) ; :65 [#uses=0]
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :66 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %66, i64 1 ) ; :67 [#uses=0]
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :68 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %68, i64 1 ) ; :69 [#uses=0]
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :70 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %70, i64 1 ) ; :71 [#uses=0]
+ call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; :72 [#uses=0]
+ call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; :73 [#uses=0]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :74 [#uses=1]
+ call i16 @llvm.atomic.load.and.i16.p0i16( i16* %74, i16 1 ) ; :75 [#uses=0]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :76 [#uses=1]
+ call i16 @llvm.atomic.load.and.i16.p0i16( i16* %76, i16 1 ) ; :77 [#uses=0]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :78 [#uses=1]
+ call i32 @llvm.atomic.load.and.i32.p0i32( i32* %78, i32 1 ) ; :79 [#uses=0]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :80 [#uses=1]
+ call i32 @llvm.atomic.load.and.i32.p0i32( i32* %80, i32 1 ) ; :81 [#uses=0]
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :82 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %82, i64 1 ) ; :83 [#uses=0]
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :84 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %84, i64 1 ) ; :85 [#uses=0]
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :86 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %86, i64 1 ) ; :87 [#uses=0]
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :88 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %88, i64 1 ) ; :89 [#uses=0]
+ call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; :90 [#uses=0]
+ call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; :91 [#uses=0]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :92 [#uses=1]
+ call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %92, i16 1 ) ; :93 [#uses=0]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :94 [#uses=1]
+ call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %94, i16 1 ) ; :95 [#uses=0]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :96 [#uses=1]
+ call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %96, i32 1 ) ; :97 [#uses=0]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :98 [#uses=1]
+ call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %98, i32 1 ) ; :99 [#uses=0]
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :100 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %100, i64 1 ) ; :101 [#uses=0]
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :102 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %102, i64 1 ) ; :103 [#uses=0]
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :104 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %104, i64 1 ) ; :105 [#uses=0]
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :106 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %106, i64 1 ) ; :107 [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
+
+declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
+
+declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
+
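+; Provenance note (an assumption; the original C source is not part of this
+; patch): the ModuleID 'Atomics.c' suggests this IR was produced by llvm-gcc
+; from the GCC __sync builtins. For example, a hypothetical
+;   long long sll;
+;   void f(void) { __sync_fetch_and_add(&sll, 1); }
+; would lower to the @llvm.atomic.load.add.i64.p0i64 intrinsic declared just
+; below.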
+declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind + +declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind + +declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind + +declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind + +declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind + +declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind + +declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind + +declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind + +declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind + +declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind + +declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind + +declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind + +declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind + +define void @test_fetch_and_op() nounwind { +entry: + call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; :0 [#uses=1] + store i8 %0, i8* @sc, align 1 + call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; :1 [#uses=1] + store i8 %1, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :2 [#uses=1] + call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; :3 [#uses=1] + store i16 %3, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :4 [#uses=1] + call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; :5 [#uses=1] + store i16 %5, i16* @us, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :6 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; :7 [#uses=1] + store i32 %7, i32* @si, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :8 [#uses=1] + call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; :9 [#uses=1] + store i32 %9, i32* @ui, align 4 + bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :10 [#uses=1] + call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 11 ) ; :11 [#uses=1] + store i64 %11, i64* @sl, align 8 + bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :12 [#uses=1] + call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 11 ) ; :13 [#uses=1] + store i64 %13, i64* @ul, align 8 + bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :14 [#uses=1] + call i64 @llvm.atomic.load.add.i64.p0i64( i64* %14, i64 11 ) ; :15 [#uses=1] + store i64 %15, i64* @sll, align 8 + bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :16 [#uses=1] + call i64 @llvm.atomic.load.add.i64.p0i64( i64* %16, i64 11 ) ; :17 [#uses=1] + store i64 %17, i64* @ull, align 8 + call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; :18 [#uses=1] + store i8 %18, i8* @sc, align 1 + call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; :19 [#uses=1] + store i8 %19, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :20 [#uses=1] + call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %20, i16 11 ) ; :21 [#uses=1] + store i16 %21, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :22 [#uses=1] + call i16 
@llvm.atomic.load.sub.i16.p0i16( i16* %22, i16 11 ) ; :23 [#uses=1] + store i16 %23, i16* @us, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :24 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; :25 [#uses=1] + store i32 %25, i32* @si, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :26 [#uses=1] + call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; :27 [#uses=1] + store i32 %27, i32* @ui, align 4 + bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :28 [#uses=1] + call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %28, i64 11 ) ; :29 [#uses=1] + store i64 %29, i64* @sl, align 8 + bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :30 [#uses=1] + call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %30, i64 11 ) ; :31 [#uses=1] + store i64 %31, i64* @ul, align 8 + bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :32 [#uses=1] + call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %32, i64 11 ) ; :33 [#uses=1] + store i64 %33, i64* @sll, align 8 + bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :34 [#uses=1] + call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %34, i64 11 ) ; :35 [#uses=1] + store i64 %35, i64* @ull, align 8 + call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; :36 [#uses=1] + store i8 %36, i8* @sc, align 1 + call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; :37 [#uses=1] + store i8 %37, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :38 [#uses=1] + call i16 @llvm.atomic.load.or.i16.p0i16( i16* %38, i16 11 ) ; :39 [#uses=1] + store i16 %39, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :40 [#uses=1] + call i16 @llvm.atomic.load.or.i16.p0i16( i16* %40, i16 11 ) ; :41 [#uses=1] + store i16 %41, i16* @us, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :42 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %42, i32 11 ) ; :43 [#uses=1] + store i32 %43, i32* @si, align 4 + bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :44 [#uses=1] + call i32 @llvm.atomic.load.or.i32.p0i32( i32* %44, i32 11 ) ; :45 [#uses=1] + store i32 %45, i32* @ui, align 4 + bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :46 [#uses=1] + call i64 @llvm.atomic.load.or.i64.p0i64( i64* %46, i64 11 ) ; :47 [#uses=1] + store i64 %47, i64* @sl, align 8 + bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :48 [#uses=1] + call i64 @llvm.atomic.load.or.i64.p0i64( i64* %48, i64 11 ) ; :49 [#uses=1] + store i64 %49, i64* @ul, align 8 + bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :50 [#uses=1] + call i64 @llvm.atomic.load.or.i64.p0i64( i64* %50, i64 11 ) ; :51 [#uses=1] + store i64 %51, i64* @sll, align 8 + bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :52 [#uses=1] + call i64 @llvm.atomic.load.or.i64.p0i64( i64* %52, i64 11 ) ; :53 [#uses=1] + store i64 %53, i64* @ull, align 8 + call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; :54 [#uses=1] + store i8 %54, i8* @sc, align 1 + call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; :55 [#uses=1] + store i8 %55, i8* @uc, align 1 + bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :56 [#uses=1] + call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %56, i16 11 ) ; :57 [#uses=1] + store i16 %57, i16* @ss, align 2 + bitcast i8* bitcast (i16* @us to i8*) to i16* ; :58 [#uses=1] + call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %58, i16 11 ) ; :59 [#uses=1] + store i16 %59, i16* @us, align 2 + bitcast i8* bitcast (i32* @si to i8*) to i32* ; :60 [#uses=1] + call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %60, i32 11 ) ; :61 [#uses=1] + store i32 %61, i32* @si, align 4 
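+; The recurring shape in test_fetch_and_op: each intrinsic returns the value
+; the location held before the operation, and the test stores that old value
+; back into the matching global, as a hypothetical source line such as
+;   si = __sync_fetch_and_xor(&si, 11);
+; would (the exact builtin shown is an assumption, since Atomics.c itself is
+; not included in the patch).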
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :62 [#uses=1]
+ call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %62, i32 11 ) ; :63 [#uses=1]
+ store i32 %63, i32* @ui, align 4
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :64 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %64, i64 11 ) ; :65 [#uses=1]
+ store i64 %65, i64* @sl, align 8
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :66 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %66, i64 11 ) ; :67 [#uses=1]
+ store i64 %67, i64* @ul, align 8
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :68 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %68, i64 11 ) ; :69 [#uses=1]
+ store i64 %69, i64* @sll, align 8
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :70 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %70, i64 11 ) ; :71 [#uses=1]
+ store i64 %71, i64* @ull, align 8
+ call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; :72 [#uses=1]
+ store i8 %72, i8* @sc, align 1
+ call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; :73 [#uses=1]
+ store i8 %73, i8* @uc, align 1
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :74 [#uses=1]
+ call i16 @llvm.atomic.load.and.i16.p0i16( i16* %74, i16 11 ) ; :75 [#uses=1]
+ store i16 %75, i16* @ss, align 2
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :76 [#uses=1]
+ call i16 @llvm.atomic.load.and.i16.p0i16( i16* %76, i16 11 ) ; :77 [#uses=1]
+ store i16 %77, i16* @us, align 2
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :78 [#uses=1]
+ call i32 @llvm.atomic.load.and.i32.p0i32( i32* %78, i32 11 ) ; :79 [#uses=1]
+ store i32 %79, i32* @si, align 4
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :80 [#uses=1]
+ call i32 @llvm.atomic.load.and.i32.p0i32( i32* %80, i32 11 ) ; :81 [#uses=1]
+ store i32 %81, i32* @ui, align 4
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :82 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %82, i64 11 ) ; :83 [#uses=1]
+ store i64 %83, i64* @sl, align 8
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :84 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %84, i64 11 ) ; :85 [#uses=1]
+ store i64 %85, i64* @ul, align 8
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :86 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %86, i64 11 ) ; :87 [#uses=1]
+ store i64 %87, i64* @sll, align 8
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :88 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %88, i64 11 ) ; :89 [#uses=1]
+ store i64 %89, i64* @ull, align 8
+ call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; :90 [#uses=1]
+ store i8 %90, i8* @sc, align 1
+ call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; :91 [#uses=1]
+ store i8 %91, i8* @uc, align 1
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :92 [#uses=1]
+ call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %92, i16 11 ) ; :93 [#uses=1]
+ store i16 %93, i16* @ss, align 2
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :94 [#uses=1]
+ call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %94, i16 11 ) ; :95 [#uses=1]
+ store i16 %95, i16* @us, align 2
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :96 [#uses=1]
+ call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %96, i32 11 ) ; :97 [#uses=1]
+ store i32 %97, i32* @si, align 4
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :98 [#uses=1]
+ call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %98, i32 11 ) ; :99 [#uses=1]
+ store i32 %99, i32* @ui, align 4
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :100 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %100, i64 11 ) ; :101 [#uses=1]
+ store i64 %101, i64* @sl, align 8
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :102 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %102, i64 11 ) ; :103 [#uses=1]
+ store i64 %103, i64* @ul, align 8
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :104 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %104, i64 11 ) ; :105 [#uses=1]
+ store i64 %105, i64* @sll, align 8
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :106 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %106, i64 11 ) ; :107 [#uses=1]
+ store i64 %107, i64* @ull, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @test_op_and_fetch() nounwind {
+entry:
+ load i8* @uc, align 1 ; :0 [#uses=1]
+ zext i8 %0 to i32 ; :1 [#uses=1]
+ trunc i32 %1 to i8 ; :2 [#uses=2]
+ call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %2 ) ; :3 [#uses=1]
+ add i8 %3, %2 ; :4 [#uses=1]
+ store i8 %4, i8* @sc, align 1
+ load i8* @uc, align 1 ; :5 [#uses=1]
+ zext i8 %5 to i32 ; :6 [#uses=1]
+ trunc i32 %6 to i8 ; :7 [#uses=2]
+ call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %7 ) ; :8 [#uses=1]
+ add i8 %8, %7 ; :9 [#uses=1]
+ store i8 %9, i8* @uc, align 1
+ load i8* @uc, align 1 ; :10 [#uses=1]
+ zext i8 %10 to i32 ; :11 [#uses=1]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :12 [#uses=1]
+ trunc i32 %11 to i16 ; :13 [#uses=2]
+ call i16 @llvm.atomic.load.add.i16.p0i16( i16* %12, i16 %13 ) ; :14 [#uses=1]
+ add i16 %14, %13 ; :15 [#uses=1]
+ store i16 %15, i16* @ss, align 2
+ load i8* @uc, align 1 ; :16 [#uses=1]
+ zext i8 %16 to i32 ; :17 [#uses=1]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :18 [#uses=1]
+ trunc i32 %17 to i16 ; :19 [#uses=2]
+ call i16 @llvm.atomic.load.add.i16.p0i16( i16* %18, i16 %19 ) ; :20 [#uses=1]
+ add i16 %20, %19 ; :21 [#uses=1]
+ store i16 %21, i16* @us, align 2
+ load i8* @uc, align 1 ; :22 [#uses=1]
+ zext i8 %22 to i32 ; :23 [#uses=2]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :24 [#uses=1]
+ call i32 @llvm.atomic.load.add.i32.p0i32( i32* %24, i32 %23 ) ; :25 [#uses=1]
+ add i32 %25, %23 ; :26 [#uses=1]
+ store i32 %26, i32* @si, align 4
+ load i8* @uc, align 1 ; :27 [#uses=1]
+ zext i8 %27 to i32 ; :28 [#uses=2]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :29 [#uses=1]
+ call i32 @llvm.atomic.load.add.i32.p0i32( i32* %29, i32 %28 ) ; :30 [#uses=1]
+ add i32 %30, %28 ; :31 [#uses=1]
+ store i32 %31, i32* @ui, align 4
+ load i8* @uc, align 1 ; :32 [#uses=1]
+ zext i8 %32 to i64 ; :33 [#uses=2]
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :34 [#uses=1]
+ call i64 @llvm.atomic.load.add.i64.p0i64( i64* %34, i64 %33 ) ; :35 [#uses=1]
+ add i64 %35, %33 ; :36 [#uses=1]
+ store i64 %36, i64* @sl, align 8
+ load i8* @uc, align 1 ; :37 [#uses=1]
+ zext i8 %37 to i64 ; :38 [#uses=2]
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :39 [#uses=1]
+ call i64 @llvm.atomic.load.add.i64.p0i64( i64* %39, i64 %38 ) ; :40 [#uses=1]
+ add i64 %40, %38 ; :41 [#uses=1]
+ store i64 %41, i64* @ul, align 8
+ load i8* @uc, align 1 ; :42 [#uses=1]
+ zext i8 %42 to i64 ; :43 [#uses=2]
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :44 [#uses=1]
+ call i64 @llvm.atomic.load.add.i64.p0i64( i64* %44, i64 %43 ) ; :45 [#uses=1]
+ add i64 %45, %43 ; :46 [#uses=1]
+ store i64 %46, i64* @sll, align 8
+ load i8* @uc, align 1 ; :47 [#uses=1]
+ zext i8 %47 to i64 ; :48 [#uses=2]
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :49 [#uses=1]
+ call i64 @llvm.atomic.load.add.i64.p0i64( i64* %49, i64 %48 ) ; :50 [#uses=1]
+ add i64 %50, %48 ; :51 [#uses=1]
+ store i64 %51, i64* @ull, align 8
+ load i8* @uc, align 1 ; :52 [#uses=1]
+ zext i8 %52 to i32 ; :53 [#uses=1]
+ trunc i32 %53 to i8 ; :54 [#uses=2]
+ call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %54 ) ; :55 [#uses=1]
+ sub i8 %55, %54 ; :56 [#uses=1]
+ store i8 %56, i8* @sc, align 1
+ load i8* @uc, align 1 ; :57 [#uses=1]
+ zext i8 %57 to i32 ; :58 [#uses=1]
+ trunc i32 %58 to i8 ; :59 [#uses=2]
+ call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %59 ) ; :60 [#uses=1]
+ sub i8 %60, %59 ; :61 [#uses=1]
+ store i8 %61, i8* @uc, align 1
+ load i8* @uc, align 1 ; :62 [#uses=1]
+ zext i8 %62 to i32 ; :63 [#uses=1]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :64 [#uses=1]
+ trunc i32 %63 to i16 ; :65 [#uses=2]
+ call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %64, i16 %65 ) ; :66 [#uses=1]
+ sub i16 %66, %65 ; :67 [#uses=1]
+ store i16 %67, i16* @ss, align 2
+ load i8* @uc, align 1 ; :68 [#uses=1]
+ zext i8 %68 to i32 ; :69 [#uses=1]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :70 [#uses=1]
+ trunc i32 %69 to i16 ; :71 [#uses=2]
+ call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %70, i16 %71 ) ; :72 [#uses=1]
+ sub i16 %72, %71 ; :73 [#uses=1]
+ store i16 %73, i16* @us, align 2
+ load i8* @uc, align 1 ; :74 [#uses=1]
+ zext i8 %74 to i32 ; :75 [#uses=2]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :76 [#uses=1]
+ call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %76, i32 %75 ) ; :77 [#uses=1]
+ sub i32 %77, %75 ; :78 [#uses=1]
+ store i32 %78, i32* @si, align 4
+ load i8* @uc, align 1 ; :79 [#uses=1]
+ zext i8 %79 to i32 ; :80 [#uses=2]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :81 [#uses=1]
+ call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %81, i32 %80 ) ; :82 [#uses=1]
+ sub i32 %82, %80 ; :83 [#uses=1]
+ store i32 %83, i32* @ui, align 4
+ load i8* @uc, align 1 ; :84 [#uses=1]
+ zext i8 %84 to i64 ; :85 [#uses=2]
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :86 [#uses=1]
+ call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %86, i64 %85 ) ; :87 [#uses=1]
+ sub i64 %87, %85 ; :88 [#uses=1]
+ store i64 %88, i64* @sl, align 8
+ load i8* @uc, align 1 ; :89 [#uses=1]
+ zext i8 %89 to i64 ; :90 [#uses=2]
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :91 [#uses=1]
+ call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %91, i64 %90 ) ; :92 [#uses=1]
+ sub i64 %92, %90 ; :93 [#uses=1]
+ store i64 %93, i64* @ul, align 8
+ load i8* @uc, align 1 ; :94 [#uses=1]
+ zext i8 %94 to i64 ; :95 [#uses=2]
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :96 [#uses=1]
+ call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %96, i64 %95 ) ; :97 [#uses=1]
+ sub i64 %97, %95 ; :98 [#uses=1]
+ store i64 %98, i64* @sll, align 8
+ load i8* @uc, align 1 ; :99 [#uses=1]
+ zext i8 %99 to i64 ; :100 [#uses=2]
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :101 [#uses=1]
+ call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %101, i64 %100 ) ; :102 [#uses=1]
+ sub i64 %102, %100 ; :103 [#uses=1]
+ store i64 %103, i64* @ull, align 8
+ load i8* @uc, align 1 ; :104 [#uses=1]
+ zext i8 %104 to i32 ; :105 [#uses=1]
+ trunc i32 %105 to i8 ; :106 [#uses=2]
+ call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %106 ) ; :107 [#uses=1]
+ or i8 %107, %106 ; :108 [#uses=1]
+ store i8 %108, i8* @sc, align 1
+ load i8* @uc, align 1 ; :109 [#uses=1]
+ zext i8 %109 to i32 ; :110 [#uses=1]
+ trunc i32 %110 to i8 ; :111 [#uses=2]
+ call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %111 ) ; :112 [#uses=1]
+ or i8 %112, %111 ; :113 [#uses=1]
+ store i8 %113, i8* @uc, align 1
+ load i8* @uc, align 1 ; :114 [#uses=1]
+ zext i8 %114 to i32 ; :115 [#uses=1]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :116 [#uses=1]
+ trunc i32 %115 to i16 ; :117 [#uses=2]
+ call i16 @llvm.atomic.load.or.i16.p0i16( i16* %116, i16 %117 ) ; :118 [#uses=1]
+ or i16 %118, %117 ; :119 [#uses=1]
+ store i16 %119, i16* @ss, align 2
+ load i8* @uc, align 1 ; :120 [#uses=1]
+ zext i8 %120 to i32 ; :121 [#uses=1]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :122 [#uses=1]
+ trunc i32 %121 to i16 ; :123 [#uses=2]
+ call i16 @llvm.atomic.load.or.i16.p0i16( i16* %122, i16 %123 ) ; :124 [#uses=1]
+ or i16 %124, %123 ; :125 [#uses=1]
+ store i16 %125, i16* @us, align 2
+ load i8* @uc, align 1 ; :126 [#uses=1]
+ zext i8 %126 to i32 ; :127 [#uses=2]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :128 [#uses=1]
+ call i32 @llvm.atomic.load.or.i32.p0i32( i32* %128, i32 %127 ) ; :129 [#uses=1]
+ or i32 %129, %127 ; :130 [#uses=1]
+ store i32 %130, i32* @si, align 4
+ load i8* @uc, align 1 ; :131 [#uses=1]
+ zext i8 %131 to i32 ; :132 [#uses=2]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :133 [#uses=1]
+ call i32 @llvm.atomic.load.or.i32.p0i32( i32* %133, i32 %132 ) ; :134 [#uses=1]
+ or i32 %134, %132 ; :135 [#uses=1]
+ store i32 %135, i32* @ui, align 4
+ load i8* @uc, align 1 ; :136 [#uses=1]
+ zext i8 %136 to i64 ; :137 [#uses=2]
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :138 [#uses=1]
+ call i64 @llvm.atomic.load.or.i64.p0i64( i64* %138, i64 %137 ) ; :139 [#uses=1]
+ or i64 %139, %137 ; :140 [#uses=1]
+ store i64 %140, i64* @sl, align 8
+ load i8* @uc, align 1 ; :141 [#uses=1]
+ zext i8 %141 to i64 ; :142 [#uses=2]
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :143 [#uses=1]
+ call i64 @llvm.atomic.load.or.i64.p0i64( i64* %143, i64 %142 ) ; :144 [#uses=1]
+ or i64 %144, %142 ; :145 [#uses=1]
+ store i64 %145, i64* @ul, align 8
+ load i8* @uc, align 1 ; :146 [#uses=1]
+ zext i8 %146 to i64 ; :147 [#uses=2]
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :148 [#uses=1]
+ call i64 @llvm.atomic.load.or.i64.p0i64( i64* %148, i64 %147 ) ; :149 [#uses=1]
+ or i64 %149, %147 ; :150 [#uses=1]
+ store i64 %150, i64* @sll, align 8
+ load i8* @uc, align 1 ; :151 [#uses=1]
+ zext i8 %151 to i64 ; :152 [#uses=2]
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :153 [#uses=1]
+ call i64 @llvm.atomic.load.or.i64.p0i64( i64* %153, i64 %152 ) ; :154 [#uses=1]
+ or i64 %154, %152 ; :155 [#uses=1]
+ store i64 %155, i64* @ull, align 8
+ load i8* @uc, align 1 ; :156 [#uses=1]
+ zext i8 %156 to i32 ; :157 [#uses=1]
+ trunc i32 %157 to i8 ; :158 [#uses=2]
+ call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %158 ) ; :159 [#uses=1]
+ xor i8 %159, %158 ; :160 [#uses=1]
+ store i8 %160, i8* @sc, align 1
+ load i8* @uc, align 1 ; :161 [#uses=1]
+ zext i8 %161 to i32 ; :162 [#uses=1]
+ trunc i32 %162 to i8 ; :163 [#uses=2]
+ call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %163 ) ; :164 [#uses=1]
+ xor i8 %164, %163 ; :165 [#uses=1]
+ store i8 %165, i8* @uc, align 1
+ load i8* @uc, align 1 ; :166 [#uses=1]
+ zext i8 %166 to i32 ; :167 [#uses=1]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :168 [#uses=1]
+ trunc i32 %167 to i16 ; :169 [#uses=2]
+ call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %168, i16 %169 ) ; :170 [#uses=1]
+ xor i16 %170, %169 ; :171 [#uses=1]
+ store i16 %171, i16* @ss, align 2
+ load i8* @uc, align 1 ; :172 [#uses=1]
+ zext i8 %172 to i32 ; :173 [#uses=1]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :174 [#uses=1]
+ trunc i32 %173 to i16 ; :175 [#uses=2]
+ call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %174, i16 %175 ) ; :176 [#uses=1]
+ xor i16 %176, %175 ; :177 [#uses=1]
+ store i16 %177, i16* @us, align 2
+ load i8* @uc, align 1 ; :178 [#uses=1]
+ zext i8 %178 to i32 ; :179 [#uses=2]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :180 [#uses=1]
+ call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %180, i32 %179 ) ; :181 [#uses=1]
+ xor i32 %181, %179 ; :182 [#uses=1]
+ store i32 %182, i32* @si, align 4
+ load i8* @uc, align 1 ; :183 [#uses=1]
+ zext i8 %183 to i32 ; :184 [#uses=2]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :185 [#uses=1]
+ call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %185, i32 %184 ) ; :186 [#uses=1]
+ xor i32 %186, %184 ; :187 [#uses=1]
+ store i32 %187, i32* @ui, align 4
+ load i8* @uc, align 1 ; :188 [#uses=1]
+ zext i8 %188 to i64 ; :189 [#uses=2]
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :190 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %190, i64 %189 ) ; :191 [#uses=1]
+ xor i64 %191, %189 ; :192 [#uses=1]
+ store i64 %192, i64* @sl, align 8
+ load i8* @uc, align 1 ; :193 [#uses=1]
+ zext i8 %193 to i64 ; :194 [#uses=2]
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :195 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %195, i64 %194 ) ; :196 [#uses=1]
+ xor i64 %196, %194 ; :197 [#uses=1]
+ store i64 %197, i64* @ul, align 8
+ load i8* @uc, align 1 ; :198 [#uses=1]
+ zext i8 %198 to i64 ; :199 [#uses=2]
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :200 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %200, i64 %199 ) ; :201 [#uses=1]
+ xor i64 %201, %199 ; :202 [#uses=1]
+ store i64 %202, i64* @sll, align 8
+ load i8* @uc, align 1 ; :203 [#uses=1]
+ zext i8 %203 to i64 ; :204 [#uses=2]
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :205 [#uses=1]
+ call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %205, i64 %204 ) ; :206 [#uses=1]
+ xor i64 %206, %204 ; :207 [#uses=1]
+ store i64 %207, i64* @ull, align 8
+ load i8* @uc, align 1 ; :208 [#uses=1]
+ zext i8 %208 to i32 ; :209 [#uses=1]
+ trunc i32 %209 to i8 ; :210 [#uses=2]
+ call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %210 ) ; :211 [#uses=1]
+ and i8 %211, %210 ; :212 [#uses=1]
+ store i8 %212, i8* @sc, align 1
+ load i8* @uc, align 1 ; :213 [#uses=1]
+ zext i8 %213 to i32 ; :214 [#uses=1]
+ trunc i32 %214 to i8 ; :215 [#uses=2]
+ call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %215 ) ; :216 [#uses=1]
+ and i8 %216, %215 ; :217 [#uses=1]
+ store i8 %217, i8* @uc, align 1
+ load i8* @uc, align 1 ; :218 [#uses=1]
+ zext i8 %218 to i32 ; :219 [#uses=1]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :220 [#uses=1]
+ trunc i32 %219 to i16 ; :221 [#uses=2]
+ call i16 @llvm.atomic.load.and.i16.p0i16( i16* %220, i16 %221 ) ; :222 [#uses=1]
+ and i16 %222, %221 ; :223 [#uses=1]
+ store i16 %223, i16* @ss, align 2
+ load i8* @uc, align 1 ; :224 [#uses=1]
+ zext i8 %224 to i32 ; :225 [#uses=1]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :226 [#uses=1]
+ trunc i32 %225 to i16 ; :227 [#uses=2]
+ call i16 @llvm.atomic.load.and.i16.p0i16( i16* %226, i16 %227 ) ; :228 [#uses=1]
+ and i16 %228, %227 ; :229 [#uses=1]
+ store i16 %229, i16* @us, align 2
+ load i8* @uc, align 1 ; :230 [#uses=1]
+ zext i8 %230 to i32 ; :231 [#uses=2]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :232 [#uses=1]
+ call i32 @llvm.atomic.load.and.i32.p0i32( i32* %232, i32 %231 ) ; :233 [#uses=1]
+ and i32 %233, %231 ; :234 [#uses=1]
+ store i32 %234, i32* @si, align 4
+ load i8* @uc, align 1 ; :235 [#uses=1]
+ zext i8 %235 to i32 ; :236 [#uses=2]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :237 [#uses=1]
+ call i32 @llvm.atomic.load.and.i32.p0i32( i32* %237, i32 %236 ) ; :238 [#uses=1]
+ and i32 %238, %236 ; :239 [#uses=1]
+ store i32 %239, i32* @ui, align 4
+ load i8* @uc, align 1 ; :240 [#uses=1]
+ zext i8 %240 to i64 ; :241 [#uses=2]
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :242 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %242, i64 %241 ) ; :243 [#uses=1]
+ and i64 %243, %241 ; :244 [#uses=1]
+ store i64 %244, i64* @sl, align 8
+ load i8* @uc, align 1 ; :245 [#uses=1]
+ zext i8 %245 to i64 ; :246 [#uses=2]
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :247 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %247, i64 %246 ) ; :248 [#uses=1]
+ and i64 %248, %246 ; :249 [#uses=1]
+ store i64 %249, i64* @ul, align 8
+ load i8* @uc, align 1 ; :250 [#uses=1]
+ zext i8 %250 to i64 ; :251 [#uses=2]
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :252 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %252, i64 %251 ) ; :253 [#uses=1]
+ and i64 %253, %251 ; :254 [#uses=1]
+ store i64 %254, i64* @sll, align 8
+ load i8* @uc, align 1 ; :255 [#uses=1]
+ zext i8 %255 to i64 ; :256 [#uses=2]
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :257 [#uses=1]
+ call i64 @llvm.atomic.load.and.i64.p0i64( i64* %257, i64 %256 ) ; :258 [#uses=1]
+ and i64 %258, %256 ; :259 [#uses=1]
+ store i64 %259, i64* @ull, align 8
+ load i8* @uc, align 1 ; :260 [#uses=1]
+ zext i8 %260 to i32 ; :261 [#uses=1]
+ trunc i32 %261 to i8 ; :262 [#uses=2]
+ call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %262 ) ; :263 [#uses=1]
+ xor i8 %263, -1 ; :264 [#uses=1]
+ and i8 %264, %262 ; :265 [#uses=1]
+ store i8 %265, i8* @sc, align 1
+ load i8* @uc, align 1 ; :266 [#uses=1]
+ zext i8 %266 to i32 ; :267 [#uses=1]
+ trunc i32 %267 to i8 ; :268 [#uses=2]
+ call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %268 ) ; :269 [#uses=1]
+ xor i8 %269, -1 ; :270 [#uses=1]
+ and i8 %270, %268 ; :271 [#uses=1]
+ store i8 %271, i8* @uc, align 1
+ load i8* @uc, align 1 ; :272 [#uses=1]
+ zext i8 %272 to i32 ; :273 [#uses=1]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :274 [#uses=1]
+ trunc i32 %273 to i16 ; :275 [#uses=2]
+ call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %274, i16 %275 ) ; :276 [#uses=1]
+ xor i16 %276, -1 ; :277 [#uses=1]
+ and i16 %277, %275 ; :278 [#uses=1]
+ store i16 %278, i16* @ss, align 2
+ load i8* @uc, align 1 ; :279 [#uses=1]
+ zext i8 %279 to i32 ; :280 [#uses=1]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :281 [#uses=1]
+ trunc i32 %280 to i16 ; :282 [#uses=2]
+ call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %281, i16 %282 ) ; :283 [#uses=1]
+ xor i16 %283, -1 ; :284 [#uses=1]
+ and i16 %284, %282 ; :285 [#uses=1]
+ store i16 %285, i16* @us, align 2
+ load i8* @uc, align 1 ; :286 [#uses=1]
+ zext i8 %286 to i32 ; :287 [#uses=2]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :288 [#uses=1]
+ call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %288, i32 %287 ) ; :289 [#uses=1]
+ xor i32 %289, -1 ; :290 [#uses=1]
+ and i32 %290, %287 ; :291 [#uses=1]
+ store i32 %291, i32* @si, align 4
+ load i8* @uc, align 1 ; :292 [#uses=1]
+ zext i8 %292 to i32 ; :293 [#uses=2]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :294 [#uses=1]
+ call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %294, i32 %293 ) ; :295 [#uses=1]
+ xor i32 %295, -1 ; :296 [#uses=1]
+ and i32 %296, %293 ; :297 [#uses=1]
+ store i32 %297, i32* @ui, align 4
+ load i8* @uc, align 1 ; :298 [#uses=1]
+ zext i8 %298 to i64 ; :299 [#uses=2]
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :300 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %300, i64 %299 ) ; :301 [#uses=1]
+ xor i64 %301, -1 ; :302 [#uses=1]
+ and i64 %302, %299 ; :303 [#uses=1]
+ store i64 %303, i64* @sl, align 8
+ load i8* @uc, align 1 ; :304 [#uses=1]
+ zext i8 %304 to i64 ; :305 [#uses=2]
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :306 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %306, i64 %305 ) ; :307 [#uses=1]
+ xor i64 %307, -1 ; :308 [#uses=1]
+ and i64 %308, %305 ; :309 [#uses=1]
+ store i64 %309, i64* @ul, align 8
+ load i8* @uc, align 1 ; :310 [#uses=1]
+ zext i8 %310 to i64 ; :311 [#uses=2]
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :312 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %312, i64 %311 ) ; :313 [#uses=1]
+ xor i64 %313, -1 ; :314 [#uses=1]
+ and i64 %314, %311 ; :315 [#uses=1]
+ store i64 %315, i64* @sll, align 8
+ load i8* @uc, align 1 ; :316 [#uses=1]
+ zext i8 %316 to i64 ; :317 [#uses=2]
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :318 [#uses=1]
+ call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %318, i64 %317 ) ; :319 [#uses=1]
+ xor i64 %319, -1 ; :320 [#uses=1]
+ and i64 %320, %317 ; :321 [#uses=1]
+ store i64 %321, i64* @ull, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @test_compare_and_swap() nounwind {
+entry:
+ load i8* @sc, align 1 ; :0 [#uses=1]
+ zext i8 %0 to i32 ; :1 [#uses=1]
+ load i8* @uc, align 1 ; :2 [#uses=1]
+ zext i8 %2 to i32 ; :3 [#uses=1]
+ trunc i32 %3 to i8 ; :4 [#uses=1]
+ trunc i32 %1 to i8 ; :5 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %4, i8 %5 ) ; :6 [#uses=1]
+ store i8 %6, i8* @sc, align 1
+ load i8* @sc, align 1 ; :7 [#uses=1]
+ zext i8 %7 to i32 ; :8 [#uses=1]
+ load i8* @uc, align 1 ; :9 [#uses=1]
+ zext i8 %9 to i32 ; :10 [#uses=1]
+ trunc i32 %10 to i8 ; :11 [#uses=1]
+ trunc i32 %8 to i8 ; :12 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %11, i8 %12 ) ; :13 [#uses=1]
+ store i8 %13, i8* @uc, align 1
+ load i8* @sc, align 1 ; :14 [#uses=1]
+ sext i8 %14 to i16 ; :15 [#uses=1]
+ zext i16 %15 to i32 ; :16 [#uses=1]
+ load i8* @uc, align 1 ; :17 [#uses=1]
+ zext i8 %17 to i32 ; :18 [#uses=1]
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :19 [#uses=1]
+ trunc i32 %18 to i16 ; :20 [#uses=1]
+ trunc i32 %16 to i16 ; :21 [#uses=1]
+ call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %19, i16 %20, i16 %21 ) ; :22 [#uses=1]
+ store i16 %22, i16* @ss, align 2
+ load i8* @sc, align 1 ; :23 [#uses=1]
+ sext i8 %23 to i16 ; :24 [#uses=1]
+ zext i16 %24 to i32 ; :25 [#uses=1]
+ load i8* @uc, align 1 ; :26 [#uses=1]
+ zext i8 %26 to i32 ; :27 [#uses=1]
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :28 [#uses=1]
+ trunc i32 %27 to i16 ; :29 [#uses=1]
+ trunc i32 %25 to i16 ; :30 [#uses=1]
+ call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %28, i16 %29, i16 %30 ) ; :31 [#uses=1]
+ store i16 %31, i16* @us, align 2
+ load i8* @sc, align 1 ; :32 [#uses=1]
+ sext i8 %32 to i32 ; :33 [#uses=1]
+ load i8* @uc, align 1 ; :34 [#uses=1]
+ zext i8 %34 to i32 ; :35 [#uses=1]
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :36 [#uses=1]
+ call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %36, i32 %35, i32 %33 ) ; :37 [#uses=1]
+ store i32 %37, i32* @si, align 4
+ load i8* @sc, align 1 ; :38 [#uses=1]
+ sext i8 %38 to i32 ; :39 [#uses=1]
+ load i8* @uc, align 1 ; :40 [#uses=1]
+ zext i8 %40 to i32 ; :41 [#uses=1]
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :42 [#uses=1]
+ call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %42, i32 %41, i32 %39 ) ; :43 [#uses=1]
+ store i32 %43, i32* @ui, align 4
+ load i8* @sc, align 1 ; :44 [#uses=1]
+ sext i8 %44 to i64 ; :45 [#uses=1]
+ load i8* @uc, align 1 ; :46 [#uses=1]
+ zext i8 %46 to i64 ; :47 [#uses=1]
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :48 [#uses=1]
+ call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %48, i64 %47, i64 %45 ) ; :49 [#uses=1]
+ store i64 %49, i64* @sl, align 8
+ load i8* @sc, align 1 ; :50 [#uses=1]
+ sext i8 %50 to i64 ; :51 [#uses=1]
+ load i8* @uc, align 1 ; :52 [#uses=1]
+ zext i8 %52 to i64 ; :53 [#uses=1]
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :54 [#uses=1]
+ call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %54, i64 %53, i64 %51 ) ; :55 [#uses=1]
+ store i64 %55, i64* @ul, align 8
+ load i8* @sc, align 1 ; :56 [#uses=1]
+ sext i8 %56 to i64 ; :57 [#uses=1]
+ load i8* @uc, align 1 ; :58 [#uses=1]
+ zext i8 %58 to i64 ; :59 [#uses=1]
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :60 [#uses=1]
+ call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %60, i64 %59, i64 %57 ) ; :61 [#uses=1]
+ store i64 %61, i64* @sll, align 8
+ load i8* @sc, align 1 ; :62 [#uses=1]
+ sext i8 %62 to i64 ; :63 [#uses=1]
+ load i8* @uc, align 1 ; :64 [#uses=1]
+ zext i8 %64 to i64 ; :65 [#uses=1]
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :66 [#uses=1]
+ call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %66, i64 %65, i64 %63 ) ; :67 [#uses=1]
+ store i64 %67, i64* @ull, align 8
+ load i8* @sc, align 1 ; :68 [#uses=1]
+ zext i8 %68 to i32 ; :69 [#uses=1]
+ load i8* @uc, align 1 ; :70 [#uses=1]
+ zext i8 %70 to i32 ; :71 [#uses=1]
+ trunc i32 %71 to i8 ; :72 [#uses=2]
+ trunc i32 %69 to i8 ; :73 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %72, i8 %73 ) ; :74 [#uses=1]
+ icmp eq i8 %74, %72 ; :75 [#uses=1]
+ zext i1 %75 to i8 ; :76 [#uses=1]
+ zext i8 %76 to i32 ; :77 [#uses=1]
+ store i32 %77, i32* @ui, align 4
+ load i8* @sc, align 1 ; :78 [#uses=1]
+ zext i8 %78 to i32 ; :79 [#uses=1]
+ load i8* @uc, align 1 ; :80 [#uses=1]
+ zext i8 %80 to i32 ; :81 [#uses=1]
+ trunc i32 %81 to i8 ; :82 [#uses=2]
+ trunc i32 %79 to i8 ; :83 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %82, i8 %83 ) ; :84 [#uses=1]
+ icmp eq i8 %84, %82 ; :85 [#uses=1]
+ zext i1 %85 to i8 ; :86 [#uses=1]
+ zext i8 %86 to i32 ; :87 [#uses=1]
+ store i32 %87, i32* @ui, align 4
+ load i8* @sc, align 1 ; :88 [#uses=1]
+ sext i8 %88 to i16 ; :89 [#uses=1]
+ zext i16 %89 to i32 ; :90 [#uses=1]
+ load i8* @uc, align 1 ; :91 [#uses=1]
+ zext i8 %91 to i32 ; :92 [#uses=1]
+ trunc i32 %92 to i8 ; :93 [#uses=2]
+ trunc i32 %90 to i8 ; :94 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 ) ; :95 [#uses=1]
+ icmp eq i8 %95, %93 ; :96 [#uses=1]
+ zext i1 %96 to i8 ; :97 [#uses=1]
+ zext i8 %97 to i32 ; :98 [#uses=1]
+ store i32 %98, i32* @ui, align 4
+ load i8* @sc, align 1 ; :99 [#uses=1]
+ sext i8 %99 to i16 ; :100 [#uses=1]
+ zext i16 %100 to i32 ; :101 [#uses=1]
+ load i8* @uc, align 1 ; :102 [#uses=1]
+ zext i8 %102 to i32 ; :103 [#uses=1]
+ trunc i32 %103 to i8 ; :104 [#uses=2]
+ trunc i32 %101 to i8 ; :105 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 ) ; :106 [#uses=1]
+ icmp eq i8 %106, %104 ; :107 [#uses=1]
+ zext i1 %107 to i8 ; :108 [#uses=1]
+ zext i8 %108 to i32 ; :109 [#uses=1]
+ store i32 %109, i32* @ui, align 4
+ load i8* @sc, align 1 ; :110 [#uses=1]
+ sext i8 %110 to i32 ; :111 [#uses=1]
+ load i8* @uc, align 1 ; :112 [#uses=1]
+ zext i8 %112 to i32 ; :113 [#uses=1]
+ trunc i32 %113 to i8 ; :114 [#uses=2]
+ trunc i32 %111 to i8 ; :115 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 ) ; :116 [#uses=1]
+ icmp eq i8 %116, %114 ; :117 [#uses=1]
+ zext i1 %117 to i8 ; :118 [#uses=1]
+ zext i8 %118 to i32 ; :119 [#uses=1]
+ store i32 %119, i32* @ui, align 4
+ load i8* @sc, align 1 ; :120 [#uses=1]
+ sext i8 %120 to i32 ; :121 [#uses=1]
+ load i8* @uc, align 1 ; :122 [#uses=1]
+ zext i8 %122 to i32 ; :123 [#uses=1]
+ trunc i32 %123 to i8 ; :124 [#uses=2]
+ trunc i32 %121 to i8 ; :125 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 ) ; :126 [#uses=1]
+ icmp eq i8 %126, %124 ; :127 [#uses=1]
+ zext i1 %127 to i8 ; :128 [#uses=1]
+ zext i8 %128 to i32 ; :129 [#uses=1]
+ store i32 %129, i32* @ui, align 4
+ load i8* @sc, align 1 ; :130 [#uses=1]
+ sext i8 %130 to i64 ; :131 [#uses=1]
+ load i8* @uc, align 1 ; :132 [#uses=1]
+ zext i8 %132 to i64 ; :133 [#uses=1]
+ trunc i64 %133 to i8 ; :134 [#uses=2]
+ trunc i64 %131 to i8 ; :135 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 ) ; :136 [#uses=1]
+ icmp eq i8 %136, %134 ; :137 [#uses=1]
+ zext i1 %137 to i8 ; :138 [#uses=1]
+ zext i8 %138 to i32 ; :139 [#uses=1]
+ store i32 %139, i32* @ui, align 4
+ load i8* @sc, align 1 ; :140 [#uses=1]
+ sext i8 %140 to i64 ; :141 [#uses=1]
+ load i8* @uc, align 1 ; :142 [#uses=1]
+ zext i8 %142 to i64 ; :143 [#uses=1]
+ trunc i64 %143 to i8 ; :144 [#uses=2]
+ trunc i64 %141 to i8 ; :145 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 ) ; :146 [#uses=1]
+ icmp eq i8 %146, %144 ; :147 [#uses=1]
+ zext i1 %147 to i8 ; :148 [#uses=1]
+ zext i8 %148 to i32 ; :149 [#uses=1]
+ store i32 %149, i32* @ui, align 4
+ load i8* @sc, align 1 ; :150 [#uses=1]
+ sext i8 %150 to i64 ; :151 [#uses=1]
+ load i8* @uc, align 1 ; :152 [#uses=1]
+ zext i8 %152 to i64 ; :153 [#uses=1]
+ trunc i64 %153 to i8 ; :154 [#uses=2]
+ trunc i64 %151 to i8 ; :155 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 ) ; :156 [#uses=1]
+ icmp eq i8 %156, %154 ; :157 [#uses=1]
+ zext i1 %157 to i8 ; :158 [#uses=1]
+ zext i8 %158 to i32 ; :159 [#uses=1]
+ store i32 %159, i32* @ui, align 4
+ load i8* @sc, align 1 ; :160 [#uses=1]
+ sext i8 %160 to i64 ; :161 [#uses=1]
+ load i8* @uc, align 1 ; :162 [#uses=1]
+ zext i8 %162 to i64 ; :163 [#uses=1]
+ trunc i64 %163 to i8 ; :164 [#uses=2]
+ trunc i64 %161 to i8 ; :165 [#uses=1]
+ call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 ) ; :166 [#uses=1]
+ icmp eq i8 %166, %164 ; :167 [#uses=1]
+ zext i1 %167 to i8 ; :168 [#uses=1]
+ zext i8 %168 to i32 ; :169 [#uses=1]
+ store i32 %169, i32* @ui, align 4
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
+
+declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
+
+declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
+
+declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind
+
+define void @test_lock() nounwind {
+entry:
+ call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; :0 [#uses=1]
+ store i8 %0, i8* @sc, align 1
+ call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; :1 [#uses=1]
+ store i8 %1, i8* @uc, align 1
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :2 [#uses=1]
+ call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; :3 [#uses=1]
+ store i16 %3, i16* @ss, align 2
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :4 [#uses=1]
+ call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; :5 [#uses=1]
+ store i16 %5, i16* @us, align 2
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :6 [#uses=1]
+ call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; :7 [#uses=1]
+ store i32 %7, i32* @si, align 4
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :8 [#uses=1]
+ call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; :9 [#uses=1]
+ store i32 %9, i32* @ui, align 4
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :10 [#uses=1]
+ call i64 @llvm.atomic.swap.i64.p0i64( i64* %10, i64 1 ) ; :11 [#uses=1]
+ store i64 %11, i64* @sl, align 8
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :12 [#uses=1]
+ call i64 @llvm.atomic.swap.i64.p0i64( i64* %12, i64 1 ) ; :13 [#uses=1]
+ store i64 %13, i64* @ul, align 8
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :14 [#uses=1]
+ call i64 @llvm.atomic.swap.i64.p0i64( i64* %14, i64 1 ) ; :15 [#uses=1]
+ store i64 %15, i64* @sll, align 8
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :16 [#uses=1]
+ call i64 @llvm.atomic.swap.i64.p0i64( i64* %16, i64 1 ) ; :17 [#uses=1]
+ store i64 %17, i64* @ull, align 8
+ call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
+ volatile store i8 0, i8* @sc, align 1
+ volatile store i8 0, i8* @uc, align 1
+ bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :18 [#uses=1]
+ volatile store i16 0, i16* %18, align 2
+ bitcast i8* bitcast (i16* @us to i8*) to i16* ; :19 [#uses=1]
+ volatile store i16 0, i16* %19, align 2
+ bitcast i8* bitcast (i32* @si to i8*) to i32* ; :20 [#uses=1]
+ volatile store i32 0, i32* %20, align 4
+ bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :21 [#uses=1]
+ volatile store i32 0, i32* %21, align 4
+ bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :22 [#uses=1]
+ volatile store i64 0, i64* %22, align 8
+ bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :23 [#uses=1]
+ volatile store i64 0, i64* %23, align 8
+ bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :24 [#uses=1]
+ volatile store i64 0, i64* %24, align 8
+ bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :25 [#uses=1]
+ volatile store i64 0, i64* %25, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
+
+declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
+
+declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
+
+declare i64 @llvm.atomic.swap.i64.p0i64(i64*, i64) nounwind
-- 
2.34.1