diff --git a/test/CodeGen/PowerPC/structsinmem.ll b/test/CodeGen/PowerPC/structsinmem.ll
index 884d3a89d15..bfada4c6371 100644
--- a/test/CodeGen/PowerPC/structsinmem.ll
+++ b/test/CodeGen/PowerPC/structsinmem.ll
@@ -1,8 +1,4 @@
-; RUN: llc -mcpu=pwr7 -O0 -disable-fp-elim < %s | FileCheck %s
-
-; FIXME: The code generation for packed structs is very poor because the
-; PowerPC target wrongly rejects all unaligned loads. This test case will
-; need to be revised when that is fixed.
+; RUN: llc -mcpu=ppc64 -O0 -disable-fp-elim -fast-isel=false < %s | FileCheck %s
 
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
@@ -47,7 +43,7 @@ entry:
   %p6 = alloca %struct.s6, align 4
   %p7 = alloca %struct.s7, align 4
   %0 = bitcast %struct.s1* %p1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
   %1 = bitcast %struct.s2* %p2 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i32 2, i1 false)
   %2 = bitcast %struct.s3* %p3 to i8*
@@ -92,34 +88,34 @@ entry:
   store i32 %z6, i32* %z6.addr, align 4
   store i32 %z7, i32* %z7.addr, align 4
   store i32 %z8, i32* %z8.addr, align 4
-  %a = getelementptr inbounds %struct.s1* %v1, i32 0, i32 0
-  %0 = load i8* %a, align 1
+  %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
+  %0 = load i8, i8* %a, align 1
   %conv = zext i8 %0 to i32
-  %a1 = getelementptr inbounds %struct.s2* %v2, i32 0, i32 0
-  %1 = load i16* %a1, align 2
+  %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
+  %1 = load i16, i16* %a1, align 2
   %conv2 = sext i16 %1 to i32
   %add = add nsw i32 %conv, %conv2
-  %a3 = getelementptr inbounds %struct.s3* %v3, i32 0, i32 0
-  %2 = load i16* %a3, align 2
+  %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
+  %2 = load i16, i16* %a3, align 2
   %conv4 = sext i16 %2 to i32
   %add5 = add nsw i32 %add, %conv4
-  %a6 = getelementptr inbounds %struct.s4* %v4, i32 0, i32 0
-  %3 = load i32* %a6, align 4
+  %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
+  %3 = load i32, i32* %a6, align 4
   %add7 = add nsw i32 %add5, %3
-  %a8 = getelementptr inbounds %struct.s5* %v5, i32 0, i32 0
-  %4 = load i32* %a8, align 4
+  %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
+  %4 = load i32, i32* %a8, align 4
   %add9 = add nsw i32 %add7, %4
-  %a10 = getelementptr inbounds %struct.s6* %v6, i32 0, i32 0
-  %5 = load i32* %a10, align 4
+  %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
+  %5 = load i32, i32* %a10, align 4
   %add11 = add nsw i32 %add9, %5
-  %a12 = getelementptr inbounds %struct.s7* %v7, i32 0, i32 0
-  %6 = load i32* %a12, align 4
+  %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
+  %6 = load i32, i32* %a12, align 4
   %add13 = add nsw i32 %add11, %6
   ret i32 %add13
 
 ; CHECK: lha {{[0-9]+}}, 126(1)
-; CHECK: lbz {{[0-9]+}}, 119(1)
 ; CHECK: lha {{[0-9]+}}, 132(1)
+; CHECK: lbz {{[0-9]+}}, 119(1)
 ; CHECK: lwz {{[0-9]+}}, 140(1)
 ; CHECK: lwz {{[0-9]+}}, 144(1)
 ; CHECK: lwz {{[0-9]+}}, 152(1)
@@ -136,7 +132,7 @@ entry:
   %p6 = alloca %struct.t6, align 1
   %p7 = alloca %struct.t7, align 1
   %0 = bitcast %struct.t1* %p1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
   %1 = bitcast %struct.t2* %p2 to i8*
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i32 1, i1 false)
   %2 = bitcast %struct.t3* %p3 to i8*
@@ -184,44 +180,36 @@ entry:
   store i32 %z6, i32* %z6.addr, align 4
   store i32 %z7, i32* %z7.addr, align 4
   store i32 %z8, i32* %z8.addr, align 4
-  %a = getelementptr inbounds %struct.t1* %v1, i32 0, i32 0
-  %0 = load i8* %a, align 1
+  %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
+  %0 = load i8, i8* %a, align 1
   %conv = zext i8 %0 to i32
-  %a1 = getelementptr inbounds %struct.t2* %v2, i32 0, i32 0
-  %1 = load i16* %a1, align 1
+  %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
+  %1 = load i16, i16* %a1, align 1
   %conv2 = sext i16 %1 to i32
   %add = add nsw i32 %conv, %conv2
-  %a3 = getelementptr inbounds %struct.t3* %v3, i32 0, i32 0
-  %2 = load i16* %a3, align 1
+  %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
+  %2 = load i16, i16* %a3, align 1
   %conv4 = sext i16 %2 to i32
   %add5 = add nsw i32 %add, %conv4
-  %a6 = getelementptr inbounds %struct.t4* %v4, i32 0, i32 0
-  %3 = load i32* %a6, align 1
+  %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
+  %3 = load i32, i32* %a6, align 1
   %add7 = add nsw i32 %add5, %3
-  %a8 = getelementptr inbounds %struct.t5* %v5, i32 0, i32 0
-  %4 = load i32* %a8, align 1
+  %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
+  %4 = load i32, i32* %a8, align 1
   %add9 = add nsw i32 %add7, %4
-  %a10 = getelementptr inbounds %struct.t6* %v6, i32 0, i32 0
-  %5 = load i32* %a10, align 1
+  %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
+  %5 = load i32, i32* %a10, align 1
   %add11 = add nsw i32 %add9, %5
-  %a12 = getelementptr inbounds %struct.t7* %v7, i32 0, i32 0
-  %6 = load i32* %a12, align 1
+  %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
+  %6 = load i32, i32* %a12, align 1
   %add13 = add nsw i32 %add11, %6
   ret i32 %add13
 
-; CHECK: lbz {{[0-9]+}}, 149(1)
-; CHECK: lbz {{[0-9]+}}, 150(1)
-; CHECK: lbz {{[0-9]+}}, 147(1)
-; CHECK: lbz {{[0-9]+}}, 148(1)
-; CHECK: lbz {{[0-9]+}}, 133(1)
-; CHECK: lbz {{[0-9]+}}, 134(1)
 ; CHECK: lha {{[0-9]+}}, 126(1)
+; CHECK: lha {{[0-9]+}}, 133(1)
 ; CHECK: lbz {{[0-9]+}}, 119(1)
 ; CHECK: lwz {{[0-9]+}}, 140(1)
-; CHECK: lhz {{[0-9]+}}, 154(1)
-; CHECK: lhz {{[0-9]+}}, 156(1)
-; CHECK: lbz {{[0-9]+}}, 163(1)
-; CHECK: lbz {{[0-9]+}}, 164(1)
-; CHECK: lbz {{[0-9]+}}, 161(1)
-; CHECK: lbz {{[0-9]+}}, 162(1)
+; CHECK: lwz {{[0-9]+}}, 147(1)
+; CHECK: lwz {{[0-9]+}}, 154(1)
+; CHECK: lwz {{[0-9]+}}, 161(1)
 }
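
Note: the IR rewrite this diff applies to structsinmem.ll is purely syntactic. Under the
explicit-pointee-type migration, load and getelementptr spell out the pointed-to type as a
separate leading operand instead of deriving it from the pointer operand's type. A minimal
sketch of the two spellings follows; the %struct.s and @example names are illustrative only
and do not appear in this test.

; Old spelling (pointee type implied by the pointer operand):
;   %a = getelementptr inbounds %struct.s* %v, i32 0, i32 0
;   %x = load i32* %a, align 4

; New spelling (pointee type stated explicitly up front):
%struct.s = type { i32 }

define i32 @example(%struct.s* %v) {
entry:
  %a = getelementptr inbounds %struct.s, %struct.s* %v, i32 0, i32 0
  %x = load i32, i32* %a, align 4
  ret i32 %x
}

The constant-expression getelementptr operands of the memcpy calls above receive the same
extra leading type argument; the pointer operand itself is unchanged.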