diff --git a/test/CodeGen/X86/stackmap.ll b/test/CodeGen/X86/stackmap.ll
index cfd0c6e8845..0805e814704 100644
--- a/test/CodeGen/X86/stackmap.ll
+++ b/test/CodeGen/X86/stackmap.ll
@@ -1,50 +1,60 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 | FileCheck %s
 ;
 ; Note: Print verbose stackmaps using -debug-only=stackmaps.
 
 ; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
 ; CHECK-NEXT: __LLVM_StackMaps:
-; CHECK-NEXT: .long 0
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
 ; Num Functions
-; CHECK-NEXT: .long 15
-; CHECK-NEXT: .long _constantargs
-; CHECK-NEXT: .long 8
-; CHECK-NEXT: .long _osrinline
-; CHECK-NEXT: .long 24
-; CHECK-NEXT: .long _osrcold
-; CHECK-NEXT: .long 8
-; CHECK-NEXT: .long _propertyRead
-; CHECK-NEXT: .long 8
-; CHECK-NEXT: .long _propertyWrite
-; CHECK-NEXT: .long 8
-; CHECK-NEXT: .long _jsVoidCall
-; CHECK-NEXT: .long 8
-; CHECK-NEXT: .long _jsIntCall
-; CHECK-NEXT: .long 8
-; CHECK-NEXT: .long _spilledValue
-; CHECK-NEXT: .long 56
-; CHECK-NEXT: .long _spilledStackMapValue
-; CHECK-NEXT: .long 56
-; CHECK-NEXT: .long _spillSubReg
-; CHECK-NEXT: .long 56
-; CHECK-NEXT: .long _subRegOffset
-; CHECK-NEXT: .long 56
-; CHECK-NEXT: .long _liveConstant
-; CHECK-NEXT: .long 8
-; CHECK-NEXT: .long _directFrameIdx
-; CHECK-NEXT: .long 56
-; CHECK-NEXT: .long _longid
-; CHECK-NEXT: .long 8
-; CHECK-NEXT: .long _clobberScratch
-; CHECK-NEXT: .long 56
+; CHECK-NEXT: .long 16
 ; Num LargeConstants
-; CHECK-NEXT: .long 3
+; CHECK-NEXT: .long 3
+; Num Callsites
+; CHECK-NEXT: .long 20
+
+; Functions and stack size
+; CHECK-NEXT: .quad _constantargs
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _osrinline
+; CHECK-NEXT: .quad 24
+; CHECK-NEXT: .quad _osrcold
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _propertyRead
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _propertyWrite
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _jsVoidCall
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _jsIntCall
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _spilledValue
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _spilledStackMapValue
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _spillSubReg
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _subRegOffset
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _liveConstant
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _directFrameIdx
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _longid
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _clobberScratch
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _needsStackRealignment
+; CHECK-NEXT: .quad -1
+
+; Large Constants
 ; CHECK-NEXT: .quad 2147483648
 ; CHECK-NEXT: .quad 4294967295
 ; CHECK-NEXT: .quad 4294967296
-; Num Callsites
-; CHECK-NEXT: .long 19
 
+; Callsites
 ; Constant arguments
 ;
 ; CHECK-NEXT: .quad 1
@@ -115,7 +125,7 @@
 define void @constantargs() {
 entry:
   %0 = inttoptr i64 12345 to i8*
-  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 1, i32 15, i8* %0, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1)
+  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 15, i8* %0, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1)
   ret void
 }
 
@@ -137,7 +147,7 @@
 entry:
   ; Runtime void->void call.
   call void inttoptr (i64 -559038737 to void ()*)()
   ; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
-  call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
   ret void
 }
 
@@ -163,7 +173,7 @@
 cold:
   ; OSR patchpoint with 12-byte nop-slide and 2 live vars.
   %thunk = inttoptr i64 -559038737 to i8*
-  call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 4, i32 15, i8* %thunk, i32 0, i64 %a, i64 %b)
+  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 15, i8* %thunk, i32 0, i64 %a, i64 %b)
   unreachable
 ret:
   ret void
@@ -184,7 +194,7 @@
 define i64 @propertyRead(i64* %obj) {
 entry:
   %resolveRead = inttoptr i64 -559038737 to i8*
-  %result = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 15, i8* %resolveRead, i32 1, i64* %obj)
+  %result = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 15, i8* %resolveRead, i32 1, i64* %obj)
   %add = add i64 %result, 3
   ret i64 %add
 }
@@ -204,7 +214,7 @@
 define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
 entry:
   %resolveWrite = inttoptr i64 -559038737 to i8*
-  call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 6, i32 15, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+  call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 15, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
   ret void
 }
 
@@ -226,7 +236,7 @@
 define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
 entry:
   %resolveCall = inttoptr i64 -559038737 to i8*
-  call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 7, i32 15, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 15, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
   ret void
 }
 
@@ -248,7 +258,7 @@
 define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
 entry:
   %resolveCall = inttoptr i64 -559038737 to i8*
-  %result = call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 8, i32 15, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+  %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 15, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
   %add = add i64 %result, 3
   ret i64 %add
 }
@@ -268,7 +278,7 @@
 ; CHECK-NEXT: .short 6
 define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16) {
 entry:
-  call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 11, i32 15, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
+  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 15, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
   ret void
 }
 
@@ -287,7 +297,7 @@
 ; CHECK-NEXT: .short 6
 define webkit_jscc void @spilledStackMapValue(i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16) {
 entry:
-  call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 12, i32 15, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 12, i32 15, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
   ret void
 }
 
@@ -311,7 +321,7 @@
 bb1:
   unreachable
 
 bb2:
-  %tmp = load i64* inttoptr (i64 140685446136880 to i64*)
+  %tmp = load i64, i64* inttoptr (i64 140685446136880 to i64*)
   br i1 undef, label %bb16, label %bb17
 
 bb16:
@@ -323,7 +333,7 @@
 bb17:
 
 bb60:
   tail call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"() nounwind
-  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 13, i32 5, i32 %tmp32)
+  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 13, i32 5, i32 %tmp32)
   unreachable
 
 bb61:
@@ -357,7 +367,7 @@ define void @subRegOffset(i16 %arg) {
   %arghi = lshr i16 %v, 8
   %a1 = trunc i16 %arghi to i8
   tail call void asm sideeffect "nop", "~{cx},~{dx},~{bp},~{si},~{di},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"() nounwind
-  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 5, i8 %a0, i8 %a1)
+  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 14, i32 5, i8 %a0, i8 %a1)
   ret void
 }
 
@@ -374,7 +384,7 @@
 ; CHECK-NEXT: .long 33
 
 define void @liveConstant() {
-  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 5, i32 33)
+  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 15, i32 5, i32 33)
   ret void
 }
 
@@ -412,10 +422,10 @@
 entry:
   store i64 11, i64* %metadata1
   store i64 12, i64* %metadata1
   store i64 13, i64* %metadata1
-  call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 0, i64* %metadata1)
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 16, i32 0, i64* %metadata1)
   %metadata2 = alloca i8, i32 4, align 8
   %metadata3 = alloca i16, i32 4, align 8
-  call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 17, i32 5, i8* null, i32 0, i8* %metadata2, i16* %metadata3)
+  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 17, i32 5, i8* null, i32 0, i8* %metadata2, i16* %metadata3)
   ret void
 }
@@ -431,10 +441,10 @@
 ; CHECK-LABEL: .long L{{.*}}-_longid
 define void @longid() {
 entry:
-  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 4294967295, i32 0, i8* null, i32 0)
-  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 4294967296, i32 0, i8* null, i32 0)
-  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 9223372036854775807, i32 0, i8* null, i32 0)
-  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 -1, i32 0, i8* null, i32 0)
+  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967295, i32 0, i8* null, i32 0)
+  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967296, i32 0, i8* null, i32 0)
+  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 9223372036854775807, i32 0, i8* null, i32 0)
+  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 -1, i32 0, i8* null, i32 0)
   ret void
 }
 
@@ -452,9 +462,26 @@
 ; CHECK-NEXT: .long -{{[0-9]+}}
 define void @clobberScratch(i32 %a) {
   tail call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di},~{r8},~{r9},~{r10},~{r12},~{r13},~{r14},~{r15}"() nounwind
-  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 8, i32 %a)
+  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 16, i32 8, i32 %a)
+  ret void
+}
+
+; A stack frame which needs to be realigned at runtime (to meet alignment
+; criteria for values on the stack) does not have a fixed frame size.
+; CHECK-LABEL: .long L{{.*}}-_needsStackRealignment
+; CHECK-NEXT: .short 0
+; 0 locations
+; CHECK-NEXT: .short 0
+define void @needsStackRealignment() {
+  %val = alloca i64, i32 3, align 128
+  tail call void (...) @escape_values(i64* %val)
+; Note: Adding any non-constant to the stackmap would fail because we
+; expected to be able to address off the frame pointer. In a realigned
+; frame, we must use the stack pointer instead. This is a separate bug.
+  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 0, i32 0)
   ret void
 }
 
+declare void @escape_values(...)
 declare void @llvm.experimental.stackmap(i64, i32, ...)
 declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)