}
}
+// The stack limit in the TCB is set to this many bytes above the actual stack
+// limit. Consequently, when the frame is smaller than this, the prologue can
+// compare the stack pointer against the TCB limit directly instead of first
+// computing (sp - StackSize) into a scratch register with an LEA.
+static const uint64_t kSplitStackAvailable = 256;
+
void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
MachineBasicBlock &prologueMBB = MF.front();
TlsReg = X86::FS;
TlsOffset = 0x70;
- BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
- .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+ if (StackSize < kSplitStackAvailable)
+ ScratchReg = X86::RSP;
+ else
+ BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
+ .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+
BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
.addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
} else {
TlsReg = X86::GS;
TlsOffset = 0x30;
- BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
- .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+ if (StackSize < kSplitStackAvailable)
+ ScratchReg = X86::ESP;
+ else
+ BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
+ .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+
BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
.addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
}
; X32: test_basic:
-; X32: leal -12(%esp), %ecx
-; X32-NEXT: cmpl %gs:48, %ecx
+; X32: cmpl %gs:48, %esp
; X32: pushl $4
; X32-NEXT: pushl $12
; X64: test_basic:
-; X64: leaq -24(%rsp), %r11
-; X64-NEXT: cmpq %fs:112, %r11
+; X64: cmpq %fs:112, %rsp
; X64: movabsq $24, %r10
; X64-NEXT: movabsq $0, %r11
%result = add i32 %other, %addend
ret i32 %result
-; X32: leal (%esp), %edx
-; X32-NEXT: cmpl %gs:48, %edx
-
+; X32: cmpl %gs:48, %esp
; X32: pushl $4
; X32-NEXT: pushl $0
; X32-NEXT: calll __morestack
; X32-NEXT: ret
-; X64: leaq (%rsp), %r11
-; X64-NEXT: cmpq %fs:112, %r11
+; X64: cmpq %fs:112, %rsp
; X64: movq %r10, %rax
; X64-NEXT: movabsq $0, %r10
; X64-NEXT: movq %rax, %r10
}
+
+define void @test_large() {
+  %mem = alloca i32, i32 10000
+  call void @dummy_use (i32* %mem, i32 0)
+  ret void
+
+; The ~40000-byte frame is far above kSplitStackAvailable (256), so unlike the
+; small-frame tests above, the prologue must still compute sp - StackSize with
+; an LEA into a scratch register before comparing against the TLS stack limit.
+
+; X32: leal -40012(%esp), %ecx
+; X32-NEXT: cmpl %gs:48, %ecx
+
+; X32: pushl $0
+; X32-NEXT: pushl $40012
+; X32-NEXT: calll __morestack
+; X32-NEXT: ret
+
+; X64: leaq -40008(%rsp), %r11
+; X64-NEXT: cmpq %fs:112, %r11
+
+; X64: movabsq $40008, %r10
+; X64-NEXT: movabsq $0, %r11
+; X64-NEXT: callq __morestack
+; X64-NEXT: ret
+
+}