//===- X86RegisterInfo.cpp - X86 Register Information ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                          " needed for the function."),
                 cl::init(false), cl::Hidden);
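
// Illustrative note (not from this file): because this is a cl::opt, the flag
// is read off the llc command line, e.g. "llc -force-align-stack foo.ll".
// It is consumed in needsStackRealignment() below, which then realigns the
// stack whenever canRealignStack() permits it.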

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP,
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true)),
    TM(tm), TII(tii) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}
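
// Worked example (illustrative): a 64-bit frame that saves %rbx and %r12 maps
// them to compact-unwind slots 1 and 2 via the table above; a register outside
// the six-entry table (e.g. %r11) yields -1, which callers treat as "compact
// unwind encoding not possible" for the function.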

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  int reg = X86_MC::getX86RegNum(i);
  switch (i) {
  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
  case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
  case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
  case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
  case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
    reg += 8;
  }
  return reg;
}
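
// Worked example (illustrative): for X86::R10, X86_MC::getX86RegNum() returns
// only the low 3 bits of the encoding (2), and the switch above adds the REX
// bank offset of 8, giving the SEH unwind number 10. Registers outside the
// extended banks pass through unchanged.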

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
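
// Illustrative consequence of the remapping above: in 32-bit mode only
// EAX/EBX/ECX/EDX have addressable 8-bit sub-registers, so asking for the
// GR32 sub-class with sub_8bit yields GR32_ABCD, exactly as if sub_8bit_hi
// had been requested.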

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case X86::sub_8bit:
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_8bit_hi:
    if (B->hasSubClassEq(&X86::GR8_ABCD_HRegClass))
      switch (A->getSize()) {
      case 2: return getCommonSubClass(A, &X86::GR16_ABCDRegClass);
      case 4: return getCommonSubClass(A, &X86::GR32_ABCDRegClass);
      case 8: return getCommonSubClass(A, &X86::GR64_ABCDRegClass);
      default: return 0;
      }
    break;
  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case X86::sub_32bit:
    if (B == &X86::GR32RegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
        return &X86::GR64_NOSPRegClass;
      if (A->getSize() == 8)
        return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_NOSPRegClass ||
               A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREX_NOSPRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREX_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREX_NOSPRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_ss:
    if (B == &X86::FR32RegClass)
      return A;
    break;
  case X86::sub_sd:
    if (B == &X86::FR64RegClass)
      return A;
    break;
  case X86::sub_xmm:
    if (B == &X86::VR128RegClass)
      return A;
    break;
  }
  return 0;
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (TM.getSubtarget<X86Subtarget>().isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_TCRegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}
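
// The arrays above are null-terminated; callers walk them in order, e.g.
// (illustrative sketch, not code from this file):
//   for (const unsigned *CSR = TRI->getCalleeSavedRegs(&MF); *CSR; ++CSR)
//     markAsSaved(*CSR);  // hypothetical helper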

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the classic 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      const unsigned GPR64[] = {
        X86::R8,  X86::R9,  X86::R10, X86::R11,
        X86::R12, X86::R13, X86::R14, X86::R15
      };
      for (const unsigned *AI = getOverlaps(GPR64[n]); unsigned Reg = *AI; ++AI)
        Reserved.set(Reg);

      // XMM8, XMM9, ...
      assert(X86::XMM15 == X86::XMM8+7);
      for (const unsigned *AI = getOverlaps(X86::XMM8 + n); unsigned Reg = *AI;
           ++AI)
        Reserved.set(Reg);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  //        variable-sized allocas.
  // FIXME: It's more complicated than this...
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}
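
// Illustrative trigger: a function with a 32-byte-aligned local (say, a
// spilled AVX __m256 value) raises MFI->getMaxAlignment() to 32; with the
// usual 16-byte StackAlign this makes requiresRealignment true, so the frame
// is realigned provided canRealignStack() holds.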

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}
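
// Worked example (illustrative): getADDriOpcode(true, 8) returns
// X86::ADD64ri8 because 8 fits in a sign-extended 8-bit immediate, while
// getADDriOpcode(true, 4096) needs the 32-bit form and returns X86::ADD64ri32.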

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
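
    // Worked example (illustrative values): with StackAlign == 16, a 20-byte
    // outgoing-argument area rounds up to 32, since
    // (20 + 16 - 1) / 16 * 16 == 35 / 16 * 16 == 2 * 16 == 32.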

    MachineInstr *New = 0;
    if (Opcode == TII.getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();

    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call; there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !llvm::prior(I)->getDesc().isCall())
      --I;
    MBB.insert(I, New);
  }
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register. Add an offset to the offset.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(i + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}
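
// Illustrative rewrite performed above (hypothetical operands): with <fi#0>
// at [ebp-8], a memory reference "<fi#0>, 1, %noreg, 4, %noreg" (base, scale,
// index, disp, segment) becomes "%ebp, 1, %noreg, -4, %noreg" -- the base
// register is substituted and the frame-index offset folds into the
// displacement.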

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}
} // End llvm namespace
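
// Usage sketch (illustrative): getX86SubSuperRegister(X86::EAX, MVT::i8, true)
// returns X86::AH, while getX86SubSuperRegister(X86::AL, MVT::i64, false)
// returns X86::RAX. Requesting a "high" byte of a REX-only register such as
// X86::R8 returns 0, since no such sub-register exists.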

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const TargetFrameLowering *TFI = TM->getFrameLowering();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = TFI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that vector
      // registers will be spilled and thus require dynamic stack realignment.
      for (unsigned i = 0, e = RI.getNumVirtRegs(); i != e; ++i) {
        unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
        if (RI.getRegClass(Reg)->getAlignment() > StackAlignment) {
          FuncInfo->setForceFramePointer(true);
          return true;
        }
      }

      // Nothing to do.
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }
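
// Registration sketch (hedged; the actual call site lives in the target's
// pass setup, e.g. X86TargetMachine):
//   PM.add(createX86MaxStackAlignmentHeuristicPass());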