//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                          " needed for the function."),
                 cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm)
  : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
                       (tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP)),
    TM(tm) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  // Pick the stack- and frame-pointer registers for the current pointer width.
  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires GOT in the EBX register before function calls via PLT GOT pointer.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}

/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}

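// Hedged usage sketch (the caller shape below is illustrative, not from this
// file): Apple's compact unwind format has room for only six saved-register
// slots, so emitters query each callee-saved register and fall back to DWARF
// CFI when anything maps to -1:
//
//   int CUReg = RI->getCompactUnwindRegNum(Reg, /*isEH=*/false);
//   if (CUReg == -1) {
//     // Not encodable in the six-entry permutation; emit DWARF unwind info.
//   }
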
bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // Only enable when post-RA scheduling is enabled and this is needed.
  return TM.getSubtargetImpl()->postRAScheduler();
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

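// Why this matters, by example: in 32-bit mode only AL/BL/CL/DL exist as
// low-byte sub-registers; SIL/DIL/BPL/SPL require a REX prefix and so exist
// only in 64-bit mode. Treating sub_8bit like sub_8bit_hi narrows a query
// such as getSubClassWithSubReg(&X86::GR32RegClass, X86::sub_8bit) to the
// EAX/EBX/ECX/EDX subset (the ABCD classes) instead of all of GR32.
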
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return 0;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

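// Example of the spill-size guard above: FR32 (4-byte spill slots) has the
// VR128 class (16-byte spill slots) among its super-classes, and returning
// VR128 for a plain float would change every spill slot's size. The
// size-equality check rejects that, while an inflation like GR32_ABCD ->
// GR32 (both 4 bytes) is accepted.
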
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    // HiPE functions may use any GR32 register for a tail-call target.
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

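// Hedged note on how the Kind values are reached: instruction definitions in
// the X86 .td files use pointer-like register-class operands whose numbers
// correspond to these cases; from memory (not this file), ptr_rc selects
// Kind 0, ptr_rc_nosp selects Kind 1 for operands that must not encode the
// stack pointer, and ptr_rc_tailcall selects Kind 2.
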
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  // EFLAGS (the CCR class) is copied through a GPR of the native width.
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

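// Worked example of the heuristic: when a frame pointer is in use, FPDiff is
// 1, so the GR64 limit becomes 12 - 1 = 11 because RBP is no longer
// allocatable. These limits only feed register-pressure heuristics (e.g. in
// the machine schedulers); they are not a hard cap on the allocator.
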
const uint16_t *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;

  case CallingConv::Intel_OCL_BI: {
    bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }

  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_MostRegs_64_SaveList;
    break;

  default:
    break;
  }

  bool CallsEHReturn = MF->getMMI().callsEHReturn();
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

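// Example outcome (hedged; the list contents live in the TableGen-generated
// CSR tables, not in this file): a plain C function on 64-bit Linux gets
// CSR_64, i.e. roughly RBX, R12-R15 and RBP, while GHC/HiPE functions save
// nothing at all because those conventions treat every register as
// caller-saved for speed.
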
const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();

  if (CC == CallingConv::Intel_OCL_BI) {
    if (IsWin64 && HasAVX)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (Is64Bit && HasAVX)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
  }
  if (CC == CallingConv::GHC || CC == CallingConv::HiPE)
    return CSR_NoRegs_RegMask;
  if (!Is64Bit)
    return CSR_32_RegMask;
  if (CC == CallingConv::Cold)
    return CSR_MostRegs_64_RegMask;
  if (IsWin64)
    return CSR_Win64_RegMask;
  return CSR_64_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported "
        "with this calling convention.");

    for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      static const uint16_t GPR64[] = {
        X86::R8,  X86::R9,  X86::R10, X86::R11,
        X86::R12, X86::R13, X86::R14, X86::R15
      };
      for (MCRegAliasIterator AI(GPR64[n], this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      static const uint16_t XMMReg[] = {
        X86::XMM8,  X86::XMM9,  X86::XMM10, X86::XMM11,
        X86::XMM12, X86::XMM13, X86::XMM14, X86::XMM15
      };
      for (MCRegAliasIterator AI(XMMReg[n], this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

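// Note on the alias iteration above: reserving through MCSubRegIterator /
// MCRegAliasIterator sweeps in every view of a register. Reserving RSP, for
// instance, also reserves ESP, SP and SPL, so no register class can hand the
// allocator any name for the stack pointer.
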
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment and there are dynamic allocas, we can't
  // reference off of the stack pointer, so we reserve a base pointer.
  //
  // This is also true if the function contains MS-style inline assembly. We
  // do this because if any stack changes occur in the inline assembly, e.g.,
  // "pusha", then any C local variable or C argument references in the
  // inline assembly will be wrong because the SP is not properly tracked.
  if ((needsStackRealignment(MF) && MFI->hasVarSizedObjects()) ||
      MF.hasMSInlineAsm())
    return true;

  return false;
}

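// Illustrative trigger (hedged IR sketch): a function that both over-aligns a
// local and performs a dynamic alloca needs SP for the variable-sized area
// and an aligned FP for the fixed frame, leaving neither register free to
// address everything else; hence the dedicated base pointer.
//
//   %big = alloca <8 x float>, align 32   ; forces stack realignment
//   %dyn = alloca i8, i32 %n              ; variable-sized object
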
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  if (!MF.getTarget().Options.RealignStack)
    return false;

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
    ((MFI->getMaxAlignment() > StackAlign) ||
     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackAlignment));

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

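// Concrete case: with the usual 16-byte x86-64 ABI stack alignment, a local
// declared with 32-byte alignment pushes MFI->getMaxAlignment() to 32 > 16,
// so requiresRealignment becomes true and the prologue must realign the
// stack pointer (typically via an AND of RSP).
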
bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a memory operand group (base, scale, index, disp,
  // segment). Replace the FrameIndex with the chosen base register; the
  // frame object offset is folded into the displacement below.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

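// Before/after sketch (operand spelling is illustrative; the offset depends
// on the final frame layout). A 32-bit store to a stack slot such as
//   MOV32mr <fi#0>, 1, %noreg, 0, %noreg, %EAX
// becomes, in a frame-pointer function,
//   MOV32mr %EBP, 1, %noreg, -8, %noreg, %EAX
// where -8 is FIOffset plus the instruction's original displacement.
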
unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      // SI/DI/BP/SP have no high-byte sub-register; returning the 16-bit
      // register here mirrors the long-standing upstream behavior.
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}
} // End llvm namespace
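
// Comment-only self-check of the mapping above (derived from the switches):
//   getX86SubSuperRegister(X86::EAX, MVT::i8,  /*High=*/true) == X86::AH
//   getX86SubSuperRegister(X86::AL,  MVT::i64, false)         == X86::RAX
//   getX86SubSuperRegister(X86::R9,  MVT::i16, false)         == X86::R9W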