using namespace llvm;
-MipsSEInstrInfo::MipsSEInstrInfo(MipsTargetMachine &tm)
- : MipsInstrInfo(tm,
- tm.getRelocationModel() == Reloc::PIC_ ? Mips::B : Mips::J),
- RI(*tm.getSubtargetImpl()),
- IsN64(tm.getSubtarget<MipsSubtarget>().isABI_N64()) {}
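+// Under PIC the unconditional-branch pseudo must lower to the PC-relative B;
+// J encodes an absolute (within-256MB-region) target and is therefore not
+// position-independent.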
+MipsSEInstrInfo::MipsSEInstrInfo(const MipsSubtarget &STI)
+ : MipsInstrInfo(STI, STI.getRelocationModel() == Reloc::PIC_ ? Mips::B
+ : Mips::J),
+ RI() {}
const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
return RI;
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
-unsigned MipsSEInstrInfo::
-isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
-{
+unsigned MipsSEInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
unsigned Opc = MI->getOpcode();
if ((Opc == Mips::LW) || (Opc == Mips::LD) ||
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
-unsigned MipsSEInstrInfo::
-isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
-{
+unsigned MipsSEInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
unsigned Opc = MI->getOpcode();
if ((Opc == Mips::SW) || (Opc == Mips::SD) ||
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
unsigned Opc = 0, ZeroReg = 0;
- bool isMicroMips = TM.getSubtarget<MipsSubtarget>().inMicroMipsMode();
+ bool isMicroMips = Subtarget.inMicroMipsMode();
if (Mips::GPR32RegClass.contains(DestReg)) { // Copy to CPU Reg.
if (Mips::GPR32RegClass.contains(SrcReg)) {
if (isMicroMips)
Opc = Mips::MOVE16_MM;
else
- Opc = Mips::ADDu, ZeroReg = Mips::ZERO;
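+ // OR with $zero copies the source register unchanged; it is the
+ // conventional expansion of the 'move' pseudo-instruction.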
+ Opc = Mips::OR, ZeroReg = Mips::ZERO;
} else if (Mips::CCRRegClass.contains(SrcReg))
Opc = Mips::CFC1;
else if (Mips::FGR32RegClass.contains(SrcReg))
Opc = Mips::MFC1;
else if (Mips::GPR64RegClass.contains(DestReg)) { // Copy to CPU64 Reg.
if (Mips::GPR64RegClass.contains(SrcReg))
- Opc = Mips::DADDu, ZeroReg = Mips::ZERO_64;
+ Opc = Mips::OR64, ZeroReg = Mips::ZERO_64;
else if (Mips::HI64RegClass.contains(SrcReg))
Opc = Mips::MFHI64, SrcReg = 0;
else if (Mips::LO64RegClass.contains(SrcReg))
const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
int64_t Offset) const {
DebugLoc DL;
- if (I != MBB.end()) DL = I->getDebugLoc();
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);
unsigned Opc = 0;
Opc = Mips::ST_W;
else if (RC->hasType(MVT::v2i64) || RC->hasType(MVT::v2f64))
Opc = Mips::ST_D;
+ else if (Mips::LO32RegClass.hasSubClassEq(RC))
+ Opc = Mips::SW;
+ else if (Mips::LO64RegClass.hasSubClassEq(RC))
+ Opc = Mips::SD;
+ else if (Mips::HI32RegClass.hasSubClassEq(RC))
+ Opc = Mips::SW;
+ else if (Mips::HI64RegClass.hasSubClassEq(RC))
+ Opc = Mips::SD;
+
+ // Hi, Lo are normally caller save but they are callee save
+ // for interrupt handling.
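+ // The ABI reserves K0/K1 for kernel and interrupt use, so K0 is free to
+ // serve as scratch here: HI/LO are first moved into K0, a register the
+ // store instruction selected above can actually encode.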
+ const Function *Func = MBB.getParent()->getFunction();
+ if (Func->hasFnAttribute("interrupt")) {
+ if (Mips::HI32RegClass.hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Mips::MFHI), Mips::K0);
+ SrcReg = Mips::K0;
+ } else if (Mips::HI64RegClass.hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Mips::MFHI64), Mips::K0_64);
+ SrcReg = Mips::K0_64;
+ } else if (Mips::LO32RegClass.hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Mips::MFLO), Mips::K0);
+ SrcReg = Mips::K0;
+ } else if (Mips::LO64RegClass.hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Mips::MFLO64), Mips::K0_64);
+ SrcReg = Mips::K0_64;
+ }
+ }
assert(Opc && "Register class not handled!");
BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
unsigned Opc = 0;
+ const Function *Func = MBB.getParent()->getFunction();
+ bool ReqIndirectLoad = Func->hasFnAttribute("interrupt") &&
+ (DestReg == Mips::LO0 || DestReg == Mips::LO0_64 ||
+ DestReg == Mips::HI0 || DestReg == Mips::HI0_64);
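+ // No load instruction can write HI/LO directly, so these reloads must go
+ // through a GPR followed by mthi/mtlo (see the indirect path below).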
+
if (Mips::GPR32RegClass.hasSubClassEq(RC))
Opc = Mips::LW;
else if (Mips::GPR64RegClass.hasSubClassEq(RC))
Opc = Mips::LD;
else if (RC->hasType(MVT::v2i64) || RC->hasType(MVT::v2f64))
Opc = Mips::LD_D;
+ else if (Mips::HI32RegClass.hasSubClassEq(RC))
+ Opc = Mips::LW;
+ else if (Mips::HI64RegClass.hasSubClassEq(RC))
+ Opc = Mips::LD;
+ else if (Mips::LO32RegClass.hasSubClassEq(RC))
+ Opc = Mips::LW;
+ else if (Mips::LO64RegClass.hasSubClassEq(RC))
+ Opc = Mips::LD;
assert(Opc && "Register class not handled!");
- BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(Offset)
- .addMemOperand(MMO);
+
+ if (!ReqIndirectLoad)
+ BuildMI(MBB, I, DL, get(Opc), DestReg)
+ .addFrameIndex(FI)
+ .addImm(Offset)
+ .addMemOperand(MMO);
+ else {
+ // Load HI/LO through K0. Notably the DestReg is encoded into the
+ // instruction itself.
+ unsigned Reg = Mips::K0;
+ unsigned LdOp = Mips::MTLO;
+ if (DestReg == Mips::HI0)
+ LdOp = Mips::MTHI;
+
+ if (Subtarget.getABI().ArePtrs64bit()) {
+ Reg = Mips::K0_64;
+ if (DestReg == Mips::HI0_64)
+ LdOp = Mips::MTHI64;
+ else
+ LdOp = Mips::MTLO64;
+ }
+
+ BuildMI(MBB, I, DL, get(Opc), Reg)
+ .addFrameIndex(FI)
+ .addImm(Offset)
+ .addMemOperand(MMO);
+ BuildMI(MBB, I, DL, get(LdOp)).addReg(Reg);
+ }
}
bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
MachineBasicBlock &MBB = *MI->getParent();
- bool isMicroMips = TM.getSubtarget<MipsSubtarget>().inMicroMipsMode();
+ bool isMicroMips = Subtarget.inMicroMipsMode();
unsigned Opc;
switch(MI->getDesc().getOpcode()) {
case Mips::RetRA:
expandRetRA(MBB, MI);
break;
+ case Mips::ERet:
+ expandERet(MBB, MI);
+ break;
case Mips::PseudoMFHI:
Opc = isMicroMips ? Mips::MFHI16_MM : Mips::MFHI;
expandPseudoMFHiLo(MBB, MI, Opc);
break;
case Mips::BLEZ64: return Mips::BGTZ64;
case Mips::BC1T: return Mips::BC1F;
case Mips::BC1F: return Mips::BC1T;
+ case Mips::BEQZC_MM: return Mips::BNEZC_MM;
+ case Mips::BNEZC_MM: return Mips::BEQZC_MM;
}
}
void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
- DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
- unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
+ MipsABIInfo ABI = Subtarget.getABI();
+ DebugLoc DL;
+ unsigned ADDu = ABI.GetPtrAdduOp();
+ unsigned ADDiu = ABI.GetPtrAddiuOp();
+
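+ // A zero adjustment needs no code; emitting "addiu $sp, $sp, 0" would be
+ // wasted space.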
+ if (Amount == 0)
+ return;
if (isInt<16>(Amount)) // addi sp, sp, amount
BuildMI(MBB, I, DL, get(ADDiu), SP).addReg(SP).addImm(Amount);
MachineBasicBlock::iterator II, DebugLoc DL,
unsigned *NewImm) const {
MipsAnalyzeImmediate AnalyzeImm;
- const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
+ const MipsSubtarget &STI = Subtarget;
MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
unsigned Size = STI.isABI_N64() ? 64 : 32;
unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi;
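+ // AnalyzeImm decomposes the constant into a short lui/ori/addiu/shift
+ // sequence; Size and the LUi opcode select the 64-bit variants under N64.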
Opc == Mips::BEQ64 || Opc == Mips::BNE64 || Opc == Mips::BGTZ64 ||
Opc == Mips::BGEZ64 || Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 ||
Opc == Mips::BC1T || Opc == Mips::BC1F || Opc == Mips::B ||
- Opc == Mips::J) ?
+ Opc == Mips::J || Opc == Mips::BEQZC_MM || Opc == Mips::BNEZC_MM) ?
Opc : 0;
}
void MipsSEInstrInfo::expandRetRA(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- const auto &Subtarget = TM.getSubtarget<MipsSubtarget>();
-
if (Subtarget.isGP64bit())
BuildMI(MBB, I, I->getDebugLoc(), get(Mips::PseudoReturn64))
.addReg(Mips::RA_64);
else
BuildMI(MBB, I, I->getDebugLoc(), get(Mips::PseudoReturn)).addReg(Mips::RA);
}
+void MipsSEInstrInfo::expandERet(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
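+ // eret returns from exception level: it clears Status.EXL (or ERL) and
+ // jumps to the address held in CP0 EPC (or ErrorEPC), so no jr $ra
+ // sequence is needed.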
+ BuildMI(MBB, I, I->getDebugLoc(), get(Mips::ERET));
+}
+
std::pair<bool, bool>
MipsSEInstrInfo::compareOpndSize(unsigned Opc,
const MachineFunction &MF) const {
unsigned SubIdx = N ? Mips::sub_hi : Mips::sub_lo;
unsigned SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx);
- if (SubIdx == Mips::sub_hi && FP64) {
- // FIXME: The .addReg(SrcReg, RegState::Implicit) is a white lie used to
+ // FPXX on MIPS-II or MIPS32r1 should have been handled with a spill/reload
+ // in MipsSEFrameLowering.cpp.
+ assert(!(Subtarget.isABI_FPXX() && !Subtarget.hasMips32r2()));
+
+ // FP64A (FP64 with nooddspreg) should have been handled with a spill/reload
+ // in MipsSEFrameLowering.cpp.
+ assert(!(Subtarget.isFP64bit() && !Subtarget.useOddSPReg()));
+
+ if (SubIdx == Mips::sub_hi && Subtarget.hasMTHC1()) {
+ // FIXME: Strictly speaking MFHC1 only reads the top 32-bits however, we
+ // claim to read the whole 64-bits as part of a white lie used to
// temporarily work around a widespread bug in the -mfp64 support.
// The problem is that none of the 32-bit fpu ops mention the fact
// that they clobber the upper 32-bits of the 64-bit FPR. Fixing that
// We therefore pretend that it reads the bottom 32-bits to
// artificially create a dependency and prevent the scheduler
// changing the behaviour of the code.
- BuildMI(MBB, I, dl, get(Mips::MFHC1), DstReg).addReg(SubReg).addReg(
- SrcReg, RegState::Implicit);
+ BuildMI(MBB, I, dl, get(FP64 ? Mips::MFHC1_D64 : Mips::MFHC1_D32), DstReg)
+ .addReg(SrcReg);
} else
BuildMI(MBB, I, dl, get(Mips::MFC1), DstReg).addReg(SubReg);
}
const MCInstrDesc& Mtc1Tdd = get(Mips::MTC1);
DebugLoc dl = I->getDebugLoc();
const TargetRegisterInfo &TRI = getRegisterInfo();
- bool HasMTHC1 = TM.getSubtarget<MipsSubtarget>().hasMips32r2() ||
- TM.getSubtarget<MipsSubtarget>().hasMips32r6();
// When mthc1 is available, use:
// mtc1 Lo, $fp
// mthc1 Hi, $fp
//
- // Otherwise, for FP64:
+ // Otherwise, for O32 FPXX ABI:
// spill + reload via ldc1
- // This has not been implemented since FP64 on MIPS32 and earlier is not
- // supported.
+ // This case is handled by the frame lowering code.
//
// Otherwise, for FP32:
// mtc1 Lo, $fp
// mtc1 Hi, $fp + 1
+ //
+ // The case where dmtc1 is available doesn't need to be handled here
+ // because it never creates a BuildPairF64 node.
+
+ // FPXX on MIPS-II or MIPS32r1 should have been handled with a spill/reload
+ // in MipsSEFrameLowering.cpp.
+ assert(!(Subtarget.isABI_FPXX() && !Subtarget.hasMips32r2()));
+
+ // FP64A (FP64 with nooddspreg) should have been handled with a spill/reload
+ // in MipsSEFrameLowering.cpp.
+ assert(!(Subtarget.isFP64bit() && !Subtarget.useOddSPReg()));
BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_lo))
.addReg(LoReg);
- if (HasMTHC1 || FP64) {
- assert(TM.getSubtarget<MipsSubtarget>().hasMips32r2() &&
- "MTHC1 requires MIPS32r2");
-
+ if (Subtarget.hasMTHC1()) {
// FIXME: The .addReg(DstReg) is a white lie used to temporarily work
// around a widespread bug in the -mfp64 support.
// The problem is that none of the 32-bit fpu ops mention the fact
BuildMI(MBB, I, dl, get(FP64 ? Mips::MTHC1_D64 : Mips::MTHC1_D32), DstReg)
.addReg(DstReg)
.addReg(HiReg);
- } else
+ } else if (Subtarget.isABI_FPXX())
+ llvm_unreachable("BuildPairF64 not expanded in frame lowering code!");
+ else
BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_hi))
.addReg(HiReg);
}
// This pseudo instruction is generated as part of the lowering of
// ISD::EH_RETURN. We convert it to a stack increment by OffsetReg, and
// indirect jump to TargetReg
- const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
- unsigned ADDU = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned SP = STI.isGP64bit() ? Mips::SP_64 : Mips::SP;
- unsigned RA = STI.isGP64bit() ? Mips::RA_64 : Mips::RA;
- unsigned T9 = STI.isGP64bit() ? Mips::T9_64 : Mips::T9;
- unsigned ZERO = STI.isGP64bit() ? Mips::ZERO_64 : Mips::ZERO;
+ MipsABIInfo ABI = Subtarget.getABI();
+ unsigned ADDU = ABI.GetPtrAdduOp();
+ unsigned SP = Subtarget.isGP64bit() ? Mips::SP_64 : Mips::SP;
+ unsigned RA = Subtarget.isGP64bit() ? Mips::RA_64 : Mips::RA;
+ unsigned T9 = Subtarget.isGP64bit() ? Mips::T9_64 : Mips::T9;
+ unsigned ZERO = Subtarget.isGP64bit() ? Mips::ZERO_64 : Mips::ZERO;
unsigned OffsetReg = I->getOperand(0).getReg();
unsigned TargetReg = I->getOperand(1).getReg();
// addu $ra, $v0, $zero
// addu $sp, $sp, $v1
// jr $ra (via RetRA)
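+ // Under PIC, callees compute $gp from $t9, so $t9 must also hold the
+ // entry address of the code being jumped to.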
+ const TargetMachine &TM = MBB.getParent()->getTarget();
if (TM.getRelocationModel() == Reloc::PIC_)
- BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), T9)
- .addReg(TargetReg).addReg(ZERO);
- BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), RA)
- .addReg(TargetReg).addReg(ZERO);
- BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), SP)
- .addReg(SP).addReg(OffsetReg);
+ BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), T9)
+ .addReg(TargetReg)
+ .addReg(ZERO);
+ BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), RA)
+ .addReg(TargetReg)
+ .addReg(ZERO);
+ BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), SP).addReg(SP).addReg(OffsetReg);
expandRetRA(MBB, I);
}
-const MipsInstrInfo *llvm::createMipsSEInstrInfo(MipsTargetMachine &TM) {
- return new MipsSEInstrInfo(TM);
+const MipsInstrInfo *llvm::createMipsSEInstrInfo(const MipsSubtarget &STI) {
+ return new MipsSEInstrInfo(STI);
}