X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FMips%2FAsmParser%2FMipsAsmParser.cpp;h=aea022a336f45122122e87af32f9e00d9d11e402;hb=f5ed626e96b8c0f0abe986e7730dfb95c5edf66b;hp=8f7968939cb24979e0fc4198b6a2a3ae69531d7e;hpb=b0f7871d4ef50a48064842b7b43a0a4cdf5d3f43;p=oota-llvm.git diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index 8f7968939cb..aea022a336f 100644 --- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -11,6 +11,7 @@ #include "MCTargetDesc/MipsMCExpr.h" #include "MCTargetDesc/MipsMCTargetDesc.h" #include "MipsRegisterInfo.h" +#include "MipsTargetObjectFile.h" #include "MipsTargetStreamer.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/SmallVector.h" @@ -114,6 +115,12 @@ class MipsAsmParser : public MCTargetAsmParser { // selected. This usually happens after an '.end func' // directive. bool IsLittleEndian; + bool IsPicEnabled; + bool IsCpRestoreSet; + int CpRestoreOffset; + unsigned CpSaveLocation; + /// If true, then CpSaveLocation is a register, otherwise it's an offset. + bool CpSaveLocationIsRegister; // Print a warning along with its fix-it message at the given range. void printWarningWithFixIt(const Twine &Msg, const Twine &FixMsg, @@ -141,50 +148,41 @@ class MipsAsmParser : public MCTargetAsmParser { bool ParseDirective(AsmToken DirectiveID) override; - MipsAsmParser::OperandMatchResultTy parseMemOperand(OperandVector &Operands); - - MipsAsmParser::OperandMatchResultTy + OperandMatchResultTy parseMemOperand(OperandVector &Operands); + OperandMatchResultTy matchAnyRegisterNameWithoutDollar(OperandVector &Operands, StringRef Identifier, SMLoc S); - - MipsAsmParser::OperandMatchResultTy - matchAnyRegisterWithoutDollar(OperandVector &Operands, SMLoc S); - - MipsAsmParser::OperandMatchResultTy parseAnyRegister(OperandVector &Operands); - - MipsAsmParser::OperandMatchResultTy parseImm(OperandVector &Operands); - - MipsAsmParser::OperandMatchResultTy parseJumpTarget(OperandVector &Operands); - - MipsAsmParser::OperandMatchResultTy parseInvNum(OperandVector &Operands); - - MipsAsmParser::OperandMatchResultTy parseLSAImm(OperandVector &Operands); - - MipsAsmParser::OperandMatchResultTy - parseRegisterPair (OperandVector &Operands); - - MipsAsmParser::OperandMatchResultTy - parseMovePRegPair(OperandVector &Operands); - - MipsAsmParser::OperandMatchResultTy - parseRegisterList (OperandVector &Operands); + OperandMatchResultTy matchAnyRegisterWithoutDollar(OperandVector &Operands, + SMLoc S); + OperandMatchResultTy parseAnyRegister(OperandVector &Operands); + OperandMatchResultTy parseImm(OperandVector &Operands); + OperandMatchResultTy parseJumpTarget(OperandVector &Operands); + OperandMatchResultTy parseInvNum(OperandVector &Operands); + OperandMatchResultTy parseLSAImm(OperandVector &Operands); + OperandMatchResultTy parseRegisterPair(OperandVector &Operands); + OperandMatchResultTy parseMovePRegPair(OperandVector &Operands); + OperandMatchResultTy parseRegisterList(OperandVector &Operands); bool searchSymbolAlias(OperandVector &Operands); bool parseOperand(OperandVector &, StringRef Mnemonic); - bool needsExpansion(MCInst &Inst); + enum MacroExpanderResultTy { + MER_NotAMacro, + MER_Success, + MER_Fail, + }; // Expands assembly pseudo instructions. - // Returns false on success, true otherwise. 
- bool expandInstruction(MCInst &Inst, SMLoc IDLoc, - SmallVectorImpl &Instructions); + MacroExpanderResultTy + tryExpandInstruction(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions); bool expandJalWithRegs(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl &Instructions); bool loadImmediate(int64_t ImmValue, unsigned DstReg, unsigned SrcReg, - bool Is32BitImm, SMLoc IDLoc, + bool Is32BitImm, bool IsAddress, SMLoc IDLoc, SmallVectorImpl &Instructions); bool loadAndAddSymbolAddress(const MCExpr *SymExpr, unsigned DstReg, @@ -194,11 +192,10 @@ class MipsAsmParser : public MCTargetAsmParser { bool expandLoadImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc, SmallVectorImpl &Instructions); - bool expandLoadAddressImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc, - SmallVectorImpl &Instructions); + bool expandLoadAddress(unsigned DstReg, unsigned BaseReg, + const MCOperand &Offset, bool Is32BitAddress, + SMLoc IDLoc, SmallVectorImpl &Instructions); - bool expandLoadAddressReg(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc, - SmallVectorImpl &Instructions); bool expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl &Instructions); @@ -209,14 +206,21 @@ class MipsAsmParser : public MCTargetAsmParser { bool expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl &Instructions); + bool expandAliasImmediate(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions); + bool expandBranchImm(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl &Instructions); bool expandCondBranches(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl &Instructions); - bool expandUlhu(MCInst &Inst, SMLoc IDLoc, - SmallVectorImpl &Instructions); + bool expandDiv(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions, const bool IsMips64, + const bool Signed); + + bool expandUlh(MCInst &Inst, bool Signed, SMLoc IDLoc, + SmallVectorImpl &Instructions); bool expandUlw(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl &Instructions); @@ -227,6 +231,9 @@ class MipsAsmParser : public MCTargetAsmParser { void createAddu(unsigned DstReg, unsigned SrcReg, unsigned TrgReg, bool Is64Bit, SmallVectorImpl &Instructions); + void createCpRestoreMemOp(bool IsLoad, int StackOffset, SMLoc IDLoc, + SmallVectorImpl &Instructions); + bool reportParseError(Twine ErrorMsg); bool reportParseError(SMLoc Loc, Twine ErrorMsg); @@ -239,8 +246,11 @@ class MipsAsmParser : public MCTargetAsmParser { bool parseSetMips0Directive(); bool parseSetArchDirective(); bool parseSetFeature(uint64_t Feature); + bool isPicAndNotNxxAbi(); // Used by .cpload, .cprestore, and .cpsetup. 
bool parseDirectiveCpLoad(SMLoc Loc); + bool parseDirectiveCpRestore(SMLoc Loc); bool parseDirectiveCPSetup(); + bool parseDirectiveCPReturn(); bool parseDirectiveNaN(); bool parseDirectiveSet(); bool parseDirectiveOption(); @@ -361,18 +371,27 @@ class MipsAsmParser : public MCTargetAsmParser { } } + void setModuleFeatureBits(uint64_t Feature, StringRef FeatureString) { + setFeatureBits(Feature, FeatureString); + AssemblerOptions.front()->setFeatures(STI.getFeatureBits()); + } + + void clearModuleFeatureBits(uint64_t Feature, StringRef FeatureString) { + clearFeatureBits(Feature, FeatureString); + AssemblerOptions.front()->setFeatures(STI.getFeatureBits()); + } + public: enum MipsMatchResultTy { - Match_RequiresDifferentSrcAndDst = FIRST_TARGET_MATCH_RESULT_TY + Match_RequiresDifferentSrcAndDst = FIRST_TARGET_MATCH_RESULT_TY, #define GET_OPERAND_DIAGNOSTIC_TYPES #include "MipsGenAsmMatcher.inc" #undef GET_OPERAND_DIAGNOSTIC_TYPES - }; MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser, const MCInstrInfo &MII, const MCTargetOptions &Options) - : MCTargetAsmParser(), STI(sti), + : MCTargetAsmParser(Options), STI(sti), ABI(MipsABIInfo::computeTargetABI(Triple(sti.getTargetTriple()), sti.getCPU(), Options)) { MCAsmParserExtension::Initialize(parser); @@ -381,11 +400,11 @@ public: // Initialize the set of available features. setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); - + // Remember the initial assembler options. The user can not modify these. AssemblerOptions.push_back( llvm::make_unique(STI.getFeatureBits())); - + // Create an assembler options environment for the user to modify. AssemblerOptions.push_back( llvm::make_unique(STI.getFeatureBits())); @@ -397,6 +416,12 @@ public: CurrentFn = nullptr; + IsPicEnabled = + (getContext().getObjectFileInfo()->getRelocM() == Reloc::PIC_); + + IsCpRestoreSet = false; + CpRestoreOffset = -1; + Triple TheTriple(sti.getTargetTriple()); if ((TheTriple.getArch() == Triple::mips) || (TheTriple.getArch() == Triple::mips64)) @@ -461,15 +486,24 @@ public: bool hasDSP() const { return STI.getFeatureBits()[Mips::FeatureDSP]; } bool hasDSPR2() const { return STI.getFeatureBits()[Mips::FeatureDSPR2]; } + bool hasDSPR3() const { return STI.getFeatureBits()[Mips::FeatureDSPR3]; } bool hasMSA() const { return STI.getFeatureBits()[Mips::FeatureMSA]; } bool hasCnMips() const { return (STI.getFeatureBits()[Mips::FeatureCnMips]); } + bool inPicMode() { + return IsPicEnabled; + } + bool inMips16Mode() const { return STI.getFeatureBits()[Mips::FeatureMips16]; } + bool useTraps() const { + return STI.getFeatureBits()[Mips::FeatureUseTCCInDIV]; + } + bool useSoftFloat() const { return STI.getFeatureBits()[Mips::FeatureSoftFloat]; } @@ -859,6 +893,15 @@ public: Inst.addOperand(MCOperand::createReg(getHWRegsReg())); } + template + void addConstantUImmOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + uint64_t Imm = getConstantImm() - Offset; + Imm &= (1 << Bits) - 1; + Imm += Offset; + Inst.addOperand(MCOperand::createImm(Imm)); + } + void addImmOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); const MCExpr *Expr = getImm(); @@ -868,7 +911,9 @@ public: void addMemOperands(MCInst &Inst, unsigned N) const { assert(N == 2 && "Invalid number of operands!"); - Inst.addOperand(MCOperand::createReg(getMemBase()->getGPR32Reg())); + Inst.addOperand(MCOperand::createReg(AsmParser.getABI().ArePtrs64bit() + ? 
getMemBase()->getGPR64Reg() + : getMemBase()->getGPR32Reg())); const MCExpr *Expr = getMemOff(); addExpr(Inst, Expr); @@ -916,6 +961,12 @@ public: bool isConstantImm() const { return isImm() && dyn_cast(getImm()); } + bool isConstantImmz() const { + return isConstantImm() && getConstantImm() == 0; + } + template bool isConstantUImm() const { + return isConstantImm() && isUInt(getConstantImm() - Offset); + } template bool isUImm() const { return isImm() && isConstantImm() && isUInt(getConstantImm()); } @@ -929,7 +980,12 @@ public: return isMem() && dyn_cast(getMemOff()); } template bool isMemWithSimmOffset() const { - return isMem() && isConstantMemOff() && isInt(getConstantMemOff()); + return isMem() && isConstantMemOff() && isInt(getConstantMemOff()) + && getMemBase()->isGPRAsmReg(); + } + template bool isMemWithSimmOffsetGPR() const { + return isMem() && isConstantMemOff() && isInt(getConstantMemOff()) && + getMemBase()->isGPRAsmReg(); } bool isMemWithGRPMM16Base() const { return isMem() && getMemBase()->isMM16AsmReg(); @@ -943,6 +999,9 @@ public: && (getConstantMemOff() % 4 == 0) && getMemBase()->isRegIdx() && (getMemBase()->getGPR32Reg() == Mips::SP); } + bool isUImm5Lsl2() const { + return (isImm() && isConstantImm() && isShiftedUInt<5, 2>(getConstantImm())); + } bool isRegList16() const { if (!isRegList()) return false; @@ -1294,9 +1353,123 @@ static bool hasShortDelaySlot(unsigned Opcode) { } } +static const MCSymbol *getSingleMCSymbol(const MCExpr *Expr) { + if (const MCSymbolRefExpr *SRExpr = dyn_cast(Expr)) { + return &SRExpr->getSymbol(); + } + + if (const MCBinaryExpr *BExpr = dyn_cast(Expr)) { + const MCSymbol *LHSSym = getSingleMCSymbol(BExpr->getLHS()); + const MCSymbol *RHSSym = getSingleMCSymbol(BExpr->getRHS()); + + if (LHSSym) + return LHSSym; + + if (RHSSym) + return RHSSym; + + return nullptr; + } + + if (const MCUnaryExpr *UExpr = dyn_cast(Expr)) + return getSingleMCSymbol(UExpr->getSubExpr()); + + return nullptr; +} + +static unsigned countMCSymbolRefExpr(const MCExpr *Expr) { + if (isa(Expr)) + return 1; + + if (const MCBinaryExpr *BExpr = dyn_cast(Expr)) + return countMCSymbolRefExpr(BExpr->getLHS()) + + countMCSymbolRefExpr(BExpr->getRHS()); + + if (const MCUnaryExpr *UExpr = dyn_cast(Expr)) + return countMCSymbolRefExpr(UExpr->getSubExpr()); + + return 0; +} + +namespace { +void emitRX(unsigned Opcode, unsigned Reg0, MCOperand Op1, SMLoc IDLoc, + SmallVectorImpl &Instructions) { + MCInst tmpInst; + tmpInst.setOpcode(Opcode); + tmpInst.addOperand(MCOperand::createReg(Reg0)); + tmpInst.addOperand(Op1); + tmpInst.setLoc(IDLoc); + Instructions.push_back(tmpInst); +} + +void emitRI(unsigned Opcode, unsigned Reg0, int32_t Imm, SMLoc IDLoc, + SmallVectorImpl &Instructions) { + emitRX(Opcode, Reg0, MCOperand::createImm(Imm), IDLoc, Instructions); +} + +void emitRR(unsigned Opcode, unsigned Reg0, unsigned Reg1, SMLoc IDLoc, + SmallVectorImpl &Instructions) { + emitRX(Opcode, Reg0, MCOperand::createReg(Reg1), IDLoc, Instructions); +} + +void emitII(unsigned Opcode, int16_t Imm1, int16_t Imm2, SMLoc IDLoc, + SmallVectorImpl &Instructions) { + MCInst tmpInst; + tmpInst.setOpcode(Opcode); + tmpInst.addOperand(MCOperand::createImm(Imm1)); + tmpInst.addOperand(MCOperand::createImm(Imm2)); + tmpInst.setLoc(IDLoc); + Instructions.push_back(tmpInst); +} + +void emitR(unsigned Opcode, unsigned Reg0, SMLoc IDLoc, + SmallVectorImpl &Instructions) { + MCInst tmpInst; + tmpInst.setOpcode(Opcode); + tmpInst.addOperand(MCOperand::createReg(Reg0)); + tmpInst.setLoc(IDLoc); + 
Instructions.push_back(tmpInst); +} + +void emitRRX(unsigned Opcode, unsigned Reg0, unsigned Reg1, MCOperand Op2, + SMLoc IDLoc, SmallVectorImpl &Instructions) { + MCInst tmpInst; + tmpInst.setOpcode(Opcode); + tmpInst.addOperand(MCOperand::createReg(Reg0)); + tmpInst.addOperand(MCOperand::createReg(Reg1)); + tmpInst.addOperand(Op2); + tmpInst.setLoc(IDLoc); + Instructions.push_back(tmpInst); +} + +void emitRRR(unsigned Opcode, unsigned Reg0, unsigned Reg1, unsigned Reg2, + SMLoc IDLoc, SmallVectorImpl &Instructions) { + emitRRX(Opcode, Reg0, Reg1, MCOperand::createReg(Reg2), IDLoc, + Instructions); +} + +void emitRRI(unsigned Opcode, unsigned Reg0, unsigned Reg1, int16_t Imm, + SMLoc IDLoc, SmallVectorImpl &Instructions) { + emitRRX(Opcode, Reg0, Reg1, MCOperand::createImm(Imm), IDLoc, + Instructions); +} + +void emitAppropriateDSLL(unsigned DstReg, unsigned SrcReg, int16_t ShiftAmount, + SMLoc IDLoc, SmallVectorImpl &Instructions) { + if (ShiftAmount >= 32) { + emitRRI(Mips::DSLL32, DstReg, SrcReg, ShiftAmount - 32, IDLoc, + Instructions); + return; + } + + emitRRI(Mips::DSLL, DstReg, SrcReg, ShiftAmount, IDLoc, Instructions); +} +} // end anonymous namespace. + bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl &Instructions) { const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); + bool ExpandedJalSym = false; Inst.setLoc(IDLoc); @@ -1355,12 +1528,14 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, return Error(IDLoc, "branch to misaligned address"); break; case Mips::BEQZ16_MM: + case Mips::BEQZC16_MMR6: case Mips::BNEZ16_MM: + case Mips::BNEZC16_MMR6: assert(MCID.getNumOperands() == 2 && "unexpected number of operands"); Offset = Inst.getOperand(1); if (!Offset.isImm()) break; // We'll deal with this situation later on when applying fixups. - if (!isIntN(8, Offset.getImm())) + if (!isInt<8>(Offset.getImm())) return Error(IDLoc, "branch target out of range"); if (OffsetToAlignment(Offset.getImm(), 2LL)) return Error(IDLoc, "branch to misaligned address"); @@ -1444,6 +1619,81 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, } } + // This expansion is not in a function called by tryExpandInstruction() + // because the pseudo-instruction doesn't have a distinct opcode. + if ((Inst.getOpcode() == Mips::JAL || Inst.getOpcode() == Mips::JAL_MM) && + inPicMode()) { + warnIfNoMacro(IDLoc); + + const MCExpr *JalExpr = Inst.getOperand(0).getExpr(); + + // We can do this expansion if there's only 1 symbol in the argument + // expression. + if (countMCSymbolRefExpr(JalExpr) > 1) + return Error(IDLoc, "jal doesn't support multiple symbols in PIC mode"); + + // FIXME: This is checking the expression can be handled by the later stages + // of the assembler. We ought to leave it to those later stages but + // we can't do that until we stop evaluateRelocExpr() rewriting the + // expressions into non-equivalent forms. + const MCSymbol *JalSym = getSingleMCSymbol(JalExpr); + + // FIXME: Add support for label+offset operands (currently causes an error). + // FIXME: Add support for forward-declared local symbols. + // FIXME: Add expansion for when the LargeGOT option is enabled. 
+ if (JalSym->isInSection() || JalSym->isTemporary()) { + if (isABI_O32()) { + // If it's a local symbol and the O32 ABI is being used, we expand to: + // lw $25, 0($gp) + // R_(MICRO)MIPS_GOT16 label + // addiu $25, $25, 0 + // R_(MICRO)MIPS_LO16 label + // jalr $25 + const MCExpr *Got16RelocExpr = evaluateRelocExpr(JalExpr, "got"); + const MCExpr *Lo16RelocExpr = evaluateRelocExpr(JalExpr, "lo"); + + emitRRX(Mips::LW, Mips::T9, Mips::GP, + MCOperand::createExpr(Got16RelocExpr), IDLoc, Instructions); + emitRRX(Mips::ADDiu, Mips::T9, Mips::T9, + MCOperand::createExpr(Lo16RelocExpr), IDLoc, Instructions); + } else if (isABI_N32() || isABI_N64()) { + // If it's a local symbol and the N32/N64 ABIs are being used, + // we expand to: + // lw/ld $25, 0($gp) + // R_(MICRO)MIPS_GOT_DISP label + // jalr $25 + const MCExpr *GotDispRelocExpr = evaluateRelocExpr(JalExpr, "got_disp"); + + emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9, Mips::GP, + MCOperand::createExpr(GotDispRelocExpr), IDLoc, Instructions); + } + } else { + // If it's an external/weak symbol, we expand to: + // lw/ld $25, 0($gp) + // R_(MICRO)MIPS_CALL16 label + // jalr $25 + const MCExpr *Call16RelocExpr = evaluateRelocExpr(JalExpr, "call16"); + + emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9, Mips::GP, + MCOperand::createExpr(Call16RelocExpr), IDLoc, Instructions); + } + + MCInst JalrInst; + if (IsCpRestoreSet && inMicroMipsMode()) + JalrInst.setOpcode(Mips::JALRS_MM); + else + JalrInst.setOpcode(inMicroMipsMode() ? Mips::JALR_MM : Mips::JALR); + JalrInst.addOperand(MCOperand::createReg(Mips::RA)); + JalrInst.addOperand(MCOperand::createReg(Mips::T9)); + + // FIXME: Add an R_(MICRO)MIPS_JALR relocation after the JALR. + // This relocation is supposed to be an optimization hint for the linker + // and is not necessary for correctness. + + Inst = JalrInst; + ExpandedJalSym = true; + } + if (MCID.mayLoad() || MCID.mayStore()) { // Check the offset of memory operand, if it is a symbol // reference or immediate we may have to expand instructions. 
@@ -1490,17 +1740,14 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, int MemOffset = Op.getImm(); MCOperand &DstReg = Inst.getOperand(0); MCOperand &BaseReg = Inst.getOperand(1); - if (isIntN(9, MemOffset) && (MemOffset % 4 == 0) && + if (isInt<9>(MemOffset) && (MemOffset % 4 == 0) && getContext().getRegisterInfo()->getRegClass( Mips::GPRMM16RegClassID).contains(DstReg.getReg()) && - BaseReg.getReg() == Mips::GP) { - MCInst TmpInst; - TmpInst.setLoc(IDLoc); - TmpInst.setOpcode(Mips::LWGP_MM); - TmpInst.addOperand(MCOperand::createReg(DstReg.getReg())); - TmpInst.addOperand(MCOperand::createReg(Mips::GP)); - TmpInst.addOperand(MCOperand::createImm(MemOffset)); - Instructions.push_back(TmpInst); + (BaseReg.getReg() == Mips::GP || + BaseReg.getReg() == Mips::GP_64)) { + + emitRRI(Mips::LWGP_MM, DstReg.getReg(), Mips::GP, MemOffset, + IDLoc, Instructions); return false; } } @@ -1587,6 +1834,12 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, if (Imm < -1 || Imm > 14) return Error(IDLoc, "immediate operand value out of range"); break; + case Mips::TEQ_MM: + case Mips::TGE_MM: + case Mips::TGEU_MM: + case Mips::TLT_MM: + case Mips::TLTU_MM: + case Mips::TNE_MM: case Mips::SB16_MM: Opnd = Inst.getOperand(2); if (!Opnd.isImm()) @@ -1613,6 +1866,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, if (Imm < 0 || Imm > 60 || (Imm % 4 != 0)) return Error(IDLoc, "immediate operand value out of range"); break; + case Mips::PREFX_MM: case Mips::CACHE: case Mips::PREF: Opnd = Inst.getOperand(2); @@ -1627,79 +1881,106 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, if (!Opnd.isImm()) return Error(IDLoc, "expected immediate operand kind"); int Imm = Opnd.getImm(); - if ((Imm % 4 != 0) || !isIntN(25, Imm)) + if ((Imm % 4 != 0) || !isInt<25>(Imm)) return Error(IDLoc, "immediate operand value out of range"); break; } } - if (needsExpansion(Inst)) { - if (expandInstruction(Inst, IDLoc, Instructions)) - return true; - } else + MacroExpanderResultTy ExpandResult = + tryExpandInstruction(Inst, IDLoc, Instructions); + switch (ExpandResult) { + case MER_NotAMacro: Instructions.push_back(Inst); + break; + case MER_Success: + break; + case MER_Fail: + return true; + } // If this instruction has a delay slot and .set reorder is active, // emit a NOP after it. if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder()) createNop(hasShortDelaySlot(Inst.getOpcode()), IDLoc, Instructions); - return false; -} + if ((Inst.getOpcode() == Mips::JalOneReg || + Inst.getOpcode() == Mips::JalTwoReg || ExpandedJalSym) && + isPicAndNotNxxAbi()) { + if (IsCpRestoreSet) { + // We need a NOP between the JALR and the LW: + // If .set reorder has been used, we've already emitted a NOP. + // If .set noreorder has been used, we need to emit a NOP at this point. + if (!AssemblerOptions.back()->isReorder()) + createNop(hasShortDelaySlot(Inst.getOpcode()), IDLoc, Instructions); -bool MipsAsmParser::needsExpansion(MCInst &Inst) { + // Load the $gp from the stack. 
+ SmallVector LoadInsts; + createCpRestoreMemOp(true /*IsLoad*/, CpRestoreOffset /*StackOffset*/, + IDLoc, LoadInsts); - switch (Inst.getOpcode()) { - case Mips::LoadImm32: - case Mips::LoadImm64: - case Mips::LoadAddrImm32: - case Mips::LoadAddrReg32: - case Mips::B_MM_Pseudo: - case Mips::LWM_MM: - case Mips::SWM_MM: - case Mips::JalOneReg: - case Mips::JalTwoReg: - case Mips::BneImm: - case Mips::BeqImm: - case Mips::BLT: - case Mips::BLE: - case Mips::BGE: - case Mips::BGT: - case Mips::BLTU: - case Mips::BLEU: - case Mips::BGEU: - case Mips::BGTU: - case Mips::Ulhu: - case Mips::Ulw: - return true; - default: - return false; + for (const MCInst &Inst : LoadInsts) + Instructions.push_back(Inst); + + } else + Warning(IDLoc, "no .cprestore used in PIC mode"); } + + return false; } -bool MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc, - SmallVectorImpl &Instructions) { +MipsAsmParser::MacroExpanderResultTy +MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions) { switch (Inst.getOpcode()) { - default: llvm_unreachable("unimplemented expansion"); + default: + return MER_NotAMacro; case Mips::LoadImm32: - return expandLoadImm(Inst, true, IDLoc, Instructions); + return expandLoadImm(Inst, true, IDLoc, Instructions) ? MER_Fail + : MER_Success; case Mips::LoadImm64: - return expandLoadImm(Inst, false, IDLoc, Instructions); + return expandLoadImm(Inst, false, IDLoc, Instructions) ? MER_Fail + : MER_Success; case Mips::LoadAddrImm32: - return expandLoadAddressImm(Inst, true, IDLoc, Instructions); + case Mips::LoadAddrImm64: + assert(Inst.getOperand(0).isReg() && "expected register operand kind"); + assert((Inst.getOperand(1).isImm() || Inst.getOperand(1).isExpr()) && + "expected immediate operand kind"); + + return expandLoadAddress(Inst.getOperand(0).getReg(), Mips::NoRegister, + Inst.getOperand(1), + Inst.getOpcode() == Mips::LoadAddrImm32, IDLoc, + Instructions) + ? MER_Fail + : MER_Success; case Mips::LoadAddrReg32: - return expandLoadAddressReg(Inst, true, IDLoc, Instructions); + case Mips::LoadAddrReg64: + assert(Inst.getOperand(0).isReg() && "expected register operand kind"); + assert(Inst.getOperand(1).isReg() && "expected register operand kind"); + assert((Inst.getOperand(2).isImm() || Inst.getOperand(2).isExpr()) && + "expected immediate operand kind"); + + return expandLoadAddress(Inst.getOperand(0).getReg(), + Inst.getOperand(1).getReg(), Inst.getOperand(2), + Inst.getOpcode() == Mips::LoadAddrReg32, IDLoc, + Instructions) + ? MER_Fail + : MER_Success; case Mips::B_MM_Pseudo: - return expandUncondBranchMMPseudo(Inst, IDLoc, Instructions); + case Mips::B_MMR6_Pseudo: + return expandUncondBranchMMPseudo(Inst, IDLoc, Instructions) ? MER_Fail + : MER_Success; case Mips::SWM_MM: case Mips::LWM_MM: - return expandLoadStoreMultiple(Inst, IDLoc, Instructions); + return expandLoadStoreMultiple(Inst, IDLoc, Instructions) ? MER_Fail + : MER_Success; case Mips::JalOneReg: case Mips::JalTwoReg: - return expandJalWithRegs(Inst, IDLoc, Instructions); + return expandJalWithRegs(Inst, IDLoc, Instructions) ? MER_Fail + : MER_Success; case Mips::BneImm: case Mips::BeqImm: - return expandBranchImm(Inst, IDLoc, Instructions); + return expandBranchImm(Inst, IDLoc, Instructions) ? 
MER_Fail : MER_Success; case Mips::BLT: case Mips::BLE: case Mips::BGE: @@ -1708,56 +1989,81 @@ bool MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc, case Mips::BLEU: case Mips::BGEU: case Mips::BGTU: - return expandCondBranches(Inst, IDLoc, Instructions); + case Mips::BLTL: + case Mips::BLEL: + case Mips::BGEL: + case Mips::BGTL: + case Mips::BLTUL: + case Mips::BLEUL: + case Mips::BGEUL: + case Mips::BGTUL: + case Mips::BLTImmMacro: + case Mips::BLEImmMacro: + case Mips::BGEImmMacro: + case Mips::BGTImmMacro: + case Mips::BLTUImmMacro: + case Mips::BLEUImmMacro: + case Mips::BGEUImmMacro: + case Mips::BGTUImmMacro: + case Mips::BLTLImmMacro: + case Mips::BLELImmMacro: + case Mips::BGELImmMacro: + case Mips::BGTLImmMacro: + case Mips::BLTULImmMacro: + case Mips::BLEULImmMacro: + case Mips::BGEULImmMacro: + case Mips::BGTULImmMacro: + return expandCondBranches(Inst, IDLoc, Instructions) ? MER_Fail + : MER_Success; + case Mips::SDivMacro: + return expandDiv(Inst, IDLoc, Instructions, false, true) ? MER_Fail + : MER_Success; + case Mips::DSDivMacro: + return expandDiv(Inst, IDLoc, Instructions, true, true) ? MER_Fail + : MER_Success; + case Mips::UDivMacro: + return expandDiv(Inst, IDLoc, Instructions, false, false) ? MER_Fail + : MER_Success; + case Mips::DUDivMacro: + return expandDiv(Inst, IDLoc, Instructions, true, false) ? MER_Fail + : MER_Success; + case Mips::Ulh: + return expandUlh(Inst, true, IDLoc, Instructions) ? MER_Fail : MER_Success; case Mips::Ulhu: - return expandUlhu(Inst, IDLoc, Instructions); + return expandUlh(Inst, false, IDLoc, Instructions) ? MER_Fail : MER_Success; case Mips::Ulw: - return expandUlw(Inst, IDLoc, Instructions); + return expandUlw(Inst, IDLoc, Instructions) ? MER_Fail : MER_Success; + case Mips::NORImm: + return expandAliasImmediate(Inst, IDLoc, Instructions) ? MER_Fail + : MER_Success; + case Mips::ADDi: + case Mips::ADDiu: + case Mips::SLTi: + case Mips::SLTiu: + if ((Inst.getNumOperands() == 3) && Inst.getOperand(0).isReg() && + Inst.getOperand(1).isReg() && Inst.getOperand(2).isImm()) { + int64_t ImmValue = Inst.getOperand(2).getImm(); + if (isInt<16>(ImmValue)) + return MER_NotAMacro; + return expandAliasImmediate(Inst, IDLoc, Instructions) ? MER_Fail + : MER_Success; + } + return MER_NotAMacro; + case Mips::ANDi: + case Mips::ORi: + case Mips::XORi: + if ((Inst.getNumOperands() == 3) && Inst.getOperand(0).isReg() && + Inst.getOperand(1).isReg() && Inst.getOperand(2).isImm()) { + int64_t ImmValue = Inst.getOperand(2).getImm(); + if (isUInt<16>(ImmValue)) + return MER_NotAMacro; + return expandAliasImmediate(Inst, IDLoc, Instructions) ? MER_Fail + : MER_Success; + } + return MER_NotAMacro; } } -namespace { -template -void createLShiftOri(MCOperand Operand, unsigned RegNo, SMLoc IDLoc, - SmallVectorImpl &Instructions) { - MCInst tmpInst; - if (ShiftAmount >= 32) { - tmpInst.setOpcode(Mips::DSLL32); - tmpInst.addOperand(MCOperand::createReg(RegNo)); - tmpInst.addOperand(MCOperand::createReg(RegNo)); - tmpInst.addOperand(MCOperand::createImm(ShiftAmount - 32)); - tmpInst.setLoc(IDLoc); - Instructions.push_back(tmpInst); - tmpInst.clear(); - } else if (ShiftAmount > 0) { - tmpInst.setOpcode(Mips::DSLL); - tmpInst.addOperand(MCOperand::createReg(RegNo)); - tmpInst.addOperand(MCOperand::createReg(RegNo)); - tmpInst.addOperand(MCOperand::createImm(ShiftAmount)); - tmpInst.setLoc(IDLoc); - Instructions.push_back(tmpInst); - tmpInst.clear(); - } - // There's no need for an ORi if the immediate is 0. 
- if (Operand.isImm() && Operand.getImm() == 0) - return; - - tmpInst.setOpcode(Mips::ORi); - tmpInst.addOperand(MCOperand::createReg(RegNo)); - tmpInst.addOperand(MCOperand::createReg(RegNo)); - tmpInst.addOperand(Operand); - tmpInst.setLoc(IDLoc); - Instructions.push_back(tmpInst); -} - -template -void createLShiftOri(int64_t Value, unsigned RegNo, SMLoc IDLoc, - SmallVectorImpl &Instructions) { - createLShiftOri(MCOperand::createImm(Value), RegNo, IDLoc, - Instructions); -} -} - bool MipsAsmParser::expandJalWithRegs(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl &Instructions) { // Create a JALR instruction which is going to replace the pseudo-JAL. @@ -1768,8 +2074,11 @@ bool MipsAsmParser::expandJalWithRegs(MCInst &Inst, SMLoc IDLoc, if (Opcode == Mips::JalOneReg) { // jal $rs => jalr $rs - if (inMicroMipsMode()) { - JalrInst.setOpcode(Mips::JALR16_MM); + if (IsCpRestoreSet && inMicroMipsMode()) { + JalrInst.setOpcode(Mips::JALRS16_MM); + JalrInst.addOperand(FirstRegOp); + } else if (inMicroMipsMode()) { + JalrInst.setOpcode(hasMips32r6() ? Mips::JALRC16_MMR6 : Mips::JALR16_MM); JalrInst.addOperand(FirstRegOp); } else { JalrInst.setOpcode(Mips::JALR); @@ -1778,42 +2087,72 @@ bool MipsAsmParser::expandJalWithRegs(MCInst &Inst, SMLoc IDLoc, } } else if (Opcode == Mips::JalTwoReg) { // jal $rd, $rs => jalr $rd, $rs - JalrInst.setOpcode(inMicroMipsMode() ? Mips::JALR_MM : Mips::JALR); + if (IsCpRestoreSet && inMicroMipsMode()) + JalrInst.setOpcode(Mips::JALRS_MM); + else + JalrInst.setOpcode(inMicroMipsMode() ? Mips::JALR_MM : Mips::JALR); JalrInst.addOperand(FirstRegOp); const MCOperand SecondRegOp = Inst.getOperand(1); JalrInst.addOperand(SecondRegOp); } Instructions.push_back(JalrInst); - // If .set reorder is active, emit a NOP after it. - if (AssemblerOptions.back()->isReorder()) { - // This is a 32-bit NOP because these 2 pseudo-instructions - // do not have a short delay slot. - MCInst NopInst; - NopInst.setOpcode(Mips::SLL); - NopInst.addOperand(MCOperand::createReg(Mips::ZERO)); - NopInst.addOperand(MCOperand::createReg(Mips::ZERO)); - NopInst.addOperand(MCOperand::createImm(0)); - Instructions.push_back(NopInst); + // If .set reorder is active and branch instruction has a delay slot, + // emit a NOP after it. + const MCInstrDesc &MCID = getInstDesc(JalrInst.getOpcode()); + if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder()) { + createNop(hasShortDelaySlot(JalrInst.getOpcode()), IDLoc, Instructions); } return false; } +/// Can the value be represented by a unsigned N-bit value and a shift left? +template static bool isShiftedUIntAtAnyPosition(uint64_t x) { + unsigned BitNum = findFirstSet(x); + + return (x == x >> BitNum << BitNum) && isUInt(x >> BitNum); +} + +/// Load (or add) an immediate into a register. +/// +/// @param ImmValue The immediate to load. +/// @param DstReg The register that will hold the immediate. +/// @param SrcReg A register to add to the immediate or Mips::NoRegister +/// for a simple initialization. +/// @param Is32BitImm Is ImmValue 32-bit or 64-bit? +/// @param IsAddress True if the immediate represents an address. False if it +/// is an integer. +/// @param IDLoc Location of the immediate in the source file. +/// @param Instructions The instructions emitted by this expansion. 
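// ---------------------------------------------------------------------------
// Illustration (not part of the patch): a minimal, standalone sketch of the
// materialization strategy that the loadImmediate() definition below follows
// for plain integers. It prints the MIPS sequence instead of emitting MCInsts.
// The "$dst" register name, the printf output and the materialize()/shiftLeft()
// helpers are assumptions for illustration only; the sketch also omits cases
// the real code handles (the SrcReg addend, the unsigned-32-bit and 0xffffffff
// special cases on 64-bit targets, the shifted-16-bit fast path) and does not
// always produce the shortest possible sequence.
#include <cstdint>
#include <cstdio>

static void shiftLeft(unsigned Amount) {
  // Shifts of 32..63 need DSLL32, mirroring emitAppropriateDSLL() above.
  if (Amount >= 32)
    std::printf("dsll32 $dst, $dst, %u\n", Amount - 32);
  else if (Amount != 0)
    std::printf("dsll   $dst, $dst, %u\n", Amount);
}

static void materialize(int64_t Imm) {
  if (Imm >= INT16_MIN && Imm <= INT16_MAX) {
    std::printf("addiu  $dst, $zero, %lld\n", (long long)Imm);
  } else if (Imm >= 0 && Imm <= UINT16_MAX) {
    std::printf("ori    $dst, $zero, 0x%llx\n", (unsigned long long)Imm);
  } else if (Imm >= INT32_MIN && Imm <= INT32_MAX) {
    std::printf("lui    $dst, 0x%llx\n",
                (unsigned long long)((Imm >> 16) & 0xffff));
    if (Imm & 0xffff)
      std::printf("ori    $dst, $dst, 0x%llx\n",
                  (unsigned long long)(Imm & 0xffff));
  } else {
    // 64-bit case: load bits 63..32 into bits 31..0 first, then shift/OR in
    // the two remaining 16-bit chunks, skipping zero chunks and coalescing
    // neighbouring shifts, as described in the comments of the code below.
    materialize((int32_t)(Imm >> 32));
    unsigned PendingShift = 16;
    for (int Bit = 16; Bit >= 0; Bit -= 16) {
      uint16_t Chunk = (Imm >> Bit) & 0xffff;
      if (Chunk != 0) {
        shiftLeft(PendingShift);
        std::printf("ori    $dst, $dst, 0x%x\n", Chunk);
        PendingShift = 0;
      }
      PendingShift += 16;
    }
    shiftLeft(PendingShift - 16); // finish any shift left by trailing zeros
  }
}

int main() { materialize(0x12340000abcdLL); } // prints addiu, dsll32, ori
// ---------------------------------------------------------------------------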
bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg, - unsigned SrcReg, bool Is32BitImm, SMLoc IDLoc, + unsigned SrcReg, bool Is32BitImm, + bool IsAddress, SMLoc IDLoc, SmallVectorImpl &Instructions) { if (!Is32BitImm && !isGP64bit()) { Error(IDLoc, "instruction requires a 64-bit architecture"); return true; } + if (Is32BitImm) { + if (isInt<32>(ImmValue) || isUInt<32>(ImmValue)) { + // Sign extend up to 64-bit so that the predicates match the hardware + // behaviour. In particular, isInt<16>(0xffff8000) and similar should be + // true. + ImmValue = SignExtend64<32>(ImmValue); + } else { + Error(IDLoc, "instruction requires a 32-bit immediate"); + return true; + } + } + + unsigned ZeroReg = IsAddress ? ABI.GetNullPtr() : ABI.GetZeroReg(); + unsigned AdduOp = !Is32BitImm ? Mips::DADDu : Mips::ADDu; + bool UseSrcReg = false; if (SrcReg != Mips::NoRegister) UseSrcReg = true; - MCInst tmpInst; - unsigned TmpReg = DstReg; if (UseSrcReg && (DstReg == SrcReg)) { // At this point we need AT to perform the expansions and we exit if it is @@ -1824,138 +2163,129 @@ bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg, TmpReg = ATReg; } - tmpInst.setLoc(IDLoc); - // FIXME: gas has a special case for values that are 000...1111, which - // becomes a li -1 and then a dsrl - if (0 <= ImmValue && ImmValue <= 65535) { - // For unsigned and positive signed 16-bit values (0 <= j <= 65535): - // li d,j => ori d,$zero,j + if (isInt<16>(ImmValue)) { if (!UseSrcReg) - SrcReg = isGP64bit() ? Mips::ZERO_64 : Mips::ZERO; - tmpInst.setOpcode(Mips::ORi); - tmpInst.addOperand(MCOperand::createReg(DstReg)); - tmpInst.addOperand(MCOperand::createReg(SrcReg)); - tmpInst.addOperand(MCOperand::createImm(ImmValue)); - Instructions.push_back(tmpInst); - } else if (ImmValue < 0 && ImmValue >= -32768) { - // For negative signed 16-bit values (-32768 <= j < 0): - // li d,j => addiu d,$zero,j - if (!UseSrcReg) - SrcReg = Mips::ZERO; - tmpInst.setOpcode(Mips::ADDiu); - tmpInst.addOperand(MCOperand::createReg(DstReg)); - tmpInst.addOperand(MCOperand::createReg(SrcReg)); - tmpInst.addOperand(MCOperand::createImm(ImmValue)); - Instructions.push_back(tmpInst); - } else if (isInt<32>(ImmValue) || isUInt<32>(ImmValue)) { + SrcReg = ZeroReg; + + // This doesn't quite follow the usual ABI expectations for N32 but matches + // traditional assembler behaviour. N32 would normally use addiu for both + // integers and addresses. + if (IsAddress && !Is32BitImm) { + emitRRI(Mips::DADDiu, DstReg, SrcReg, ImmValue, IDLoc, Instructions); + return false; + } + + emitRRI(Mips::ADDiu, DstReg, SrcReg, ImmValue, IDLoc, Instructions); + return false; + } + + if (isUInt<16>(ImmValue)) { + unsigned TmpReg = DstReg; + if (SrcReg == DstReg) { + TmpReg = getATReg(IDLoc); + if (!TmpReg) + return true; + } + + emitRRI(Mips::ORi, TmpReg, ZeroReg, ImmValue, IDLoc, Instructions); + if (UseSrcReg) + emitRRR(ABI.GetPtrAdduOp(), DstReg, TmpReg, SrcReg, IDLoc, Instructions); + return false; + } + + if (isInt<32>(ImmValue) || isUInt<32>(ImmValue)) { warnIfNoMacro(IDLoc); - // For all other values which are representable as a 32-bit integer: - // li d,j => lui d,hi16(j) - // ori d,d,lo16(j) uint16_t Bits31To16 = (ImmValue >> 16) & 0xffff; uint16_t Bits15To0 = ImmValue & 0xffff; if (!Is32BitImm && !isInt<32>(ImmValue)) { - // For DLI, expand to an ORi instead of a LUi to avoid sign-extending the + // Traditional behaviour seems to special case this particular value. It's + // not clear why other masks are handled differently. 
+ if (ImmValue == 0xffffffff) { + emitRI(Mips::LUi, TmpReg, 0xffff, IDLoc, Instructions); + emitRRI(Mips::DSRL32, TmpReg, TmpReg, 0, IDLoc, Instructions); + if (UseSrcReg) + emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions); + return false; + } + + // Expand to an ORi instead of a LUi to avoid sign-extending into the // upper 32 bits. - tmpInst.setOpcode(Mips::ORi); - tmpInst.addOperand(MCOperand::createReg(TmpReg)); - tmpInst.addOperand(MCOperand::createReg(Mips::ZERO)); - tmpInst.addOperand(MCOperand::createImm(Bits31To16)); - tmpInst.setLoc(IDLoc); - Instructions.push_back(tmpInst); - // Move the value to the upper 16 bits by doing a 16-bit left shift. - createLShiftOri<16>(0, TmpReg, IDLoc, Instructions); - } else { - tmpInst.setOpcode(Mips::LUi); - tmpInst.addOperand(MCOperand::createReg(TmpReg)); - tmpInst.addOperand(MCOperand::createImm(Bits31To16)); - Instructions.push_back(tmpInst); + emitRRI(Mips::ORi, TmpReg, ZeroReg, Bits31To16, IDLoc, Instructions); + emitRRI(Mips::DSLL, TmpReg, TmpReg, 16, IDLoc, Instructions); + if (Bits15To0) + emitRRI(Mips::ORi, TmpReg, TmpReg, Bits15To0, IDLoc, Instructions); + if (UseSrcReg) + emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions); + return false; } - createLShiftOri<0>(Bits15To0, TmpReg, IDLoc, Instructions); + emitRI(Mips::LUi, TmpReg, Bits31To16, IDLoc, Instructions); + if (Bits15To0) + emitRRI(Mips::ORi, TmpReg, TmpReg, Bits15To0, IDLoc, Instructions); if (UseSrcReg) - createAddu(DstReg, TmpReg, SrcReg, !Is32BitImm, Instructions); + emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions); + return false; + } - } else if ((ImmValue & (0xffffLL << 48)) == 0) { + if (isShiftedUIntAtAnyPosition<16>(ImmValue)) { if (Is32BitImm) { Error(IDLoc, "instruction requires a 32-bit immediate"); return true; } - warnIfNoMacro(IDLoc); - - // <------- lo32 ------> - // <------- hi32 ------> - // <- hi16 -> <- lo16 -> - // _________________________________ - // | | | | - // | 16-bits | 16-bits | 16-bits | - // |__________|__________|__________| - // - // For any 64-bit value that is representable as a 48-bit integer: - // li d,j => lui d,hi16(j) - // ori d,d,hi16(lo32(j)) - // dsll d,d,16 - // ori d,d,lo16(lo32(j)) - uint16_t Bits47To32 = (ImmValue >> 32) & 0xffff; - uint16_t Bits31To16 = (ImmValue >> 16) & 0xffff; - uint16_t Bits15To0 = ImmValue & 0xffff; - tmpInst.setOpcode(Mips::LUi); - tmpInst.addOperand(MCOperand::createReg(TmpReg)); - tmpInst.addOperand(MCOperand::createImm(Bits47To32)); - Instructions.push_back(tmpInst); - createLShiftOri<0>(Bits31To16, TmpReg, IDLoc, Instructions); - createLShiftOri<16>(Bits15To0, TmpReg, IDLoc, Instructions); + // Traditionally, these immediates are shifted as little as possible and as + // such we align the most significant bit to bit 15 of our temporary. 
+ unsigned FirstSet = findFirstSet((uint64_t)ImmValue); + unsigned LastSet = findLastSet((uint64_t)ImmValue); + unsigned ShiftAmount = FirstSet - (15 - (LastSet - FirstSet)); + uint16_t Bits = (ImmValue >> ShiftAmount) & 0xffff; + emitRRI(Mips::ORi, TmpReg, ZeroReg, Bits, IDLoc, Instructions); + emitRRI(Mips::DSLL, TmpReg, TmpReg, ShiftAmount, IDLoc, Instructions); if (UseSrcReg) - createAddu(DstReg, TmpReg, SrcReg, !Is32BitImm, Instructions); + emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions); - } else { - if (Is32BitImm) { - Error(IDLoc, "instruction requires a 32-bit immediate"); - return true; - } - warnIfNoMacro(IDLoc); + return false; + } - // <------- hi32 ------> <------- lo32 ------> - // <- hi16 -> <- lo16 -> - // ___________________________________________ - // | | | | | - // | 16-bits | 16-bits | 16-bits | 16-bits | - // |__________|__________|__________|__________| - // - // For all other values which are representable as a 64-bit integer: - // li d,j => lui d,hi16(j) - // ori d,d,lo16(hi32(j)) - // dsll d,d,16 - // ori d,d,hi16(lo32(j)) - // dsll d,d,16 - // ori d,d,lo16(lo32(j)) - uint16_t Bits63To48 = (ImmValue >> 48) & 0xffff; - uint16_t Bits47To32 = (ImmValue >> 32) & 0xffff; - uint16_t Bits31To16 = (ImmValue >> 16) & 0xffff; - uint16_t Bits15To0 = ImmValue & 0xffff; + warnIfNoMacro(IDLoc); - tmpInst.setOpcode(Mips::LUi); - tmpInst.addOperand(MCOperand::createReg(TmpReg)); - tmpInst.addOperand(MCOperand::createImm(Bits63To48)); - Instructions.push_back(tmpInst); - createLShiftOri<0>(Bits47To32, TmpReg, IDLoc, Instructions); + // The remaining case is packed with a sequence of dsll and ori with zeros + // being omitted and any neighbouring dsll's being coalesced. + // The highest 32-bit's are equivalent to a 32-bit immediate load. - // When Bits31To16 is 0, do a left shift of 32 bits instead of doing - // two left shifts of 16 bits. - if (Bits31To16 == 0) { - createLShiftOri<32>(Bits15To0, TmpReg, IDLoc, Instructions); - } else { - createLShiftOri<16>(Bits31To16, TmpReg, IDLoc, Instructions); - createLShiftOri<16>(Bits15To0, TmpReg, IDLoc, Instructions); + // Load bits 32-63 of ImmValue into bits 0-31 of the temporary register. + if (loadImmediate(ImmValue >> 32, TmpReg, Mips::NoRegister, true, false, + IDLoc, Instructions)) + return false; + + // Shift and accumulate into the register. If a 16-bit chunk is zero, then + // skip it and defer the shift to the next chunk. + unsigned ShiftCarriedForwards = 16; + for (int BitNum = 16; BitNum >= 0; BitNum -= 16) { + uint16_t ImmChunk = (ImmValue >> BitNum) & 0xffff; + + if (ImmChunk != 0) { + emitAppropriateDSLL(TmpReg, TmpReg, ShiftCarriedForwards, IDLoc, + Instructions); + emitRRI(Mips::ORi, TmpReg, TmpReg, ImmChunk, IDLoc, Instructions); + ShiftCarriedForwards = 0; } - if (UseSrcReg) - createAddu(DstReg, TmpReg, SrcReg, !Is32BitImm, Instructions); + ShiftCarriedForwards += 16; } + ShiftCarriedForwards -= 16; + + // Finish any remaining shifts left by trailing zeros. 
+ if (ShiftCarriedForwards) + emitAppropriateDSLL(TmpReg, TmpReg, ShiftCarriedForwards, IDLoc, + Instructions); + + if (UseSrcReg) + emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions); + return false; } @@ -1967,63 +2297,38 @@ bool MipsAsmParser::expandLoadImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc, assert(DstRegOp.isReg() && "expected register operand kind"); if (loadImmediate(ImmOp.getImm(), DstRegOp.getReg(), Mips::NoRegister, - Is32BitImm, IDLoc, Instructions)) + Is32BitImm, false, IDLoc, Instructions)) return true; return false; } -bool -MipsAsmParser::expandLoadAddressReg(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc, - SmallVectorImpl &Instructions) { - const MCOperand &DstRegOp = Inst.getOperand(0); - assert(DstRegOp.isReg() && "expected register operand kind"); - - const MCOperand &SrcRegOp = Inst.getOperand(1); - assert(SrcRegOp.isReg() && "expected register operand kind"); - - const MCOperand &ImmOp = Inst.getOperand(2); - assert((ImmOp.isImm() || ImmOp.isExpr()) && - "expected immediate operand kind"); - if (!ImmOp.isImm()) { - if (loadAndAddSymbolAddress(ImmOp.getExpr(), DstRegOp.getReg(), - SrcRegOp.getReg(), Is32BitImm, IDLoc, - Instructions)) - return true; - - return false; - } - - if (loadImmediate(ImmOp.getImm(), DstRegOp.getReg(), SrcRegOp.getReg(), - Is32BitImm, IDLoc, Instructions)) +bool MipsAsmParser::expandLoadAddress(unsigned DstReg, unsigned BaseReg, + const MCOperand &Offset, + bool Is32BitAddress, SMLoc IDLoc, + SmallVectorImpl &Instructions) { + // la can't produce a usable address when addresses are 64-bit. + if (Is32BitAddress && ABI.ArePtrs64bit()) { + // FIXME: Demote this to a warning and continue as if we had 'dla' instead. + // We currently can't do this because we depend on the equality + // operator and N64 can end up with a GPR32/GPR64 mismatch. + Error(IDLoc, "la used to load 64-bit address"); + // Continue as if we had 'dla' instead. + Is32BitAddress = false; + } + + // dla requires 64-bit addresses. 
+ if (!Is32BitAddress && !ABI.ArePtrs64bit()) { + Error(IDLoc, "instruction requires a 64-bit architecture"); return true; - - return false; -} - -bool -MipsAsmParser::expandLoadAddressImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc, - SmallVectorImpl &Instructions) { - const MCOperand &DstRegOp = Inst.getOperand(0); - assert(DstRegOp.isReg() && "expected register operand kind"); - - const MCOperand &ImmOp = Inst.getOperand(1); - assert((ImmOp.isImm() || ImmOp.isExpr()) && - "expected immediate operand kind"); - if (!ImmOp.isImm()) { - if (loadAndAddSymbolAddress(ImmOp.getExpr(), DstRegOp.getReg(), - Mips::NoRegister, Is32BitImm, IDLoc, - Instructions)) - return true; - - return false; } - if (loadImmediate(ImmOp.getImm(), DstRegOp.getReg(), Mips::NoRegister, - Is32BitImm, IDLoc, Instructions)) - return true; + if (!Offset.isImm()) + return loadAndAddSymbolAddress(Offset.getExpr(), DstReg, BaseReg, + Is32BitAddress, IDLoc, Instructions); - return false; + return loadImmediate(Offset.getImm(), DstReg, BaseReg, Is32BitAddress, true, + IDLoc, Instructions); } bool MipsAsmParser::loadAndAddSymbolAddress( @@ -2031,67 +2336,102 @@ bool MipsAsmParser::loadAndAddSymbolAddress( SMLoc IDLoc, SmallVectorImpl &Instructions) { warnIfNoMacro(IDLoc); - if (Is32BitSym && isABI_N64()) - Warning(IDLoc, "instruction loads the 32-bit address of a 64-bit symbol"); - - MCInst tmpInst; - const MCSymbolRefExpr *Symbol = cast(SymExpr); - const MCSymbolRefExpr *HiExpr = MCSymbolRefExpr::create( - &Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_ABS_HI, getContext()); - const MCSymbolRefExpr *LoExpr = MCSymbolRefExpr::create( - &Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_ABS_LO, getContext()); + const MCExpr *Symbol = cast(SymExpr); + const MipsMCExpr *HiExpr = MipsMCExpr::create( + MCSymbolRefExpr::VK_Mips_ABS_HI, Symbol, getContext()); + const MipsMCExpr *LoExpr = MipsMCExpr::create( + MCSymbolRefExpr::VK_Mips_ABS_LO, Symbol, getContext()); bool UseSrcReg = SrcReg != Mips::NoRegister; + // This is the 64-bit symbol address expansion. + if (ABI.ArePtrs64bit() && isGP64bit()) { + // We always need AT for the 64-bit expansion. + // If it is not available we exit. 
+ unsigned ATReg = getATReg(IDLoc); + if (!ATReg) + return true; + + const MipsMCExpr *HighestExpr = MipsMCExpr::create( + MCSymbolRefExpr::VK_Mips_HIGHEST, Symbol, getContext()); + const MipsMCExpr *HigherExpr = MipsMCExpr::create( + MCSymbolRefExpr::VK_Mips_HIGHER, Symbol, getContext()); + + if (UseSrcReg && (DstReg == SrcReg)) { + // If $rs is the same as $rd: + // (d)la $rd, sym($rd) => lui $at, %highest(sym) + // daddiu $at, $at, %higher(sym) + // dsll $at, $at, 16 + // daddiu $at, $at, %hi(sym) + // dsll $at, $at, 16 + // daddiu $at, $at, %lo(sym) + // daddu $rd, $at, $rd + emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HighestExpr), IDLoc, + Instructions); + emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(HigherExpr), + IDLoc, Instructions); + emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, Instructions); + emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(HiExpr), IDLoc, + Instructions); + emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, Instructions); + emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(LoExpr), IDLoc, + Instructions); + emitRRR(Mips::DADDu, DstReg, ATReg, SrcReg, IDLoc, Instructions); + + return false; + } + + // Otherwise, if the $rs is different from $rd or if $rs isn't specified: + // (d)la $rd, sym/sym($rs) => lui $rd, %highest(sym) + // lui $at, %hi(sym) + // daddiu $rd, $rd, %higher(sym) + // daddiu $at, $at, %lo(sym) + // dsll32 $rd, $rd, 0 + // daddu $rd, $rd, $at + // (daddu $rd, $rd, $rs) + emitRX(Mips::LUi, DstReg, MCOperand::createExpr(HighestExpr), IDLoc, + Instructions); + emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HiExpr), IDLoc, + Instructions); + emitRRX(Mips::DADDiu, DstReg, DstReg, MCOperand::createExpr(HigherExpr), + IDLoc, Instructions); + emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(LoExpr), IDLoc, + Instructions); + emitRRI(Mips::DSLL32, DstReg, DstReg, 0, IDLoc, Instructions); + emitRRR(Mips::DADDu, DstReg, DstReg, ATReg, IDLoc, Instructions); + if (UseSrcReg) + emitRRR(Mips::DADDu, DstReg, DstReg, SrcReg, IDLoc, Instructions); + + return false; + } + + // And now, the 32-bit symbol address expansion: + // If $rs is the same as $rd: + // (d)la $rd, sym($rd) => lui $at, %hi(sym) + // ori $at, $at, %lo(sym) + // addu $rd, $at, $rd + // Otherwise, if the $rs is different from $rd or if $rs isn't specified: + // (d)la $rd, sym/sym($rs) => lui $rd, %hi(sym) + // ori $rd, $rd, %lo(sym) + // (addu $rd, $rd, $rs) unsigned TmpReg = DstReg; if (UseSrcReg && (DstReg == SrcReg)) { - // At this point we need AT to perform the expansions and we exit if it is - // not available. + // If $rs is the same as $rd, we need to use AT. + // If it is not available we exit. 
unsigned ATReg = getATReg(IDLoc); if (!ATReg) return true; TmpReg = ATReg; } - if (!Is32BitSym) { - // If it's a 64-bit architecture, expand to: - // la d,sym => lui d,highest(sym) - // ori d,d,higher(sym) - // dsll d,d,16 - // ori d,d,hi16(sym) - // dsll d,d,16 - // ori d,d,lo16(sym) - const MCSymbolRefExpr *HighestExpr = MCSymbolRefExpr::create( - &Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_HIGHEST, getContext()); - const MCSymbolRefExpr *HigherExpr = MCSymbolRefExpr::create( - &Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_HIGHER, getContext()); - - tmpInst.setOpcode(Mips::LUi); - tmpInst.addOperand(MCOperand::createReg(TmpReg)); - tmpInst.addOperand(MCOperand::createExpr(HighestExpr)); - Instructions.push_back(tmpInst); - - createLShiftOri<0>(MCOperand::createExpr(HigherExpr), TmpReg, SMLoc(), - Instructions); - createLShiftOri<16>(MCOperand::createExpr(HiExpr), TmpReg, SMLoc(), - Instructions); - createLShiftOri<16>(MCOperand::createExpr(LoExpr), TmpReg, SMLoc(), - Instructions); - } else { - // Otherwise, expand to: - // la d,sym => lui d,hi16(sym) - // ori d,d,lo16(sym) - tmpInst.setOpcode(Mips::LUi); - tmpInst.addOperand(MCOperand::createReg(TmpReg)); - tmpInst.addOperand(MCOperand::createExpr(HiExpr)); - Instructions.push_back(tmpInst); - - createLShiftOri<0>(MCOperand::createExpr(LoExpr), TmpReg, SMLoc(), - Instructions); - } + emitRX(Mips::LUi, TmpReg, MCOperand::createExpr(HiExpr), IDLoc, Instructions); + emitRRX(Mips::ADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr), IDLoc, + Instructions); if (UseSrcReg) - createAddu(DstReg, TmpReg, SrcReg, !Is32BitSym, Instructions); + emitRRR(Mips::ADDu, DstReg, TmpReg, SrcReg, IDLoc, Instructions); + else + assert(DstReg == TmpReg); return false; } @@ -2110,12 +2450,13 @@ bool MipsAsmParser::expandUncondBranchMMPseudo( Inst.addOperand(MCOperand::createExpr(Offset.getExpr())); } else { assert(Offset.isImm() && "expected immediate operand kind"); - if (isIntN(11, Offset.getImm())) { + if (isInt<11>(Offset.getImm())) { // If offset fits into 11 bits then this instruction becomes microMIPS // 16-bit unconditional branch instruction. - Inst.setOpcode(Mips::B16_MM); + if (inMicroMipsMode()) + Inst.setOpcode(hasMips32r6() ? Mips::BC16_MMR6 : Mips::B16_MM); } else { - if (!isIntN(17, Offset.getImm())) + if (!isInt<17>(Offset.getImm())) Error(IDLoc, "branch target out of range"); if (OffsetToAlignment(Offset.getImm(), 1LL << 1)) Error(IDLoc, "branch to misaligned address"); @@ -2128,8 +2469,10 @@ bool MipsAsmParser::expandUncondBranchMMPseudo( } Instructions.push_back(Inst); - // If .set reorder is active, emit a NOP after the branch instruction. - if (AssemblerOptions.back()->isReorder()) + // If .set reorder is active and branch instruction has a delay slot, + // emit a NOP after it. 
+ const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); + if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder()) createNop(true, IDLoc, Instructions); return false; @@ -2160,30 +2503,21 @@ bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc, } int64_t ImmValue = ImmOp.getImm(); - if (ImmValue == 0) { - MCInst BranchInst; - BranchInst.setOpcode(OpCode); - BranchInst.addOperand(DstRegOp); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MemOffsetOp); - Instructions.push_back(BranchInst); - } else { + if (ImmValue == 0) + emitRRX(OpCode, DstRegOp.getReg(), Mips::ZERO, MemOffsetOp, IDLoc, + Instructions); + else { warnIfNoMacro(IDLoc); unsigned ATReg = getATReg(IDLoc); if (!ATReg) return true; - if (loadImmediate(ImmValue, ATReg, Mips::NoRegister, !isGP64bit(), IDLoc, - Instructions)) + if (loadImmediate(ImmValue, ATReg, Mips::NoRegister, !isGP64bit(), true, + IDLoc, Instructions)) return true; - MCInst BranchInst; - BranchInst.setOpcode(OpCode); - BranchInst.addOperand(DstRegOp); - BranchInst.addOperand(MCOperand::createReg(ATReg)); - BranchInst.addOperand(MemOffsetOp); - Instructions.push_back(BranchInst); + emitRRX(OpCode, DstRegOp.getReg(), ATReg, MemOffsetOp, IDLoc, Instructions); } return false; } @@ -2191,7 +2525,6 @@ bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc, void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl &Instructions, bool isLoad, bool isImmOpnd) { - MCInst TempInst; unsigned ImmOffset, HiOffset, LoOffset; const MCExpr *ExprOffset; unsigned TmpRegNum; @@ -2212,8 +2545,6 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, HiOffset++; } else ExprOffset = Inst.getOperand(2).getExpr(); - // All instructions will have the same location. - TempInst.setLoc(IDLoc); // These are some of the types of expansions we perform here: // 1) lw $8, sym => lui $8, %hi(sym) // lw $8, %lo(sym)($8) @@ -2252,40 +2583,20 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, return; } - TempInst.setOpcode(Mips::LUi); - TempInst.addOperand(MCOperand::createReg(TmpRegNum)); - if (isImmOpnd) - TempInst.addOperand(MCOperand::createImm(HiOffset)); - else { - const MCExpr *HiExpr = evaluateRelocExpr(ExprOffset, "hi"); - TempInst.addOperand(MCOperand::createExpr(HiExpr)); - } - // Add the instruction to the list. - Instructions.push_back(TempInst); - // Prepare TempInst for next instruction. - TempInst.clear(); + emitRX(Mips::LUi, TmpRegNum, + isImmOpnd ? MCOperand::createImm(HiOffset) + : MCOperand::createExpr(evaluateRelocExpr(ExprOffset, "hi")), + IDLoc, Instructions); // Add temp register to base. - if (BaseRegNum != Mips::ZERO) { - TempInst.setOpcode(Mips::ADDu); - TempInst.addOperand(MCOperand::createReg(TmpRegNum)); - TempInst.addOperand(MCOperand::createReg(TmpRegNum)); - TempInst.addOperand(MCOperand::createReg(BaseRegNum)); - Instructions.push_back(TempInst); - TempInst.clear(); - } + if (BaseRegNum != Mips::ZERO) + emitRRR(Mips::ADDu, TmpRegNum, TmpRegNum, BaseRegNum, IDLoc, Instructions); // And finally, create original instruction with low part // of offset and new base. 
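// ---------------------------------------------------------------------------
// Illustration (not part of the patch): the lui %hi(...) / %lo(...) expansion
// sketched in the comments above only works because the low half is
// sign-extended by the final memory instruction, so the high half must be
// incremented whenever bit 15 of the offset is set (the "HiOffset++" visible
// in the context above). The exact HiOffset/LoOffset lines are largely elided
// by the diff, so this is a standalone restatement of that standard rule;
// splitHiLo() is a hypothetical helper name, not something from this file.
#include <cassert>
#include <cstdint>

// Split Offset so that (Hi << 16) + (int16_t)Lo == Offset.
static void splitHiLo(int32_t Offset, uint16_t &Hi, uint16_t &Lo) {
  Lo = Offset & 0xffff;
  Hi = ((uint32_t)Offset >> 16) & 0xffff;
  if (Lo & 0x8000) // Lo will be sign-extended, so carry one into Hi.
    ++Hi;
}

int main() {
  uint16_t Hi, Lo;
  splitHiLo(0x12348000, Hi, Lo);
  assert(Hi == 0x1235 && Lo == 0x8000);
  assert((int32_t)((uint32_t)Hi << 16) + (int16_t)Lo == 0x12348000);
}
// ---------------------------------------------------------------------------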
- TempInst.setOpcode(Inst.getOpcode()); - TempInst.addOperand(MCOperand::createReg(RegOpNum)); - TempInst.addOperand(MCOperand::createReg(TmpRegNum)); - if (isImmOpnd) - TempInst.addOperand(MCOperand::createImm(LoOffset)); - else { - const MCExpr *LoExpr = evaluateRelocExpr(ExprOffset, "lo"); - TempInst.addOperand(MCOperand::createExpr(LoExpr)); - } - Instructions.push_back(TempInst); - TempInst.clear(); + emitRRX(Inst.getOpcode(), RegOpNum, TmpRegNum, + isImmOpnd + ? MCOperand::createImm(LoOffset) + : MCOperand::createExpr(evaluateRelocExpr(ExprOffset, "lo")), + IDLoc, Instructions); } bool @@ -2313,44 +2624,126 @@ MipsAsmParser::expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc, bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl &Instructions) { + bool EmittedNoMacroWarning = false; unsigned PseudoOpcode = Inst.getOpcode(); unsigned SrcReg = Inst.getOperand(0).getReg(); - unsigned TrgReg = Inst.getOperand(1).getReg(); + const MCOperand &TrgOp = Inst.getOperand(1); const MCExpr *OffsetExpr = Inst.getOperand(2).getExpr(); unsigned ZeroSrcOpcode, ZeroTrgOpcode; - bool ReverseOrderSLT, IsUnsigned, AcceptsEquality; + bool ReverseOrderSLT, IsUnsigned, IsLikely, AcceptsEquality; + + unsigned TrgReg; + if (TrgOp.isReg()) + TrgReg = TrgOp.getReg(); + else if (TrgOp.isImm()) { + warnIfNoMacro(IDLoc); + EmittedNoMacroWarning = true; + + TrgReg = getATReg(IDLoc); + if (!TrgReg) + return true; + + switch(PseudoOpcode) { + default: + llvm_unreachable("unknown opcode for branch pseudo-instruction"); + case Mips::BLTImmMacro: + PseudoOpcode = Mips::BLT; + break; + case Mips::BLEImmMacro: + PseudoOpcode = Mips::BLE; + break; + case Mips::BGEImmMacro: + PseudoOpcode = Mips::BGE; + break; + case Mips::BGTImmMacro: + PseudoOpcode = Mips::BGT; + break; + case Mips::BLTUImmMacro: + PseudoOpcode = Mips::BLTU; + break; + case Mips::BLEUImmMacro: + PseudoOpcode = Mips::BLEU; + break; + case Mips::BGEUImmMacro: + PseudoOpcode = Mips::BGEU; + break; + case Mips::BGTUImmMacro: + PseudoOpcode = Mips::BGTU; + break; + case Mips::BLTLImmMacro: + PseudoOpcode = Mips::BLTL; + break; + case Mips::BLELImmMacro: + PseudoOpcode = Mips::BLEL; + break; + case Mips::BGELImmMacro: + PseudoOpcode = Mips::BGEL; + break; + case Mips::BGTLImmMacro: + PseudoOpcode = Mips::BGTL; + break; + case Mips::BLTULImmMacro: + PseudoOpcode = Mips::BLTUL; + break; + case Mips::BLEULImmMacro: + PseudoOpcode = Mips::BLEUL; + break; + case Mips::BGEULImmMacro: + PseudoOpcode = Mips::BGEUL; + break; + case Mips::BGTULImmMacro: + PseudoOpcode = Mips::BGTUL; + break; + } + + if (loadImmediate(TrgOp.getImm(), TrgReg, Mips::NoRegister, !isGP64bit(), + false, IDLoc, Instructions)) + return true; + } switch (PseudoOpcode) { case Mips::BLT: case Mips::BLTU: + case Mips::BLTL: + case Mips::BLTUL: AcceptsEquality = false; ReverseOrderSLT = false; - IsUnsigned = (PseudoOpcode == Mips::BLTU); + IsUnsigned = ((PseudoOpcode == Mips::BLTU) || (PseudoOpcode == Mips::BLTUL)); + IsLikely = ((PseudoOpcode == Mips::BLTL) || (PseudoOpcode == Mips::BLTUL)); ZeroSrcOpcode = Mips::BGTZ; ZeroTrgOpcode = Mips::BLTZ; break; case Mips::BLE: case Mips::BLEU: + case Mips::BLEL: + case Mips::BLEUL: AcceptsEquality = true; ReverseOrderSLT = true; - IsUnsigned = (PseudoOpcode == Mips::BLEU); + IsUnsigned = ((PseudoOpcode == Mips::BLEU) || (PseudoOpcode == Mips::BLEUL)); + IsLikely = ((PseudoOpcode == Mips::BLEL) || (PseudoOpcode == Mips::BLEUL)); ZeroSrcOpcode = Mips::BGEZ; ZeroTrgOpcode = Mips::BLEZ; break; case Mips::BGE: case Mips::BGEU: + case 
Mips::BGEL: + case Mips::BGEUL: AcceptsEquality = true; ReverseOrderSLT = false; - IsUnsigned = (PseudoOpcode == Mips::BGEU); + IsUnsigned = ((PseudoOpcode == Mips::BGEU) || (PseudoOpcode == Mips::BGEUL)); + IsLikely = ((PseudoOpcode == Mips::BGEL) || (PseudoOpcode == Mips::BGEUL)); ZeroSrcOpcode = Mips::BLEZ; ZeroTrgOpcode = Mips::BGEZ; break; case Mips::BGT: case Mips::BGTU: + case Mips::BGTL: + case Mips::BGTUL: AcceptsEquality = false; ReverseOrderSLT = true; - IsUnsigned = (PseudoOpcode == Mips::BGTU); + IsUnsigned = ((PseudoOpcode == Mips::BGTU) || (PseudoOpcode == Mips::BGTUL)); + IsLikely = ((PseudoOpcode == Mips::BGTL) || (PseudoOpcode == Mips::BGTUL)); ZeroSrcOpcode = Mips::BLTZ; ZeroTrgOpcode = Mips::BGTZ; break; @@ -2358,7 +2751,6 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc, llvm_unreachable("unknown opcode for branch pseudo-instruction"); } - MCInst BranchInst; bool IsTrgRegZero = (TrgReg == Mips::ZERO); bool IsSrcRegZero = (SrcReg == Mips::ZERO); if (IsSrcRegZero && IsTrgRegZero) { @@ -2366,51 +2758,37 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc, // with GAS' behaviour. However, they may not generate the most efficient // code in some circumstances. if (PseudoOpcode == Mips::BLT) { - BranchInst.setOpcode(Mips::BLTZ); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createExpr(OffsetExpr)); - Instructions.push_back(BranchInst); + emitRX(Mips::BLTZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc, + Instructions); return false; } if (PseudoOpcode == Mips::BLE) { - BranchInst.setOpcode(Mips::BLEZ); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createExpr(OffsetExpr)); - Instructions.push_back(BranchInst); + emitRX(Mips::BLEZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc, + Instructions); Warning(IDLoc, "branch is always taken"); return false; } if (PseudoOpcode == Mips::BGE) { - BranchInst.setOpcode(Mips::BGEZ); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createExpr(OffsetExpr)); - Instructions.push_back(BranchInst); + emitRX(Mips::BGEZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc, + Instructions); Warning(IDLoc, "branch is always taken"); return false; } if (PseudoOpcode == Mips::BGT) { - BranchInst.setOpcode(Mips::BGTZ); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createExpr(OffsetExpr)); - Instructions.push_back(BranchInst); + emitRX(Mips::BGTZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc, + Instructions); return false; } if (PseudoOpcode == Mips::BGTU) { - BranchInst.setOpcode(Mips::BNE); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createExpr(OffsetExpr)); - Instructions.push_back(BranchInst); + emitRRX(Mips::BNE, Mips::ZERO, Mips::ZERO, + MCOperand::createExpr(OffsetExpr), IDLoc, Instructions); return false; } if (AcceptsEquality) { // If both registers are $0 and the pseudo-branch accepts equality, it // will always be taken, so we emit an unconditional branch. 
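For the general case handled further down, the mapping from the four signed pseudo-branches to an SLT plus a conditional branch follows directly from the ReverseOrderSLT and AcceptsEquality flags selected in the switch above; the unsigned variants substitute SLTu, and the likely variants substitute BEQL/BNEL. A small self-contained sketch (the PseudoBranch struct is invented for illustration) that prints those expansions:

#include <cstdio>

struct PseudoBranch {
  const char *Name;
  bool ReverseOrderSLT; // swap the slt operands
  bool AcceptsEquality; // branch on $at == 0 instead of $at != 0
};

int main() {
  const PseudoBranch Table[] = {
      {"blt", false, false}, // slt $at, $rs, $rt ; bne $at, $zero, off
      {"ble", true,  true},  // slt $at, $rt, $rs ; beq $at, $zero, off
      {"bge", false, true},  // slt $at, $rs, $rt ; beq $at, $zero, off
      {"bgt", true,  false}, // slt $at, $rt, $rs ; bne $at, $zero, off
  };
  for (const PseudoBranch &PB : Table)
    printf("%s $rs, $rt, off  =>  slt $at, %s ; %s $at, $zero, off\n", PB.Name,
           PB.ReverseOrderSLT ? "$rt, $rs" : "$rs, $rt",
           PB.AcceptsEquality ? "beq" : "bne");
  return 0;
}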
- BranchInst.setOpcode(Mips::BEQ); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createExpr(OffsetExpr)); - Instructions.push_back(BranchInst); + emitRRX(Mips::BEQ, Mips::ZERO, Mips::ZERO, + MCOperand::createExpr(OffsetExpr), IDLoc, Instructions); Warning(IDLoc, "branch is always taken"); return false; } @@ -2434,11 +2812,8 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc, // the pseudo-branch will always be taken, so we emit an unconditional // branch. // This only applies to unsigned pseudo-branches. - BranchInst.setOpcode(Mips::BEQ); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createExpr(OffsetExpr)); - Instructions.push_back(BranchInst); + emitRRX(Mips::BEQ, Mips::ZERO, Mips::ZERO, + MCOperand::createExpr(OffsetExpr), IDLoc, Instructions); Warning(IDLoc, "branch is always taken"); return false; } @@ -2455,21 +2830,17 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc, // // Because only BLEU and BGEU branch on equality, we can use the // AcceptsEquality variable to decide when to emit the BEQZ. - BranchInst.setOpcode(AcceptsEquality ? Mips::BEQ : Mips::BNE); - BranchInst.addOperand( - MCOperand::createReg(IsSrcRegZero ? TrgReg : SrcReg)); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createExpr(OffsetExpr)); - Instructions.push_back(BranchInst); + emitRRX(AcceptsEquality ? Mips::BEQ : Mips::BNE, + IsSrcRegZero ? TrgReg : SrcReg, Mips::ZERO, + MCOperand::createExpr(OffsetExpr), IDLoc, Instructions); return false; } // If we have a signed pseudo-branch and one of the registers is $0, // we can use an appropriate compare-to-zero branch. We select which one // to use in the switch statement above. - BranchInst.setOpcode(IsSrcRegZero ? ZeroSrcOpcode : ZeroTrgOpcode); - BranchInst.addOperand(MCOperand::createReg(IsSrcRegZero ? TrgReg : SrcReg)); - BranchInst.addOperand(MCOperand::createExpr(OffsetExpr)); - Instructions.push_back(BranchInst); + emitRX(IsSrcRegZero ? ZeroSrcOpcode : ZeroTrgOpcode, + IsSrcRegZero ? TrgReg : SrcReg, MCOperand::createExpr(OffsetExpr), + IDLoc, Instructions); return false; } @@ -2479,7 +2850,8 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc, if (!ATRegNum) return true; - warnIfNoMacro(IDLoc); + if (!EmittedNoMacroWarning) + warnIfNoMacro(IDLoc); // SLT fits well with 2 of our 4 pseudo-branches: // BLT, where $rs < $rt, translates into "slt $at, $rs, $rt" and @@ -2496,23 +2868,135 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc, // // The same applies to the unsigned variants, except that SLTu is used // instead of SLT. - MCInst SetInst; - SetInst.setOpcode(IsUnsigned ? Mips::SLTu : Mips::SLT); - SetInst.addOperand(MCOperand::createReg(ATRegNum)); - SetInst.addOperand(MCOperand::createReg(ReverseOrderSLT ? TrgReg : SrcReg)); - SetInst.addOperand(MCOperand::createReg(ReverseOrderSLT ? SrcReg : TrgReg)); - Instructions.push_back(SetInst); - - BranchInst.setOpcode(AcceptsEquality ? Mips::BEQ : Mips::BNE); - BranchInst.addOperand(MCOperand::createReg(ATRegNum)); - BranchInst.addOperand(MCOperand::createReg(Mips::ZERO)); - BranchInst.addOperand(MCOperand::createExpr(OffsetExpr)); - Instructions.push_back(BranchInst); + emitRRR(IsUnsigned ? Mips::SLTu : Mips::SLT, ATRegNum, + ReverseOrderSLT ? TrgReg : SrcReg, ReverseOrderSLT ? 
SrcReg : TrgReg, + IDLoc, Instructions); + + emitRRX(IsLikely ? (AcceptsEquality ? Mips::BEQL : Mips::BNEL) + : (AcceptsEquality ? Mips::BEQ : Mips::BNE), + ATRegNum, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc, + Instructions); return false; } -bool MipsAsmParser::expandUlhu(MCInst &Inst, SMLoc IDLoc, - SmallVectorImpl &Instructions) { +bool MipsAsmParser::expandDiv(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions, + const bool IsMips64, const bool Signed) { + if (hasMips32r6()) { + Error(IDLoc, "instruction not supported on mips32r6 or mips64r6"); + return false; + } + + warnIfNoMacro(IDLoc); + + const MCOperand &RsRegOp = Inst.getOperand(0); + assert(RsRegOp.isReg() && "expected register operand kind"); + unsigned RsReg = RsRegOp.getReg(); + + const MCOperand &RtRegOp = Inst.getOperand(1); + assert(RtRegOp.isReg() && "expected register operand kind"); + unsigned RtReg = RtRegOp.getReg(); + unsigned DivOp; + unsigned ZeroReg; + + if (IsMips64) { + DivOp = Signed ? Mips::DSDIV : Mips::DUDIV; + ZeroReg = Mips::ZERO_64; + } else { + DivOp = Signed ? Mips::SDIV : Mips::UDIV; + ZeroReg = Mips::ZERO; + } + + bool UseTraps = useTraps(); + + if (RsReg == Mips::ZERO || RsReg == Mips::ZERO_64) { + if (RtReg == Mips::ZERO || RtReg == Mips::ZERO_64) + Warning(IDLoc, "dividing zero by zero"); + if (IsMips64) { + if (Signed && (RtReg == Mips::ZERO || RtReg == Mips::ZERO_64)) { + if (UseTraps) { + emitRRI(Mips::TEQ, RtReg, ZeroReg, 0x7, IDLoc, Instructions); + return false; + } + + emitII(Mips::BREAK, 0x7, 0, IDLoc, Instructions); + return false; + } + } else { + emitRR(DivOp, RsReg, RtReg, IDLoc, Instructions); + return false; + } + } + + if (RtReg == Mips::ZERO || RtReg == Mips::ZERO_64) { + Warning(IDLoc, "division by zero"); + if (Signed) { + if (UseTraps) { + emitRRI(Mips::TEQ, RtReg, ZeroReg, 0x7, IDLoc, Instructions); + return false; + } + + emitII(Mips::BREAK, 0x7, 0, IDLoc, Instructions); + return false; + } + } + + // FIXME: The values for these two BranchTarget variables may be different in + // micromips. These magic numbers need to be removed. + unsigned BranchTargetNoTraps; + unsigned BranchTarget; + + if (UseTraps) { + BranchTarget = IsMips64 ? 12 : 8; + emitRRI(Mips::TEQ, RtReg, ZeroReg, 0x7, IDLoc, Instructions); + } else { + BranchTarget = IsMips64 ? 20 : 16; + BranchTargetNoTraps = 8; + // Branch to the li instruction. + emitRRI(Mips::BNE, RtReg, ZeroReg, BranchTargetNoTraps, IDLoc, + Instructions); + } + + emitRR(DivOp, RsReg, RtReg, IDLoc, Instructions); + + if (!UseTraps) + emitII(Mips::BREAK, 0x7, 0, IDLoc, Instructions); + + if (!Signed) { + emitR(Mips::MFLO, RsReg, IDLoc, Instructions); + return false; + } + + unsigned ATReg = getATReg(IDLoc); + if (!ATReg) + return true; + + emitRRI(Mips::ADDiu, ATReg, ZeroReg, -1, IDLoc, Instructions); + if (IsMips64) { + // Branch to the mflo instruction. + emitRRI(Mips::BNE, RtReg, ATReg, BranchTarget, IDLoc, Instructions); + emitRRI(Mips::ADDiu, ATReg, ZeroReg, 1, IDLoc, Instructions); + emitRRI(Mips::DSLL32, ATReg, ATReg, 0x1f, IDLoc, Instructions); + } else { + // Branch to the mflo instruction. + emitRRI(Mips::BNE, RtReg, ATReg, BranchTarget, IDLoc, Instructions); + emitRI(Mips::LUi, ATReg, (uint16_t)0x8000, IDLoc, Instructions); + } + + if (UseTraps) + emitRRI(Mips::TEQ, RsReg, ATReg, 0x6, IDLoc, Instructions); + else { + // Branch to the mflo instruction. 
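The guards emitted by expandDiv correspond to the two corner cases of signed division: a zero divisor (the teq/break path with code 0x7) and the single quotient that cannot be represented, INT_MIN / -1 (code 0x6). A plain C++ model of those checks, using illustrative values rather than anything from the patch:

#include <climits>
#include <cstdio>

int main() {
  int Num = INT_MIN, Den = -1; // the one signed 32-bit division that overflows
  if (Den == 0)
    puts("trap path: break 0x7 (divide by zero)");
  else if (Num == INT_MIN && Den == -1)
    puts("trap path: break 0x6 (quotient does not fit in 32 bits)");
  else
    printf("%d\n", Num / Den);
  return 0;
}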
+ emitRRI(Mips::BNE, RsReg, ATReg, BranchTargetNoTraps, IDLoc, Instructions); + emitRRI(Mips::SLL, ZeroReg, ZeroReg, 0, IDLoc, Instructions); + emitII(Mips::BREAK, 0x6, 0, IDLoc, Instructions); + } + emitR(Mips::MFLO, RsReg, IDLoc, Instructions); + return false; +} + +bool MipsAsmParser::expandUlh(MCInst &Inst, bool Signed, SMLoc IDLoc, + SmallVectorImpl &Instructions) { if (hasMips32r6() || hasMips64r6()) { Error(IDLoc, "instruction not supported on mips32r6 or mips64r6"); return false; @@ -2547,7 +3031,7 @@ bool MipsAsmParser::expandUlhu(MCInst &Inst, SMLoc IDLoc, LoadedOffsetInAT = true; if (loadImmediate(OffsetValue, ATReg, Mips::NoRegister, !ABI.ArePtrs64bit(), - IDLoc, Instructions)) + true, IDLoc, Instructions)) return true; // NOTE: We do this (D)ADDu here instead of doing it in loadImmediate() @@ -2575,33 +3059,15 @@ bool MipsAsmParser::expandUlhu(MCInst &Inst, SMLoc IDLoc, unsigned SllReg = LoadedOffsetInAT ? DstReg : ATReg; - MCInst TmpInst; - TmpInst.setOpcode(Mips::LBu); - TmpInst.addOperand(MCOperand::createReg(FirstLbuDstReg)); - TmpInst.addOperand(MCOperand::createReg(LbuSrcReg)); - TmpInst.addOperand(MCOperand::createImm(FirstLbuOffset)); - Instructions.push_back(TmpInst); - - TmpInst.clear(); - TmpInst.setOpcode(Mips::LBu); - TmpInst.addOperand(MCOperand::createReg(SecondLbuDstReg)); - TmpInst.addOperand(MCOperand::createReg(LbuSrcReg)); - TmpInst.addOperand(MCOperand::createImm(SecondLbuOffset)); - Instructions.push_back(TmpInst); - - TmpInst.clear(); - TmpInst.setOpcode(Mips::SLL); - TmpInst.addOperand(MCOperand::createReg(SllReg)); - TmpInst.addOperand(MCOperand::createReg(SllReg)); - TmpInst.addOperand(MCOperand::createImm(8)); - Instructions.push_back(TmpInst); - - TmpInst.clear(); - TmpInst.setOpcode(Mips::OR); - TmpInst.addOperand(MCOperand::createReg(DstReg)); - TmpInst.addOperand(MCOperand::createReg(DstReg)); - TmpInst.addOperand(MCOperand::createReg(ATReg)); - Instructions.push_back(TmpInst); + emitRRI(Signed ? Mips::LB : Mips::LBu, FirstLbuDstReg, LbuSrcReg, + FirstLbuOffset, IDLoc, Instructions); + + emitRRI(Mips::LBu, SecondLbuDstReg, LbuSrcReg, SecondLbuOffset, IDLoc, + Instructions); + + emitRRI(Mips::SLL, SllReg, SllReg, 8, IDLoc, Instructions); + + emitRRR(Mips::OR, DstReg, DstReg, ATReg, IDLoc, Instructions); return false; } @@ -2639,7 +3105,7 @@ bool MipsAsmParser::expandUlw(MCInst &Inst, SMLoc IDLoc, warnIfNoMacro(IDLoc); if (loadImmediate(OffsetValue, ATReg, Mips::NoRegister, !ABI.ArePtrs64bit(), - IDLoc, Instructions)) + true, IDLoc, Instructions)) return true; // NOTE: We do this (D)ADDu here instead of doing it in loadImmediate() @@ -2662,48 +3128,112 @@ bool MipsAsmParser::expandUlw(MCInst &Inst, SMLoc IDLoc, RightLoadOffset = LoadedOffsetInAT ? 
3 : (OffsetValue + 3); } - MCInst LeftLoadInst; - LeftLoadInst.setOpcode(Mips::LWL); - LeftLoadInst.addOperand(DstRegOp); - LeftLoadInst.addOperand(MCOperand::createReg(FinalSrcReg)); - LeftLoadInst.addOperand(MCOperand::createImm(LeftLoadOffset)); - Instructions.push_back(LeftLoadInst); + emitRRI(Mips::LWL, DstRegOp.getReg(), FinalSrcReg, LeftLoadOffset, IDLoc, + Instructions); - MCInst RightLoadInst; - RightLoadInst.setOpcode(Mips::LWR); - RightLoadInst.addOperand(DstRegOp); - RightLoadInst.addOperand(MCOperand::createReg(FinalSrcReg)); - RightLoadInst.addOperand(MCOperand::createImm(RightLoadOffset )); - Instructions.push_back(RightLoadInst); + emitRRI(Mips::LWR, DstRegOp.getReg(), FinalSrcReg, RightLoadOffset, IDLoc, + Instructions); return false; } +bool MipsAsmParser::expandAliasImmediate(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions) { + + assert (Inst.getNumOperands() == 3 && "Invalid operand count"); + assert (Inst.getOperand(0).isReg() && + Inst.getOperand(1).isReg() && + Inst.getOperand(2).isImm() && "Invalid instruction operand."); + + unsigned ATReg = Mips::NoRegister; + unsigned FinalDstReg = Mips::NoRegister; + unsigned DstReg = Inst.getOperand(0).getReg(); + unsigned SrcReg = Inst.getOperand(1).getReg(); + int64_t ImmValue = Inst.getOperand(2).getImm(); + + bool Is32Bit = isInt<32>(ImmValue) || isUInt<32>(ImmValue); + + unsigned FinalOpcode = Inst.getOpcode(); + + if (DstReg == SrcReg) { + ATReg = getATReg(Inst.getLoc()); + if (!ATReg) + return true; + FinalDstReg = DstReg; + DstReg = ATReg; + } + + if (!loadImmediate(ImmValue, DstReg, Mips::NoRegister, Is32Bit, false, Inst.getLoc(), Instructions)) { + switch (FinalOpcode) { + default: + llvm_unreachable("unimplemented expansion"); + case (Mips::ADDi): + FinalOpcode = Mips::ADD; + break; + case (Mips::ADDiu): + FinalOpcode = Mips::ADDu; + break; + case (Mips::ANDi): + FinalOpcode = Mips::AND; + break; + case (Mips::NORImm): + FinalOpcode = Mips::NOR; + break; + case (Mips::ORi): + FinalOpcode = Mips::OR; + break; + case (Mips::SLTi): + FinalOpcode = Mips::SLT; + break; + case (Mips::SLTiu): + FinalOpcode = Mips::SLTu; + break; + case (Mips::XORi): + FinalOpcode = Mips::XOR; + break; + } + + if (FinalDstReg == Mips::NoRegister) + emitRRR(FinalOpcode, DstReg, DstReg, SrcReg, IDLoc, Instructions); + else + emitRRR(FinalOpcode, FinalDstReg, FinalDstReg, DstReg, IDLoc, + Instructions); + return false; + } + return true; +} + void MipsAsmParser::createNop(bool hasShortDelaySlot, SMLoc IDLoc, SmallVectorImpl &Instructions) { - MCInst NopInst; - if (hasShortDelaySlot) { - NopInst.setOpcode(Mips::MOVE16_MM); - NopInst.addOperand(MCOperand::createReg(Mips::ZERO)); - NopInst.addOperand(MCOperand::createReg(Mips::ZERO)); - } else { - NopInst.setOpcode(Mips::SLL); - NopInst.addOperand(MCOperand::createReg(Mips::ZERO)); - NopInst.addOperand(MCOperand::createReg(Mips::ZERO)); - NopInst.addOperand(MCOperand::createImm(0)); - } - Instructions.push_back(NopInst); + if (hasShortDelaySlot) + emitRR(Mips::MOVE16_MM, Mips::ZERO, Mips::ZERO, IDLoc, Instructions); + else + emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, Instructions); } void MipsAsmParser::createAddu(unsigned DstReg, unsigned SrcReg, unsigned TrgReg, bool Is64Bit, SmallVectorImpl &Instructions) { - MCInst AdduInst; - AdduInst.setOpcode(Is64Bit ? 
Mips::DADDu : Mips::ADDu); - AdduInst.addOperand(MCOperand::createReg(DstReg)); - AdduInst.addOperand(MCOperand::createReg(SrcReg)); - AdduInst.addOperand(MCOperand::createReg(TrgReg)); - Instructions.push_back(AdduInst); + emitRRR(Is64Bit ? Mips::DADDu : Mips::ADDu, DstReg, SrcReg, TrgReg, SMLoc(), + Instructions); +} + +void MipsAsmParser::createCpRestoreMemOp( + bool IsLoad, int StackOffset, SMLoc IDLoc, + SmallVectorImpl &Instructions) { + // If the offset can not fit into 16 bits, we need to expand. + if (!isInt<16>(StackOffset)) { + MCInst MemInst; + MemInst.setOpcode(IsLoad ? Mips::LW : Mips::SW); + MemInst.addOperand(MCOperand::createReg(Mips::GP)); + MemInst.addOperand(MCOperand::createReg(Mips::SP)); + MemInst.addOperand(MCOperand::createImm(StackOffset)); + expandMemInst(MemInst, IDLoc, Instructions, IsLoad, true /*HasImmOpnd*/); + return; + } + + emitRRI(IsLoad ? Mips::LW : Mips::SW, Mips::GP, Mips::SP, StackOffset, IDLoc, + Instructions); } unsigned MipsAsmParser::checkTargetMatchPredicate(MCInst &Inst) { @@ -2718,6 +3248,17 @@ unsigned MipsAsmParser::checkTargetMatchPredicate(MCInst &Inst) { return Match_Success; } +static SMLoc RefineErrorLoc(const SMLoc Loc, const OperandVector &Operands, + uint64_t ErrorInfo) { + if (ErrorInfo != ~0ULL && ErrorInfo < Operands.size()) { + SMLoc ErrorLoc = Operands[ErrorInfo]->getStartLoc(); + if (ErrorLoc == SMLoc()) + return Loc; + return ErrorLoc; + } + return Loc; +} + bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, @@ -2746,7 +3287,7 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, if (ErrorInfo >= Operands.size()) return Error(IDLoc, "too few operands for instruction"); - ErrorLoc = ((MipsOperand &)*Operands[ErrorInfo]).getStartLoc(); + ErrorLoc = Operands[ErrorInfo]->getStartLoc(); if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; } @@ -2757,6 +3298,14 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, return Error(IDLoc, "invalid instruction"); case Match_RequiresDifferentSrcAndDst: return Error(IDLoc, "source and destination must be different"); + case Match_Immz: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected '0'"); + case Match_UImm2_0: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected 2-bit unsigned immediate"); + case Match_UImm2_1: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range 1 .. 4"); } llvm_unreachable("Implement any new match types added!"); @@ -3253,7 +3802,7 @@ MipsAsmParser::parseMemOperand(OperandVector &Operands) { const AsmToken &Tok = Parser.getTok(); // Get the next token. if (Tok.isNot(AsmToken::LParen)) { MipsOperand &Mnemonic = static_cast(*Operands[0]); - if (Mnemonic.getToken() == "la") { + if (Mnemonic.getToken() == "la" || Mnemonic.getToken() == "dla") { SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); Operands.push_back(MipsOperand::CreateImm(IdVal, S, E, *this)); @@ -4355,6 +4904,14 @@ bool MipsAsmParser::eatComma(StringRef ErrorStr) { return true; } +// Used to determine if .cpload, .cprestore, and .cpsetup have any effect. +// In this class, it is only used for .cprestore. +// FIXME: Only keep track of IsPicEnabled in one place, instead of in both +// MipsTargetELFStreamer and MipsAsmParser. 
+bool MipsAsmParser::isPicAndNotNxxAbi() { + return inPicMode() && !(isABI_N32() || isABI_N64()); +} + bool MipsAsmParser::parseDirectiveCpLoad(SMLoc Loc) { if (AssemblerOptions.back()->isReorder()) Warning(Loc, ".cpload should be inside a noreorder section"); @@ -4387,6 +4944,54 @@ bool MipsAsmParser::parseDirectiveCpLoad(SMLoc Loc) { return false; } +bool MipsAsmParser::parseDirectiveCpRestore(SMLoc Loc) { + MCAsmParser &Parser = getParser(); + + // Note that .cprestore is ignored if used with the N32 and N64 ABIs or if it + // is used in non-PIC mode. + + if (inMips16Mode()) { + reportParseError(".cprestore is not supported in Mips16 mode"); + return false; + } + + // Get the stack offset value. + const MCExpr *StackOffset; + int64_t StackOffsetVal; + if (Parser.parseExpression(StackOffset)) { + reportParseError("expected stack offset value"); + return false; + } + + if (!StackOffset->evaluateAsAbsolute(StackOffsetVal)) { + reportParseError("stack offset is not an absolute expression"); + return false; + } + + if (StackOffsetVal < 0) { + Warning(Loc, ".cprestore with negative stack offset has no effect"); + IsCpRestoreSet = false; + } else { + IsCpRestoreSet = true; + CpRestoreOffset = StackOffsetVal; + } + + // If this is not the end of the statement, report an error. + if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("unexpected token, expected end of statement"); + return false; + } + + // Store the $gp on the stack. + SmallVector StoreInsts; + createCpRestoreMemOp(false /*IsLoad*/, CpRestoreOffset /*StackOffset*/, Loc, + StoreInsts); + + getTargetStreamer().emitDirectiveCpRestore(StoreInsts, CpRestoreOffset); + Parser.Lex(); // Consume the EndOfStatement. + return false; +} + bool MipsAsmParser::parseDirectiveCPSetup() { MCAsmParser &Parser = getParser(); unsigned FuncReg; @@ -4416,16 +5021,19 @@ bool MipsAsmParser::parseDirectiveCPSetup() { ResTy = parseAnyRegister(TmpReg); if (ResTy == MatchOperand_NoMatch) { - const AsmToken &Tok = Parser.getTok(); - if (Tok.is(AsmToken::Integer)) { - Save = Tok.getIntVal(); - SaveIsReg = false; - Parser.Lex(); - } else { - reportParseError("expected save register or stack offset"); + const MCExpr *OffsetExpr; + int64_t OffsetVal; + SMLoc ExprLoc = getLexer().getLoc(); + + if (Parser.parseExpression(OffsetExpr) || + !OffsetExpr->evaluateAsAbsolute(OffsetVal)) { + reportParseError(ExprLoc, "expected save register or stack offset"); Parser.eatToEndOfStatement(); return false; } + + Save = OffsetVal; + SaveIsReg = false; } else { MipsOperand &SaveOpnd = static_cast(*TmpReg[0]); if (!SaveOpnd.isGPRAsmReg()) { @@ -4451,11 +5059,20 @@ bool MipsAsmParser::parseDirectiveCPSetup() { } const MCSymbolRefExpr *Ref = static_cast(Expr); + CpSaveLocation = Save; + CpSaveLocationIsRegister = SaveIsReg; + getTargetStreamer().emitDirectiveCpsetup(FuncReg, Save, Ref->getSymbol(), SaveIsReg); return false; } +bool MipsAsmParser::parseDirectiveCPReturn() { + getTargetStreamer().emitDirectiveCpreturn(CpSaveLocation, + CpSaveLocationIsRegister); + return false; +} + bool MipsAsmParser::parseDirectiveNaN() { MCAsmParser &Parser = getParser(); if (getLexer().isNot(AsmToken::EndOfStatement)) { @@ -4644,6 +5261,9 @@ bool MipsAsmParser::parseDirectiveOption() { StringRef Option = Tok.getIdentifier(); if (Option == "pic0") { + // MipsAsmParser needs to know if the current PIC mode changes. 
+ IsPicEnabled = false; + getTargetStreamer().emitDirectiveOptionPic0(); Parser.Lex(); if (Parser.getTok().isNot(AsmToken::EndOfStatement)) { @@ -4655,6 +5275,9 @@ bool MipsAsmParser::parseDirectiveOption() { } if (Option == "pic2") { + // MipsAsmParser needs to know if the current PIC mode changes. + IsPicEnabled = true; + getTargetStreamer().emitDirectiveOptionPic2(); Parser.Lex(); if (Parser.getTok().isNot(AsmToken::EndOfStatement)) { @@ -4693,6 +5316,8 @@ bool MipsAsmParser::parseInsnDirective() { /// ::= .module oddspreg /// ::= .module nooddspreg /// ::= .module fp=value +/// ::= .module softfloat +/// ::= .module hardfloat bool MipsAsmParser::parseDirectiveModule() { MCAsmParser &Parser = getParser(); MCAsmLexer &Lexer = getLexer(); @@ -4711,7 +5336,7 @@ bool MipsAsmParser::parseDirectiveModule() { } if (Option == "oddspreg") { - clearFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg"); + clearModuleFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg"); // Synchronize the abiflags information with the FeatureBits information we // changed above. @@ -4735,7 +5360,7 @@ bool MipsAsmParser::parseDirectiveModule() { return false; } - setFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg"); + setModuleFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg"); // Synchronize the abiflags information with the FeatureBits information we // changed above. @@ -4755,6 +5380,44 @@ bool MipsAsmParser::parseDirectiveModule() { return false; // parseDirectiveModule has finished successfully. } else if (Option == "fp") { return parseDirectiveModuleFP(); + } else if (Option == "softfloat") { + setModuleFeatureBits(Mips::FeatureSoftFloat, "soft-float"); + + // Synchronize the ABI Flags information with the FeatureBits information we + // updated above. + getTargetStreamer().updateABIInfo(*this); + + // If printing assembly, use the recently updated ABI Flags information. + // If generating ELF, don't do anything (the .MIPS.abiflags section gets + // emitted later). + getTargetStreamer().emitDirectiveModuleSoftFloat(); + + // If this is not the end of the statement, report an error. + if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("unexpected token, expected end of statement"); + return false; + } + + return false; // parseDirectiveModule has finished successfully. + } else if (Option == "hardfloat") { + clearModuleFeatureBits(Mips::FeatureSoftFloat, "soft-float"); + + // Synchronize the ABI Flags information with the FeatureBits information we + // updated above. + getTargetStreamer().updateABIInfo(*this); + + // If printing assembly, use the recently updated ABI Flags information. + // If generating ELF, don't do anything (the .MIPS.abiflags section gets + // emitted later). + getTargetStreamer().emitDirectiveModuleHardFloat(); + + // If this is not the end of the statement, report an error. + if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("unexpected token, expected end of statement"); + return false; + } + + return false; // parseDirectiveModule has finished successfully. 
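The softfloat/hardfloat handling goes through setModuleFeatureBits/clearModuleFeatureBits rather than the plain setFeatureBits/clearFeatureBits, presumably because ".module" options change the module-level baseline that later ".set mips0"-style resets restore, while ".set" options only affect the current state. A rough model of that distinction with invented names (FeatureState, resetToModuleLevel); it is not the parser's actual bookkeeping:

#include <bitset>
#include <cstdio>

using Features = std::bitset<8>;
constexpr size_t SoftFloatBit = 0; // stand-in for a soft-float feature flag

struct FeatureState {
  Features Active; // what the assembler currently uses
  Features Module; // baseline restored by ".set mips0"-style resets

  void setFeature(size_t Bit) { Active.set(Bit); }  // ".set ..." scope only
  void setModuleFeature(size_t Bit) {               // ".module ..." scope
    Active.set(Bit);
    Module.set(Bit);
  }
  void resetToModuleLevel() { Active = Module; }
};

int main() {
  FeatureState S;
  S.setModuleFeature(SoftFloatBit); // like ".module softfloat"
  S.resetToModuleLevel();           // like a later ".set mips0"
  printf("soft-float survives the reset: %d\n", S.Active.test(SoftFloatBit) ? 1 : 0);
  return 0;
}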
} else { return Error(L, "'" + Twine(Option) + "' is not a valid .module option."); } @@ -4800,6 +5463,7 @@ bool MipsAsmParser::parseFpABIValue(MipsABIFlagsSection::FpABIKind &FpABI, StringRef Directive) { MCAsmParser &Parser = getParser(); MCAsmLexer &Lexer = getLexer(); + bool ModuleLevelOptions = Directive == ".module"; if (Lexer.is(AsmToken::Identifier)) { StringRef Value = Parser.getTok().getString(); @@ -4816,8 +5480,13 @@ bool MipsAsmParser::parseFpABIValue(MipsABIFlagsSection::FpABIKind &FpABI, } FpABI = MipsABIFlagsSection::FpABIKind::XX; - setFeatureBits(Mips::FeatureFPXX, "fpxx"); - clearFeatureBits(Mips::FeatureFP64Bit, "fp64"); + if (ModuleLevelOptions) { + setModuleFeatureBits(Mips::FeatureFPXX, "fpxx"); + clearModuleFeatureBits(Mips::FeatureFP64Bit, "fp64"); + } else { + setFeatureBits(Mips::FeatureFPXX, "fpxx"); + clearFeatureBits(Mips::FeatureFP64Bit, "fp64"); + } return true; } @@ -4837,12 +5506,22 @@ bool MipsAsmParser::parseFpABIValue(MipsABIFlagsSection::FpABIKind &FpABI, } FpABI = MipsABIFlagsSection::FpABIKind::S32; - clearFeatureBits(Mips::FeatureFPXX, "fpxx"); - clearFeatureBits(Mips::FeatureFP64Bit, "fp64"); + if (ModuleLevelOptions) { + clearModuleFeatureBits(Mips::FeatureFPXX, "fpxx"); + clearModuleFeatureBits(Mips::FeatureFP64Bit, "fp64"); + } else { + clearFeatureBits(Mips::FeatureFPXX, "fpxx"); + clearFeatureBits(Mips::FeatureFP64Bit, "fp64"); + } } else { FpABI = MipsABIFlagsSection::FpABIKind::S64; - clearFeatureBits(Mips::FeatureFPXX, "fpxx"); - setFeatureBits(Mips::FeatureFP64Bit, "fp64"); + if (ModuleLevelOptions) { + clearModuleFeatureBits(Mips::FeatureFPXX, "fpxx"); + setModuleFeatureBits(Mips::FeatureFP64Bit, "fp64"); + } else { + clearFeatureBits(Mips::FeatureFPXX, "fpxx"); + setFeatureBits(Mips::FeatureFP64Bit, "fp64"); + } } return true; @@ -4857,6 +5536,8 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) { if (IDVal == ".cpload") return parseDirectiveCpLoad(DirectiveID.getLoc()); + if (IDVal == ".cprestore") + return parseDirectiveCpRestore(DirectiveID.getLoc()); if (IDVal == ".dword") { parseDataDirective(8, DirectiveID.getLoc()); return false; @@ -4907,6 +5588,7 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) { getTargetStreamer().emitDirectiveEnt(*Sym); CurrentFn = Sym; + IsCpRestoreSet = false; return false; } @@ -4935,6 +5617,7 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) { getTargetStreamer().emitDirectiveEnd(SymbolName); CurrentFn = nullptr; + IsCpRestoreSet = false; return false; } @@ -5006,6 +5689,7 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) { getTargetStreamer().emitFrame(StackReg, FrameSizeVal, ReturnRegOpnd.getGPR32Reg()); + IsCpRestoreSet = false; return false; } @@ -5106,6 +5790,9 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) { if (IDVal == ".cpsetup") return parseDirectiveCPSetup(); + if (IDVal == ".cpreturn") + return parseDirectiveCPReturn(); + if (IDVal == ".module") return parseDirectiveModule();
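The expandAliasImmediate() hook introduced earlier handles I-format aliases whose immediate is too wide for the 16-bit encoding by materialising the constant first and then switching to the R-format opcode (ADDiu becomes ADDu, ANDi becomes AND, and so on). A sketch of that decision for one hypothetical case; the register numbers and constant are illustrative, and the exact materialisation sequence depends on loadImmediate:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical alias: addiu $4, $5, 0x123456. The constant is outside the
  // signed 16-bit range of ADDiu, so the macro form loads it first and then
  // uses the three-register ADDu, roughly:
  //   lui  $4, 0x12
  //   ori  $4, $4, 0x3456
  //   addu $4, $4, $5
  int64_t Imm = 0x123456;
  bool FitsSigned16 = Imm >= INT16_MIN && Imm <= INT16_MAX;
  puts(FitsSigned16 ? "encode addiu directly"
                    : "load the immediate into a temporary, then emit addu");
  return 0;
}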