SmallVectorImpl<MCInst> &Instructions);
bool loadImmediate(int64_t ImmValue, unsigned DstReg, unsigned SrcReg,
- bool Is32BitImm, SMLoc IDLoc,
+ bool Is32BitImm, bool IsAddress, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
bool loadAndAddSymbolAddress(const MCExpr *SymExpr, unsigned DstReg,
bool expandLoadImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
- bool expandLoadAddressImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool expandLoadAddress(unsigned DstReg, unsigned BaseReg,
+ const MCOperand &Offset, bool Is32BitAddress,
+ SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions);
- bool expandLoadAddressReg(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
bool expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
-
+
// Remember the initial assembler options. The user can not modify these.
AssemblerOptions.push_back(
llvm::make_unique<MipsAssemblerOptions>(STI.getFeatureBits()));
-
+
// Create an assembler options environment for the user to modify.
AssemblerOptions.push_back(
llvm::make_unique<MipsAssemblerOptions>(STI.getFeatureBits()));
void addMemOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(getMemBase()->getGPR32Reg()));
+ Inst.addOperand(MCOperand::createReg(AsmParser.getABI().ArePtrs64bit()
+ ? getMemBase()->getGPR64Reg()
+ : getMemBase()->getGPR32Reg()));
const MCExpr *Expr = getMemOff();
addExpr(Inst, Expr);
case Mips::LoadImm32:
case Mips::LoadImm64:
case Mips::LoadAddrImm32:
+ case Mips::LoadAddrImm64:
case Mips::LoadAddrReg32:
+ case Mips::LoadAddrReg64:
case Mips::B_MM_Pseudo:
case Mips::LWM_MM:
case Mips::SWM_MM:
case Mips::LoadImm64:
return expandLoadImm(Inst, false, IDLoc, Instructions);
case Mips::LoadAddrImm32:
- return expandLoadAddressImm(Inst, true, IDLoc, Instructions);
+ case Mips::LoadAddrImm64:
+ assert(Inst.getOperand(0).isReg() && "expected register operand kind");
+ assert((Inst.getOperand(1).isImm() || Inst.getOperand(1).isExpr()) &&
+ "expected immediate operand kind");
+
+ return expandLoadAddress(
+ Inst.getOperand(0).getReg(), Mips::NoRegister, Inst.getOperand(1),
+ Inst.getOpcode() == Mips::LoadAddrImm32, IDLoc, Instructions);
case Mips::LoadAddrReg32:
- return expandLoadAddressReg(Inst, true, IDLoc, Instructions);
+ case Mips::LoadAddrReg64:
+ assert(Inst.getOperand(0).isReg() && "expected register operand kind");
+ assert(Inst.getOperand(1).isReg() && "expected register operand kind");
+ assert((Inst.getOperand(2).isImm() || Inst.getOperand(2).isExpr()) &&
+ "expected immediate operand kind");
+
+ return expandLoadAddress(
+ Inst.getOperand(0).getReg(), Inst.getOperand(1).getReg(), Inst.getOperand(2),
+ Inst.getOpcode() == Mips::LoadAddrReg32, IDLoc, Instructions);
case Mips::B_MM_Pseudo:
return expandUncondBranchMMPseudo(Inst, IDLoc, Instructions);
case Mips::SWM_MM:
}
namespace {
-void emitRX(unsigned Opcode, unsigned DstReg, MCOperand Imm, SMLoc IDLoc,
+void emitRX(unsigned Opcode, unsigned Reg0, MCOperand Op1, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
MCInst tmpInst;
tmpInst.setOpcode(Opcode);
- tmpInst.addOperand(MCOperand::createReg(DstReg));
- tmpInst.addOperand(Imm);
+ tmpInst.addOperand(MCOperand::createReg(Reg0));
+ tmpInst.addOperand(Op1);
tmpInst.setLoc(IDLoc);
Instructions.push_back(tmpInst);
}
-void emitRI(unsigned Opcode, unsigned DstReg, int16_t Imm, SMLoc IDLoc,
+void emitRI(unsigned Opcode, unsigned Reg0, int16_t Imm, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
- emitRX(Opcode, DstReg, MCOperand::createImm(Imm), IDLoc, Instructions);
+ emitRX(Opcode, Reg0, MCOperand::createImm(Imm), IDLoc, Instructions);
}
-
-void emitRRX(unsigned Opcode, unsigned DstReg, unsigned SrcReg, MCOperand Imm,
+void emitRRX(unsigned Opcode, unsigned Reg0, unsigned Reg1, MCOperand Op2,
SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
MCInst tmpInst;
tmpInst.setOpcode(Opcode);
- tmpInst.addOperand(MCOperand::createReg(DstReg));
- tmpInst.addOperand(MCOperand::createReg(SrcReg));
- tmpInst.addOperand(Imm);
+ tmpInst.addOperand(MCOperand::createReg(Reg0));
+ tmpInst.addOperand(MCOperand::createReg(Reg1));
+ tmpInst.addOperand(Op2);
tmpInst.setLoc(IDLoc);
Instructions.push_back(tmpInst);
}
-void emitRRR(unsigned Opcode, unsigned DstReg, unsigned SrcReg,
- unsigned SrcReg2, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- emitRRX(Opcode, DstReg, SrcReg, MCOperand::createReg(SrcReg2), IDLoc,
+void emitRRR(unsigned Opcode, unsigned Reg0, unsigned Reg1, unsigned Reg2,
+ SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
+ emitRRX(Opcode, Reg0, Reg1, MCOperand::createReg(Reg2), IDLoc,
Instructions);
}
-void emitRRI(unsigned Opcode, unsigned DstReg, unsigned SrcReg, int16_t Imm,
+void emitRRI(unsigned Opcode, unsigned Reg0, unsigned Reg1, int16_t Imm,
SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
- emitRRX(Opcode, DstReg, SrcReg, MCOperand::createImm(Imm), IDLoc,
+ emitRRX(Opcode, Reg0, Reg1, MCOperand::createImm(Imm), IDLoc,
Instructions);
}
-template <int16_t ShiftAmount>
-void createLShiftOri(MCOperand Operand, unsigned RegNo, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- if (ShiftAmount >= 32)
- emitRRI(Mips::DSLL32, RegNo, RegNo, ShiftAmount - 32, IDLoc, Instructions);
- else if (ShiftAmount > 0)
- emitRRI(Mips::DSLL, RegNo, RegNo, ShiftAmount, IDLoc, Instructions);
-
- // There's no need for an ORi if the immediate is 0.
- if (Operand.isImm() && Operand.getImm() == 0)
+void emitAppropriateDSLL(unsigned DstReg, unsigned SrcReg, int16_t ShiftAmount,
+ SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
+ if (ShiftAmount >= 32) {
+ emitRRI(Mips::DSLL32, DstReg, SrcReg, ShiftAmount - 32, IDLoc,
+ Instructions);
return;
+ }
- emitRRX(Mips::ORi, RegNo, RegNo, Operand, IDLoc, Instructions);
-}
-
-template <unsigned ShiftAmount>
-void createLShiftOri(int64_t Value, unsigned RegNo, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- createLShiftOri<ShiftAmount>(MCOperand::createImm(Value), RegNo, IDLoc,
- Instructions);
-}
+ emitRRI(Mips::DSLL, DstReg, SrcReg, ShiftAmount, IDLoc, Instructions);
}
+} // end anonymous namespace.
bool MipsAsmParser::expandJalWithRegs(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
return false;
}
+/// Can the value be represented by an unsigned N-bit value and a shift left?
+template<unsigned N>
+bool isShiftedUIntAtAnyPosition(uint64_t x) {
+ unsigned BitNum = findFirstSet(x);
+
+ return (x == x >> BitNum << BitNum) && isUInt<N>(x >> BitNum);
+}
+
+/// Load (or add) an immediate into a register.
+///
+/// @param ImmValue The immediate to load.
+/// @param DstReg The register that will hold the immediate.
+/// @param SrcReg A register to add to the immediate or Mips::NoRegister
+/// for a simple initialization.
+/// @param Is32BitImm Is ImmValue 32-bit or 64-bit?
+/// @param IsAddress True if the immediate represents an address. False if it
+/// is an integer.
+/// @param IDLoc Location of the immediate in the source file.
+/// @param Instructions The instructions emitted by this expansion.
bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
- unsigned SrcReg, bool Is32BitImm, SMLoc IDLoc,
+ unsigned SrcReg, bool Is32BitImm,
+ bool IsAddress, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
if (!Is32BitImm && !isGP64bit()) {
Error(IDLoc, "instruction requires a 64-bit architecture");
}
}
+ unsigned ZeroReg = IsAddress ? ABI.GetNullPtr() : ABI.GetZeroReg();
+ unsigned AdduOp = !Is32BitImm ? Mips::DADDu : Mips::ADDu;
+
bool UseSrcReg = false;
if (SrcReg != Mips::NoRegister)
UseSrcReg = true;
TmpReg = ATReg;
}
- // FIXME: gas has a special case for values that are 000...1111, which
- // becomes a li -1 and then a dsrl
if (isInt<16>(ImmValue)) {
- // li d,j => addiu d,$zero,j
if (!UseSrcReg)
- SrcReg = Mips::ZERO;
+ SrcReg = ZeroReg;
+
+ // This doesn't quite follow the usual ABI expectations for N32 but matches
+ // traditional assembler behaviour. N32 would normally use addiu for both
+ // integers and addresses.
+ if (IsAddress && !Is32BitImm) {
+ emitRRI(Mips::DADDiu, DstReg, SrcReg, ImmValue, IDLoc, Instructions);
+ return false;
+ }
+
emitRRI(Mips::ADDiu, DstReg, SrcReg, ImmValue, IDLoc, Instructions);
- } else if (isUInt<16>(ImmValue)) {
- // li d,j => ori d,$zero,j
+ return false;
+ }
+
+ if (isUInt<16>(ImmValue)) {
unsigned TmpReg = DstReg;
if (SrcReg == DstReg) {
- unsigned ATReg = getATReg(IDLoc);
- if (!ATReg)
+ TmpReg = getATReg(IDLoc);
+ if (!TmpReg)
return true;
- TmpReg = ATReg;
}
- emitRRI(Mips::ORi, TmpReg, Mips::ZERO, ImmValue, IDLoc, Instructions);
+ emitRRI(Mips::ORi, TmpReg, ZeroReg, ImmValue, IDLoc, Instructions);
if (UseSrcReg)
- emitRRR(Mips::ADDu, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
- } else if (isInt<32>(ImmValue) || isUInt<32>(ImmValue)) {
+ emitRRR(ABI.GetPtrAdduOp(), DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ return false;
+ }
+
+ if (isInt<32>(ImmValue) || isUInt<32>(ImmValue)) {
warnIfNoMacro(IDLoc);
- // For all other values which are representable as a 32-bit integer:
- // li d,j => lui d,hi16(j)
- // ori d,d,lo16(j)
uint16_t Bits31To16 = (ImmValue >> 16) & 0xffff;
uint16_t Bits15To0 = ImmValue & 0xffff;
if (!Is32BitImm && !isInt<32>(ImmValue)) {
- // For DLI, expand to an ORi instead of a LUi to avoid sign-extending the
+ // Traditional behaviour seems to special case this particular value. It's
+ // not clear why other masks are handled differently.
+ if (ImmValue == 0xffffffff) {
+ emitRI(Mips::LUi, TmpReg, 0xffff, IDLoc, Instructions);
+ emitRRI(Mips::DSRL32, TmpReg, TmpReg, 0, IDLoc, Instructions);
+ if (UseSrcReg)
+ emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ return false;
+ }
+
+ // Expand to an ORi instead of a LUi to avoid sign-extending into the
// upper 32 bits.
- emitRRI(Mips::ORi, TmpReg, Mips::ZERO, Bits31To16, IDLoc, Instructions);
+ emitRRI(Mips::ORi, TmpReg, ZeroReg, Bits31To16, IDLoc, Instructions);
emitRRI(Mips::DSLL, TmpReg, TmpReg, 16, IDLoc, Instructions);
- } else
- emitRI(Mips::LUi, TmpReg, Bits31To16, IDLoc, Instructions);
- createLShiftOri<0>(Bits15To0, TmpReg, IDLoc, Instructions);
+ if (Bits15To0)
+ emitRRI(Mips::ORi, TmpReg, TmpReg, Bits15To0, IDLoc, Instructions);
+ if (UseSrcReg)
+ emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ return false;
+ }
+ emitRI(Mips::LUi, TmpReg, Bits31To16, IDLoc, Instructions);
+ if (Bits15To0)
+ emitRRI(Mips::ORi, TmpReg, TmpReg, Bits15To0, IDLoc, Instructions);
if (UseSrcReg)
- createAddu(DstReg, TmpReg, SrcReg, !Is32BitImm, Instructions);
-
- } else if ((ImmValue & (0xffffLL << 48)) == 0) {
- warnIfNoMacro(IDLoc);
+ emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ return false;
+ }
- // <------- lo32 ------>
- // <------- hi32 ------>
- // <- hi16 -> <- lo16 ->
- // _________________________________
- // | | | |
- // | 16-bits | 16-bits | 16-bits |
- // |__________|__________|__________|
- //
- // For any 64-bit value that is representable as a 48-bit integer:
- // li d,j => lui d,hi16(j)
- // ori d,d,hi16(lo32(j))
- // dsll d,d,16
- // ori d,d,lo16(lo32(j))
- uint16_t Bits47To32 = (ImmValue >> 32) & 0xffff;
- uint16_t Bits31To16 = (ImmValue >> 16) & 0xffff;
- uint16_t Bits15To0 = ImmValue & 0xffff;
+ if (isShiftedUIntAtAnyPosition<16>(ImmValue)) {
+ if (Is32BitImm) {
+ Error(IDLoc, "instruction requires a 32-bit immediate");
+ return true;
+ }
- emitRI(Mips::LUi, TmpReg, Bits47To32, IDLoc, Instructions);
- createLShiftOri<0>(Bits31To16, TmpReg, IDLoc, Instructions);
- createLShiftOri<16>(Bits15To0, TmpReg, IDLoc, Instructions);
+ // Traditionally, these immediates are shifted as little as possible and as
+ // such we align the most significant bit to bit 15 of our temporary.
+ unsigned FirstSet = findFirstSet((uint64_t)ImmValue);
+ unsigned LastSet = findLastSet((uint64_t)ImmValue);
+ unsigned ShiftAmount = FirstSet - (15 - (LastSet - FirstSet));
+ uint16_t Bits = (ImmValue >> ShiftAmount) & 0xffff;
+ emitRRI(Mips::ORi, TmpReg, ZeroReg, Bits, IDLoc, Instructions);
+ emitRRI(Mips::DSLL, TmpReg, TmpReg, ShiftAmount, IDLoc, Instructions);
if (UseSrcReg)
- createAddu(DstReg, TmpReg, SrcReg, !Is32BitImm, Instructions);
+ emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
- } else {
- warnIfNoMacro(IDLoc);
+ return false;
+ }
- // <------- hi32 ------> <------- lo32 ------>
- // <- hi16 -> <- lo16 ->
- // ___________________________________________
- // | | | | |
- // | 16-bits | 16-bits | 16-bits | 16-bits |
- // |__________|__________|__________|__________|
- //
- // For all other values which are representable as a 64-bit integer:
- // li d,j => lui d,hi16(j)
- // ori d,d,lo16(hi32(j))
- // dsll d,d,16
- // ori d,d,hi16(lo32(j))
- // dsll d,d,16
- // ori d,d,lo16(lo32(j))
- uint16_t Bits63To48 = (ImmValue >> 48) & 0xffff;
- uint16_t Bits47To32 = (ImmValue >> 32) & 0xffff;
- uint16_t Bits31To16 = (ImmValue >> 16) & 0xffff;
- uint16_t Bits15To0 = ImmValue & 0xffff;
+ warnIfNoMacro(IDLoc);
- emitRI(Mips::LUi, TmpReg, Bits63To48, IDLoc, Instructions);
- createLShiftOri<0>(Bits47To32, TmpReg, IDLoc, Instructions);
+ // The remaining case is packed with a sequence of dsll and ori with zeros
+ // being omitted and any neighbouring dsll's being coalesced.
+  // The highest 32 bits are equivalent to a 32-bit immediate load.
- // When Bits31To16 is 0, do a left shift of 32 bits instead of doing
- // two left shifts of 16 bits.
- if (Bits31To16 == 0) {
- createLShiftOri<32>(Bits15To0, TmpReg, IDLoc, Instructions);
- } else {
- createLShiftOri<16>(Bits31To16, TmpReg, IDLoc, Instructions);
- createLShiftOri<16>(Bits15To0, TmpReg, IDLoc, Instructions);
+ // Load bits 32-63 of ImmValue into bits 0-31 of the temporary register.
+ if (loadImmediate(ImmValue >> 32, TmpReg, Mips::NoRegister, true, false,
+ IDLoc, Instructions))
+ return false;
+
+ // Shift and accumulate into the register. If a 16-bit chunk is zero, then
+ // skip it and defer the shift to the next chunk.
+ unsigned ShiftCarriedForwards = 16;
+ for (int BitNum = 16; BitNum >= 0; BitNum -= 16) {
+ uint16_t ImmChunk = (ImmValue >> BitNum) & 0xffff;
+
+ if (ImmChunk != 0) {
+ emitAppropriateDSLL(TmpReg, TmpReg, ShiftCarriedForwards, IDLoc,
+ Instructions);
+ emitRRI(Mips::ORi, TmpReg, TmpReg, ImmChunk, IDLoc, Instructions);
+ ShiftCarriedForwards = 0;
}
- if (UseSrcReg)
- createAddu(DstReg, TmpReg, SrcReg, !Is32BitImm, Instructions);
+ ShiftCarriedForwards += 16;
}
+ ShiftCarriedForwards -= 16;
+
+ // Finish any remaining shifts left by trailing zeros.
+ if (ShiftCarriedForwards)
+ emitAppropriateDSLL(TmpReg, TmpReg, ShiftCarriedForwards, IDLoc,
+ Instructions);
+
+ if (UseSrcReg)
+ emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+
return false;
}
assert(DstRegOp.isReg() && "expected register operand kind");
if (loadImmediate(ImmOp.getImm(), DstRegOp.getReg(), Mips::NoRegister,
- Is32BitImm, IDLoc, Instructions))
+ Is32BitImm, false, IDLoc, Instructions))
return true;
return false;
}
-bool
-MipsAsmParser::expandLoadAddressReg(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- const MCOperand &DstRegOp = Inst.getOperand(0);
- assert(DstRegOp.isReg() && "expected register operand kind");
-
- const MCOperand &SrcRegOp = Inst.getOperand(1);
- assert(SrcRegOp.isReg() && "expected register operand kind");
-
- const MCOperand &ImmOp = Inst.getOperand(2);
- assert((ImmOp.isImm() || ImmOp.isExpr()) &&
- "expected immediate operand kind");
- if (!ImmOp.isImm()) {
- if (loadAndAddSymbolAddress(ImmOp.getExpr(), DstRegOp.getReg(),
- SrcRegOp.getReg(), Is32BitImm, IDLoc,
- Instructions))
- return true;
-
- return false;
- }
-
- if (loadImmediate(ImmOp.getImm(), DstRegOp.getReg(), SrcRegOp.getReg(),
- Is32BitImm, IDLoc, Instructions))
+bool MipsAsmParser::expandLoadAddress(unsigned DstReg, unsigned BaseReg,
+ const MCOperand &Offset,
+ bool Is32BitAddress, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
+ // la can't produce a usable address when addresses are 64-bit.
+ if (Is32BitAddress && ABI.ArePtrs64bit()) {
+ // FIXME: Demote this to a warning and continue as if we had 'dla' instead.
+ // We currently can't do this because we depend on the equality
+ // operator and N64 can end up with a GPR32/GPR64 mismatch.
+ Error(IDLoc, "la used to load 64-bit address");
+ // Continue as if we had 'dla' instead.
+ Is32BitAddress = false;
+ }
+
+ // dla requires 64-bit addresses.
+ if (!Is32BitAddress && !ABI.ArePtrs64bit()) {
+ Error(IDLoc, "instruction requires a 64-bit architecture");
return true;
-
- return false;
-}
-
-bool
-MipsAsmParser::expandLoadAddressImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- const MCOperand &DstRegOp = Inst.getOperand(0);
- assert(DstRegOp.isReg() && "expected register operand kind");
-
- const MCOperand &ImmOp = Inst.getOperand(1);
- assert((ImmOp.isImm() || ImmOp.isExpr()) &&
- "expected immediate operand kind");
- if (!ImmOp.isImm()) {
- if (loadAndAddSymbolAddress(ImmOp.getExpr(), DstRegOp.getReg(),
- Mips::NoRegister, Is32BitImm, IDLoc,
- Instructions))
- return true;
-
- return false;
}
- if (loadImmediate(ImmOp.getImm(), DstRegOp.getReg(), Mips::NoRegister,
- Is32BitImm, IDLoc, Instructions))
- return true;
+ if (!Offset.isImm())
+ return loadAndAddSymbolAddress(Offset.getExpr(), DstReg, BaseReg,
+ Is32BitAddress, IDLoc, Instructions);
- return false;
+ return loadImmediate(Offset.getImm(), DstReg, BaseReg, Is32BitAddress, true,
+ IDLoc, Instructions);
}
bool MipsAsmParser::loadAndAddSymbolAddress(
SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
warnIfNoMacro(IDLoc);
- if (Is32BitSym && isABI_N64())
- Warning(IDLoc, "instruction loads the 32-bit address of a 64-bit symbol");
-
- MCInst tmpInst;
+ // FIXME: The way we're handling symbols right now prevents simple expressions
+ // like foo+8. We'll be able to fix this once our unary operators (%hi
+ // and similar) are treated as operators rather than as fixup types.
const MCSymbolRefExpr *Symbol = cast<MCSymbolRefExpr>(SymExpr);
const MCSymbolRefExpr *HiExpr = MCSymbolRefExpr::create(
&Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_ABS_HI, getContext());
bool UseSrcReg = SrcReg != Mips::NoRegister;
- unsigned TmpReg = DstReg;
- if (UseSrcReg && (DstReg == SrcReg)) {
- // At this point we need AT to perform the expansions and we exit if it is
- // not available.
+ // This is the 64-bit symbol address expansion.
+ if (ABI.ArePtrs64bit() && isGP64bit()) {
+ // We always need AT for the 64-bit expansion.
+ // If it is not available we exit.
unsigned ATReg = getATReg(IDLoc);
if (!ATReg)
return true;
- TmpReg = ATReg;
- }
- if (!Is32BitSym) {
- // If it's a 64-bit architecture, expand to:
- // la d,sym => lui d,highest(sym)
- // ori d,d,higher(sym)
- // dsll d,d,16
- // ori d,d,hi16(sym)
- // dsll d,d,16
- // ori d,d,lo16(sym)
const MCSymbolRefExpr *HighestExpr = MCSymbolRefExpr::create(
&Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_HIGHEST, getContext());
const MCSymbolRefExpr *HigherExpr = MCSymbolRefExpr::create(
&Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_HIGHER, getContext());
- tmpInst.setOpcode(Mips::LUi);
- tmpInst.addOperand(MCOperand::createReg(TmpReg));
- tmpInst.addOperand(MCOperand::createExpr(HighestExpr));
- Instructions.push_back(tmpInst);
+ if (UseSrcReg && (DstReg == SrcReg)) {
+ // If $rs is the same as $rd:
+ // (d)la $rd, sym($rd) => lui $at, %highest(sym)
+ // daddiu $at, $at, %higher(sym)
+ // dsll $at, $at, 16
+ // daddiu $at, $at, %hi(sym)
+ // dsll $at, $at, 16
+ // daddiu $at, $at, %lo(sym)
+ // daddu $rd, $at, $rd
+ emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HighestExpr), IDLoc,
+ Instructions);
+ emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(HigherExpr),
+ IDLoc, Instructions);
+ emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, Instructions);
+ emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(HiExpr), IDLoc,
+ Instructions);
+ emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, Instructions);
+ emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(LoExpr), IDLoc,
+ Instructions);
+ emitRRR(Mips::DADDu, DstReg, ATReg, SrcReg, IDLoc, Instructions);
- createLShiftOri<0>(MCOperand::createExpr(HigherExpr), TmpReg, SMLoc(),
- Instructions);
- createLShiftOri<16>(MCOperand::createExpr(HiExpr), TmpReg, SMLoc(),
- Instructions);
- createLShiftOri<16>(MCOperand::createExpr(LoExpr), TmpReg, SMLoc(),
- Instructions);
- } else {
- // Otherwise, expand to:
- // la d,sym => lui d,hi16(sym)
- // ori d,d,lo16(sym)
- tmpInst.setOpcode(Mips::LUi);
- tmpInst.addOperand(MCOperand::createReg(TmpReg));
- tmpInst.addOperand(MCOperand::createExpr(HiExpr));
- Instructions.push_back(tmpInst);
-
- emitRRX(Mips::ADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr), SMLoc(),
+ return false;
+ }
+
+ // Otherwise, if the $rs is different from $rd or if $rs isn't specified:
+ // (d)la $rd, sym/sym($rs) => lui $rd, %highest(sym)
+ // lui $at, %hi(sym)
+ // daddiu $rd, $rd, %higher(sym)
+ // daddiu $at, $at, %lo(sym)
+ // dsll32 $rd, $rd, 0
+ // daddu $rd, $rd, $at
+ // (daddu $rd, $rd, $rs)
+ emitRX(Mips::LUi, DstReg, MCOperand::createExpr(HighestExpr), IDLoc,
+ Instructions);
+ emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HiExpr), IDLoc,
+ Instructions);
+ emitRRX(Mips::DADDiu, DstReg, DstReg, MCOperand::createExpr(HigherExpr),
+ IDLoc, Instructions);
+ emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(LoExpr), IDLoc,
Instructions);
+ emitRRI(Mips::DSLL32, DstReg, DstReg, 0, IDLoc, Instructions);
+ emitRRR(Mips::DADDu, DstReg, DstReg, ATReg, IDLoc, Instructions);
+ if (UseSrcReg)
+ emitRRR(Mips::DADDu, DstReg, DstReg, SrcReg, IDLoc, Instructions);
+
+ return false;
+ }
+
+ // And now, the 32-bit symbol address expansion:
+ // If $rs is the same as $rd:
+ // (d)la $rd, sym($rd) => lui $at, %hi(sym)
+ // ori $at, $at, %lo(sym)
+ // addu $rd, $at, $rd
+ // Otherwise, if the $rs is different from $rd or if $rs isn't specified:
+ // (d)la $rd, sym/sym($rs) => lui $rd, %hi(sym)
+ // ori $rd, $rd, %lo(sym)
+ // (addu $rd, $rd, $rs)
+ unsigned TmpReg = DstReg;
+ if (UseSrcReg && (DstReg == SrcReg)) {
+ // If $rs is the same as $rd, we need to use AT.
+ // If it is not available we exit.
+ unsigned ATReg = getATReg(IDLoc);
+ if (!ATReg)
+ return true;
+ TmpReg = ATReg;
}
+ emitRX(Mips::LUi, TmpReg, MCOperand::createExpr(HiExpr), IDLoc, Instructions);
+ emitRRX(Mips::ADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr), IDLoc,
+ Instructions);
+
if (UseSrcReg)
- createAddu(DstReg, TmpReg, SrcReg, !Is32BitSym, Instructions);
+ emitRRR(Mips::ADDu, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ else
+ assert(DstReg == TmpReg);
return false;
}
if (!ATReg)
return true;
- if (loadImmediate(ImmValue, ATReg, Mips::NoRegister, !isGP64bit(), IDLoc,
- Instructions))
+ if (loadImmediate(ImmValue, ATReg, Mips::NoRegister, !isGP64bit(), true,
+ IDLoc, Instructions))
return true;
MCInst BranchInst;
LoadedOffsetInAT = true;
if (loadImmediate(OffsetValue, ATReg, Mips::NoRegister, !ABI.ArePtrs64bit(),
- IDLoc, Instructions))
+ true, IDLoc, Instructions))
return true;
// NOTE: We do this (D)ADDu here instead of doing it in loadImmediate()
warnIfNoMacro(IDLoc);
if (loadImmediate(OffsetValue, ATReg, Mips::NoRegister, !ABI.ArePtrs64bit(),
- IDLoc, Instructions))
+ true, IDLoc, Instructions))
return true;
// NOTE: We do this (D)ADDu here instead of doing it in loadImmediate()
const AsmToken &Tok = Parser.getTok(); // Get the next token.
if (Tok.isNot(AsmToken::LParen)) {
MipsOperand &Mnemonic = static_cast<MipsOperand &>(*Operands[0]);
- if (Mnemonic.getToken() == "la") {
+ if (Mnemonic.getToken() == "la" || Mnemonic.getToken() == "dla") {
SMLoc E =
SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(MipsOperand::CreateImm(IdVal, S, E, *this));