#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
namespace {
Statistic<>
NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
-}
-/// BMI - A special BuildMI variant that takes an iterator to insert the
-/// instruction at as well as a basic block. This is the version for when you
-/// have a destination register in mind.
-inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- int Opcode, unsigned NumOperands,
- unsigned DestReg) {
- MachineInstr *MI = new MachineInstr(Opcode, NumOperands+1, true, true);
- MBB->insert(I, MI);
- return MachineInstrBuilder(MI).addReg(DestReg, MachineOperand::Def);
+ /// TypeClass - Used by the X86 backend to group LLVM types by their basic
+ /// X86 representation.
+ ///
+ enum TypeClass {
+ cByte, cShort, cInt, cFP, cLong
+ };
}
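// Illustration: cByte, cShort, cInt, and cLong correspond to the 8-, 16-,
// 32-, and 64-bit integer groupings used throughout this file, while cFP
// covers both float and double, since both are held in x87 stack registers.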
-/// BMI - A special BuildMI variant that takes an iterator to insert the
-/// instruction at as well as a basic block.
-inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- int Opcode, unsigned NumOperands) {
- MachineInstr *MI = new MachineInstr(Opcode, NumOperands, true, true);
- MBB->insert(I, MI);
- return MachineInstrBuilder(MI);
+/// getClass - Turn a primitive type into a "class" number which is based on the
+/// size of the type, and whether or not it is floating point.
+///
+static inline TypeClass getClass(const Type *Ty) {
+ switch (Ty->getPrimitiveID()) {
+ case Type::SByteTyID:
+ case Type::UByteTyID: return cByte; // Byte operands are class #0
+ case Type::ShortTyID:
+ case Type::UShortTyID: return cShort; // Short operands are class #1
+ case Type::IntTyID:
+ case Type::UIntTyID:
+ case Type::PointerTyID: return cInt; // Ints and pointers are class #2
+
+ case Type::FloatTyID:
+ case Type::DoubleTyID: return cFP; // Floating Point is #3
+
+ case Type::LongTyID:
+ case Type::ULongTyID: return cLong; // Longs are class #4
+ default:
+ assert(0 && "Invalid type to getClass!");
+ return cByte; // not reached
+ }
}
+/// getClassB - Just like getClass, but treat boolean values as bytes.
+///
+static inline TypeClass getClassB(const Type *Ty) {
+ if (Ty == Type::BoolTy) return cByte;
+ return getClass(Ty);
+}
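+// Illustration (hypothetical uses): getClass(Type::UShortTy) returns cShort
+// and getClassB(Type::BoolTy) returns cByte, whereas a plain
+// getClass(Type::BoolTy) would hit the "Invalid type to getClass!" assert.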
namespace {
struct ISel : public FunctionPass, InstVisitor<ISel> {
/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
/// function, lowering any calls to unknown intrinsic functions into the
/// equivalent LLVM code.
+ ///
void LowerUnknownIntrinsicFunctionCalls(Function &F);
/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
- void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
- unsigned DestReg, const Type *DestTy,
- unsigned Op0Reg, unsigned Op1Reg);
- void doMultiplyConst(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, const Type *DestTy,
- unsigned Op0Reg, unsigned Op1Val);
void visitMul(BinaryOperator &B);
void visitDiv(BinaryOperator &B) { visitDivRem(B); }
unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
MachineBasicBlock *MBB,
MachineBasicBlock::iterator MBBI);
+ void visitSelectInst(SelectInst &SI);
+
// Memory Instructions
void visitLoadInst(LoadInst &I);
///
void promote32(unsigned targetReg, const ValueRecord &VR);
+ /// getAddressingMode - Get the addressing mode to use to address the
+ /// specified value. The returned value should be used with addFullAddress.
+ void getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
+ unsigned &IndexReg, unsigned &Disp);
+
+ /// getGEPIndex - This is used to fold GEP instructions into X86 addressing
+ /// expressions.
+ void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
+ std::vector<Value*> &GEPOps,
+ std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
+ unsigned &Scale, unsigned &IndexReg, unsigned &Disp);
+
+ /// isGEPFoldable - Return true if the specified GEP can be completely
+ /// folded into the addressing mode of a load/store or lea instruction.
+ bool isGEPFoldable(MachineBasicBlock *MBB,
+ Value *Src, User::op_iterator IdxBegin,
+ User::op_iterator IdxEnd, unsigned &BaseReg,
+ unsigned &Scale, unsigned &IndexReg, unsigned &Disp);
+
/// emitGEPOperation - Common code shared between visitGetElementPtrInst and
/// constant expression GEP support.
///
/// emitCastOperation - Common code shared between visitCastInst and
/// constant expression cast support.
+ ///
void emitCastOperation(MachineBasicBlock *BB,MachineBasicBlock::iterator IP,
Value *Src, const Type *DestTy, unsigned TargetReg);
/// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
/// and constant expression support.
+ ///
void emitSimpleBinaryOperation(MachineBasicBlock *BB,
MachineBasicBlock::iterator IP,
Value *Op0, Value *Op1,
unsigned OperatorClass, unsigned TargetReg);
+ void emitMultiply(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
+ Value *Op0, Value *Op1, unsigned TargetReg);
+
+ void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, const Type *DestTy,
+ unsigned Op0Reg, unsigned Op1Reg);
+ void doMultiplyConst(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, const Type *DestTy,
+ unsigned Op0Reg, unsigned Op1Val);
+
void emitDivRemOperation(MachineBasicBlock *BB,
MachineBasicBlock::iterator IP,
- unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
- const Type *Ty, unsigned TargetReg);
+ Value *Op0, Value *Op1, bool isDiv,
+ unsigned TargetReg);
/// emitSetCCOperation - Common code shared between visitSetCondInst and
/// constant expression support.
+ ///
void emitSetCCOperation(MachineBasicBlock *BB,
MachineBasicBlock::iterator IP,
Value *Op0, Value *Op1, unsigned Opcode,
/// emitShiftOperation - Common code shared between visitShiftInst and
/// constant expression support.
+ ///
void emitShiftOperation(MachineBasicBlock *MBB,
MachineBasicBlock::iterator IP,
Value *Op, Value *ShiftAmount, bool isLeftShift,
const Type *ResultTy, unsigned DestReg);
+ /// emitSelectOperation - Common code shared between visitSelectInst and
+ /// constant expression support.
+ void emitSelectOperation(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ Value *Cond, Value *TrueVal, Value *FalseVal,
+ unsigned DestReg);
/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
}
unsigned getReg(Value *V, MachineBasicBlock *MBB,
MachineBasicBlock::iterator IPt) {
- unsigned &Reg = RegMap[V];
- if (Reg == 0) {
- Reg = makeAnotherReg(V->getType());
- RegMap[V] = Reg;
- }
-
// If this operand is a constant, emit the code to copy the constant into
// the register here...
//
if (Constant *C = dyn_cast<Constant>(V)) {
+ unsigned Reg = makeAnotherReg(V->getType());
copyConstantToRegister(MBB, IPt, C, Reg);
- RegMap.erase(V); // Assign a new name to this constant if ref'd again
+ return Reg;
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ unsigned Reg = makeAnotherReg(V->getType());
// Move the address of the global into the register
- BMI(MBB, IPt, X86::MOVri32, 1, Reg).addGlobalAddress(GV);
- RegMap.erase(V); // Assign a new name to this address if ref'd again
+ BuildMI(*MBB, IPt, X86::MOV32ri, 1, Reg).addGlobalAddress(GV);
+ return Reg;
+ } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
+ // Do not emit noop casts at all.
+ if (getClassB(CI->getType()) == getClassB(CI->getOperand(0)->getType()))
+ return getReg(CI->getOperand(0), MBB, IPt);
+ }
+
+ unsigned &Reg = RegMap[V];
+ if (Reg == 0) {
+ Reg = makeAnotherReg(V->getType());
+ RegMap[V] = Reg;
}
return Reg;
};
}
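// Usage sketch: for a no-op cast such as "%Y = cast int %X to uint", getReg
// now returns the register already assigned to %X (both types are class
// cInt), while constants and global addresses are rematerialized into a
// fresh register at each reference instead of being cached in RegMap.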
-/// TypeClass - Used by the X86 backend to group LLVM types by their basic X86
-/// Representation.
-///
-enum TypeClass {
- cByte, cShort, cInt, cFP, cLong
-};
-
-/// getClass - Turn a primitive type into a "class" number which is based on the
-/// size of the type, and whether or not it is floating point.
-///
-static inline TypeClass getClass(const Type *Ty) {
- switch (Ty->getPrimitiveID()) {
- case Type::SByteTyID:
- case Type::UByteTyID: return cByte; // Byte operands are class #0
- case Type::ShortTyID:
- case Type::UShortTyID: return cShort; // Short operands are class #1
- case Type::IntTyID:
- case Type::UIntTyID:
- case Type::PointerTyID: return cInt; // Int's and pointers are class #2
-
- case Type::FloatTyID:
- case Type::DoubleTyID: return cFP; // Floating Point is #3
-
- case Type::LongTyID:
- case Type::ULongTyID: return cLong; // Longs are class #4
- default:
- assert(0 && "Invalid type to getClass!");
- return cByte; // not reached
- }
-}
-
-// getClassB - Just like getClass, but treat boolean values as bytes.
-static inline TypeClass getClassB(const Type *Ty) {
- if (Ty == Type::BoolTy) return cByte;
- return getClass(Ty);
-}
-
-
/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
Class, R);
return;
- case Instruction::Mul: {
- unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
- unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
- doMultiply(MBB, IP, R, CE->getType(), Op0Reg, Op1Reg);
+ case Instruction::Mul:
+ emitMultiply(MBB, IP, CE->getOperand(0), CE->getOperand(1), R);
return;
- }
+
case Instruction::Div:
- case Instruction::Rem: {
- unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
- unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
- emitDivRemOperation(MBB, IP, Op0Reg, Op1Reg,
- CE->getOpcode() == Instruction::Div,
- CE->getType(), R);
+ case Instruction::Rem:
+ emitDivRemOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
+ CE->getOpcode() == Instruction::Div, R);
return;
- }
case Instruction::SetNE:
case Instruction::SetEQ:
CE->getOpcode() == Instruction::Shl, CE->getType(), R);
return;
+ case Instruction::Select:
+ emitSelectOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
+ CE->getOperand(2), R);
+ return;
+
default:
std::cerr << "Offending expr: " << C << "\n";
assert(0 && "Constant expression not yet handled!\n");
if (Class == cLong) {
// Copy the value into the register pair.
uint64_t Val = cast<ConstantInt>(C)->getRawValue();
- BMI(MBB, IP, X86::MOVri32, 1, R).addZImm(Val & 0xFFFFFFFF);
- BMI(MBB, IP, X86::MOVri32, 1, R+1).addZImm(Val >> 32);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(Val & 0xFFFFFFFF);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, R+1).addImm(Val >> 32);
return;
}
assert(Class <= cInt && "Type not handled yet!");
static const unsigned IntegralOpcodeTab[] = {
- X86::MOVri8, X86::MOVri16, X86::MOVri32
+ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri
};
if (C->getType() == Type::BoolTy) {
- BMI(MBB, IP, X86::MOVri8, 1, R).addZImm(C == ConstantBool::True);
+ BuildMI(*MBB, IP, X86::MOV8ri, 1, R).addImm(C == ConstantBool::True);
} else {
ConstantInt *CI = cast<ConstantInt>(C);
- BMI(MBB, IP, IntegralOpcodeTab[Class], 1, R).addZImm(CI->getRawValue());
+ BuildMI(*MBB, IP, IntegralOpcodeTab[Class],1,R).addImm(CI->getRawValue());
}
} else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->isExactlyValue(+0.0))
- BMI(MBB, IP, X86::FLD0, 0, R);
+ BuildMI(*MBB, IP, X86::FLD0, 0, R);
else if (CFP->isExactlyValue(+1.0))
- BMI(MBB, IP, X86::FLD1, 0, R);
+ BuildMI(*MBB, IP, X86::FLD1, 0, R);
else {
// Otherwise we need to spill the constant to memory...
MachineConstantPool *CP = F->getConstantPool();
const Type *Ty = CFP->getType();
assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
- unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLDr32 : X86::FLDr64;
- addConstantPoolReference(BMI(MBB, IP, LoadOpcode, 4, R), CPI);
+ unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLD32m : X86::FLD64m;
+ addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 4, R), CPI);
}
} else if (isa<ConstantPointerNull>(C)) {
// Copy zero (null pointer) to the register.
- BMI(MBB, IP, X86::MOVri32, 1, R).addZImm(0);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(0);
} else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
- BMI(MBB, IP, X86::MOVri32, 1, R).addGlobalAddress(CPR->getValue());
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addGlobalAddress(CPR->getValue());
} else {
std::cerr << "Offending constant: " << C << "\n";
assert(0 && "Type not handled yet!");
MachineFrameInfo *MFI = F->getFrameInfo();
for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
- unsigned Reg = getReg(*I);
-
+ bool ArgLive = !I->use_empty();
+ unsigned Reg = ArgLive ? getReg(*I) : 0;
int FI; // Frame object index
+
switch (getClassB(I->getType())) {
case cByte:
- FI = MFI->CreateFixedObject(1, ArgOffset);
- addFrameReference(BuildMI(BB, X86::MOVrm8, 4, Reg), FI);
+ if (ArgLive) {
+ FI = MFI->CreateFixedObject(1, ArgOffset);
+ addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Reg), FI);
+ }
break;
case cShort:
- FI = MFI->CreateFixedObject(2, ArgOffset);
- addFrameReference(BuildMI(BB, X86::MOVrm16, 4, Reg), FI);
+ if (ArgLive) {
+ FI = MFI->CreateFixedObject(2, ArgOffset);
+ addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Reg), FI);
+ }
break;
case cInt:
- FI = MFI->CreateFixedObject(4, ArgOffset);
- addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg), FI);
+ if (ArgLive) {
+ FI = MFI->CreateFixedObject(4, ArgOffset);
+ addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
+ }
break;
case cLong:
- FI = MFI->CreateFixedObject(8, ArgOffset);
- addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg), FI);
- addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg+1), FI, 4);
+ if (ArgLive) {
+ FI = MFI->CreateFixedObject(8, ArgOffset);
+ addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
+ addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg+1), FI, 4);
+ }
ArgOffset += 4; // longs require 4 additional bytes
break;
case cFP:
- unsigned Opcode;
- if (I->getType() == Type::FloatTy) {
- Opcode = X86::FLDr32;
- FI = MFI->CreateFixedObject(4, ArgOffset);
- } else {
- Opcode = X86::FLDr64;
- FI = MFI->CreateFixedObject(8, ArgOffset);
- ArgOffset += 4; // doubles require 4 additional bytes
+ if (ArgLive) {
+ unsigned Opcode;
+ if (I->getType() == Type::FloatTy) {
+ Opcode = X86::FLD32m;
+ FI = MFI->CreateFixedObject(4, ArgOffset);
+ } else {
+ Opcode = X86::FLD64m;
+ FI = MFI->CreateFixedObject(8, ArgOffset);
+ }
+ addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
}
- addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
+ if (I->getType() == Type::DoubleTy)
+ ArgOffset += 4; // doubles require 4 additional bytes
break;
default:
assert(0 && "Unhandled argument type!");
const Function &LF = *F->getFunction(); // The LLVM function...
for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
const BasicBlock *BB = I;
- MachineBasicBlock *MBB = MBBMap[I];
+ MachineBasicBlock &MBB = *MBBMap[I];
// Loop over all of the PHI nodes in the LLVM basic block...
- MachineBasicBlock::iterator instr = MBB->begin();
+ MachineBasicBlock::iterator PHIInsertPoint = MBB.begin();
for (BasicBlock::const_iterator I = BB->begin();
PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) {
// Create a new machine instr PHI node, and insert it.
unsigned PHIReg = getReg(*PN);
- MachineInstr *PhiMI = BuildMI(X86::PHI, PN->getNumOperands(), PHIReg);
- MBB->insert(instr, PhiMI);
+ MachineInstr *PhiMI = BuildMI(MBB, PHIInsertPoint,
+ X86::PHI, PN->getNumOperands(), PHIReg);
MachineInstr *LongPhiMI = 0;
- if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy) {
- LongPhiMI = BuildMI(X86::PHI, PN->getNumOperands(), PHIReg+1);
- MBB->insert(instr, LongPhiMI);
- }
+ if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy)
+ LongPhiMI = BuildMI(MBB, PHIInsertPoint,
+ X86::PHI, PN->getNumOperands(), PHIReg+1);
// PHIValues - Map of blocks to incoming virtual registers. We use this
// so that we only initialize one incoming value for a particular block,
// If this is a constant or GlobalValue, we may have to insert code
// into the basic block to compute it into a virtual register.
if (isa<Constant>(Val) || isa<GlobalValue>(Val)) {
- // Because we don't want to clobber any values which might be in
- // physical registers with the computation of this constant (which
- // might be arbitrarily complex if it is a constant expression),
- // just insert the computation at the top of the basic block.
- MachineBasicBlock::iterator PI = PredMBB->begin();
-
- // Skip over any PHI nodes though!
- while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
- ++PI;
-
- ValReg = getReg(Val, PredMBB, PI);
+ if (isa<ConstantExpr>(Val)) {
+ // Because we don't want to clobber any values which might be in
+ // physical registers with the computation of this constant (which
+ // might be arbitrarily complex if it is a constant expression),
+ // just insert the computation at the top of the basic block.
+ MachineBasicBlock::iterator PI = PredMBB->begin();
+
+ // Skip over any PHI nodes though!
+ while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
+ ++PI;
+
+ ValReg = getReg(Val, PredMBB, PI);
+ } else {
+ // Simple constants get emitted at the end of the basic block,
+ // before any terminator instructions. We "know" that the code to
+ // move a constant into a register will never clobber any flags.
+ ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator());
+ }
} else {
ValReg = getReg(Val);
}
LongPhiMI->addMachineBasicBlockOperand(PredMBB);
}
}
+
+ // Now that we emitted all of the incoming values for the PHI node, make
+ // sure to reposition the InsertPoint after the PHI that we just added.
+ // This is needed because we might have inserted a constant into this
+ // block, right after the PHIs, which is before the old insert point!
+ PHIInsertPoint = LongPhiMI ? LongPhiMI : PhiMI;
+ ++PHIInsertPoint;
}
}
}
//
void ISel::InsertFPRegKills() {
SSARegMap &RegMap = *F->getSSARegMap();
- const TargetInstrInfo &TII = TM.getInstrInfo();
for (MachineFunction::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
- bool UsesFPReg = false;
for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I!=E; ++I)
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
- if (I->getOperand(i).isRegister()) {
- unsigned Reg = I->getOperand(i).getReg();
+ for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
+ MachineOperand& MO = I->getOperand(i);
+ if (MO.isRegister() && MO.getReg()) {
+ unsigned Reg = MO.getReg();
if (MRegisterInfo::isVirtualRegister(Reg))
- if (RegMap.getRegClass(Reg)->getSize() == 10) {
- UsesFPReg = true;
- break;
- }
+ if (RegMap.getRegClass(Reg)->getSize() == 10)
+ goto UsesFPReg;
}
- if (UsesFPReg) {
- // Okay, this block uses an FP register. If the block has successors (ie,
- // it's not an unwind/return), insert the FP_REG_KILL instruction.
- if (BB->getBasicBlock()->getTerminator()->getNumSuccessors() &&
- RequiresFPRegKill(BB->getBasicBlock())) {
- // Rewind past any terminator instructions that might exist.
- MachineBasicBlock::iterator I = BB->end();
- while (I != BB->begin() && TII.isTerminatorInstr((--I)->getOpcode()));
- ++I;
- BMI(BB, I, X86::FP_REG_KILL, 0);
- ++NumFPKill;
+ }
+ // If we haven't found an FP register use or def in this basic block, check
+ // to see if any of our successors has an FP PHI node, which will cause a
+ // copy to be inserted into this block.
+ for (succ_const_iterator SI = succ_begin(BB->getBasicBlock()),
+ E = succ_end(BB->getBasicBlock()); SI != E; ++SI) {
+ MachineBasicBlock *SBB = MBBMap[*SI];
+ for (MachineBasicBlock::iterator I = SBB->begin();
+ I != SBB->end() && I->getOpcode() == X86::PHI; ++I) {
+ if (RegMap.getRegClass(I->getOperand(0).getReg())->getSize() == 10)
+ goto UsesFPReg;
}
}
+ continue;
+ UsesFPReg:
+ // Okay, this block uses an FP register. If the block has successors (ie,
+ // it's not an unwind/return), insert the FP_REG_KILL instruction.
+ if (BB->getBasicBlock()->getTerminator()->getNumSuccessors() &&
+ RequiresFPRegKill(BB->getBasicBlock())) {
+ BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
+ ++NumFPKill;
+ }
}
}
-// canFoldSetCCIntoBranch - Return the setcc instruction if we can fold it into
-// the conditional branch instruction which is the only user of the cc
-// instruction. This is the case if the conditional branch is the only user of
-// the setcc, and if the setcc is in the same basic block as the conditional
-// branch. We also don't handle long arguments below, so we reject them here as
-// well.
+// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold
+// it into the conditional branch or select instruction which is the only user
+// of the cc instruction. This is the case if the conditional branch is the
+// only user of the setcc, and if the setcc is in the same basic block as the
+// conditional branch. We also don't handle long arguments below, so we reject
+// them here as well.
//
-static SetCondInst *canFoldSetCCIntoBranch(Value *V) {
+static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) {
if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
- if (SCI->hasOneUse() && isa<BranchInst>(SCI->use_back()) &&
- SCI->getParent() == cast<BranchInst>(SCI->use_back())->getParent()) {
- const Type *Ty = SCI->getOperand(0)->getType();
- if (Ty != Type::LongTy && Ty != Type::ULongTy)
+ if (SCI->hasOneUse()) {
+ Instruction *User = cast<Instruction>(SCI->use_back());
+ if ((isa<BranchInst>(User) || isa<SelectInst>(User)) &&
+ SCI->getParent() == User->getParent() &&
+ (getClassB(SCI->getOperand(0)->getType()) != cLong ||
+ SCI->getOpcode() == Instruction::SetEQ ||
+ SCI->getOpcode() == Instruction::SetNE))
return SCI;
}
return 0;
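// A shape this now folds (sketch in LLVM 1.x syntax; %a, %b, %x and %y are
// placeholders):
//   %c = setlt int %a, %b               ; single use, same block as its user
//   %r = select bool %c, int %x, int %y
// Long comparisons are still rejected unless they are seteq or setne.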
unsigned Op0r = getReg(Op0, MBB, IP);
// Special case handling of: cmp R, i
- if (Class == cByte || Class == cShort || Class == cInt)
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
- uint64_t Op1v = cast<ConstantInt>(CI)->getRawValue();
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
+ if (Class == cByte || Class == cShort || Class == cInt) {
+ unsigned Op1v = CI->getRawValue();
// Mask off any upper bits of the constant, if there are any...
Op1v &= (1ULL << (8 << Class)) - 1;
// !=. These should have been strength reduced already anyway.
if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
static const unsigned TESTTab[] = {
- X86::TESTrr8, X86::TESTrr16, X86::TESTrr32
+ X86::TEST8rr, X86::TEST16rr, X86::TEST32rr
};
- BMI(MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);
+ BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);
if (OpNum == 2) return 6; // Map jl -> js
if (OpNum == 3) return 7; // Map jg -> jns
}
static const unsigned CMPTab[] = {
- X86::CMPri8, X86::CMPri16, X86::CMPri32
+ X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
};
- BMI(MBB, IP, CMPTab[Class], 2).addReg(Op0r).addZImm(Op1v);
+ BuildMI(*MBB, IP, CMPTab[Class], 2).addReg(Op0r).addImm(Op1v);
return OpNum;
+ } else {
+ assert(Class == cLong && "Unknown integer class!");
+ unsigned LowCst = CI->getRawValue();
+ unsigned HiCst = CI->getRawValue() >> 32;
+ if (OpNum < 2) { // seteq, setne
+ unsigned LoTmp = Op0r;
+ if (LowCst != 0) {
+ LoTmp = makeAnotherReg(Type::IntTy);
+ BuildMI(*MBB, IP, X86::XOR32ri, 2, LoTmp).addReg(Op0r).addImm(LowCst);
+ }
+ unsigned HiTmp = Op0r+1;
+ if (HiCst != 0) {
+ HiTmp = makeAnotherReg(Type::IntTy);
+ BuildMI(*MBB, IP, X86::XOR32ri, 2,HiTmp).addReg(Op0r+1).addImm(HiCst);
+ }
+ unsigned FinalTmp = makeAnotherReg(Type::IntTy);
+ BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
+ return OpNum;
+ } else {
+ // Emit a sequence of code which compares the high and low parts once
+ // each, then uses a conditional move to handle the overflow case. For
+ // example, a setlt for long would generate code like this:
+ //
+ // AL = lo(op1) < lo(op2) // Signedness depends on operands
+ // BL = hi(op1) < hi(op2) // Always unsigned comparison
+ // dest = hi(op1) == hi(op2) ? AL : BL;
+ //
+
+ // FIXME: This would be much better if we had hierarchical register
+ // classes! Until then, hardcode registers so that we can deal with
+ // their aliases (because we don't have conditional byte moves).
+ //
+ BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r).addImm(LowCst);
+ BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
+ BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r+1).addImm(HiCst);
+ BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0,X86::BL);
+ BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
+ BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
+ BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
+ .addReg(X86::AX);
+ // NOTE: visitSetCondInst knows that the value is dumped into the BL
+ // register at this point for long values...
+ return OpNum;
+ }
}
+ }
// Special case handling of comparison against +/- 0.0
if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op1))
if (CFP->isExactlyValue(+0.0) || CFP->isExactlyValue(-0.0)) {
- BMI(MBB, IP, X86::FTST, 1).addReg(Op0r);
- BMI(MBB, IP, X86::FNSTSWr8, 0);
- BMI(MBB, IP, X86::SAHF, 1);
+ BuildMI(*MBB, IP, X86::FTST, 1).addReg(Op0r);
+ BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
+ BuildMI(*MBB, IP, X86::SAHF, 1);
return OpNum;
}
// compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
// 32-bit.
case cByte:
- BMI(MBB, IP, X86::CMPrr8, 2).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, X86::CMP8rr, 2).addReg(Op0r).addReg(Op1r);
break;
case cShort:
- BMI(MBB, IP, X86::CMPrr16, 2).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, X86::CMP16rr, 2).addReg(Op0r).addReg(Op1r);
break;
case cInt:
- BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
break;
case cFP:
- BMI(MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r);
- BMI(MBB, IP, X86::FNSTSWr8, 0);
- BMI(MBB, IP, X86::SAHF, 1);
+ BuildMI(*MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
+ BuildMI(*MBB, IP, X86::SAHF, 1);
break;
case cLong:
unsigned LoTmp = makeAnotherReg(Type::IntTy);
unsigned HiTmp = makeAnotherReg(Type::IntTy);
unsigned FinalTmp = makeAnotherReg(Type::IntTy);
- BMI(MBB, IP, X86::XORrr32, 2, LoTmp).addReg(Op0r).addReg(Op1r);
- BMI(MBB, IP, X86::XORrr32, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
- BMI(MBB, IP, X86::ORrr32, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
+ BuildMI(*MBB, IP, X86::XOR32rr, 2, LoTmp).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, X86::XOR32rr, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
+ BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
break; // Allow the sete or setne to be generated from flags set by OR
} else {
// Emit a sequence of code which compares the high and low parts once
// classes! Until then, hardcode registers so that we can deal with their
// aliases (because we don't have conditional byte moves).
//
- BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r);
- BMI(MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
- BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r+1).addReg(Op1r+1);
- BMI(MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
- BMI(MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
- BMI(MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
- BMI(MBB, IP, X86::CMOVErr16, 2, X86::BX).addReg(X86::BX).addReg(X86::AX);
+ BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
+ BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r+1).addReg(Op1r+1);
+ BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
+ BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
+ BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
+ BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
+ .addReg(X86::AX);
// NOTE: visitSetCondInst knows that the value is dumped into the BL
// register at this point for long values...
return OpNum;
return OpNum;
}
-
/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
/// register, then move it to wherever the result should be.
///
void ISel::visitSetCondInst(SetCondInst &I) {
- if (canFoldSetCCIntoBranch(&I)) return; // Fold this into a branch...
+ if (canFoldSetCCIntoBranchOrSelect(&I))
+ return; // Fold this into a branch or select.
unsigned DestReg = getReg(I);
MachineBasicBlock::iterator MII = BB->end();
/// emitSetCCOperation - Common code shared between visitSetCondInst and
/// constant expression support.
+///
void ISel::emitSetCCOperation(MachineBasicBlock *MBB,
MachineBasicBlock::iterator IP,
Value *Op0, Value *Op1, unsigned Opcode,
if (CompClass != cLong || OpNum < 2) {
// Handle normal comparisons with a setcc instruction...
- BMI(MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
+ BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
} else {
// Handle long comparisons by copying the value which is already in BL into
// the register we want...
- BMI(MBB, IP, X86::MOVrr8, 1, TargetReg).addReg(X86::BL);
+ BuildMI(*MBB, IP, X86::MOV8rr, 1, TargetReg).addReg(X86::BL);
}
}
+void ISel::visitSelectInst(SelectInst &SI) {
+ unsigned DestReg = getReg(SI);
+ MachineBasicBlock::iterator MII = BB->end();
+ emitSelectOperation(BB, MII, SI.getCondition(), SI.getTrueValue(),
+ SI.getFalseValue(), DestReg);
+}
+
+/// emitSelectOperation - Common code shared between visitSelectInst and
+/// constant expression support.
+void ISel::emitSelectOperation(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ Value *Cond, Value *TrueVal, Value *FalseVal,
+ unsigned DestReg) {
+ unsigned SelectClass = getClassB(TrueVal->getType());
+
+ // We don't support 8-bit conditional moves. If we have incoming constants,
+ // transform them into 16-bit constants to avoid having a run-time conversion.
+ if (SelectClass == cByte) {
+ if (Constant *T = dyn_cast<Constant>(TrueVal))
+ TrueVal = ConstantExpr::getCast(T, Type::ShortTy);
+ if (Constant *F = dyn_cast<Constant>(FalseVal))
+ FalseVal = ConstantExpr::getCast(F, Type::ShortTy);
+ }
+
+ unsigned Opcode;
+ if (SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(Cond)) {
+ // We successfully folded the setcc into the select instruction.
+
+ unsigned OpNum = getSetCCNumber(SCI->getOpcode());
+ OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), MBB,
+ IP);
+
+ const Type *CompTy = SCI->getOperand(0)->getType();
+ bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;
+
+ // LLVM -> X86 signed X86 unsigned
+ // ----- ---------- ------------
+ // seteq -> cmovNE cmovNE
+ // setne -> cmovE cmovE
+ // setlt -> cmovGE cmovAE
+ // setge -> cmovL cmovB
+ // setgt -> cmovLE cmovBE
+ // setle -> cmovG cmovA
+ // ----
+ // cmovNS // Used by comparison with 0 optimization
+ // cmovS
+
+ switch (SelectClass) {
+ default: assert(0 && "Unknown value class!");
+ case cFP: {
+ // Annoyingly, we don't have a full set of floating point conditional
+ // moves. :(
+ static const unsigned OpcodeTab[2][8] = {
+ { X86::FCMOVNE, X86::FCMOVE, X86::FCMOVAE, X86::FCMOVB,
+ X86::FCMOVBE, X86::FCMOVA, 0, 0 },
+ { X86::FCMOVNE, X86::FCMOVE, 0, 0, 0, 0, 0, 0 },
+ };
+ Opcode = OpcodeTab[isSigned][OpNum];
+
+ // If opcode == 0, we hit a case that we don't support. Output a setcc
+ // and compare the result against zero.
+ if (Opcode == 0) {
+ unsigned CompClass = getClassB(CompTy);
+ unsigned CondReg;
+ if (CompClass != cLong || OpNum < 2) {
+ CondReg = makeAnotherReg(Type::BoolTy);
+ // Handle normal comparisons with a setcc instruction...
+ BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, CondReg);
+ } else {
+ // Long comparisons end up in the BL register.
+ CondReg = X86::BL;
+ }
+
+ BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
+ Opcode = X86::FCMOVE;
+ }
+ break;
+ }
+ case cByte:
+ case cShort: {
+ static const unsigned OpcodeTab[2][8] = {
+ { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVAE16rr, X86::CMOVB16rr,
+ X86::CMOVBE16rr, X86::CMOVA16rr, 0, 0 },
+ { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVGE16rr, X86::CMOVL16rr,
+ X86::CMOVLE16rr, X86::CMOVG16rr, X86::CMOVNS16rr, X86::CMOVS16rr },
+ };
+ Opcode = OpcodeTab[isSigned][OpNum];
+ break;
+ }
+ case cInt:
+ case cLong: {
+ static const unsigned OpcodeTab[2][8] = {
+ { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVAE32rr, X86::CMOVB32rr,
+ X86::CMOVBE32rr, X86::CMOVA32rr, 0, 0 },
+ { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVGE32rr, X86::CMOVL32rr,
+ X86::CMOVLE32rr, X86::CMOVG32rr, X86::CMOVNS32rr, X86::CMOVS32rr },
+ };
+ Opcode = OpcodeTab[isSigned][OpNum];
+ break;
+ }
+ }
+ } else {
+ // Get the value being branched on, and use it to set the condition codes.
+ unsigned CondReg = getReg(Cond, MBB, IP);
+ BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
+ switch (SelectClass) {
+ default: assert(0 && "Unknown value class!");
+ case cFP: Opcode = X86::FCMOVE; break;
+ case cByte:
+ case cShort: Opcode = X86::CMOVE16rr; break;
+ case cInt:
+ case cLong: Opcode = X86::CMOVE32rr; break;
+ }
+ }
+
+ unsigned TrueReg = getReg(TrueVal, MBB, IP);
+ unsigned FalseReg = getReg(FalseVal, MBB, IP);
+ unsigned RealDestReg = DestReg;
+
+ // Annoyingly enough, X86 doesn't HAVE 8-bit conditional moves. Because of
+ // this, we have to promote the incoming values to 16 bits, perform a 16-bit
+ // cmove, then truncate the result.
+ if (SelectClass == cByte) {
+ DestReg = makeAnotherReg(Type::ShortTy);
+ if (getClassB(TrueVal->getType()) == cByte) {
+ // Promote the true value, by storing it into AL, and reading from AX.
+ BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::AL).addReg(TrueReg);
+ BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::AH).addImm(0);
+ TrueReg = makeAnotherReg(Type::ShortTy);
+ BuildMI(*MBB, IP, X86::MOV16rr, 1, TrueReg).addReg(X86::AX);
+ }
+ if (getClassB(FalseVal->getType()) == cByte) {
+ // Promote the false value, by storing it into CL, and reading from CX.
+ BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(FalseReg);
+ BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::CH).addImm(0);
+ FalseReg = makeAnotherReg(Type::ShortTy);
+ BuildMI(*MBB, IP, X86::MOV16rr, 1, FalseReg).addReg(X86::CX);
+ }
+ }
+
+ BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(TrueReg).addReg(FalseReg);
+
+ switch (SelectClass) {
+ case cByte:
+ // We did the computation with 16-bit registers. Truncate back to our
+ // result by copying into AX then copying out AL.
+ BuildMI(*MBB, IP, X86::MOV16rr, 1, X86::AX).addReg(DestReg);
+ BuildMI(*MBB, IP, X86::MOV8rr, 1, RealDestReg).addReg(X86::AL);
+ break;
+ case cLong:
+ // Move the upper half of the value as well.
+ BuildMI(*MBB, IP, Opcode, 2,DestReg+1).addReg(TrueReg+1).addReg(FalseReg+1);
+ break;
+ }
+}
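+// Rough shape of the generic (non-folded) 32-bit select lowering above: the
+// condition byte is tested against itself, so ZF is set exactly when the
+// condition is false, and the conditional move then overwrites the true
+// value with the false value:
+//   test cond, cond        ; ZF = (cond == 0)
+//   dest = true            ; two-address tie of DestReg to TrueReg
+//   cmove dest, false      ; if ZF, dest = false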
/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
/// operand, in the specified target register.
+///
void ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
bool isUnsigned = VR.Ty->isUnsigned();
+ Value *Val = VR.Val;
+ const Type *Ty = VR.Ty;
+ if (Val) {
+ if (Constant *C = dyn_cast<Constant>(Val)) {
+ Val = ConstantExpr::getCast(C, Type::IntTy);
+ Ty = Type::IntTy;
+ }
+
+ // If this is a simple constant, just emit a MOVri directly to avoid the
+ // copy.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
+ int TheVal = CI->getRawValue() & 0xFFFFFFFF;
+ BuildMI(BB, X86::MOV32ri, 1, targetReg).addImm(TheVal);
+ return;
+ }
+ }
+
// Make sure we have the register number for this value...
- unsigned Reg = VR.Val ? getReg(VR.Val) : VR.Reg;
+ unsigned Reg = Val ? getReg(Val) : VR.Reg;
- switch (getClassB(VR.Ty)) {
+ switch (getClassB(Ty)) {
case cByte:
// Extend value into target register (8->32)
if (isUnsigned)
- BuildMI(BB, X86::MOVZXr32r8, 1, targetReg).addReg(Reg);
+ BuildMI(BB, X86::MOVZX32rr8, 1, targetReg).addReg(Reg);
else
- BuildMI(BB, X86::MOVSXr32r8, 1, targetReg).addReg(Reg);
+ BuildMI(BB, X86::MOVSX32rr8, 1, targetReg).addReg(Reg);
break;
case cShort:
// Extend value into target register (16->32)
if (isUnsigned)
- BuildMI(BB, X86::MOVZXr32r16, 1, targetReg).addReg(Reg);
+ BuildMI(BB, X86::MOVZX32rr16, 1, targetReg).addReg(Reg);
else
- BuildMI(BB, X86::MOVSXr32r16, 1, targetReg).addReg(Reg);
+ BuildMI(BB, X86::MOVSX32rr16, 1, targetReg).addReg(Reg);
break;
case cInt:
// Move value into target register (32->32)
- BuildMI(BB, X86::MOVrr32, 1, targetReg).addReg(Reg);
+ BuildMI(BB, X86::MOV32rr, 1, targetReg).addReg(Reg);
break;
default:
assert(0 && "Unpromotable operand class in promote32");
}
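// For example, promote32(X86::EAX, ValueRecord(V)) for a constant ubyte V now
// emits a single MOV32ri of the extended value instead of materializing the
// byte constant and then zero-extending it (V is a placeholder name).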
Value *RetVal = I.getOperand(0);
- unsigned RetReg = getReg(RetVal);
switch (getClassB(RetVal->getType())) {
case cByte: // integral return values: extend or move into EAX and return
case cShort:
case cInt:
- promote32(X86::EAX, ValueRecord(RetReg, RetVal->getType()));
+ promote32(X86::EAX, ValueRecord(RetVal));
// Declare that EAX is live on exit
BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
break;
- case cFP: // Floats & Doubles: Return in ST(0)
+ case cFP: { // Floats & Doubles: Return in ST(0)
+ unsigned RetReg = getReg(RetVal);
BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg);
// Declare that top-of-stack is live on exit
BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
break;
- case cLong:
- BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(RetReg);
- BuildMI(BB, X86::MOVrr32, 1, X86::EDX).addReg(RetReg+1);
+ }
+ case cLong: {
+ unsigned RetReg = getReg(RetVal);
+ BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(RetReg);
+ BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RetReg+1);
// Declare that EAX & EDX are live on exit
BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
.addReg(X86::ESP);
break;
+ }
default:
visitInstruction(I);
}
}
// See if we can fold the setcc into the branch itself...
- SetCondInst *SCI = canFoldSetCCIntoBranch(BI.getCondition());
+ SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(BI.getCondition());
if (SCI == 0) {
// Nope, cannot fold setcc into this branch. Emit a branch on a condition
// computed some other way...
unsigned condReg = getReg(BI.getCondition());
- BuildMI(BB, X86::CMPri8, 2).addReg(condReg).addZImm(0);
+ BuildMI(BB, X86::TEST8rr, 2).addReg(condReg).addReg(condReg);
if (BI.getSuccessor(1) == NextBB) {
if (BI.getSuccessor(0) != NextBB)
BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0));
}
// Adjust the stack pointer for the new arguments...
- BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addZImm(NumBytes);
+ BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(NumBytes);
// Arguments go on the stack in reverse order, as specified by the ABI.
unsigned ArgOffset = 0;
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
- unsigned ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
+ unsigned ArgReg;
switch (getClassB(Args[i].Ty)) {
case cByte:
- case cShort: {
- // Promote arg to 32 bits wide into a temporary register...
- unsigned R = makeAnotherReg(Type::UIntTy);
- promote32(R, Args[i]);
- addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
- X86::ESP, ArgOffset).addReg(R);
+ case cShort:
+ if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
+ // Zero/Sign extend constant, then stuff into memory.
+ ConstantInt *Val = cast<ConstantInt>(Args[i].Val);
+ Val = cast<ConstantInt>(ConstantExpr::getCast(Val, Type::IntTy));
+ addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
+ .addImm(Val->getRawValue() & 0xFFFFFFFF);
+ } else {
+ // Promote arg to 32 bits wide into a temporary register...
+ ArgReg = makeAnotherReg(Type::UIntTy);
+ promote32(ArgReg, Args[i]);
+ addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
+ X86::ESP, ArgOffset).addReg(ArgReg);
+ }
break;
- }
case cInt:
- addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
- X86::ESP, ArgOffset).addReg(ArgReg);
+ if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
+ unsigned Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
+ addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
+ X86::ESP, ArgOffset).addImm(Val);
+ } else {
+ ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
+ addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
+ X86::ESP, ArgOffset).addReg(ArgReg);
+ }
break;
case cLong:
- addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
- X86::ESP, ArgOffset).addReg(ArgReg);
- addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
- X86::ESP, ArgOffset+4).addReg(ArgReg+1);
+ if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
+ uint64_t Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
+ addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
+ X86::ESP, ArgOffset).addImm(Val & ~0U);
+ addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
+ X86::ESP, ArgOffset+4).addImm(Val >> 32ULL);
+ } else {
+ ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
+ addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
+ X86::ESP, ArgOffset).addReg(ArgReg);
+ addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
+ X86::ESP, ArgOffset+4).addReg(ArgReg+1);
+ }
ArgOffset += 4; // 8 byte entry, not 4.
break;
case cFP:
+ ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
if (Args[i].Ty == Type::FloatTy) {
- addRegOffset(BuildMI(BB, X86::FSTr32, 5),
+ addRegOffset(BuildMI(BB, X86::FST32m, 5),
X86::ESP, ArgOffset).addReg(ArgReg);
} else {
assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
- addRegOffset(BuildMI(BB, X86::FSTr64, 5),
+ addRegOffset(BuildMI(BB, X86::FST64m, 5),
X86::ESP, ArgOffset).addReg(ArgReg);
ArgOffset += 4; // 8 byte entry, not 4.
}
ArgOffset += 4;
}
} else {
- BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addZImm(0);
+ BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(0);
}
BB->push_back(CallMI);
- BuildMI(BB, X86::ADJCALLSTACKUP, 1).addZImm(NumBytes);
+ BuildMI(BB, X86::ADJCALLSTACKUP, 1).addImm(NumBytes);
// If there is a return value, scavenge the result from the location the call
// leaves it in...
// Integral results are in %eax, or the appropriate portion
// thereof.
static const unsigned regRegMove[] = {
- X86::MOVrr8, X86::MOVrr16, X86::MOVrr32
+ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr
};
static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
break;
case cLong: // Long values are left in EDX:EAX
- BuildMI(BB, X86::MOVrr32, 1, Ret.Reg).addReg(X86::EAX);
- BuildMI(BB, X86::MOVrr32, 1, Ret.Reg+1).addReg(X86::EDX);
+ BuildMI(BB, X86::MOV32rr, 1, Ret.Reg).addReg(X86::EAX);
+ BuildMI(BB, X86::MOV32rr, 1, Ret.Reg+1).addReg(X86::EDX);
break;
default: assert(0 && "Unknown class!");
}
TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
} else { // Emit an indirect call...
unsigned Reg = getReg(CI.getCalledValue());
- TheCall = BuildMI(X86::CALLr32, 1).addReg(Reg);
+ TheCall = BuildMI(X86::CALL32r, 1).addReg(Reg);
}
std::vector<ValueRecord> Args;
/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
/// function, lowering any calls to unknown intrinsic functions into the
/// equivalent LLVM code.
+///
void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
if (Function *F = CI->getCalledFunction())
switch (F->getIntrinsicID()) {
case Intrinsic::not_intrinsic:
- case Intrinsic::va_start:
- case Intrinsic::va_copy:
- case Intrinsic::va_end:
+ case Intrinsic::vastart:
+ case Intrinsic::vacopy:
+ case Intrinsic::vaend:
case Intrinsic::returnaddress:
case Intrinsic::frameaddress:
case Intrinsic::memcpy:
case Intrinsic::memset:
+ case Intrinsic::readport:
+ case Intrinsic::writeport:
// We directly implement these intrinsics
break;
default:
void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
unsigned TmpReg1, TmpReg2;
switch (ID) {
- case Intrinsic::va_start:
+ case Intrinsic::vastart:
// Get the address of the first vararg value...
TmpReg1 = getReg(CI);
- addFrameReference(BuildMI(BB, X86::LEAr32, 5, TmpReg1), VarArgsFrameIndex);
+ addFrameReference(BuildMI(BB, X86::LEA32r, 5, TmpReg1), VarArgsFrameIndex);
return;
- case Intrinsic::va_copy:
+ case Intrinsic::vacopy:
TmpReg1 = getReg(CI);
TmpReg2 = getReg(CI.getOperand(1));
- BuildMI(BB, X86::MOVrr32, 1, TmpReg1).addReg(TmpReg2);
+ BuildMI(BB, X86::MOV32rr, 1, TmpReg1).addReg(TmpReg2);
return;
- case Intrinsic::va_end: return; // Noop on X86
+ case Intrinsic::vaend: return; // Noop on X86
case Intrinsic::returnaddress:
case Intrinsic::frameaddress:
if (cast<Constant>(CI.getOperand(1))->isNullValue()) {
if (ID == Intrinsic::returnaddress) {
// Just load the return address
- addFrameReference(BuildMI(BB, X86::MOVrm32, 4, TmpReg1),
+ addFrameReference(BuildMI(BB, X86::MOV32rm, 4, TmpReg1),
ReturnAddressIndex);
} else {
- addFrameReference(BuildMI(BB, X86::LEAr32, 4, TmpReg1),
+ addFrameReference(BuildMI(BB, X86::LEA32r, 4, TmpReg1),
ReturnAddressIndex, -4);
}
} else {
// Values other than zero are not implemented yet.
- BuildMI(BB, X86::MOVri32, 1, TmpReg1).addZImm(0);
+ BuildMI(BB, X86::MOV32ri, 1, TmpReg1).addImm(0);
}
return;
}
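// (Thus llvm.returnaddress(0) reduces to a single MOV32rm from the fixed
// return-address frame slot, llvm.frameaddress(0) to an LEA32r of that slot
// at offset -4, and nonzero depths currently just produce 0.)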
// Turn the byte code into # iterations
- unsigned ByteReg;
unsigned CountReg;
unsigned Opcode;
switch (Align & 3) {
CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
} else {
CountReg = makeAnotherReg(Type::IntTy);
- BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(1);
+ unsigned ByteReg = getReg(CI.getOperand(3));
+ BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
}
Opcode = X86::REP_MOVSW;
break;
CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
} else {
CountReg = makeAnotherReg(Type::IntTy);
- BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(2);
+ unsigned ByteReg = getReg(CI.getOperand(3));
+ BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
}
Opcode = X86::REP_MOVSD;
break;
- case 1: // BYTE aligned
- case 3: // BYTE aligned
+ default: // BYTE aligned
CountReg = getReg(CI.getOperand(3));
Opcode = X86::REP_MOVSB;
break;
// destination in EDI, and the count in ECX.
TmpReg1 = getReg(CI.getOperand(1));
TmpReg2 = getReg(CI.getOperand(2));
- BuildMI(BB, X86::MOVrr32, 1, X86::ECX).addReg(CountReg);
- BuildMI(BB, X86::MOVrr32, 1, X86::EDI).addReg(TmpReg1);
- BuildMI(BB, X86::MOVrr32, 1, X86::ESI).addReg(TmpReg2);
+ BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
+ BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
+ BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
BuildMI(BB, Opcode, 0);
return;
}
}
// Turn the byte code into # iterations
- unsigned ByteReg;
unsigned CountReg;
unsigned Opcode;
if (ConstantInt *ValC = dyn_cast<ConstantInt>(CI.getOperand(2))) {
CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
} else {
CountReg = makeAnotherReg(Type::IntTy);
- BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(1);
+ unsigned ByteReg = getReg(CI.getOperand(3));
+ BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
}
- BuildMI(BB, X86::MOVri16, 1, X86::AX).addZImm((Val << 8) | Val);
+ BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
Opcode = X86::REP_STOSW;
break;
case 0: // DWORD aligned
CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
} else {
CountReg = makeAnotherReg(Type::IntTy);
- BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(2);
+ unsigned ByteReg = getReg(CI.getOperand(3));
+ BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
}
Val = (Val << 8) | Val;
- BuildMI(BB, X86::MOVri32, 1, X86::EAX).addZImm((Val << 16) | Val);
+ BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
Opcode = X86::REP_STOSD;
break;
- case 1: // BYTE aligned
- case 3: // BYTE aligned
+ default: // BYTE aligned
CountReg = getReg(CI.getOperand(3));
- BuildMI(BB, X86::MOVri8, 1, X86::AL).addZImm(Val);
+ BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
Opcode = X86::REP_STOSB;
break;
}
// If it's not a constant value we are storing, just fall back. We could
// try to be clever to form 16 bit and 32 bit values, but we don't yet.
unsigned ValReg = getReg(CI.getOperand(2));
- BuildMI(BB, X86::MOVrr8, 1, X86::AL).addReg(ValReg);
+ BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
CountReg = getReg(CI.getOperand(3));
Opcode = X86::REP_STOSB;
}
// destination in EDI, and the count in ECX.
TmpReg1 = getReg(CI.getOperand(1));
//TmpReg2 = getReg(CI.getOperand(2));
- BuildMI(BB, X86::MOVrr32, 1, X86::ECX).addReg(CountReg);
- BuildMI(BB, X86::MOVrr32, 1, X86::EDI).addReg(TmpReg1);
+ BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
+ BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
BuildMI(BB, Opcode, 0);
return;
}
+ case Intrinsic::readport:
+ //
+ // First, determine that the size of the operand falls within the
+ // acceptable range for this architecture.
+ //
+ if ((CI.getOperand(1)->getType()->getPrimitiveSize()) != 2) {
+ std::cerr << "llvm.readport: Address size is not 16 bits\n";
+ exit (1);
+ }
+
+ //
+ // Now, move the I/O port address into the DX register and use the IN
+ // instruction to get the input data.
+ //
+ BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(getReg(CI.getOperand(1)));
+ switch (CI.getCalledFunction()->getReturnType()->getPrimitiveSize()) {
+ case 1:
+ BuildMI(BB, X86::IN8, 0);
+ break;
+ case 2:
+ BuildMI(BB, X86::IN16, 0);
+ break;
+ case 4:
+ BuildMI(BB, X86::IN32, 0);
+ break;
+ default:
+ std::cerr << "Cannot do input on this data type";
+ exit (1);
+ }
+ return;
+
+ case Intrinsic::writeport:
+ //
+ // First, determine that the size of the operand falls within the
+ // acceptable range for this architecture.
+ //
+ if ((CI.getOperand(2)->getType()->getPrimitiveSize()) != 2) {
+ std::cerr << "llvm.writeport: Address size is not 16 bits\n";
+ exit (1);
+ }
+
+ //
+ // Now, move the I/O port address into the DX register and the value to
+ // write into the AL/AX/EAX register.
+ //
+ BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(getReg(CI.getOperand(2)));
+ switch (CI.getOperand(1)->getType()->getPrimitiveSize()) {
+ case 1:
+ BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(getReg(CI.getOperand(1)));
+ BuildMI(BB, X86::OUT8, 0);
+ break;
+ case 2:
+ BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(getReg(CI.getOperand(1)));
+ BuildMI(BB, X86::OUT16, 0);
+ break;
+ case 4:
+ BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(getReg(CI.getOperand(1)));
+ BuildMI(BB, X86::OUT32, 0);
+ break;
+ default:
+ std::cerr << "Cannot do output on this data type";
+ exit (1);
+ }
+ return;
+
default: assert(0 && "Error: unknown intrinsics should have been lowered!");
}
}
+static bool isSafeToFoldLoadIntoInstruction(LoadInst &LI, Instruction &User) {
+ if (LI.getParent() != User.getParent())
+ return false;
+ BasicBlock::iterator It = &LI;
+ // Check all of the instructions between the load and the user. We should
+ // really use alias analysis here, but for now we just do something simple.
+ for (++It; It != BasicBlock::iterator(&User); ++It) {
+ switch (It->getOpcode()) {
+ case Instruction::Free:
+ case Instruction::Store:
+ case Instruction::Call:
+ case Instruction::Invoke:
+ return false;
+ }
+ }
+ return true;
+}
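+// Example of the fold this check enables (sketch; %P and %x are
+// placeholders):
+//   %tmp = load int* %P
+//   %sum = add int %x, %tmp
+// visitSimpleBinary below can then emit a single ADD32rm that reads straight
+// from %P's address, provided no store, call, invoke, or free intervenes.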
+
/// visitSimpleBinary - Implement simple binary operators for integral types...
/// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
/// Xor.
+///
void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
unsigned DestReg = getReg(B);
MachineBasicBlock::iterator MI = BB->end();
- emitSimpleBinaryOperation(BB, MI, B.getOperand(0), B.getOperand(1),
- OperatorClass, DestReg);
-}
+ Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1);
-/// emitSimpleBinaryOperation - Implement simple binary operators for integral
-/// types... OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for
-/// Or, 4 for Xor.
-///
-/// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
-/// and constant expression support.
-///
-void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IP,
- Value *Op0, Value *Op1,
- unsigned OperatorClass, unsigned DestReg) {
- unsigned Class = getClassB(Op0->getType());
+ // Special case: op Reg, load [mem]
+ if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1))
+ if (!B.swapOperands())
+ std::swap(Op0, Op1); // Make sure any loads are in the RHS.
- // sub 0, X -> neg X
- if (OperatorClass == 1 && Class != cLong)
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0)) {
- if (CI->isNullValue()) {
- unsigned op1Reg = getReg(Op1, MBB, IP);
- switch (Class) {
- default: assert(0 && "Unknown class for this function!");
- case cByte:
- BMI(MBB, IP, X86::NEGr8, 1, DestReg).addReg(op1Reg);
- return;
- case cShort:
- BMI(MBB, IP, X86::NEGr16, 1, DestReg).addReg(op1Reg);
- return;
- case cInt:
- BMI(MBB, IP, X86::NEGr32, 1, DestReg).addReg(op1Reg);
- return;
- }
- }
- } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0))
- if (CFP->isExactlyValue(-0.0)) {
- // -0.0 - X === -X
- unsigned op1Reg = getReg(Op1, MBB, IP);
- BMI(MBB, IP, X86::FCHS, 1, DestReg).addReg(op1Reg);
- return;
- }
+ unsigned Class = getClassB(B.getType());
+ if (isa<LoadInst>(Op1) && Class < cFP &&
+ isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op1), B)) {
- if (!isa<ConstantInt>(Op1) || Class == cLong) {
- static const unsigned OpcodeTab[][4] = {
+ static const unsigned OpcodeTab[][3] = {
// Arithmetic operators
- { X86::ADDrr8, X86::ADDrr16, X86::ADDrr32, X86::FpADD }, // ADD
- { X86::SUBrr8, X86::SUBrr16, X86::SUBrr32, X86::FpSUB }, // SUB
+ { X86::ADD8rm, X86::ADD16rm, X86::ADD32rm }, // ADD
+ { X86::SUB8rm, X86::SUB16rm, X86::SUB32rm }, // SUB
// Bitwise operators
- { X86::ANDrr8, X86::ANDrr16, X86::ANDrr32, 0 }, // AND
- { X86:: ORrr8, X86:: ORrr16, X86:: ORrr32, 0 }, // OR
- { X86::XORrr8, X86::XORrr16, X86::XORrr32, 0 }, // XOR
+ { X86::AND8rm, X86::AND16rm, X86::AND32rm }, // AND
+ { X86:: OR8rm, X86:: OR16rm, X86:: OR32rm }, // OR
+ { X86::XOR8rm, X86::XOR16rm, X86::XOR32rm }, // XOR
};
-
- bool isLong = false;
- if (Class == cLong) {
- isLong = true;
- Class = cInt; // Bottom 32 bits are handled just like ints
- }
-
+
+ assert(Class < cFP && "General code handles 64-bit integer types!");
unsigned Opcode = OpcodeTab[OperatorClass][Class];
- assert(Opcode && "Floating point arguments to logical inst?");
+
+ unsigned BaseReg, Scale, IndexReg, Disp;
+ getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), BaseReg,
+ Scale, IndexReg, Disp);
+
+ unsigned Op0r = getReg(Op0);
+ addFullAddress(BuildMI(BB, Opcode, 2, DestReg).addReg(Op0r),
+ BaseReg, Scale, IndexReg, Disp);
+ return;
+ }
+
+ emitSimpleBinaryOperation(BB, MI, Op0, Op1, OperatorClass, DestReg);
+}
+
+/// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
+/// and constant expression support. Implements simple binary operators for
+/// integral types; OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And,
+/// 3 for Or, 4 for Xor.
+///
+void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ Value *Op0, Value *Op1,
+ unsigned OperatorClass, unsigned DestReg) {
+ unsigned Class = getClassB(Op0->getType());
+
+ // sub 0, X -> neg X
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0))
+ if (OperatorClass == 1 && CI->isNullValue()) {
+ unsigned op1Reg = getReg(Op1, MBB, IP);
+ static unsigned const NEGTab[] = {
+ X86::NEG8r, X86::NEG16r, X86::NEG32r, 0, X86::NEG32r
+ };
+ BuildMI(*MBB, IP, NEGTab[Class], 1, DestReg).addReg(op1Reg);
+
+ if (Class == cLong) {
+ // We just emitted: Dl = neg Sl
+ // Now emit : T = addc Sh, 0
+ // : Dh = neg T
+ unsigned T = makeAnotherReg(Type::IntTy);
+ BuildMI(*MBB, IP, X86::ADC32ri, 2, T).addReg(op1Reg+1).addImm(0);
+ BuildMI(*MBB, IP, X86::NEG32r, 1, DestReg+1).addReg(T);
+ }
+ return;
+ }
+
+ // Special case: op Reg, <const int>
+ if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
unsigned Op0r = getReg(Op0, MBB, IP);
- unsigned Op1r = getReg(Op1, MBB, IP);
- BMI(MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
+
+ // xor X, -1 -> not X
+ if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
+ static unsigned const NOTTab[] = {
+ X86::NOT8r, X86::NOT16r, X86::NOT32r, 0, X86::NOT32r
+ };
+ BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
+ if (Class == cLong) // Invert the top part too
+ BuildMI(*MBB, IP, X86::NOT32r, 1, DestReg+1).addReg(Op0r+1);
+ return;
+ }
+
+ // add X, -1 -> dec X
+ if (OperatorClass == 0 && Op1C->isAllOnesValue() && Class != cLong) {
+ // Note that we can't use dec for 64-bit decrements, because it does not
+ // set the carry flag!
+ static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
+ BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
+ return;
+ }
+
+ // add X, 1 -> inc X
+ if (OperatorClass == 0 && Op1C->equalsInt(1) && Class != cLong) {
+ // Note that we can't use inc for 64-bit increments, because it does not
+ // set the carry flag!
+ static unsigned const INCTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
+ BuildMI(*MBB, IP, INCTab[Class], 1, DestReg).addReg(Op0r);
+ return;
+ }
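
The `Class != cLong` guards matter because the 64-bit paths pair the low-word instruction with an ADC/SBB on the high word, and INC/DEC leave CF untouched. A small sketch of the borrow propagation a 64-bit decrement needs (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t V = 0x100000000ULL;     // low word is zero: the borrow case
      uint32_t Lo = (uint32_t)V, Hi = (uint32_t)(V >> 32);
      uint32_t CF = Lo < 1;            // borrow out of "sub Lo, 1"; DEC would
      Lo -= 1;                         // compute the same low word but not set
      Hi -= CF;                        // CF, so this sbb would miss the borrow
      assert((((uint64_t)Hi << 32) | Lo) == V - 1);
    }
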
+
+ static const unsigned OpcodeTab[][5] = {
+ // Arithmetic operators
+ { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri }, // ADD
+ { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, X86::SUB32ri }, // SUB
- if (isLong) { // Handle the upper 32 bits of long values...
+ // Bitwise operators
+ { X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, X86::AND32ri }, // AND
+    { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri, 0, X86:: OR32ri }, // OR
+ { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, X86::XOR32ri }, // XOR
+ };
+
+ unsigned Opcode = OpcodeTab[OperatorClass][Class];
+ unsigned Op1l = cast<ConstantInt>(Op1C)->getRawValue();
+
+ if (Class != cLong) {
+ BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
+ return;
+ } else {
+ // If this is a long value and the high or low bits have a special
+ // property, emit some special cases.
+ unsigned Op1h = cast<ConstantInt>(Op1C)->getRawValue() >> 32LL;
+
+ // If the constant is zero in the low 32-bits, just copy the low part
+ // across and apply the normal 32-bit operation to the high parts. There
+ // will be no carry or borrow into the top.
+ if (Op1l == 0) {
+ if (OperatorClass != 2) // All but and...
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0r);
+ else
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
+ BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg+1)
+ .addReg(Op0r+1).addImm(Op1h);
+ return;
+ }
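
The special case above is sound because a 32-bit add or subtract of zero can never generate a carry or borrow, so the low half passes through unchanged while an ordinary 32-bit operation handles the high half. A quick check (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t X = 0xdeadbeef12345678ULL;
      uint64_t C = 0xabcd432100000000ULL;          // Op1l == 0
      uint32_t Rl = (uint32_t)X;                   // plain copy of the low half
      uint32_t Rh = (uint32_t)(X >> 32) + (uint32_t)(C >> 32);
      assert((((uint64_t)Rh << 32) | Rl) == X + C);
    }
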
+
+ // If this is a logical operation and the top 32-bits are zero, just
+ // operate on the lower 32.
+ if (Op1h == 0 && OperatorClass > 1) {
+ BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg)
+ .addReg(Op0r).addImm(Op1l);
+ if (OperatorClass != 2) // All but and
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(Op0r+1);
+ else
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
+ return;
+ }
+
+ // TODO: We could handle lots of other special cases here, such as AND'ing
+ // with 0xFFFFFFFF00000000 -> noop, etc.
+
+ // Otherwise, code generate the full operation with a constant.
static const unsigned TopTab[] = {
- X86::ADCrr32, X86::SBBrr32, X86::ANDrr32, X86::ORrr32, X86::XORrr32
+ X86::ADC32ri, X86::SBB32ri, X86::AND32ri, X86::OR32ri, X86::XOR32ri
};
- BMI(MBB, IP, TopTab[OperatorClass], 2,
- DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
+
+ BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
+ BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1)
+ .addReg(Op0r+1).addImm(Op1h);
+ return;
}
- return;
}
- // Special case: op Reg, <const>
- ConstantInt *Op1C = cast<ConstantInt>(Op1);
- unsigned Op0r = getReg(Op0, MBB, IP);
-
- // xor X, -1 -> not X
- if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
- static unsigned const NOTTab[] = { X86::NOTr8, X86::NOTr16, X86::NOTr32 };
- BMI(MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
- return;
- }
+ // Special case: op Reg, <const fp>
+ if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1))
+ if (!Op1C->isExactlyValue(+0.0) && !Op1C->isExactlyValue(+1.0)) {
+ assert(OperatorClass < 2 && "FP operations only support add/sub!");
+
+ // Create a constant pool entry for this constant.
+ MachineConstantPool *CP = F->getConstantPool();
+ unsigned CPI = CP->getConstantPoolIndex(Op1C);
+ const Type *Ty = Op1->getType();
- // add X, -1 -> dec X
- if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
- static unsigned const DECTab[] = { X86::DECr8, X86::DECr16, X86::DECr32 };
- BMI(MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
- return;
- }
+ static const unsigned OpcodeTab[][2] = {
+ { X86::FADD32m, X86::FSUB32m }, // Float
+ { X86::FADD64m, X86::FSUB64m }, // Double
+ };
- // add X, 1 -> inc X
- if (OperatorClass == 0 && Op1C->equalsInt(1)) {
- static unsigned const DECTab[] = { X86::INCr8, X86::INCr16, X86::INCr32 };
- BMI(MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
- return;
- }
+  assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
+         "Unknown FP type!");
+ unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass];
+ unsigned Op0r = getReg(Op0, MBB, IP);
+ addConstantPoolReference(BuildMI(*MBB, IP, Opcode, 5,
+ DestReg).addReg(Op0r), CPI);
+ return;
+ }
- static const unsigned OpcodeTab[][3] = {
+ // Special case: R1 = sub <const fp>, R2
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0))
+ if (OperatorClass == 1) { // sub only
+ if (CFP->isExactlyValue(-0.0)) {
+ // -0.0 - X === -X
+ unsigned op1Reg = getReg(Op1, MBB, IP);
+ BuildMI(*MBB, IP, X86::FCHS, 1, DestReg).addReg(op1Reg);
+ return;
+ } else if (!CFP->isExactlyValue(+0.0) && !CFP->isExactlyValue(+1.0)) {
+ // R1 = sub CST, R2 --> R1 = subr R2, CST
+
+ // Create a constant pool entry for this constant.
+ MachineConstantPool *CP = F->getConstantPool();
+ unsigned CPI = CP->getConstantPoolIndex(CFP);
+ const Type *Ty = CFP->getType();
+
+ static const unsigned OpcodeTab[2] = { X86::FSUBR32m, X86::FSUBR64m };
+
+      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
+             "Unknown FP type!");
+ unsigned Opcode = OpcodeTab[Ty != Type::FloatTy];
+ unsigned Op1r = getReg(Op1, MBB, IP);
+ addConstantPoolReference(BuildMI(*MBB, IP, Opcode, 5,
+ DestReg).addReg(Op1r), CPI);
+ return;
+ }
+ }
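
The FCHS special case leans on an IEEE-754 identity: -0.0 - x equals -x for every x, including signed zeros, whereas 0.0 - x would get the sign wrong for x == 0.0. A check of that corner (illustrative only):

    #include <cassert>
    #include <cmath>

    int main() {
      double Vals[] = {0.0, -0.0, 1.5, -2.25};
      for (double X : Vals) {
        assert(-0.0 - X == -X);
        assert(std::signbit(-0.0 - X) == std::signbit(-X));
      }
      // The naive form fails for +0.0: 0.0 - 0.0 is +0.0, but -(0.0) is -0.0.
      assert(!std::signbit(0.0 - 0.0) && std::signbit(-0.0));
    }
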
+
+ // Finally, handle the general case now.
+ static const unsigned OpcodeTab[][5] = {
// Arithmetic operators
- { X86::ADDri8, X86::ADDri16, X86::ADDri32 }, // ADD
- { X86::SUBri8, X86::SUBri16, X86::SUBri32 }, // SUB
-
+ { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, X86::FpADD, X86::ADD32rr },// ADD
+ { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB, X86::SUB32rr },// SUB
+
// Bitwise operators
- { X86::ANDri8, X86::ANDri16, X86::ANDri32 }, // AND
- { X86:: ORri8, X86:: ORri16, X86:: ORri32 }, // OR
- { X86::XORri8, X86::XORri16, X86::XORri32 }, // XOR
+ { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, X86::AND32rr }, // AND
+ { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0, X86:: OR32rr }, // OR
+ { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, X86::XOR32rr }, // XOR
};
-
- assert(Class < 3 && "General code handles 64-bit integer types!");
+
unsigned Opcode = OpcodeTab[OperatorClass][Class];
- uint64_t Op1v = cast<ConstantInt>(Op1C)->getRawValue();
-
- // Mask off any upper bits of the constant, if there are any...
- Op1v &= (1ULL << (8 << Class)) - 1;
- BMI(MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addZImm(Op1v);
+ assert(Opcode && "Floating point arguments to logical inst?");
+ unsigned Op0r = getReg(Op0, MBB, IP);
+ unsigned Op1r = getReg(Op1, MBB, IP);
+ BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
+
+ if (Class == cLong) { // Handle the upper 32 bits of long values...
+ static const unsigned TopTab[] = {
+ X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
+ };
+ BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
+ DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
+ }
}
/// doMultiply - Emit appropriate instructions to multiply together the
unsigned Class = getClass(DestTy);
switch (Class) {
case cFP: // Floating point multiply
- BMI(BB, MBBI, X86::FpMUL, 2, DestReg).addReg(op0Reg).addReg(op1Reg);
+ BuildMI(*MBB, MBBI, X86::FpMUL, 2, DestReg).addReg(op0Reg).addReg(op1Reg);
return;
case cInt:
case cShort:
- BMI(BB, MBBI, Class == cInt ? X86::IMULrr32 : X86::IMULrr16, 2, DestReg)
+ BuildMI(*MBB, MBBI, Class == cInt ? X86::IMUL32rr:X86::IMUL16rr, 2, DestReg)
.addReg(op0Reg).addReg(op1Reg);
return;
case cByte:
// Must use the MUL instruction, which forces use of AL...
- BMI(MBB, MBBI, X86::MOVrr8, 1, X86::AL).addReg(op0Reg);
- BMI(MBB, MBBI, X86::MULr8, 1).addReg(op1Reg);
- BMI(MBB, MBBI, X86::MOVrr8, 1, DestReg).addReg(X86::AL);
+ BuildMI(*MBB, MBBI, X86::MOV8rr, 1, X86::AL).addReg(op0Reg);
+ BuildMI(*MBB, MBBI, X86::MUL8r, 1).addReg(op1Reg);
+ BuildMI(*MBB, MBBI, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
return;
default:
case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
return Count+1;
}
+
+/// doMultiplyConst - This function is specialized to efficiently codegen an 8,
+/// 16, or 32-bit integer multiply by a constant.
void ISel::doMultiplyConst(MachineBasicBlock *MBB,
MachineBasicBlock::iterator IP,
unsigned DestReg, const Type *DestTy,
unsigned op0Reg, unsigned ConstRHS) {
+ static const unsigned MOVrrTab[] = {X86::MOV8rr, X86::MOV16rr, X86::MOV32rr};
+ static const unsigned MOVriTab[] = {X86::MOV8ri, X86::MOV16ri, X86::MOV32ri};
+
unsigned Class = getClass(DestTy);
+ if (ConstRHS == 0) {
+ BuildMI(*MBB, IP, MOVriTab[Class], 1, DestReg).addImm(0);
+ return;
+ } else if (ConstRHS == 1) {
+ BuildMI(*MBB, IP, MOVrrTab[Class], 1, DestReg).addReg(op0Reg);
+ return;
+ }
+
// If the element size is exactly a power of 2, use a shift to get it.
if (unsigned Shift = ExactLog2(ConstRHS)) {
switch (Class) {
default: assert(0 && "Unknown class for this function!");
case cByte:
- BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
+ BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
return;
case cShort:
- BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
+ BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
return;
case cInt:
- BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
+ BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
return;
}
}
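
The `Shift-1` in the three cases above follows from the contract of ExactLog2 (defined earlier in this file): it returns K+1 when Val == 1 << K, and 0 when Val is not a power of two, so 0 can signal failure even though a shift count of zero is meaningful. A sketch of that contract (a hypothetical reimplementation, for illustration only):

    #include <cassert>

    static unsigned ExactLog2(unsigned Val) {
      if (Val == 0 || (Val & (Val - 1))) return 0;  // not a power of two
      unsigned Count = 1;
      while (Val != 1) { Val >>= 1; ++Count; }
      return Count;                                 // K+1 for Val == 1 << K
    }

    int main() {
      assert(ExactLog2(1) == 1 && ExactLog2(8) == 4 && ExactLog2(6) == 0);
      unsigned X = 7;
      assert((X << (ExactLog2(8) - 1)) == X * 8);   // shift replaces multiply
    }
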
if (Class == cShort) {
- BMI(MBB, IP, X86::IMULrri16, 2, DestReg).addReg(op0Reg).addZImm(ConstRHS);
+ BuildMI(*MBB, IP, X86::IMUL16rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
return;
} else if (Class == cInt) {
- BMI(MBB, IP, X86::IMULrri32, 2, DestReg).addReg(op0Reg).addZImm(ConstRHS);
+ BuildMI(*MBB, IP, X86::IMUL32rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
return;
}
// Most general case, emit a normal multiply...
- static const unsigned MOVriTab[] = {
- X86::MOVri8, X86::MOVri16, X86::MOVri32
- };
-
unsigned TmpReg = makeAnotherReg(DestTy);
- BMI(MBB, IP, MOVriTab[Class], 1, TmpReg).addZImm(ConstRHS);
+ BuildMI(*MBB, IP, MOVriTab[Class], 1, TmpReg).addImm(ConstRHS);
// Emit a MUL to multiply the register holding the index by
// elementSize, putting the result in OffsetReg.
/// with the EAX register explicitly.
///
void ISel::visitMul(BinaryOperator &I) {
- unsigned Op0Reg = getReg(I.getOperand(0));
- unsigned DestReg = getReg(I);
+ unsigned ResultReg = getReg(I);
+
+ MachineBasicBlock::iterator IP = BB->end();
+ emitMultiply(BB, IP, I.getOperand(0), I.getOperand(1), ResultReg);
+}
+
+void ISel::emitMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
+ Value *Op0, Value *Op1, unsigned DestReg) {
+ MachineBasicBlock &BB = *MBB;
+ TypeClass Class = getClass(Op0->getType());
// Simple scalar multiply?
- if (I.getType() != Type::LongTy && I.getType() != Type::ULongTy) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1))) {
- unsigned Val = (unsigned)CI->getRawValue(); // Cannot be 64-bit constant
- MachineBasicBlock::iterator MBBI = BB->end();
- doMultiplyConst(BB, MBBI, DestReg, I.getType(), Op0Reg, Val);
+ switch (Class) {
+ case cByte:
+ case cShort:
+ case cInt:
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
+ unsigned Op0Reg = getReg(Op0, &BB, IP);
+      unsigned Val = (unsigned)CI->getRawValue(); // Cannot be a 64-bit constant
+ doMultiplyConst(&BB, IP, DestReg, Op0->getType(), Op0Reg, Val);
} else {
- unsigned Op1Reg = getReg(I.getOperand(1));
- MachineBasicBlock::iterator MBBI = BB->end();
- doMultiply(BB, MBBI, DestReg, I.getType(), Op0Reg, Op1Reg);
+ unsigned Op0Reg = getReg(Op0, &BB, IP);
+ unsigned Op1Reg = getReg(Op1, &BB, IP);
+ doMultiply(&BB, IP, DestReg, Op1->getType(), Op0Reg, Op1Reg);
}
- } else {
- unsigned Op1Reg = getReg(I.getOperand(1));
-
- // Long value. We have to do things the hard way...
- // Multiply the two low parts... capturing carry into EDX
- BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(Op0Reg);
- BuildMI(BB, X86::MULr32, 1).addReg(Op1Reg); // AL*BL
+ return;
+  case cFP: {
+    unsigned Op0Reg = getReg(Op0, &BB, IP);
+    unsigned Op1Reg = getReg(Op1, &BB, IP);
+    doMultiply(&BB, IP, DestReg, Op1->getType(), Op0Reg, Op1Reg);
+    return;
+  }
+ case cLong:
+ break;
+ }
- unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
- BuildMI(BB, X86::MOVrr32, 1, DestReg).addReg(X86::EAX); // AL*BL
- BuildMI(BB, X86::MOVrr32, 1, OverflowReg).addReg(X86::EDX); // AL*BL >> 32
+ unsigned Op0Reg = getReg(Op0, &BB, IP);
- MachineBasicBlock::iterator MBBI = BB->end();
+ // Long value. We have to do things the hard way...
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
+ unsigned CLow = CI->getRawValue();
+ unsigned CHi = CI->getRawValue() >> 32;
+
+ if (CLow == 0) {
+ // If the low part of the constant is all zeros, things are simple.
+ BuildMI(BB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
+ doMultiplyConst(&BB, IP, DestReg+1, Type::UIntTy, Op0Reg, CHi);
+ return;
+ }
+
+ // Multiply the two low parts... capturing carry into EDX
+ unsigned OverflowReg = 0;
+ if (CLow == 1) {
+ BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0Reg);
+ } else {
+ unsigned Op1RegL = makeAnotherReg(Type::UIntTy);
+ OverflowReg = makeAnotherReg(Type::UIntTy);
+ BuildMI(BB, IP, X86::MOV32ri, 1, Op1RegL).addImm(CLow);
+ BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
+ BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1RegL); // AL*BL
+
+ BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
+ BuildMI(BB, IP, X86::MOV32rr, 1,
+ OverflowReg).addReg(X86::EDX); // AL*BL >> 32
+ }
+
unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
- BMI(BB, MBBI, X86::IMULrr32, 2, AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
-
- unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
- BuildMI(BB, X86::ADDrr32, 2, // AH*BL+(AL*BL >> 32)
- AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
+ doMultiplyConst(&BB, IP, AHBLReg, Type::UIntTy, Op0Reg+1, CLow);
- MBBI = BB->end();
- unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
- BMI(BB, MBBI, X86::IMULrr32, 2, ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
+ unsigned AHBLplusOverflowReg;
+ if (OverflowReg) {
+ AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
+ BuildMI(BB, IP, X86::ADD32rr, 2, // AH*BL+(AL*BL >> 32)
+ AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
+ } else {
+ AHBLplusOverflowReg = AHBLReg;
+ }
- BuildMI(BB, X86::ADDrr32, 2, // AL*BH + AH*BL + (AL*BL >> 32)
- DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
+ if (CHi == 0) {
+ BuildMI(BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(AHBLplusOverflowReg);
+ } else {
+ unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
+ doMultiplyConst(&BB, IP, ALBHReg, Type::UIntTy, Op0Reg, CHi);
+
+ BuildMI(BB, IP, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
+ DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
+ }
+ return;
}
+
+ // General 64x64 multiply
+
+ unsigned Op1Reg = getReg(Op1, &BB, IP);
+ // Multiply the two low parts... capturing carry into EDX
+ BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
+ BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1Reg); // AL*BL
+
+ unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
+ BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
+ BuildMI(BB, IP, X86::MOV32rr, 1,
+ OverflowReg).addReg(X86::EDX); // AL*BL >> 32
+
+ unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
+ BuildMI(BB, IP, X86::IMUL32rr, 2,
+ AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
+
+ unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
+ BuildMI(BB, IP, X86::ADD32rr, 2, // AH*BL+(AL*BL >> 32)
+ AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
+
+ unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
+ BuildMI(BB, IP, X86::IMUL32rr, 2,
+ ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
+
+ BuildMI(BB, IP, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
+ DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
}
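
Both the constant and the general paths implement the schoolbook decomposition a*b mod 2^64 = AL*BL + ((AH*BL + AL*BH) << 32): only AL*BL is needed as a full 64-bit product (hence the one MUL capturing EDX), the cross terms contribute only their low words, and AH*BH never matters modulo 2^64. A standalone check (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t A = 0x123456789abcdef0ULL, B = 0x0fedcba987654321ULL;
      uint32_t AL = (uint32_t)A, AH = (uint32_t)(A >> 32);
      uint32_t BL = (uint32_t)B, BH = (uint32_t)(B >> 32);
      uint64_t ALBL = (uint64_t)AL * BL;             // mul: result in EDX:EAX
      uint32_t Lo = (uint32_t)ALBL;
      uint32_t Hi = (uint32_t)(ALBL >> 32)           // OverflowReg
                  + (uint32_t)((uint64_t)AH * BL)    // AHBLReg (low word only)
                  + (uint32_t)((uint64_t)AL * BH);   // ALBHReg (low word only)
      assert((((uint64_t)Hi << 32) | Lo) == A * B);
    }
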
/// instructions work differently for signed and unsigned operands.
///
void ISel::visitDivRem(BinaryOperator &I) {
- unsigned Op0Reg = getReg(I.getOperand(0));
- unsigned Op1Reg = getReg(I.getOperand(1));
unsigned ResultReg = getReg(I);
MachineBasicBlock::iterator IP = BB->end();
- emitDivRemOperation(BB, IP, Op0Reg, Op1Reg, I.getOpcode() == Instruction::Div,
- I.getType(), ResultReg);
+ emitDivRemOperation(BB, IP, I.getOperand(0), I.getOperand(1),
+ I.getOpcode() == Instruction::Div, ResultReg);
}
void ISel::emitDivRemOperation(MachineBasicBlock *BB,
MachineBasicBlock::iterator IP,
- unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
- const Type *Ty, unsigned ResultReg) {
- unsigned Class = getClass(Ty);
+ Value *Op0, Value *Op1, bool isDiv,
+ unsigned ResultReg) {
+ unsigned Class = getClass(Op0->getType());
switch (Class) {
case cFP: // Floating point divide
if (isDiv) {
- BMI(BB, IP, X86::FpDIV, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
+ unsigned Op0Reg = getReg(Op0, BB, IP);
+ unsigned Op1Reg = getReg(Op1, BB, IP);
+ BuildMI(*BB, IP, X86::FpDIV, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
} else { // Floating point remainder...
+ unsigned Op0Reg = getReg(Op0, BB, IP);
+ unsigned Op1Reg = getReg(Op1, BB, IP);
MachineInstr *TheCall =
BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
std::vector<ValueRecord> Args;
case cLong: {
static const char *FnName[] =
{ "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" };
-
- unsigned NameIdx = Ty->isUnsigned()*2 + isDiv;
+ unsigned Op0Reg = getReg(Op0, BB, IP);
+ unsigned Op1Reg = getReg(Op1, BB, IP);
+ unsigned NameIdx = Op0->getType()->isUnsigned()*2 + isDiv;
MachineInstr *TheCall =
BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);
}
static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
- static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
- static const unsigned SarOpcode[]={ X86::SARri8, X86::SARri16, X86::SARri32 };
- static const unsigned ClrOpcode[]={ X86::MOVri8, X86::MOVri16, X86::MOVri32 };
+ static const unsigned MovOpcode[]={ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
+ static const unsigned SarOpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
+ static const unsigned ClrOpcode[]={ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri };
static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
static const unsigned DivOpcode[][4] = {
- { X86::DIVr8 , X86::DIVr16 , X86::DIVr32 , 0 }, // Unsigned division
- { X86::IDIVr8, X86::IDIVr16, X86::IDIVr32, 0 }, // Signed division
+ { X86::DIV8r , X86::DIV16r , X86::DIV32r , 0 }, // Unsigned division
+ { X86::IDIV8r, X86::IDIV16r, X86::IDIV32r, 0 }, // Signed division
};
- bool isSigned = Ty->isSigned();
+ bool isSigned = Op0->getType()->isSigned();
unsigned Reg = Regs[Class];
unsigned ExtReg = ExtRegs[Class];
// Put the first operand into one of the A registers...
- BMI(BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
+ unsigned Op0Reg = getReg(Op0, BB, IP);
+ unsigned Op1Reg = getReg(Op1, BB, IP);
+ BuildMI(*BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
if (isSigned) {
// Emit a sign extension instruction...
- unsigned ShiftResult = makeAnotherReg(Ty);
- BMI(BB, IP, SarOpcode[Class], 2, ShiftResult).addReg(Op0Reg).addZImm(31);
- BMI(BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
+ unsigned ShiftResult = makeAnotherReg(Op0->getType());
+ BuildMI(*BB, IP, SarOpcode[Class], 2,ShiftResult).addReg(Op0Reg).addImm(31);
+ BuildMI(*BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
} else {
// If unsigned, emit a zeroing instruction... (reg = 0)
- BMI(BB, IP, ClrOpcode[Class], 2, ExtReg).addZImm(0);
+ BuildMI(*BB, IP, ClrOpcode[Class], 2, ExtReg).addImm(0);
}
// Emit the appropriate divide or remainder instruction...
- BMI(BB, IP, DivOpcode[isSigned][Class], 1).addReg(Op1Reg);
+ BuildMI(*BB, IP, DivOpcode[isSigned][Class], 1).addReg(Op1Reg);
// Figure out which register we want to pick the result out of...
unsigned DestReg = isDiv ? Reg : ExtReg;
// Put the result into the destination register...
- BMI(BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
+ BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
}
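
The sequence above mirrors the hardware contract of DIV/IDIV: the dividend is widened into ExtReg:Reg (EDX:EAX for ints) either by replicating the sign bit with SAR 31 or by zeroing, and the instruction leaves the quotient in Reg and the remainder in ExtReg. A check of the widening and of where each result lands (illustrative only; `>>` on a negative int and the final conversion are assumed to behave as two's complement, as on x86):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t Dividend = -7, Divisor = 2;
      int32_t Ext = Dividend >> 31;                 // sar 31: -1 or 0 -> EDX
      uint64_t Bits = ((uint64_t)(uint32_t)Ext << 32) | (uint32_t)Dividend;
      int64_t Wide = (int64_t)Bits;                 // EDX:EAX read as one value
      assert(Wide == Dividend);                     // the widening is lossless
      assert(Wide / Divisor == -3 && Wide % Divisor == -1); // EAX / EDX
    }
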
unsigned Class = getClass (ResultTy);
static const unsigned ConstantOperand[][4] = {
- { X86::SHRri8, X86::SHRri16, X86::SHRri32, X86::SHRDri32 }, // SHR
- { X86::SARri8, X86::SARri16, X86::SARri32, X86::SHRDri32 }, // SAR
- { X86::SHLri8, X86::SHLri16, X86::SHLri32, X86::SHLDri32 }, // SHL
- { X86::SHLri8, X86::SHLri16, X86::SHLri32, X86::SHLDri32 }, // SAL = SHL
+ { X86::SHR8ri, X86::SHR16ri, X86::SHR32ri, X86::SHRD32rri8 }, // SHR
+ { X86::SAR8ri, X86::SAR16ri, X86::SAR32ri, X86::SHRD32rri8 }, // SAR
+ { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SHL
+ { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SAL = SHL
};
static const unsigned NonConstantOperand[][4] = {
- { X86::SHRrr8, X86::SHRrr16, X86::SHRrr32 }, // SHR
- { X86::SARrr8, X86::SARrr16, X86::SARrr32 }, // SAR
- { X86::SHLrr8, X86::SHLrr16, X86::SHLrr32 }, // SHL
- { X86::SHLrr8, X86::SHLrr16, X86::SHLrr32 }, // SAL = SHL
+ { X86::SHR8rCL, X86::SHR16rCL, X86::SHR32rCL }, // SHR
+ { X86::SAR8rCL, X86::SAR16rCL, X86::SAR32rCL }, // SAR
+ { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SHL
+ { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SAL = SHL
};
// Longs, as usual, are handled specially...
if (Amount < 32) {
const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
if (isLeftShift) {
- BMI(MBB, IP, Opc[3], 3,
- DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addZImm(Amount);
- BMI(MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addZImm(Amount);
+ BuildMI(*MBB, IP, Opc[3], 3,
+ DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addImm(Amount);
+ BuildMI(*MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addImm(Amount);
} else {
- BMI(MBB, IP, Opc[3], 3,
- DestReg).addReg(SrcReg ).addReg(SrcReg+1).addZImm(Amount);
- BMI(MBB, IP, Opc[2], 2, DestReg+1).addReg(SrcReg+1).addZImm(Amount);
+ BuildMI(*MBB, IP, Opc[3], 3,
+ DestReg).addReg(SrcReg ).addReg(SrcReg+1).addImm(Amount);
+ BuildMI(*MBB, IP, Opc[2],2,DestReg+1).addReg(SrcReg+1).addImm(Amount);
}
} else { // Shifting more than 32 bits
Amount -= 32;
if (isLeftShift) {
- BMI(MBB, IP, X86::SHLri32, 2,
- DestReg + 1).addReg(SrcReg).addZImm(Amount);
- BMI(MBB, IP, X86::MOVri32, 1,
- DestReg).addZImm(0);
+ if (Amount != 0) {
+ BuildMI(*MBB, IP, X86::SHL32ri, 2,
+ DestReg + 1).addReg(SrcReg).addImm(Amount);
+ } else {
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg);
+ }
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
} else {
- unsigned Opcode = isSigned ? X86::SARri32 : X86::SHRri32;
- BMI(MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
- BMI(MBB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
+ if (Amount != 0) {
+ BuildMI(*MBB, IP, isSigned ? X86::SAR32ri : X86::SHR32ri, 2,
+ DestReg).addReg(SrcReg+1).addImm(Amount);
+ } else {
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg+1);
+ }
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
}
}
} else {
// If this is a SHR of a Long, then we need to do funny sign extension
// stuff. TmpReg gets the value to use as the high-part if we are
// shifting more than 32 bits.
-      BMI(MBB, IP, X86::SARri32, 2, TmpReg).addReg(SrcReg+1).addZImm(31);
+      BuildMI(*MBB, IP, X86::SAR32ri, 2, TmpReg).addReg(SrcReg+1).addImm(31);
} else {
// Other shifts use a fixed zero value if the shift is more than 32
// bits.
- BMI(MBB, IP, X86::MOVri32, 1, TmpReg).addZImm(0);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, TmpReg).addImm(0);
}
// Initialize CL with the shift amount...
unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
- BMI(MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);
+ BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
if (isLeftShift) {
// TmpReg2 = shld inHi, inLo
- BMI(MBB, IP, X86::SHLDrr32, 2, TmpReg2).addReg(SrcReg+1).addReg(SrcReg);
+ BuildMI(*MBB, IP, X86::SHLD32rrCL,2,TmpReg2).addReg(SrcReg+1)
+ .addReg(SrcReg);
// TmpReg3 = shl inLo, CL
- BMI(MBB, IP, X86::SHLrr32, 1, TmpReg3).addReg(SrcReg);
+ BuildMI(*MBB, IP, X86::SHL32rCL, 1, TmpReg3).addReg(SrcReg);
// Set the flags to indicate whether the shift was by more than 32 bits.
- BMI(MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);
+ BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
// DestHi = (>32) ? TmpReg3 : TmpReg2;
- BMI(MBB, IP, X86::CMOVNErr32, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
// DestLo = (>32) ? TmpReg : TmpReg3;
- BMI(MBB, IP, X86::CMOVNErr32, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg).addReg(TmpReg3).addReg(TmpReg);
} else {
// TmpReg2 = shrd inLo, inHi
- BMI(MBB, IP, X86::SHRDrr32, 2, TmpReg2).addReg(SrcReg).addReg(SrcReg+1);
+ BuildMI(*MBB, IP, X86::SHRD32rrCL,2,TmpReg2).addReg(SrcReg)
+ .addReg(SrcReg+1);
// TmpReg3 = s[ah]r inHi, CL
- BMI(MBB, IP, isSigned ? X86::SARrr32 : X86::SHRrr32, 1, TmpReg3)
+ BuildMI(*MBB, IP, isSigned ? X86::SAR32rCL : X86::SHR32rCL, 1, TmpReg3)
.addReg(SrcReg+1);
// Set the flags to indicate whether the shift was by more than 32 bits.
- BMI(MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);
+ BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
// DestLo = (>32) ? TmpReg3 : TmpReg2;
- BMI(MBB, IP, X86::CMOVNErr32, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg).addReg(TmpReg2).addReg(TmpReg3);
// DestHi = (>32) ? TmpReg : TmpReg3;
- BMI(MBB, IP, X86::CMOVNErr32, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg+1).addReg(TmpReg3).addReg(TmpReg);
}
}
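
The SHLD/TEST/CMOV dance is needed because x86 shifts mask the CL count to five bits, so a single 32-bit shift can never move bits by 32 or more; TEST CL,32 detects the large-shift case and the CMOVs then select the correct halves. Simulating the left-shift path (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t V = 0x89abcdef01234567ULL;
      for (unsigned CL = 0; CL < 64; ++CL) {
        uint32_t Lo = (uint32_t)V, Hi = (uint32_t)(V >> 32);
        unsigned S = CL & 31;                                 // hardware masking
        uint32_t T2 = S ? (Hi << S) | (Lo >> (32 - S)) : Hi;  // shld Hi, Lo
        uint32_t T3 = Lo << S;                                // shl Lo, cl
        uint32_t DHi = (CL & 32) ? T3 : T2;                   // cmovne
        uint32_t DLo = (CL & 32) ? 0u : T3;                   // cmovne (TmpReg=0)
        assert((((uint64_t)DHi << 32) | DLo) == V << CL);
      }
    }
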
assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");
const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
- BMI(MBB, IP, Opc[Class], 2,
- DestReg).addReg(SrcReg).addZImm(CUI->getValue());
+ BuildMI(*MBB, IP, Opc[Class], 2,
+ DestReg).addReg(SrcReg).addImm(CUI->getValue());
} else { // The shift amount is non-constant.
unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP);
- BMI(MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);
+ BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
- BMI(MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
+ BuildMI(*MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
+ }
+}
+
+
+void ISel::getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
+ unsigned &IndexReg, unsigned &Disp) {
+ BaseReg = 0; Scale = 1; IndexReg = 0; Disp = 0;
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
+ if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
+ BaseReg, Scale, IndexReg, Disp))
+ return;
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
+ if (CE->getOpcode() == Instruction::GetElementPtr)
+ if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
+ BaseReg, Scale, IndexReg, Disp))
+ return;
}
+
+ // If it's not foldable, reset addr mode.
+ BaseReg = getReg(Addr);
+ Scale = 1; IndexReg = 0; Disp = 0;
}
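
The four outputs of getAddressingMode describe the general x86 memory operand [BaseReg + IndexReg*Scale + Disp], which is everything an LEA or a folded load/store can encode. The pointer arithmetic it stands for (struct and names invented for illustration):

    #include <cassert>
    #include <cstdint>

    struct Pair { int32_t A, B; };          // hypothetical 8-byte element

    int main() {
      Pair Arr[4] = {};
      char *Base = (char *)Arr;             // BaseReg
      uintptr_t Index = 3;                  // IndexReg
      uintptr_t Scale = sizeof(Pair);       // 8: one of the legal scales 1/2/4/8
      uintptr_t Disp = 4;                   // constant offset of field B
      assert((void *)(Base + Index * Scale + Disp) == (void *)&Arr[3].B);
    }
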
/// need to worry about the memory layout of the target machine.
///
void ISel::visitLoadInst(LoadInst &I) {
- unsigned SrcAddrReg = getReg(I.getOperand(0));
+ // Check to see if this load instruction is going to be folded into a binary
+ // instruction, like add. If so, we don't want to emit it. Wouldn't a real
+ // pattern matching instruction selector be nice?
+ if (I.hasOneUse() && getClassB(I.getType()) < cFP) {
+ Instruction *User = cast<Instruction>(I.use_back());
+ switch (User->getOpcode()) {
+ default: User = 0; break;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ break;
+ }
+
+ if (User) {
+      // Okay, we found a user. If the load is the first operand and there is
+      // no second operand load, reverse the operand ordering. Note that this
+      // can fail for a subtract (i.e., no change will be made).
+ if (!isa<LoadInst>(User->getOperand(1)))
+ cast<BinaryOperator>(User)->swapOperands();
+
+      // Okay, now that everything is set up, if this load is used as the
+      // second operand, and if there are no instructions that invalidate the
+      // load before the binary operator, eliminate the load.
+ if (User->getOperand(1) == &I &&
+ isSafeToFoldLoadIntoInstruction(I, *User))
+ return; // Eliminate the load!
+ }
+ }
+
unsigned DestReg = getReg(I);
+ unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
+ getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
unsigned Class = getClassB(I.getType());
-
if (Class == cLong) {
- addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), SrcAddrReg);
- addRegOffset(BuildMI(BB, X86::MOVrm32, 4, DestReg+1), SrcAddrReg, 4);
+ addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg),
+ BaseReg, Scale, IndexReg, Disp);
+ addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1),
+ BaseReg, Scale, IndexReg, Disp+4);
return;
}
static const unsigned Opcodes[] = {
- X86::MOVrm8, X86::MOVrm16, X86::MOVrm32, X86::FLDr32
+ X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD32m
};
unsigned Opcode = Opcodes[Class];
- if (I.getType() == Type::DoubleTy) Opcode = X86::FLDr64;
- addDirectMem(BuildMI(BB, Opcode, 4, DestReg), SrcAddrReg);
+ if (I.getType() == Type::DoubleTy) Opcode = X86::FLD64m;
+ addFullAddress(BuildMI(BB, Opcode, 4, DestReg),
+ BaseReg, Scale, IndexReg, Disp);
}
/// visitStoreInst - Implement LLVM store instructions in terms of the x86 'mov'
/// instruction.
///
void ISel::visitStoreInst(StoreInst &I) {
- unsigned ValReg = getReg(I.getOperand(0));
- unsigned AddressReg = getReg(I.getOperand(1));
-
+ unsigned BaseReg, Scale, IndexReg, Disp;
+ getAddressingMode(I.getOperand(1), BaseReg, Scale, IndexReg, Disp);
+
const Type *ValTy = I.getOperand(0)->getType();
unsigned Class = getClassB(ValTy);
- if (Class == cLong) {
- addDirectMem(BuildMI(BB, X86::MOVmr32, 1+4), AddressReg).addReg(ValReg);
- addRegOffset(BuildMI(BB, X86::MOVmr32, 1+4), AddressReg,4).addReg(ValReg+1);
- return;
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
+ uint64_t Val = CI->getRawValue();
+ if (Class == cLong) {
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
+ BaseReg, Scale, IndexReg, Disp).addImm(Val & ~0U);
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
+ BaseReg, Scale, IndexReg, Disp+4).addImm(Val>>32);
+ } else {
+ static const unsigned Opcodes[] = {
+ X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
+ };
+ unsigned Opcode = Opcodes[Class];
+ addFullAddress(BuildMI(BB, Opcode, 5),
+ BaseReg, Scale, IndexReg, Disp).addImm(Val);
+ }
+ } else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
+ addFullAddress(BuildMI(BB, X86::MOV8mi, 5),
+ BaseReg, Scale, IndexReg, Disp).addImm(CB->getValue());
+ } else {
+ if (Class == cLong) {
+ unsigned ValReg = getReg(I.getOperand(0));
+ addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
+ BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
+ addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
+ BaseReg, Scale, IndexReg, Disp+4).addReg(ValReg+1);
+ } else {
+ unsigned ValReg = getReg(I.getOperand(0));
+ static const unsigned Opcodes[] = {
+ X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
+ };
+ unsigned Opcode = Opcodes[Class];
+ if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;
+ addFullAddress(BuildMI(BB, Opcode, 1+4),
+ BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
+ }
}
-
- static const unsigned Opcodes[] = {
- X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, X86::FSTr32
- };
- unsigned Opcode = Opcodes[Class];
- if (ValTy == Type::DoubleTy) Opcode = X86::FSTr64;
- addDirectMem(BuildMI(BB, Opcode, 1+4), AddressReg).addReg(ValReg);
}
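
The cLong branch above stores a 64-bit immediate as two 32-bit mov-to-memory instructions at Disp and Disp+4, relying on x86 being little-endian. A byte-level check (illustrative only; the final assert assumes a little-endian host):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t Val = 0x1122334455667788ULL;
      uint32_t Lo = Val & ~0U, Hi = Val >> 32;
      unsigned char Mem[8];
      std::memcpy(Mem, &Lo, 4);        // mov dword ptr [Disp],   Lo
      std::memcpy(Mem + 4, &Hi, 4);    // mov dword ptr [Disp+4], Hi
      uint64_t Back;
      std::memcpy(&Back, Mem, 8);
      assert(Back == Val);             // holds on little-endian hosts like x86
    }
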
-/// visitCastInst - Here we have various kinds of copying with or without
-/// sign extension going on.
+/// visitCastInst - Here we have various kinds of copying with or without sign
+/// extension going on.
+///
void ISel::visitCastInst(CastInst &CI) {
Value *Op = CI.getOperand(0);
+
+ // Noop casts are not even emitted.
+ if (getClassB(CI.getType()) == getClassB(Op->getType()))
+ return;
+
// If this is a cast from a 32-bit integer to a Long type, and the only uses
  // of the cast are GEP instructions, then the cast does not need to be
// generated explicitly, it will be folded into the GEP.
emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
}
-/// emitCastOperation - Common code shared between visitCastInst and
-/// constant expression cast support.
+/// emitCastOperation - Common code shared between visitCastInst and constant
+/// expression cast support.
+///
void ISel::emitCastOperation(MachineBasicBlock *BB,
MachineBasicBlock::iterator IP,
Value *Src, const Type *DestTy,
if (DestTy == Type::BoolTy) {
switch (SrcClass) {
case cByte:
- BMI(BB, IP, X86::TESTrr8, 2).addReg(SrcReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::TEST8rr, 2).addReg(SrcReg).addReg(SrcReg);
break;
case cShort:
- BMI(BB, IP, X86::TESTrr16, 2).addReg(SrcReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::TEST16rr, 2).addReg(SrcReg).addReg(SrcReg);
break;
case cInt:
- BMI(BB, IP, X86::TESTrr32, 2).addReg(SrcReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg).addReg(SrcReg);
break;
case cLong: {
unsigned TmpReg = makeAnotherReg(Type::IntTy);
- BMI(BB, IP, X86::ORrr32, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
+ BuildMI(*BB, IP, X86::OR32rr, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
break;
}
case cFP:
- assert(0 && "FIXME: implement cast FP to bool");
- abort();
+ BuildMI(*BB, IP, X86::FTST, 1).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::FNSTSW8r, 0);
+ BuildMI(*BB, IP, X86::SAHF, 1);
+ break;
}
// If the zero flag is not set, then the value is true, set the byte to
// true.
- BMI(BB, IP, X86::SETNEr, 1, DestReg);
+ BuildMI(*BB, IP, X86::SETNEr, 1, DestReg);
return;
}
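
Every integer arm of the switch reduces "cast to bool" to "compare against zero and SETNE"; the cLong arm ORs the two halves first, since a 64-bit value is nonzero exactly when either half is. In miniature (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Vals[] = {0, 1, 0x100000000ULL, 0xffffffffffffffffULL};
      for (uint64_t V : Vals) {
        uint32_t Lo = (uint32_t)V, Hi = (uint32_t)(V >> 32);
        bool SetNE = (Lo | Hi) != 0;          // or Lo, Hi ; setne
        assert(SetNE == (V != 0));
      }
    }
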
static const unsigned RegRegMove[] = {
- X86::MOVrr8, X86::MOVrr16, X86::MOVrr32, X86::FpMOV, X86::MOVrr32
+ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
};
// Implement casts between values of the same type class (as determined by
// getClass) by using a register-to-register move.
if (SrcClass == DestClass) {
if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
- BMI(BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
+ BuildMI(*BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
} else if (SrcClass == cFP) {
    if (SrcTy == Type::FloatTy) { // float -> double
assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
- BMI(BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
    } else { // double -> float
assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
"Unknown cFP member!");
// reading it back.
unsigned FltAlign = TM.getTargetData().getFloatAlignment();
int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
- addFrameReference(BMI(BB, IP, X86::FSTr32, 5), FrameIdx).addReg(SrcReg);
- addFrameReference(BMI(BB, IP, X86::FLDr32, 5, DestReg), FrameIdx);
+ addFrameReference(BuildMI(*BB, IP, X86::FST32m, 5), FrameIdx).addReg(SrcReg);
+ addFrameReference(BuildMI(*BB, IP, X86::FLD32m, 5, DestReg), FrameIdx);
}
} else if (SrcClass == cLong) {
- BMI(BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
- BMI(BB, IP, X86::MOVrr32, 1, DestReg+1).addReg(SrcReg+1);
+ BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg+1);
} else {
assert(0 && "Cannot handle this type of cast instruction!");
abort();
if (isLong) DestClass = cInt;
static const unsigned Opc[][4] = {
- { X86::MOVSXr16r8, X86::MOVSXr32r8, X86::MOVSXr32r16, X86::MOVrr32 }, // s
- { X86::MOVZXr16r8, X86::MOVZXr32r8, X86::MOVZXr32r16, X86::MOVrr32 } // u
+ { X86::MOVSX16rr8, X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOV32rr }, // s
+ { X86::MOVZX16rr8, X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOV32rr } // u
};
bool isUnsigned = SrcTy->isUnsigned();
- BMI(BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
+ BuildMI(*BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
DestReg).addReg(SrcReg);
if (isLong) { // Handle upper 32 bits as appropriate...
if (isUnsigned) // Zero out top bits...
- BMI(BB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
+ BuildMI(*BB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
else // Sign extend bottom half...
- BMI(BB, IP, X86::SARri32, 2, DestReg+1).addReg(DestReg).addZImm(31);
+ BuildMI(*BB, IP, X86::SAR32ri, 2, DestReg+1).addReg(DestReg).addImm(31);
}
return;
}
// Special case long -> int ...
if (SrcClass == cLong && DestClass == cInt) {
- BMI(BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
return;
}
if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
&& SrcClass > DestClass) {
static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
- BMI(BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
- BMI(BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
+ BuildMI(*BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
+ BuildMI(*BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
return;
}
// a larger signed value, then use FLD on the larger value.
//
const Type *PromoteType = 0;
- unsigned PromoteOpcode;
+ unsigned PromoteOpcode = 0;
unsigned RealDestReg = DestReg;
switch (SrcTy->getPrimitiveID()) {
case Type::BoolTyID:
// We don't have the facilities for directly loading byte sized data from
// memory (even signed). Promote it to 16 bits.
PromoteType = Type::ShortTy;
- PromoteOpcode = X86::MOVSXr16r8;
+ PromoteOpcode = X86::MOVSX16rr8;
break;
case Type::UByteTyID:
PromoteType = Type::ShortTy;
- PromoteOpcode = X86::MOVZXr16r8;
+ PromoteOpcode = X86::MOVZX16rr8;
break;
case Type::UShortTyID:
PromoteType = Type::IntTy;
- PromoteOpcode = X86::MOVZXr32r16;
+ PromoteOpcode = X86::MOVZX32rr16;
break;
case Type::UIntTyID: {
// Make a 64 bit temporary... and zero out the top of it...
unsigned TmpReg = makeAnotherReg(Type::LongTy);
- BMI(BB, IP, X86::MOVrr32, 1, TmpReg).addReg(SrcReg);
- BMI(BB, IP, X86::MOVri32, 1, TmpReg+1).addZImm(0);
+ BuildMI(*BB, IP, X86::MOV32rr, 1, TmpReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::MOV32ri, 1, TmpReg+1).addImm(0);
SrcTy = Type::LongTy;
SrcClass = cLong;
SrcReg = TmpReg;
if (PromoteType) {
unsigned TmpReg = makeAnotherReg(PromoteType);
- unsigned Opc = SrcTy->isSigned() ? X86::MOVSXr16r8 : X86::MOVZXr16r8;
- BMI(BB, IP, Opc, 1, TmpReg).addReg(SrcReg);
+ BuildMI(*BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
SrcTy = PromoteType;
SrcClass = getClass(PromoteType);
SrcReg = TmpReg;
}
// Spill the integer to memory and reload it from there...
- int FrameIdx = F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());
+ int FrameIdx =
+ F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());
if (SrcClass == cLong) {
- addFrameReference(BMI(BB, IP, X86::MOVmr32, 5), FrameIdx).addReg(SrcReg);
- addFrameReference(BMI(BB, IP, X86::MOVmr32, 5),
+ addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
+ FrameIdx).addReg(SrcReg);
+ addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
FrameIdx, 4).addReg(SrcReg+1);
} else {
- static const unsigned Op1[] = { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32 };
- addFrameReference(BMI(BB, IP, Op1[SrcClass], 5), FrameIdx).addReg(SrcReg);
+ static const unsigned Op1[] = { X86::MOV8mr, X86::MOV16mr, X86::MOV32mr };
+ addFrameReference(BuildMI(*BB, IP, Op1[SrcClass], 5),
+ FrameIdx).addReg(SrcReg);
}
static const unsigned Op2[] =
- { 0/*byte*/, X86::FILDr16, X86::FILDr32, 0/*FP*/, X86::FILDr64 };
- addFrameReference(BMI(BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
+ { 0/*byte*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m };
+ addFrameReference(BuildMI(*BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
// We need special handling for unsigned 64-bit integer sources. If the
// input number has the "sign bit" set, then we loaded it incorrectly as a
// negative 64-bit number. In this case, add an offset value.
if (SrcTy == Type::ULongTy) {
// Emit a test instruction to see if the dynamic input value was signed.
- BMI(BB, IP, X86::TESTrr32, 2).addReg(SrcReg+1).addReg(SrcReg+1);
+ BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg+1).addReg(SrcReg+1);
- // If the sign bit is set, get a pointer to an offset, otherwise get a pointer to a zero.
+ // If the sign bit is set, get a pointer to an offset, otherwise get a
+ // pointer to a zero.
MachineConstantPool *CP = F->getConstantPool();
unsigned Zero = makeAnotherReg(Type::IntTy);
- addConstantPoolReference(BMI(BB, IP, X86::LEAr32, 5, Zero),
- CP->getConstantPoolIndex(Constant::getNullValue(Type::UIntTy)));
+ Constant *Null = Constant::getNullValue(Type::UIntTy);
+ addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Zero),
+ CP->getConstantPoolIndex(Null));
unsigned Offset = makeAnotherReg(Type::IntTy);
- addConstantPoolReference(BMI(BB, IP, X86::LEAr32, 5, Offset),
- CP->getConstantPoolIndex(ConstantUInt::get(Type::UIntTy,
- 0x5f800000)));
+ Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
+
+ addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Offset),
+ CP->getConstantPoolIndex(OffsetCst));
unsigned Addr = makeAnotherReg(Type::IntTy);
- BMI(BB, IP, X86::CMOVSrr32, 2, Addr).addReg(Zero).addReg(Offset);
+ BuildMI(*BB, IP, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);
// Load the constant for an add. FIXME: this could make an 'fadd' that
// reads directly from memory, but we don't support these yet.
unsigned ConstReg = makeAnotherReg(Type::DoubleTy);
- addDirectMem(BMI(BB, IP, X86::FLDr32, 4, ConstReg), Addr);
+ addDirectMem(BuildMI(*BB, IP, X86::FLD32m, 4, ConstReg), Addr);
- BMI(BB, IP, X86::FpADD, 2, RealDestReg).addReg(ConstReg).addReg(DestReg);
+ BuildMI(*BB, IP, X86::FpADD, 2, RealDestReg)
+ .addReg(ConstReg).addReg(DestReg);
}
return;
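
The magic constant deserves a note: 0x5f800000 is the IEEE-754 single-precision encoding of 2^64. FILD interprets the stored bits as a signed i64, so an unsigned value with its top bit set is loaded as v - 2^64; CMOVS selects the 2^64 correction exactly when that sign bit was set. Verifying the encoding and the correction on an exactly-representable value (illustrative only):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint32_t Bits = 0x5f800000;
      float F;
      std::memcpy(&F, &Bits, sizeof F);
      assert((double)F == 18446744073709551616.0);   // 2^64
      uint64_t V = 0xffffffff00000000ULL;   // top bit set, exact in a double
      double Loaded = (double)(int64_t)V;   // what FILD produces: V - 2^64
      assert(Loaded + (double)F == (double)V);       // the fadd fixup
    }
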
// mode when truncating to an integer value.
//
int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
- addFrameReference(BMI(BB, IP, X86::FNSTCWm16, 4), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, IP, X86::FNSTCW16m, 4), CWFrameIdx);
// Load the old value of the high byte of the control word...
unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
- addFrameReference(BMI(BB, IP, X86::MOVrm8, 4, HighPartOfCW), CWFrameIdx, 1);
+ addFrameReference(BuildMI(*BB, IP, X86::MOV8rm, 4, HighPartOfCW),
+ CWFrameIdx, 1);
// Set the high part to be round to zero...
- addFrameReference(BMI(BB, IP, X86::MOVmi8, 5), CWFrameIdx, 1).addZImm(12);
+ addFrameReference(BuildMI(*BB, IP, X86::MOV8mi, 5),
+ CWFrameIdx, 1).addImm(12);
// Reload the modified control word now...
- addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
// Restore the memory image of control word to original value
- addFrameReference(BMI(BB, IP, X86::MOVmr8, 5),
+ addFrameReference(BuildMI(*BB, IP, X86::MOV8mr, 5),
CWFrameIdx, 1).addReg(HighPartOfCW);
// We don't have the facilities for directly storing byte sized data to
F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());
static const unsigned Op1[] =
- { 0, X86::FISTr16, X86::FISTr32, 0, X86::FISTPr64 };
- addFrameReference(BMI(BB, IP, Op1[StoreClass], 5), FrameIdx).addReg(SrcReg);
+ { 0, X86::FIST16m, X86::FIST32m, 0, X86::FISTP64m };
+ addFrameReference(BuildMI(*BB, IP, Op1[StoreClass], 5),
+ FrameIdx).addReg(SrcReg);
if (DestClass == cLong) {
- addFrameReference(BMI(BB, IP, X86::MOVrm32, 4, DestReg), FrameIdx);
- addFrameReference(BMI(BB, IP, X86::MOVrm32, 4, DestReg+1), FrameIdx, 4);
+ addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg), FrameIdx);
+ addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg+1),
+ FrameIdx, 4);
} else {
- static const unsigned Op2[] = { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32 };
- addFrameReference(BMI(BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
+ static const unsigned Op2[] = { X86::MOV8rm, X86::MOV16rm, X86::MOV32rm };
+ addFrameReference(BuildMI(*BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
}
// Reload the original control word now...
- addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
return;
}
}
// Increment the VAList pointer...
- BuildMI(BB, X86::ADDri32, 2, DestReg).addReg(VAList).addZImm(Size);
+ BuildMI(BB, X86::ADD32ri, 2, DestReg).addReg(VAList).addImm(Size);
}
void ISel::visitVAArgInst(VAArgInst &I) {
case Type::PointerTyID:
case Type::UIntTyID:
case Type::IntTyID:
- addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), VAList);
+ addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
break;
case Type::ULongTyID:
case Type::LongTyID:
- addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), VAList);
- addRegOffset(BuildMI(BB, X86::MOVrm32, 4, DestReg+1), VAList, 4);
+ addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
+ addRegOffset(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), VAList, 4);
break;
case Type::DoubleTyID:
- addDirectMem(BuildMI(BB, X86::FLDr64, 4, DestReg), VAList);
+ addDirectMem(BuildMI(BB, X86::FLD64m, 4, DestReg), VAList);
break;
}
}
-
+/// visitGetElementPtrInst - instruction-select GEP instructions
+///
void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
+ // If this GEP instruction will be folded into all of its users, we don't need
+ // to explicitly calculate it!
+ unsigned A, B, C, D;
+ if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), A,B,C,D)) {
+ // Check all of the users of the instruction to see if they are loads and
+ // stores.
+ bool AllWillFold = true;
+ for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI)
+ if (cast<Instruction>(*UI)->getOpcode() != Instruction::Load)
+ if (cast<Instruction>(*UI)->getOpcode() != Instruction::Store ||
+ cast<Instruction>(*UI)->getOperand(0) == &I) {
+ AllWillFold = false;
+ break;
+ }
+
+ // If the instruction is foldable, and will be folded into all users, don't
+ // emit it!
+ if (AllWillFold) return;
+ }
+
unsigned outputReg = getReg(I);
emitGEPOperation(BB, BB->end(), I.getOperand(0),
I.op_begin()+1, I.op_end(), outputReg);
}
+/// getGEPIndex - Inspect the getelementptr operands specified with GEPOps and
+/// GEPTypes (the derived types being stepped through at each level). On return
+/// from this function, if some indexes of the instruction are representable as
+/// an X86 lea instruction, the address components are returned in BaseReg,
+/// Scale, IndexReg, and Disp, and the consumed indexes are popped from the
+/// GEPOps/GEPTypes lists; any operands that could not be folded are left in
+/// place. If the resulting addressing mode only partially consumes the input,
+/// BaseReg is left zero so the caller can chain the rest of the computation
+/// into it.
+///
+/// Note that there is one fewer entry in GEPTypes than there is in GEPOps.
+///
+void ISel::getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
+ std::vector<Value*> &GEPOps,
+ std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
+ unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
+ const TargetData &TD = TM.getTargetData();
+
+ // Clear out the state we are working with...
+ BaseReg = 0; // No base register
+ Scale = 1; // Unit scale
+ IndexReg = 0; // No index register
+ Disp = 0; // No displacement
+
+ // While there are GEP indexes that can be folded into the current address,
+ // keep processing them.
+ while (!GEPTypes.empty()) {
+ if (const StructType *StTy = dyn_cast<StructType>(GEPTypes.back())) {
+ // It's a struct access. CUI is the index into the structure,
+ // which names the field. This index must have unsigned type.
+ const ConstantUInt *CUI = cast<ConstantUInt>(GEPOps.back());
+
+ // Use the TargetData structure to pick out what the layout of the
+ // structure is in memory. Since the structure index must be constant, we
+ // can get its value and use it to find the right byte offset from the
+ // StructLayout class's list of structure member offsets.
+ Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
+ GEPOps.pop_back(); // Consume a GEP operand
+ GEPTypes.pop_back();
+ } else {
+ // It's an array or pointer access: [ArraySize x ElementType].
+ const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
+ Value *idx = GEPOps.back();
+
+ // idx is the index into the array. Unlike with structure
+ // indices, we may not know its actual value at code-generation
+ // time.
+
+ // If idx is a constant, fold it into the offset.
+ unsigned TypeSize = TD.getTypeSize(SqTy->getElementType());
+ if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
+ Disp += TypeSize*CSI->getValue();
+ } else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) {
+ Disp += TypeSize*CUI->getValue();
+ } else {
+ // If the index reg is already taken, we can't handle this index.
+ if (IndexReg) return;
+
+        // If this is a size that we can handle, then add the index as a
+        // scaled index register.
+ switch (TypeSize) {
+ case 1: case 2: case 4: case 8:
+ // These are all acceptable scales on X86.
+ Scale = TypeSize;
+ break;
+ default:
+ // Otherwise, we can't handle this scale
+ return;
+ }
+
+ if (CastInst *CI = dyn_cast<CastInst>(idx))
+ if (CI->getOperand(0)->getType() == Type::IntTy ||
+ CI->getOperand(0)->getType() == Type::UIntTy)
+ idx = CI->getOperand(0);
+
+ IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
+ }
+
+ GEPOps.pop_back(); // Consume a GEP operand
+ GEPTypes.pop_back();
+ }
+ }
+
+ // GEPTypes is empty, which means we have a single operand left. See if we
+ // can set it as the base register.
+ //
+ // FIXME: When addressing modes are more powerful/correct, we could load
+ // global addresses directly as 32-bit immediates.
+ assert(BaseReg == 0);
+ BaseReg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
+ GEPOps.pop_back(); // Consume the last GEP operand
+}
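
Concretely, the loop turns constant struct-field and array indexes into additions to Disp, and lets one variable index with element size 1, 2, 4, or 8 become IndexReg with that Scale, so a two-level GEP like &p[i].f collapses into a single address. The arithmetic it folds (struct and names invented for illustration):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Elem { int32_t Tag; int32_t Val; };   // hypothetical element type

    int main() {
      Elem Table[16] = {};
      unsigned I = 5;                        // dynamic index -> IndexReg
      unsigned Scale = sizeof(Elem);         // 8: an acceptable x86 scale
      unsigned Disp = offsetof(Elem, Val);   // constant field index -> Disp += 4
      char *Base = (char *)Table;            // remaining operand -> BaseReg
      assert((void *)(Base + I * Scale + Disp) == (void *)&Table[I].Val);
    }
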
+
+
+/// isGEPFoldable - Return true if the specified GEP can be completely
+/// folded into the addressing mode of a load/store or lea instruction.
+bool ISel::isGEPFoldable(MachineBasicBlock *MBB,
+ Value *Src, User::op_iterator IdxBegin,
+ User::op_iterator IdxEnd, unsigned &BaseReg,
+ unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
+ if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
+ Src = CPR->getValue();
+
+ std::vector<Value*> GEPOps;
+ GEPOps.resize(IdxEnd-IdxBegin+1);
+ GEPOps[0] = Src;
+ std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);
+
+ std::vector<const Type*> GEPTypes;
+ GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
+ gep_type_end(Src->getType(), IdxBegin, IdxEnd));
+
+ MachineBasicBlock::iterator IP;
+ if (MBB) IP = MBB->end();
+ getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);
+
+ // We can fold it away iff the getGEPIndex call eliminated all operands.
+ return GEPOps.empty();
+}
+
void ISel::emitGEPOperation(MachineBasicBlock *MBB,
MachineBasicBlock::iterator IP,
Value *Src, User::op_iterator IdxBegin,
User::op_iterator IdxEnd, unsigned TargetReg) {
const TargetData &TD = TM.getTargetData();
-
if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
Src = CPR->getValue();
// Keep emitting instructions until we consume the entire GEP instruction.
while (!GEPOps.empty()) {
unsigned OldSize = GEPOps.size();
+ unsigned BaseReg, Scale, IndexReg, Disp;
+ getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);
- if (GEPTypes.empty()) {
+ if (GEPOps.size() != OldSize) {
+ // getGEPIndex consumed some of the input. Build an LEA instruction here.
+ unsigned NextTarget = 0;
+ if (!GEPOps.empty()) {
+ assert(BaseReg == 0 &&
+ "getGEPIndex should have left the base register open for chaining!");
+ NextTarget = BaseReg = makeAnotherReg(Type::UIntTy);
+ }
+
+ if (IndexReg == 0 && Disp == 0)
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
+ else
+ addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg),
+ BaseReg, Scale, IndexReg, Disp);
+ --IP;
+ TargetReg = NextTarget;
+ } else if (GEPTypes.empty()) {
// The getGEPIndex operation didn't want to build an LEA. Check to see if
// all operands are consumed but the base pointer. If so, just load it
// into the register.
if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps[0])) {
- BMI(MBB, IP, X86::MOVri32, 1, TargetReg).addGlobalAddress(GV);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(GV);
} else {
unsigned BaseReg = getReg(GEPOps[0], MBB, IP);
- BMI(MBB, IP, X86::MOVrr32, 1, TargetReg).addReg(BaseReg);
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
}
break; // we are now done
- } else if (const StructType *StTy = dyn_cast<StructType>(GEPTypes.back())) {
- // It's a struct access. CUI is the index into the structure,
- // which names the field. This index must have unsigned type.
- const ConstantUInt *CUI = cast<ConstantUInt>(GEPOps.back());
- GEPOps.pop_back(); // Consume a GEP operand
- GEPTypes.pop_back();
- // Use the TargetData structure to pick out what the layout of the
- // structure is in memory. Since the structure index must be constant, we
- // can get its value and use it to find the right byte offset from the
- // StructLayout class's list of structure member offsets.
- unsigned idxValue = CUI->getValue();
- unsigned FieldOff = TD.getStructLayout(StTy)->MemberOffsets[idxValue];
- if (FieldOff) {
- unsigned Reg = makeAnotherReg(Type::UIntTy);
- // Emit an ADD to add FieldOff to the basePtr.
- BMI(MBB, IP, X86::ADDri32, 2, TargetReg).addReg(Reg).addZImm(FieldOff);
- --IP; // Insert the next instruction before this one.
- TargetReg = Reg; // Codegen the rest of the GEP into this
- }
-
} else {
// It's an array or pointer access: [ArraySize x ElementType].
const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
GEPOps.pop_back(); // Consume a GEP operand
GEPTypes.pop_back();
- // idx is the index into the array. Unlike with structure
- // indices, we may not know its actual value at code-generation
- // time.
- assert(idx->getType() == Type::LongTy && "Bad GEP array index!");
-
- // Most GEP instructions use a [cast (int/uint) to LongTy] as their
+ // Many GEP instructions use a [cast (int/uint) to LongTy] as their
// operand on X86. Handle this case directly now...
if (CastInst *CI = dyn_cast<CastInst>(idx))
if (CI->getOperand(0)->getType() == Type::IntTy ||
unsigned elementSize = TD.getTypeSize(ElTy);
// If idxReg is a constant, we don't need to perform the multiply!
- if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
+ if (ConstantInt *CSI = dyn_cast<ConstantInt>(idx)) {
if (!CSI->isNullValue()) {
- unsigned Offset = elementSize*CSI->getValue();
+ unsigned Offset = elementSize*CSI->getRawValue();
unsigned Reg = makeAnotherReg(Type::UIntTy);
- BMI(MBB, IP, X86::ADDri32, 2, TargetReg).addReg(Reg).addZImm(Offset);
+ BuildMI(*MBB, IP, X86::ADD32ri, 2, TargetReg)
+ .addReg(Reg).addImm(Offset);
--IP; // Insert the next instruction before this one.
TargetReg = Reg; // Codegen the rest of the GEP into this
}
// If the element size is 1, we don't have to multiply, just add
unsigned idxReg = getReg(idx, MBB, IP);
unsigned Reg = makeAnotherReg(Type::UIntTy);
- BMI(MBB, IP, X86::ADDrr32, 2, TargetReg).addReg(Reg).addReg(idxReg);
+ BuildMI(*MBB, IP, X86::ADD32rr, 2,TargetReg).addReg(Reg).addReg(idxReg);
--IP; // Insert the next instruction before this one.
TargetReg = Reg; // Codegen the rest of the GEP into this
} else {
// Emit an ADD to add OffsetReg to the basePtr.
unsigned Reg = makeAnotherReg(Type::UIntTy);
- BMI(MBB, IP, X86::ADDrr32, 2, TargetReg).addReg(Reg).addReg(OffsetReg);
+ BuildMI(*MBB, IP, X86::ADD32rr, 2, TargetReg)
+ .addReg(Reg).addReg(OffsetReg);
// Step to the first instruction of the multiply.
if (BeforeIt == MBB->end())
// Create a new stack object using the frame manager...
int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
- addFrameReference(BuildMI(BB, X86::LEAr32, 5, getReg(I)), FrameIdx);
+ addFrameReference(BuildMI(BB, X86::LEA32r, 5, getReg(I)), FrameIdx);
return;
}
}
// AddedSize = add <TotalSizeReg>, 15
unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
- BuildMI(BB, X86::ADDri32, 2, AddedSizeReg).addReg(TotalSizeReg).addZImm(15);
+ BuildMI(BB, X86::ADD32ri, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15);
// AlignedSize = and <AddedSize>, ~15
unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
- BuildMI(BB, X86::ANDri32, 2, AlignedSize).addReg(AddedSizeReg).addZImm(~15);
+ BuildMI(BB, X86::AND32ri, 2, AlignedSize).addReg(AddedSizeReg).addImm(~15);
// Subtract size from stack pointer, thereby allocating some space.
- BuildMI(BB, X86::SUBrr32, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);
+ BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);
// Put a pointer to the space into the result register, by copying
// the stack pointer.
- BuildMI(BB, X86::MOVrr32, 1, getReg(I)).addReg(X86::ESP);
+ BuildMI(BB, X86::MOV32rr, 1, getReg(I)).addReg(X86::ESP);
// Inform the Frame Information that we have just allocated a variable-sized
// object.