#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
-#include "Support/Statistic.h"
+#include "llvm/ADT/Statistic.h"
using namespace llvm;
namespace {
}
namespace {
- struct ISel : public FunctionPass, InstVisitor<ISel> {
+ struct X86ISel : public FunctionPass, InstVisitor<X86ISel> {
TargetMachine &TM;
MachineFunction *F; // The function we are compiling into
MachineBasicBlock *BB; // The current MBB we are compiling
    // AllocaMap - Mapping from fixed sized alloca instructions to the
    // FrameIndex for the alloca.
std::map<AllocaInst*, unsigned> AllocaMap;
- ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}
+ X86ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}
/// runOnFunction - Top level implementation of instruction selection for
/// the entire function.
// Control flow operators
void visitReturnInst(ReturnInst &RI);
void visitBranchInst(BranchInst &BI);
+ void visitUnreachableInst(UnreachableInst &UI) {}
struct ValueRecord {
Value *Val;
/// getAddressingMode - Get the addressing mode to use to address the
/// specified value. The returned value should be used with addFullAddress.
- void getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
- unsigned &IndexReg, unsigned &Disp);
+ void getAddressingMode(Value *Addr, X86AddressMode &AM);
/// getGEPIndex - This is used to fold GEP instructions into X86 addressing
/// expressions.
void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
std::vector<Value*> &GEPOps,
- std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
- unsigned &Scale, unsigned &IndexReg, unsigned &Disp);
+ std::vector<const Type*> &GEPTypes,
+ X86AddressMode &AM);
/// isGEPFoldable - Return true if the specified GEP can be completely
/// folded into the addressing mode of a load/store or lea instruction.
bool isGEPFoldable(MachineBasicBlock *MBB,
Value *Src, User::op_iterator IdxBegin,
- User::op_iterator IdxEnd, unsigned &BaseReg,
- unsigned &Scale, unsigned &IndexReg, unsigned &Disp);
+ User::op_iterator IdxEnd, X86AddressMode &AM);
/// emitGEPOperation - Common code shared between visitGetElementPtrInst and
/// constant expression GEP support.
MachineBasicBlock::iterator IP,
Value *Op, Value *ShiftAmount, bool isLeftShift,
const Type *ResultTy, unsigned DestReg);
+
+    /// doSHLDConst - Emit code for a 'SHLD DestReg, Op0, Op1, Amt' operation,
+    /// where Amt is a constant.
+    void doSHLDConst(MachineBasicBlock *MBB,
+                     MachineBasicBlock::iterator MBBI,
+                     unsigned DestReg, unsigned Op0Reg, unsigned Op1Reg,
+                     unsigned Amt);
/// emitSelectOperation - Common code shared between visitSelectInst and the
/// constant expression support.
/// getReg - This method turns an LLVM value into a register number.
///
-unsigned ISel::getReg(Value *V, MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IPt) {
+unsigned X86ISel::getReg(Value *V, MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IPt) {
// If this operand is a constant, emit the code to copy the constant into
// the register here...
if (Constant *C = dyn_cast<Constant>(V)) {
/// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
/// that is to be statically allocated with the initial stack frame
/// adjustment.
-unsigned ISel::getFixedSizedAllocaFI(AllocaInst *AI) {
+unsigned X86ISel::getFixedSizedAllocaFI(AllocaInst *AI) {
// Already computed this?
std::map<AllocaInst*, unsigned>::iterator I = AllocaMap.lower_bound(AI);
if (I != AllocaMap.end() && I->first == AI) return I->second;
/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
-void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IP,
- Constant *C, unsigned R) {
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+void X86ISel::copyConstantToRegister(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ Constant *C, unsigned R) {
+ if (isa<UndefValue>(C)) {
+ switch (getClassB(C->getType())) {
+ case cFP:
+ // FIXME: SHOULD TEACH STACKIFIER ABOUT UNDEF VALUES!
+ BuildMI(*MBB, IP, X86::FLD0, 0, R);
+ return;
+ case cLong:
+ BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, R+1);
+ // FALL THROUGH
+ default:
+ BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, R);
+ return;
+ }
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
unsigned Class = 0;
switch (CE->getOpcode()) {
case Instruction::GetElementPtr:
/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
/// the stack into virtual registers.
///
-void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
+void X86ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
// Emit instructions to load the arguments... On entry to a function on the
// X86, the stack frame looks like this:
//
/// because we have to generate our sources into the source basic blocks, not
/// the current one.
///
-void ISel::SelectPHINodes() {
+void X86ISel::SelectPHINodes() {
const TargetInstrInfo &TII = *TM.getInstrInfo();
const Function &LF = *F->getFunction(); // The LLVM function...
for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
// Loop over all of the PHI nodes in the LLVM basic block...
MachineBasicBlock::iterator PHIInsertPoint = MBB.begin();
- for (BasicBlock::const_iterator I = BB->begin();
- PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) {
+    for (BasicBlock::const_iterator I = BB->begin(); isa<PHINode>(I); ++I) {
+      PHINode *PN = const_cast<PHINode*>(cast<PHINode>(I));
// Create a new machine instr PHI node, and insert it.
unsigned PHIReg = getReg(*PN);
// break critical edges as needed (to make a place to put compensation code),
// but this will require some infrastructure improvements as well.
//
-void ISel::InsertFPRegKills() {
+void X86ISel::InsertFPRegKills() {
SSARegMap &RegMap = *F->getSSARegMap();
for (MachineFunction::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
}
-void ISel::getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
- unsigned &IndexReg, unsigned &Disp) {
- BaseReg = 0; Scale = 1; IndexReg = 0; Disp = 0;
+void X86ISel::getAddressingMode(Value *Addr, X86AddressMode &AM) {
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = 0; AM.Scale = 1; AM.IndexReg = 0; AM.Disp = 0;
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
- BaseReg, Scale, IndexReg, Disp))
+ AM))
return;
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
if (CE->getOpcode() == Instruction::GetElementPtr)
if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
- BaseReg, Scale, IndexReg, Disp))
+ AM))
return;
+ } else if (AllocaInst *AI = dyn_castFixedAlloca(Addr)) {
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
+ return;
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
+ AM.GV = GV;
+ return;
}
// If it's not foldable, reset addr mode.
- BaseReg = getReg(Addr);
- Scale = 1; IndexReg = 0; Disp = 0;
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = getReg(Addr);
+ AM.Scale = 1; AM.IndexReg = 0; AM.Disp = 0;
}
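// For illustration: a foldable GEP like 'getelementptr int* %P, uint %i'
// resolves to Base = %P, Scale = 4, IndexReg = %i, Disp = 0; a fixed-sized
// alloca becomes a FrameIndexBase, and a bare global is recorded in AM.GV.
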
// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold
if ((isa<BranchInst>(User) || isa<SelectInst>(User)) &&
(getClassB(SCI->getOperand(0)->getType()) != cLong ||
SCI->getOpcode() == Instruction::SetEQ ||
- SCI->getOpcode() == Instruction::SetNE))
+ SCI->getOpcode() == Instruction::SetNE) &&
+ (isa<BranchInst>(User) || User->getOperand(0) == V))
return SCI;
}
return 0;
/// emitUCOMr - In the future when we support processors before the P6, this
/// wraps the logic for emitting an FUCOMr vs FUCOMIr.
-void ISel::emitUCOMr(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
- unsigned LHS, unsigned RHS) {
+void X86ISel::emitUCOMr(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
+ unsigned LHS, unsigned RHS) {
if (0) { // for processors prior to the P6
BuildMI(*MBB, IP, X86::FUCOMr, 2).addReg(LHS).addReg(RHS);
BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
// EmitComparison - This function emits a comparison of the two operands,
// returning the extended setcc code to use.
-unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
- MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IP) {
+unsigned X86ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
+ MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP) {
// The arguments are already supposed to be of the same type.
const Type *CompTy = Op0->getType();
unsigned Class = getClassB(CompTy);
- unsigned Op0r = getReg(Op0, MBB, IP);
// Special case handling of: cmp R, i
if (isa<ConstantPointerNull>(Op1)) {
+ unsigned Op0r = getReg(Op0, MBB, IP);
if (OpNum < 2) // seteq/setne -> test
BuildMI(*MBB, IP, X86::TEST32rr, 2).addReg(Op0r).addReg(Op0r);
else
// can't handle unsigned comparisons against zero unless they are == or
// !=. These should have been strength reduced already anyway.
if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
+
+ // If this is a comparison against zero and the LHS is an and of a
+ // register with a constant, use the test to do the and.
+ if (Instruction *Op0I = dyn_cast<Instruction>(Op0))
+ if (Op0I->getOpcode() == Instruction::And && Op0->hasOneUse() &&
+ isa<ConstantInt>(Op0I->getOperand(1))) {
+ static const unsigned TESTTab[] = {
+ X86::TEST8ri, X86::TEST16ri, X86::TEST32ri
+ };
+
+ // Emit test X, i
+ unsigned LHS = getReg(Op0I->getOperand(0), MBB, IP);
+ unsigned Imm =
+ cast<ConstantInt>(Op0I->getOperand(1))->getRawValue();
+ BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(LHS).addImm(Imm);
+
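+          // After 'test X, M' the sign flag holds the sign bit of (X & M),
+          // so e.g. 'setlt (and X, M), 0' becomes 'test M, X' + js.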
+ if (OpNum == 2) return 6; // Map jl -> js
+ if (OpNum == 3) return 7; // Map jg -> jns
+ return OpNum;
+ }
+
+ unsigned Op0r = getReg(Op0, MBB, IP);
static const unsigned TESTTab[] = {
X86::TEST8rr, X86::TEST16rr, X86::TEST32rr
};
X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
};
+ unsigned Op0r = getReg(Op0, MBB, IP);
BuildMI(*MBB, IP, CMPTab[Class], 2).addReg(Op0r).addImm(Op1v);
return OpNum;
} else {
+ unsigned Op0r = getReg(Op0, MBB, IP);
assert(Class == cLong && "Unknown integer class!");
unsigned LowCst = CI->getRawValue();
unsigned HiCst = CI->getRawValue() >> 32;
}
}
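+  // Materialize Op0 only now; the special cases above load exactly what they
+  // need, and some avoid putting Op0 in a register at all.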
+ unsigned Op0r = getReg(Op0, MBB, IP);
+
// Special case handling of comparison against +/- 0.0
if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op1))
if (CFP->isExactlyValue(+0.0) || CFP->isExactlyValue(-0.0)) {
/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
/// register, then move it to wherever the result should be.
///
-void ISel::visitSetCondInst(SetCondInst &I) {
+void X86ISel::visitSetCondInst(SetCondInst &I) {
if (canFoldSetCCIntoBranchOrSelect(&I))
return; // Fold this into a branch or select.
/// emitSetCCOperation - Common code shared between visitSetCondInst and
/// constant expression support.
///
-void ISel::emitSetCCOperation(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IP,
- Value *Op0, Value *Op1, unsigned Opcode,
- unsigned TargetReg) {
+void X86ISel::emitSetCCOperation(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ Value *Op0, Value *Op1, unsigned Opcode,
+ unsigned TargetReg) {
unsigned OpNum = getSetCCNumber(Opcode);
OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP);
}
}
-void ISel::visitSelectInst(SelectInst &SI) {
+void X86ISel::visitSelectInst(SelectInst &SI) {
unsigned DestReg = getReg(SI);
MachineBasicBlock::iterator MII = BB->end();
emitSelectOperation(BB, MII, SI.getCondition(), SI.getTrueValue(),
/// emitSelect - Common code shared between visitSelectInst and the constant
/// expression support.
-void ISel::emitSelectOperation(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IP,
- Value *Cond, Value *TrueVal, Value *FalseVal,
- unsigned DestReg) {
+void X86ISel::emitSelectOperation(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ Value *Cond, Value *TrueVal, Value *FalseVal,
+ unsigned DestReg) {
unsigned SelectClass = getClassB(TrueVal->getType());
// We don't support 8-bit conditional moves. If we have incoming constants,
/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
/// operand, in the specified target register.
///
-void ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
+void X86ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
bool isUnsigned = VR.Ty->isUnsigned() || VR.Ty == Type::BoolTy;
Value *Val = VR.Val;
/// ret long, ulong : Move value into EAX/EDX and return
/// ret float/double : Top of FP stack
///
-void ISel::visitReturnInst(ReturnInst &I) {
+void X86ISel::visitReturnInst(ReturnInst &I) {
if (I.getNumOperands() == 0) {
BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
return;
/// jump to a block that is the immediate successor of the current block, we can
/// just make a fall-through (but we don't currently).
///
-void ISel::visitBranchInst(BranchInst &BI) {
+void X86ISel::visitBranchInst(BranchInst &BI) {
// Update machine-CFG edges
BB->addSuccessor (MBBMap[BI.getSuccessor(0)]);
if (BI.isConditional())
/// and the return value as appropriate. For the actual function call itself,
/// it inserts the specified CallMI instruction into the stream.
///
-void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
- const std::vector<ValueRecord> &Args) {
-
+void X86ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
+ const std::vector<ValueRecord> &Args) {
// Count how many bytes are to be pushed on the stack...
unsigned NumBytes = 0;
/// visitCallInst - Push args on stack and do a procedure call instruction.
-void ISel::visitCallInst(CallInst &CI) {
+void X86ISel::visitCallInst(CallInst &CI) {
MachineInstr *TheCall;
if (Function *F = CI.getCalledFunction()) {
// Is it an intrinsic function call?
/// function, lowering any calls to unknown intrinsic functions into the
/// equivalent LLVM code.
///
-void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
+void X86ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
if (CallInst *CI = dyn_cast<CallInst>(I++))
}
}
-void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
+void X86ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
unsigned TmpReg1, TmpReg2;
switch (ID) {
case Intrinsic::vastart:
/// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
/// Xor.
///
-void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
+void X86ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
unsigned DestReg = getReg(B);
MachineBasicBlock::iterator MI = BB->end();
Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1);
unsigned Class = getClassB(B.getType());
+ // If this is AND X, C, and it is only used by a setcc instruction, it will
+ // be folded. There is no need to emit this instruction.
+ if (B.hasOneUse() && OperatorClass == 2 && isa<ConstantInt>(Op1))
+ if (Class == cByte || Class == cShort || Class == cInt) {
+ Instruction *Use = cast<Instruction>(B.use_back());
+ if (isa<SetCondInst>(Use) &&
+ Use->getOperand(1) == Constant::getNullValue(B.getType())) {
+ switch (getSetCCNumber(Use->getOpcode())) {
+ case 0:
+ case 1:
+ return;
+ default:
+ if (B.getType()->isSigned()) return;
+ }
+ }
+ }
+
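+  // For example:
+  //   %A = and int %X, 255
+  //   %B = seteq int %A, 0
+  // emits no AND here; EmitComparison folds it into 'test $255, %X'.
+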
// Special case: op Reg, load [mem]
if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1) && Class != cLong &&
Op0->hasOneUse() &&
addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), FI);
} else {
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), BaseReg,
- Scale, IndexReg, Disp);
+ X86AddressMode AM;
+ getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), AM);
- addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r),
- BaseReg, Scale, IndexReg, Disp);
+ addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), AM);
}
return;
}
unsigned FI = getFixedSizedAllocaFI(AI);
addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), FI);
} else {
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), BaseReg,
- Scale, IndexReg, Disp);
+ X86AddressMode AM;
+ getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), AM);
- addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r),
- BaseReg, Scale, IndexReg, Disp);
+ addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), AM);
}
return;
}
/// emitBinaryFPOperation - This method handles emission of floating point
/// Add (0), Sub (1), Mul (2), and Div (3) operations.
-void ISel::emitBinaryFPOperation(MachineBasicBlock *BB,
- MachineBasicBlock::iterator IP,
- Value *Op0, Value *Op1,
- unsigned OperatorClass, unsigned DestReg) {
-
+void X86ISel::emitBinaryFPOperation(MachineBasicBlock *BB,
+ MachineBasicBlock::iterator IP,
+ Value *Op0, Value *Op1,
+ unsigned OperatorClass, unsigned DestReg) {
// Special case: op Reg, <const fp>
if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1))
if (!Op1C->isExactlyValue(+0.0) && !Op1C->isExactlyValue(+1.0)) {
/// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
/// and constant expression support.
///
-void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IP,
- Value *Op0, Value *Op1,
- unsigned OperatorClass, unsigned DestReg) {
+void X86ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ Value *Op0, Value *Op1,
+ unsigned OperatorClass,
+ unsigned DestReg) {
unsigned Class = getClassB(Op0->getType());
if (Class == cFP) {
/// registers op0Reg and op1Reg, and put the result in DestReg. The type of the
/// result should be given as DestTy.
///
-void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
- unsigned DestReg, const Type *DestTy,
- unsigned op0Reg, unsigned op1Reg) {
+void X86ISel::doMultiply(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, const Type *DestTy,
+ unsigned op0Reg, unsigned op1Reg) {
unsigned Class = getClass(DestTy);
switch (Class) {
case cInt:
/// doMultiplyConst - This function is specialized to efficiently codegen an 8,
/// 16, or 32-bit integer multiply by a constant.
-void ISel::doMultiplyConst(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IP,
- unsigned DestReg, const Type *DestTy,
- unsigned op0Reg, unsigned ConstRHS) {
+void X86ISel::doMultiplyConst(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ unsigned DestReg, const Type *DestTy,
+ unsigned op0Reg, unsigned ConstRHS) {
static const unsigned MOVrrTab[] = {X86::MOV8rr, X86::MOV16rr, X86::MOV32rr};
static const unsigned MOVriTab[] = {X86::MOV8ri, X86::MOV16ri, X86::MOV32ri};
static const unsigned ADDrrTab[] = {X86::ADD8rr, X86::ADD16rr, X86::ADD32rr};
case 5:
case 9:
if (Class == cInt) {
- addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, DestReg),
- op0Reg, ConstRHS-1, op0Reg, 0);
+ X86AddressMode AM;
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = op0Reg;
+ AM.Scale = ConstRHS-1;
+ AM.IndexReg = op0Reg;
+ AM.Disp = 0;
+ addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, DestReg), AM);
return;
}
case -3:
case -9:
if (Class == cInt) {
TmpReg = makeAnotherReg(DestTy);
- addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TmpReg),
- op0Reg, -ConstRHS-1, op0Reg, 0);
+ X86AddressMode AM;
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = op0Reg;
+ AM.Scale = -ConstRHS-1;
+ AM.IndexReg = op0Reg;
+ AM.Disp = 0;
+ addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TmpReg), AM);
BuildMI(*MBB, IP, NEGrTab[Class], 1, DestReg).addReg(TmpReg);
return;
}
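      // The LEA forms above compute small multiplies in one instruction, e.g.
      //   x*5  -> lea Dest, [x + 4*x]
      //   x*-3 -> lea Tmp, [x + 2*x], then neg into Dest.
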
/// visitMul - Multiplies are not simple binary operators because they must deal
/// with the EAX register explicitly.
///
-void ISel::visitMul(BinaryOperator &I) {
+void X86ISel::visitMul(BinaryOperator &I) {
unsigned ResultReg = getReg(I);
Value *Op0 = I.getOperand(0);
unsigned FI = getFixedSizedAllocaFI(AI);
addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
} else {
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(LI->getOperand(0), BaseReg,
- Scale, IndexReg, Disp);
+ X86AddressMode AM;
+ getAddressingMode(LI->getOperand(0), AM);
- addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r),
- BaseReg, Scale, IndexReg, Disp);
+ addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
}
return;
}
emitMultiply(BB, IP, Op0, Op1, ResultReg);
}
-void ISel::emitMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
- Value *Op0, Value *Op1, unsigned DestReg) {
+void X86ISel::emitMultiply(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ Value *Op0, Value *Op1, unsigned DestReg) {
MachineBasicBlock &BB = *MBB;
TypeClass Class = getClass(Op0->getType());
/// select the result from a different register. Note that both of these
/// instructions work differently for signed and unsigned operands.
///
-void ISel::visitDivRem(BinaryOperator &I) {
+void X86ISel::visitDivRem(BinaryOperator &I) {
unsigned ResultReg = getReg(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
unsigned FI = getFixedSizedAllocaFI(AI);
addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
} else {
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(LI->getOperand(0), BaseReg,
- Scale, IndexReg, Disp);
+ X86AddressMode AM;
+ getAddressingMode(LI->getOperand(0), AM);
- addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r),
- BaseReg, Scale, IndexReg, Disp);
+ addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
}
return;
}
unsigned FI = getFixedSizedAllocaFI(AI);
addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), FI);
} else {
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(LI->getOperand(0), BaseReg, Scale, IndexReg, Disp);
- addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r),
- BaseReg, Scale, IndexReg, Disp);
+ X86AddressMode AM;
+ getAddressingMode(LI->getOperand(0), AM);
+ addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), AM);
}
return;
}
I.getOpcode() == Instruction::Div, ResultReg);
}
-void ISel::emitDivRemOperation(MachineBasicBlock *BB,
- MachineBasicBlock::iterator IP,
- Value *Op0, Value *Op1, bool isDiv,
- unsigned ResultReg) {
+void X86ISel::emitDivRemOperation(MachineBasicBlock *BB,
+ MachineBasicBlock::iterator IP,
+ Value *Op0, Value *Op1, bool isDiv,
+ unsigned ResultReg) {
const Type *Ty = Op0->getType();
unsigned Class = getClass(Ty);
switch (Class) {
}
static const unsigned MovOpcode[]={ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
- static const unsigned NEGOpcode[] = { X86::NEG8r, X86::NEG16r, X86::NEG32r };
+ static const unsigned NEGOpcode[]={ X86::NEG8r, X86::NEG16r, X86::NEG32r };
static const unsigned SAROpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
static const unsigned SHROpcode[]={ X86::SHR8ri, X86::SHR16ri, X86::SHR32ri };
static const unsigned ADDOpcode[]={ X86::ADD8rr, X86::ADD16rr, X86::ADD32rr };
// Special case signed division by power of 2.
- if (isDiv)
- if (ConstantSInt *CI = dyn_cast<ConstantSInt>(Op1)) {
+ if (ConstantSInt *CI = dyn_cast<ConstantSInt>(Op1))
+ if (isDiv) {
assert(Class != cLong && "This doesn't handle 64-bit divides!");
int V = CI->getValue();
return;
}
+ if (V == 2 || V == -2) { // X /s 2
+ static const unsigned CMPOpcode[] = {
+ X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
+ };
+ static const unsigned SBBOpcode[] = {
+ X86::SBB8ri, X86::SBB16ri, X86::SBB32ri
+ };
+ unsigned Op0Reg = getReg(Op0, BB, IP);
+ unsigned SignBit = 1 << (CI->getType()->getPrimitiveSize()*8-1);
+ BuildMI(*BB, IP, CMPOpcode[Class], 2).addReg(Op0Reg).addImm(SignBit);
+
+ unsigned TmpReg = makeAnotherReg(Op0->getType());
+ BuildMI(*BB, IP, SBBOpcode[Class], 2, TmpReg).addReg(Op0Reg).addImm(-1);
+
+ unsigned TmpReg2 = V == 2 ? ResultReg : makeAnotherReg(Op0->getType());
+ BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg2).addReg(TmpReg).addImm(1);
+ if (V == -2) {
+ BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(TmpReg2);
+ }
+ return;
+ }
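+      // The cmp/sbb/sar sequence above is a branchless round-toward-zero
+      // divide by two: CF = (X >= 0) and Tmp = X + 1 - CF, so e.g. X = -3
+      // gives Tmp = -2 and sar yields -1, while X = 6 gives Tmp = 6 and 3.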
+
bool isNeg = false;
if (V < 0) { // Not a positive power of 2?
V = -V;
--Log;
unsigned Op0Reg = getReg(Op0, BB, IP);
unsigned TmpReg = makeAnotherReg(Op0->getType());
- if (Log != 1)
- BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg)
- .addReg(Op0Reg).addImm(Log-1);
- else
- BuildMI(*BB, IP, MovOpcode[Class], 1, TmpReg).addReg(Op0Reg);
+ BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg)
+ .addReg(Op0Reg).addImm(Log-1);
unsigned TmpReg2 = makeAnotherReg(Op0->getType());
BuildMI(*BB, IP, SHROpcode[Class], 2, TmpReg2)
.addReg(TmpReg).addImm(32-Log);
unsigned TmpReg4 = isNeg ? makeAnotherReg(Op0->getType()) : ResultReg;
BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg4)
- .addReg(Op0Reg).addImm(Log);
+ .addReg(TmpReg3).addImm(Log);
if (isNeg)
BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(TmpReg4);
return;
}
+ } else { // X % C
+ assert(Class != cLong && "This doesn't handle 64-bit remainder!");
+ int V = CI->getValue();
+
+ if (V == 2 || V == -2) { // X % 2, X % -2
+ static const unsigned SExtOpcode[] = { X86::CBW, X86::CWD, X86::CDQ };
+ static const unsigned BaseReg[] = { X86::AL , X86::AX , X86::EAX };
+ static const unsigned SExtReg[] = { X86::AH , X86::DX , X86::EDX };
+ static const unsigned ANDOpcode[] = {
+ X86::AND8ri, X86::AND16ri, X86::AND32ri
+ };
+ static const unsigned XOROpcode[] = {
+ X86::XOR8rr, X86::XOR16rr, X86::XOR32rr
+ };
+ static const unsigned SUBOpcode[] = {
+ X86::SUB8rr, X86::SUB16rr, X86::SUB32rr
+ };
+
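+        // This computes X % +/-2 as sign(X)*(X&1) without a branch: with
+        // S = all-ones if X < 0 (from the sign extension below), the result
+        // is ((X&1) ^ S) - S, e.g. X = -5 gives (1 ^ -1) - (-1) = -1.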
+ // Sign extend result into reg of -1 or 0.
+ unsigned Op0Reg = getReg(Op0, BB, IP);
+ BuildMI(*BB, IP, MovOpcode[Class], 1, BaseReg[Class]).addReg(Op0Reg);
+ BuildMI(*BB, IP, SExtOpcode[Class], 0);
+ unsigned TmpReg0 = makeAnotherReg(Op0->getType());
+ BuildMI(*BB, IP, MovOpcode[Class], 1, TmpReg0).addReg(SExtReg[Class]);
+
+ unsigned TmpReg1 = makeAnotherReg(Op0->getType());
+ BuildMI(*BB, IP, ANDOpcode[Class], 2, TmpReg1).addReg(Op0Reg).addImm(1);
+
+ unsigned TmpReg2 = makeAnotherReg(Op0->getType());
+ BuildMI(*BB, IP, XOROpcode[Class], 2,
+ TmpReg2).addReg(TmpReg1).addReg(TmpReg0);
+ BuildMI(*BB, IP, SUBOpcode[Class], 2,
+ ResultReg).addReg(TmpReg2).addReg(TmpReg0);
+ return;
+ }
}
static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
/// shift values equal to 1. Even the general case is sort of special,
/// because the shift amount has to be in CL, not just any old register.
///
-void ISel::visitShiftInst(ShiftInst &I) {
+void X86ISel::visitShiftInst(ShiftInst &I) {
MachineBasicBlock::iterator IP = BB->end ();
emitShiftOperation (BB, IP, I.getOperand (0), I.getOperand (1),
I.getOpcode () == Instruction::Shl, I.getType (),
getReg (I));
}
+/// doSHLDConst - Emit code for a 'SHLD DestReg, Op0, Op1, Amt' operation,
+/// where Amt is a constant.
+void X86ISel::doSHLDConst(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ unsigned DestReg, unsigned Op0Reg, unsigned Op1Reg,
+ unsigned Amt) {
+  // SHLD is a very inefficient operation on every processor, so try to do
+  // something simpler for common values of 'Amt'.
+ if (Amt == 0) {
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0Reg);
+ } else if (Amt == 1) {
+ unsigned Tmp = makeAnotherReg(Type::UIntTy);
+ BuildMI(*MBB, IP, X86::ADD32rr, 2, Tmp).addReg(Op1Reg).addReg(Op1Reg);
+ BuildMI(*MBB, IP, X86::ADC32rr, 2, DestReg).addReg(Op0Reg).addReg(Op0Reg);
+ } else if (Amt == 2 || Amt == 3) {
+ // On the P4 and Athlon it is cheaper to replace shld ..., 2|3 with a
+ // shift/lea pair. NOTE: This should not be done on the P6 family!
+ unsigned Tmp = makeAnotherReg(Type::UIntTy);
+ BuildMI(*MBB, IP, X86::SHR32ri, 2, Tmp).addReg(Op1Reg).addImm(32-Amt);
+ X86AddressMode AM;
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = Tmp;
+ AM.Scale = 1 << Amt;
+ AM.IndexReg = Op0Reg;
+ AM.Disp = 0;
+ addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 4, DestReg), AM);
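+    // e.g. Amt == 2: Tmp = Op1 >> 30 and Dest = Tmp + Op0*4, which equals
+    // (Op0 << 2) | (Op1 >> 30) since the low two bits of Op0*4 are zero.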
+ } else {
+    // NOTE: It is always cheaper on the P4 to emit SHLD as two shifts and an
+    // OR than it is to emit a real SHLD, but we settle for the real SHLD here.
+
+ BuildMI(*MBB, IP, X86::SHLD32rri8, 3,
+ DestReg).addReg(Op0Reg).addReg(Op1Reg).addImm(Amt);
+ }
+}
+
/// emitShiftOperation - Common code shared between visitShiftInst and
/// constant expression support.
-void ISel::emitShiftOperation(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IP,
- Value *Op, Value *ShiftAmount, bool isLeftShift,
- const Type *ResultTy, unsigned DestReg) {
+void X86ISel::emitShiftOperation(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ Value *Op, Value *ShiftAmount,
+ bool isLeftShift, const Type *ResultTy,
+ unsigned DestReg) {
unsigned SrcReg = getReg (Op, MBB, IP);
bool isSigned = ResultTy->isSigned ();
unsigned Class = getClass (ResultTy);
-
- static const unsigned ConstantOperand[][4] = {
- { X86::SHR8ri, X86::SHR16ri, X86::SHR32ri, X86::SHRD32rri8 }, // SHR
- { X86::SAR8ri, X86::SAR16ri, X86::SAR32ri, X86::SHRD32rri8 }, // SAR
- { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SHL
- { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SAL = SHL
+
+ static const unsigned ConstantOperand[][3] = {
+ { X86::SHR8ri, X86::SHR16ri, X86::SHR32ri }, // SHR
+ { X86::SAR8ri, X86::SAR16ri, X86::SAR32ri }, // SAR
+ { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri }, // SHL
+ { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri }, // SAL = SHL
};
- static const unsigned NonConstantOperand[][4] = {
+ static const unsigned NonConstantOperand[][3] = {
{ X86::SHR8rCL, X86::SHR16rCL, X86::SHR32rCL }, // SHR
{ X86::SAR8rCL, X86::SAR16rCL, X86::SAR32rCL }, // SAR
{ X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SHL
{ X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SAL = SHL
};
- // Longs, as usual, are handled specially...
+ // Longs, as usual, are handled specially.
if (Class == cLong) {
- // If we have a constant shift, we can generate much more efficient code
- // than otherwise...
- //
if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
unsigned Amount = CUI->getValue();
- if (Amount < 32) {
+ if (Amount == 1 && isLeftShift) { // X << 1 == X+X
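+        // The add produces the carry out of the low word; the adc doubles
+        // the high word and shifts that carry in.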
+ BuildMI(*MBB, IP, X86::ADD32rr, 2,
+ DestReg).addReg(SrcReg).addReg(SrcReg);
+ BuildMI(*MBB, IP, X86::ADC32rr, 2,
+ DestReg+1).addReg(SrcReg+1).addReg(SrcReg+1);
+ } else if (Amount < 32) {
const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
if (isLeftShift) {
- BuildMI(*MBB, IP, Opc[3], 3,
- DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addImm(Amount);
+ doSHLDConst(MBB, IP, DestReg+1, SrcReg+1, SrcReg, Amount);
BuildMI(*MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addImm(Amount);
} else {
- BuildMI(*MBB, IP, Opc[3], 3,
- DestReg).addReg(SrcReg ).addReg(SrcReg+1).addImm(Amount);
+ BuildMI(*MBB, IP, X86::SHRD32rri8, 3,
+ DestReg).addReg(SrcReg ).addReg(SrcReg+1).addImm(Amount);
BuildMI(*MBB, IP, Opc[2],2,DestReg+1).addReg(SrcReg+1).addImm(Amount);
}
- } else { // Shifting more than 32 bits
- Amount -= 32;
+ } else if (Amount == 32) {
if (isLeftShift) {
- if (Amount != 0) {
- BuildMI(*MBB, IP, X86::SHL32ri, 2,
- DestReg + 1).addReg(SrcReg).addImm(Amount);
- } else {
- BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg);
- }
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg);
BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
} else {
- if (Amount != 0) {
- BuildMI(*MBB, IP, isSigned ? X86::SAR32ri : X86::SHR32ri, 2,
- DestReg).addReg(SrcReg+1).addImm(Amount);
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg+1);
+ if (!isSigned) {
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
} else {
- BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg+1);
+          // Sign-fill the new high word from the old high word's sign bit.
+          BuildMI(*MBB, IP, X86::SAR32ri, 2,
+                  DestReg+1).addReg(SrcReg+1).addImm(31);
}
+ }
+ } else { // Shifting more than 32 bits
+ Amount -= 32;
+ if (isLeftShift) {
+ BuildMI(*MBB, IP, X86::SHL32ri, 2,
+ DestReg + 1).addReg(SrcReg).addImm(Amount);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
+ } else {
+ BuildMI(*MBB, IP, isSigned ? X86::SAR32ri : X86::SHR32ri, 2,
+ DestReg).addReg(SrcReg+1).addImm(Amount);
BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
}
}
} else {
unsigned TmpReg = makeAnotherReg(Type::IntTy);
-
if (!isLeftShift && isSigned) {
// If this is a SHR of a Long, then we need to do funny sign extension
// stuff. TmpReg gets the value to use as the high-part if we are
// The shift amount is constant, guaranteed to be a ubyte. Get its value.
assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");
- const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
- BuildMI(*MBB, IP, Opc[Class], 2,
- DestReg).addReg(SrcReg).addImm(CUI->getValue());
+    if (CUI->getValue() == 1 && isLeftShift) {    // X << 1 -> X+X
+      static const unsigned AddOpC[] = {
+        X86::ADD8rr, X86::ADD16rr, X86::ADD32rr
+      };
+      BuildMI(*MBB, IP, AddOpC[Class], 2, DestReg)
+        .addReg(SrcReg).addReg(SrcReg);
+ } else {
+ const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
+ BuildMI(*MBB, IP, Opc[Class], 2,
+ DestReg).addReg(SrcReg).addImm(CUI->getValue());
+ }
} else { // The shift amount is non-constant.
unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP);
BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
/// instruction. The load and store instructions are the only place where we
/// need to worry about the memory layout of the target machine.
///
-void ISel::visitLoadInst(LoadInst &I) {
+void X86ISel::visitLoadInst(LoadInst &I) {
// Check to see if this load instruction is going to be folded into a binary
// instruction, like add. If so, we don't want to emit it. Wouldn't a real
// pattern matching instruction selector be nice?
unsigned FI = getFixedSizedAllocaFI(AI);
addFrameReference(BuildMI(BB, Opcode[Class], 4, DestReg), FI);
} else {
- unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
- getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
- addFullAddress(BuildMI(BB, Opcode[Class], 4, DestReg),
- BaseReg, Scale, IndexReg, Disp);
+ X86AddressMode AM;
+ getAddressingMode(I.getOperand(0), AM);
+ addFullAddress(BuildMI(BB, Opcode[Class], 4, DestReg), AM);
}
return;
} else {
addFrameReference(BuildMI(BB, Opcode, 4, DestReg), FI);
}
} else {
- unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
- getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
+ X86AddressMode AM;
+ getAddressingMode(I.getOperand(0), AM);
if (Class == cLong) {
- addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg),
- BaseReg, Scale, IndexReg, Disp);
- addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1),
- BaseReg, Scale, IndexReg, Disp+4);
+ addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg), AM);
+ AM.Disp += 4;
+ addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), AM);
} else {
- addFullAddress(BuildMI(BB, Opcode, 4, DestReg),
- BaseReg, Scale, IndexReg, Disp);
+ addFullAddress(BuildMI(BB, Opcode, 4, DestReg), AM);
}
}
}
/// visitStoreInst - Implement LLVM store instructions in terms of the x86 'mov'
/// instruction.
///
-void ISel::visitStoreInst(StoreInst &I) {
- unsigned BaseReg = ~0U, Scale = ~0U, IndexReg = ~0U, Disp = ~0U;
- unsigned AllocaFrameIdx = ~0U;
-
- if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(1)))
- AllocaFrameIdx = getFixedSizedAllocaFI(AI);
- else
- getAddressingMode(I.getOperand(1), BaseReg, Scale, IndexReg, Disp);
+void X86ISel::visitStoreInst(StoreInst &I) {
+ X86AddressMode AM;
+ getAddressingMode(I.getOperand(1), AM);
const Type *ValTy = I.getOperand(0)->getType();
unsigned Class = getClassB(ValTy);
if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
uint64_t Val = CI->getRawValue();
if (Class == cLong) {
- if (AllocaFrameIdx != ~0U) {
- addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
- AllocaFrameIdx).addImm(Val & ~0U);
- addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
- AllocaFrameIdx, 4).addImm(Val>>32);
- } else {
- addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
- BaseReg, Scale, IndexReg, Disp).addImm(Val & ~0U);
- addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
- BaseReg, Scale, IndexReg, Disp+4).addImm(Val>>32);
- }
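+      // The 64-bit immediate is stored as two 32-bit halves, e.g.
+      //   movl $Lo, addr
+      //   movl $Hi, addr+4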
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(Val & ~0U);
+ AM.Disp += 4;
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(Val>>32);
} else {
static const unsigned Opcodes[] = {
X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
};
unsigned Opcode = Opcodes[Class];
- if (AllocaFrameIdx != ~0U)
- addFrameReference(BuildMI(BB, Opcode, 5), AllocaFrameIdx).addImm(Val);
- else
- addFullAddress(BuildMI(BB, Opcode, 5),
- BaseReg, Scale, IndexReg, Disp).addImm(Val);
+ addFullAddress(BuildMI(BB, Opcode, 5), AM).addImm(Val);
}
} else if (isa<ConstantPointerNull>(I.getOperand(0))) {
- if (AllocaFrameIdx != ~0U)
- addFrameReference(BuildMI(BB, X86::MOV32mi, 5), AllocaFrameIdx).addImm(0);
- else
- addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
- BaseReg, Scale, IndexReg, Disp).addImm(0);
-
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(0);
} else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
- if (AllocaFrameIdx != ~0U)
- addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
- AllocaFrameIdx).addImm(CB->getValue());
- else
- addFullAddress(BuildMI(BB, X86::MOV8mi, 5),
- BaseReg, Scale, IndexReg, Disp).addImm(CB->getValue());
+ addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CB->getValue());
} else if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) {
// Store constant FP values with integer instructions to avoid having to
// load the constants from the constant pool then do a store.
float F;
} V;
V.F = CFP->getValue();
- if (AllocaFrameIdx != ~0U)
- addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
- AllocaFrameIdx).addImm(V.I);
- else
- addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
- BaseReg, Scale, IndexReg, Disp).addImm(V.I);
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(V.I);
} else {
union {
uint64_t I;
double F;
} V;
V.F = CFP->getValue();
- if (AllocaFrameIdx != ~0U) {
- addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
- AllocaFrameIdx).addImm((unsigned)V.I);
- addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
- AllocaFrameIdx, 4).addImm(unsigned(V.I >> 32));
- } else {
- addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
- BaseReg, Scale, IndexReg, Disp).addImm((unsigned)V.I);
- addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
- BaseReg, Scale, IndexReg, Disp+4).addImm(
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm((unsigned)V.I);
+ AM.Disp += 4;
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(
unsigned(V.I >> 32));
- }
}
} else if (Class == cLong) {
unsigned ValReg = getReg(I.getOperand(0));
- if (AllocaFrameIdx != ~0U) {
- addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
- AllocaFrameIdx).addReg(ValReg);
- addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
- AllocaFrameIdx, 4).addReg(ValReg+1);
- } else {
- addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
- BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
- addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
- BaseReg, Scale, IndexReg, Disp+4).addReg(ValReg+1);
- }
+ addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg);
+ AM.Disp += 4;
+ addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg+1);
} else {
+ // FIXME: stop emitting these two instructions:
+ // movl $global,%eax
+ // movl %eax,(%ebx)
+ // when one instruction will suffice. That includes when the global
+ // has an offset applied to it.
unsigned ValReg = getReg(I.getOperand(0));
static const unsigned Opcodes[] = {
X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
unsigned Opcode = Opcodes[Class];
if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;
- if (AllocaFrameIdx != ~0U)
- addFrameReference(BuildMI(BB, Opcode, 5), AllocaFrameIdx).addReg(ValReg);
- else
- addFullAddress(BuildMI(BB, Opcode, 1+4),
- BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
+ addFullAddress(BuildMI(BB, Opcode, 1+4), AM).addReg(ValReg);
}
}
/// visitCastInst - Here we have various kinds of copying with or without sign
/// extension going on.
///
-void ISel::visitCastInst(CastInst &CI) {
+void X86ISel::visitCastInst(CastInst &CI) {
Value *Op = CI.getOperand(0);
unsigned SrcClass = getClassB(Op->getType());
/// emitCastOperation - Common code shared between visitCastInst and constant
/// expression cast support.
///
-void ISel::emitCastOperation(MachineBasicBlock *BB,
- MachineBasicBlock::iterator IP,
- Value *Src, const Type *DestTy,
- unsigned DestReg) {
+void X86ISel::emitCastOperation(MachineBasicBlock *BB,
+ MachineBasicBlock::iterator IP,
+ Value *Src, const Type *DestTy,
+ unsigned DestReg) {
const Type *SrcTy = Src->getType();
unsigned SrcClass = getClassB(SrcTy);
unsigned DestClass = getClassB(DestTy);
PromoteType = Type::IntTy;
PromoteOpcode = X86::MOVZX32rr16;
break;
- case Type::UIntTyID: {
- // Make a 64 bit temporary... and zero out the top of it...
- unsigned TmpReg = makeAnotherReg(Type::LongTy);
- BuildMI(*BB, IP, X86::MOV32rr, 1, TmpReg).addReg(SrcReg);
- BuildMI(*BB, IP, X86::MOV32ri, 1, TmpReg+1).addImm(0);
- SrcTy = Type::LongTy;
- SrcClass = cLong;
- SrcReg = TmpReg;
- break;
- }
case Type::ULongTyID:
+ case Type::UIntTyID:
// Don't fild into the read destination.
DestReg = makeAnotherReg(Type::DoubleTy);
break;
{ 0/*byte*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m };
addFrameReference(BuildMI(*BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
- // We need special handling for unsigned 64-bit integer sources. If the
- // input number has the "sign bit" set, then we loaded it incorrectly as a
- // negative 64-bit number. In this case, add an offset value.
- if (SrcTy == Type::ULongTy) {
+ if (SrcTy == Type::UIntTy) {
+    // If this is a cast from uint -> double, we need to be careful about
+    // whether the "sign" bit is set: if so, we don't want to produce a
+    // negative number, we want a positive one. Emit code to add an offset
+    // if the sign bit is set.
+
+ // Compute whether the sign bit is set by shifting the reg right 31 bits.
+ unsigned IsNeg = makeAnotherReg(Type::IntTy);
+ BuildMI(BB, X86::SHR32ri, 2, IsNeg).addReg(SrcReg).addImm(31);
+
+ // Create a CP value that has the offset in one word and 0 in the other.
+ static ConstantInt *TheOffset = ConstantUInt::get(Type::ULongTy,
+ 0x4f80000000000000ULL);
+ unsigned CPI = F->getConstantPool()->getConstantPoolIndex(TheOffset);
+ BuildMI(BB, X86::FADD32m, 5, RealDestReg).addReg(DestReg)
+ .addConstantPoolIndex(CPI).addZImm(4).addReg(IsNeg).addSImm(0);
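+    // FILD interpreted the uint as a signed int, so a value with the sign
+    // bit set was loaded 2**32 too low. CP+4*IsNeg reads 0.0f when IsNeg is
+    // 0, and 0x4f800000 == 2**32 as a float when IsNeg is 1; e.g. 0xFFFFFFFF
+    // loads as -1.0 and the correction yields 4294967295.0.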
+
+ } else if (SrcTy == Type::ULongTy) {
+ // We need special handling for unsigned 64-bit integer sources. If the
+ // input number has the "sign bit" set, then we loaded it incorrectly as a
+ // negative 64-bit number. In this case, add an offset value.
+
// Emit a test instruction to see if the dynamic input value was signed.
BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg+1).addReg(SrcReg+1);
/// visitVANextInst - Implement the va_next instruction...
///
-void ISel::visitVANextInst(VANextInst &I) {
+void X86ISel::visitVANextInst(VANextInst &I) {
unsigned VAList = getReg(I.getOperand(0));
unsigned DestReg = getReg(I);
BuildMI(BB, X86::ADD32ri, 2, DestReg).addReg(VAList).addImm(Size);
}
-void ISel::visitVAArgInst(VAArgInst &I) {
+void X86ISel::visitVAArgInst(VAArgInst &I) {
unsigned VAList = getReg(I.getOperand(0));
unsigned DestReg = getReg(I);
/// visitGetElementPtrInst - instruction-select GEP instructions
///
-void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
+void X86ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
// If this GEP instruction will be folded into all of its users, we don't need
// to explicitly calculate it!
- unsigned A, B, C, D;
- if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), A,B,C,D)) {
+ X86AddressMode AM;
+ if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), AM)) {
// Check all of the users of the instruction to see if they are loads and
// stores.
bool AllWillFold = true;
///
/// Note that there is one fewer entry in GEPTypes than there is in GEPOps.
///
-void ISel::getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
- std::vector<Value*> &GEPOps,
- std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
- unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
+void X86ISel::getGEPIndex(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ std::vector<Value*> &GEPOps,
+ std::vector<const Type*> &GEPTypes,
+ X86AddressMode &AM) {
const TargetData &TD = TM.getTargetData();
// Clear out the state we are working with...
- BaseReg = 0; // No base register
- Scale = 1; // Unit scale
- IndexReg = 0; // No index register
- Disp = 0; // No displacement
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = 0; // No base register
+ AM.Scale = 1; // Unit scale
+ AM.IndexReg = 0; // No index register
+ AM.Disp = 0; // No displacement
// While there are GEP indexes that can be folded into the current address,
// keep processing them.
// structure is in memory. Since the structure index must be constant, we
// can get its value and use it to find the right byte offset from the
// StructLayout class's list of structure member offsets.
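      // e.g. for { int, short, int }, indexing member #2 adds 8 to AM.Disp.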
- Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
+ AM.Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
GEPOps.pop_back(); // Consume a GEP operand
GEPTypes.pop_back();
} else {
// If idx is a constant, fold it into the offset.
unsigned TypeSize = TD.getTypeSize(SqTy->getElementType());
if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
- Disp += TypeSize*CSI->getValue();
+ AM.Disp += TypeSize*CSI->getValue();
} else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) {
- Disp += TypeSize*CUI->getValue();
+ AM.Disp += TypeSize*CUI->getValue();
} else {
// If the index reg is already taken, we can't handle this index.
- if (IndexReg) return;
+ if (AM.IndexReg) return;
// If this is a size that we can handle, then add the index as
switch (TypeSize) {
case 1: case 2: case 4: case 8:
// These are all acceptable scales on X86.
- Scale = TypeSize;
+ AM.Scale = TypeSize;
break;
default:
// Otherwise, we can't handle this scale
CI->getOperand(0)->getType() == Type::UIntTy)
idx = CI->getOperand(0);
- IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
+ AM.IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
}
GEPOps.pop_back(); // Consume a GEP operand
// GEPTypes is empty, which means we have a single operand left. Set it as
// the base register.
//
- assert(BaseReg == 0);
+ assert(AM.Base.Reg == 0);
-#if 0 // FIXME: TODO!
- if (AllocaInst *AI = dyn_castFixedAlloca(V)) {
- // FIXME: When we can add FrameIndex values as the first operand, we can
- // make GEP's of allocas MUCH more efficient!
- unsigned FI = getFixedSizedAllocaFI(AI);
+ if (AllocaInst *AI = dyn_castFixedAlloca(GEPOps.back())) {
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
GEPOps.pop_back();
return;
- } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
- // FIXME: When addressing modes are more powerful/correct, we could load
- // global addresses directly as 32-bit immediates.
}
-#endif
- BaseReg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
+ if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps.back())) {
+ AM.GV = GV;
+ GEPOps.pop_back();
+ return;
+ }
+
+ AM.Base.Reg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
GEPOps.pop_back(); // Consume the last GEP operand
}
/// isGEPFoldable - Return true if the specified GEP can be completely
/// folded into the addressing mode of a load/store or lea instruction.
-bool ISel::isGEPFoldable(MachineBasicBlock *MBB,
- Value *Src, User::op_iterator IdxBegin,
- User::op_iterator IdxEnd, unsigned &BaseReg,
- unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
+bool X86ISel::isGEPFoldable(MachineBasicBlock *MBB,
+ Value *Src, User::op_iterator IdxBegin,
+ User::op_iterator IdxEnd, X86AddressMode &AM) {
std::vector<Value*> GEPOps;
GEPOps.resize(IdxEnd-IdxBegin+1);
MachineBasicBlock::iterator IP;
if (MBB) IP = MBB->end();
- getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);
+ getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);
// We can fold it away iff the getGEPIndex call eliminated all operands.
return GEPOps.empty();
}
-void ISel::emitGEPOperation(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IP,
- Value *Src, User::op_iterator IdxBegin,
- User::op_iterator IdxEnd, unsigned TargetReg) {
+void X86ISel::emitGEPOperation(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IP,
+ Value *Src, User::op_iterator IdxBegin,
+ User::op_iterator IdxEnd, unsigned TargetReg) {
const TargetData &TD = TM.getTargetData();
// If this is a getelementptr null, with all constant integer indices, just
// Keep emitting instructions until we consume the entire GEP instruction.
while (!GEPOps.empty()) {
unsigned OldSize = GEPOps.size();
- unsigned BaseReg, Scale, IndexReg, Disp;
- getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);
+ X86AddressMode AM;
+ getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);
if (GEPOps.size() != OldSize) {
// getGEPIndex consumed some of the input. Build an LEA instruction here.
unsigned NextTarget = 0;
if (!GEPOps.empty()) {
- assert(BaseReg == 0 &&
+ assert(AM.Base.Reg == 0 &&
"getGEPIndex should have left the base register open for chaining!");
- NextTarget = BaseReg = makeAnotherReg(Type::UIntTy);
+ NextTarget = AM.Base.Reg = makeAnotherReg(Type::UIntTy);
}
- if (IndexReg == 0 && Disp == 0)
- BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
+ if (AM.BaseType == X86AddressMode::RegBase &&
+ AM.IndexReg == 0 && AM.Disp == 0 && !AM.GV)
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(AM.Base.Reg);
+ else if (AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0 &&
+ AM.IndexReg == 0 && AM.Disp == 0)
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(AM.GV);
else
- addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg),
- BaseReg, Scale, IndexReg, Disp);
+ addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg), AM);
--IP;
TargetReg = NextTarget;
} else if (GEPTypes.empty()) {
/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
-void ISel::visitAllocaInst(AllocaInst &I) {
+void X86ISel::visitAllocaInst(AllocaInst &I) {
// If this is a fixed size alloca in the entry block for the function, we
// statically stack allocate the space, so we don't need to do anything here.
//
/// visitMallocInst - Malloc instructions are code generated into direct calls
/// to the library malloc.
///
-void ISel::visitMallocInst(MallocInst &I) {
+void X86ISel::visitMallocInst(MallocInst &I) {
unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
unsigned Arg;
/// visitFreeInst - Free instructions are code gen'd to call the free libc
/// function.
///
-void ISel::visitFreeInst(FreeInst &I) {
+void X86ISel::visitFreeInst(FreeInst &I) {
std::vector<ValueRecord> Args;
Args.push_back(ValueRecord(I.getOperand(0)));
MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
/// generated code sucks but the implementation is nice and simple.
///
FunctionPass *llvm::createX86SimpleInstructionSelector(TargetMachine &TM) {
- return new ISel(TM);
+ return new X86ISel(TM);
}