-//===-- InstSelectSimple.cpp - A simple instruction selector for x86 ------===//
+//===-- X86ISelSimple.cpp - A simple instruction selector for x86 ---------===//
//
// The LLVM Compiler Infrastructure
//
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
-#include "llvm/IntrinsicLowering.h"
#include "llvm/Pass.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
-#include "llvm/Support/CFG.h"
#include "Support/Statistic.h"
using namespace llvm;
/// size of the type, and whether or not it is floating point.
///
static inline TypeClass getClass(const Type *Ty) {
- switch (Ty->getPrimitiveID()) {
+ switch (Ty->getTypeID()) {
case Type::SByteTyID:
case Type::UByteTyID: return cByte; // Byte operands are class #0
case Type::ShortTyID:
// MBBMap - Mapping between LLVM BB -> Machine BB
std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;
+ // AllocaMap - Mapping from fixed sized alloca instructions to the
+ // FrameIndex for the alloca.
+ std::map<AllocaInst*, unsigned> AllocaMap;
+
ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}
/// runOnFunction - Top level implementation of instruction selection for
RegMap.clear();
MBBMap.clear();
+ AllocaMap.clear();
F = 0;
// We always build a machine code representation for the function
return true;
/// getAddressingMode - Get the addressing mode to use to address the
/// specified value. The returned value should be used with addFullAddress.
- void getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
- unsigned &IndexReg, unsigned &Disp);
+ void getAddressingMode(Value *Addr, X86AddressMode &AM);
/// getGEPIndex - This is used to fold GEP instructions into X86 addressing
/// expressions.
void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
std::vector<Value*> &GEPOps,
- std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
- unsigned &Scale, unsigned &IndexReg, unsigned &Disp);
+ std::vector<const Type*> &GEPTypes,
+ X86AddressMode &AM);
/// isGEPFoldable - Return true if the specified GEP can be completely
/// folded into the addressing mode of a load/store or lea instruction.
bool isGEPFoldable(MachineBasicBlock *MBB,
Value *Src, User::op_iterator IdxBegin,
- User::op_iterator IdxEnd, unsigned &BaseReg,
- unsigned &Scale, unsigned &IndexReg, unsigned &Disp);
+ User::op_iterator IdxEnd, X86AddressMode &AM);
/// emitGEPOperation - Common code shared between visitGetElementPtrInst and
/// constant expression GEP support.
MachineBasicBlock::iterator MBBI,
Constant *C, unsigned Reg);
+ void emitUCOMr(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
+ unsigned LHS, unsigned RHS);
+
/// makeAnotherReg - This method returns the next register number we haven't
/// yet used.
///
return F->getSSARegMap()->createVirtualRegister(RC);
}
- /// getReg - This method turns an LLVM value into a register number. This
- /// is guaranteed to produce the same register number for a particular value
- /// every time it is queried.
+ /// getReg - This method turns an LLVM value into a register number.
///
unsigned getReg(Value &V) { return getReg(&V); } // Allow references
unsigned getReg(Value *V) {
return getReg(V, BB, It);
}
unsigned getReg(Value *V, MachineBasicBlock *MBB,
- MachineBasicBlock::iterator IPt) {
- // If this operand is a constant, emit the code to copy the constant into
- // the register here...
- //
- if (Constant *C = dyn_cast<Constant>(V)) {
- unsigned Reg = makeAnotherReg(V->getType());
- copyConstantToRegister(MBB, IPt, C, Reg);
- return Reg;
- } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
- unsigned Reg = makeAnotherReg(V->getType());
- // Move the address of the global into the register
- BuildMI(*MBB, IPt, X86::MOV32ri, 1, Reg).addGlobalAddress(GV);
- return Reg;
- } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
- // Do not emit noop casts at all.
- if (getClassB(CI->getType()) == getClassB(CI->getOperand(0)->getType()))
- return getReg(CI->getOperand(0), MBB, IPt);
- }
+ MachineBasicBlock::iterator IPt);
- unsigned &Reg = RegMap[V];
- if (Reg == 0) {
- Reg = makeAnotherReg(V->getType());
- RegMap[V] = Reg;
- }
-
- return Reg;
- }
+ /// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
+ /// that is to be statically allocated with the initial stack frame
+ /// adjustment.
+ unsigned getFixedSizedAllocaFI(AllocaInst *AI);
};
}
+/// dyn_castFixedAlloca - If the specified value is a fixed size alloca
+/// instruction in the entry block, return it. Otherwise, return a null
+/// pointer.
+static AllocaInst *dyn_castFixedAlloca(Value *V) {
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
+ BasicBlock *BB = AI->getParent();
+  if (isa<ConstantUInt>(AI->getArraySize()) &&
+      BB == &BB->getParent()->front())
+ return AI;
+ }
+ return 0;
+}
+
+/// getReg - This method turns an LLVM value into a register number.
+///
+unsigned ISel::getReg(Value *V, MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator IPt) {
+ // If this operand is a constant, emit the code to copy the constant into
+ // the register here...
+ if (Constant *C = dyn_cast<Constant>(V)) {
+ unsigned Reg = makeAnotherReg(V->getType());
+ copyConstantToRegister(MBB, IPt, C, Reg);
+ return Reg;
+ } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
+ // Do not emit noop casts at all, unless it's a double -> float cast.
+ if (getClassB(CI->getType()) == getClassB(CI->getOperand(0)->getType()) &&
+ (CI->getType() != Type::FloatTy ||
+ CI->getOperand(0)->getType() != Type::DoubleTy))
+ return getReg(CI->getOperand(0), MBB, IPt);
+ } else if (AllocaInst *AI = dyn_castFixedAlloca(V)) {
+    // If the alloca address couldn't be folded into the instruction
+    // addressing, emit an explicit LEA as appropriate.
+ unsigned Reg = makeAnotherReg(V->getType());
+ unsigned FI = getFixedSizedAllocaFI(AI);
+ addFrameReference(BuildMI(*MBB, IPt, X86::LEA32r, 4, Reg), FI);
+ return Reg;
+ }
+
+ unsigned &Reg = RegMap[V];
+ if (Reg == 0) {
+ Reg = makeAnotherReg(V->getType());
+ RegMap[V] = Reg;
+ }
+
+ return Reg;
+}
+
+/// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
+/// that is to be statically allocated with the initial stack frame
+/// adjustment.
+unsigned ISel::getFixedSizedAllocaFI(AllocaInst *AI) {
+ // Already computed this?
+ std::map<AllocaInst*, unsigned>::iterator I = AllocaMap.lower_bound(AI);
+ if (I != AllocaMap.end() && I->first == AI) return I->second;
+
+ const Type *Ty = AI->getAllocatedType();
+ ConstantUInt *CUI = cast<ConstantUInt>(AI->getArraySize());
+ unsigned TySize = TM.getTargetData().getTypeSize(Ty);
+ TySize *= CUI->getValue(); // Get total allocated size...
+ unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);
+
+ // Create a new stack object using the frame manager...
+ int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
+ AllocaMap.insert(I, std::make_pair(AI, FrameIdx));
+ return FrameIdx;
+}
+
+
/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
return;
default:
- std::cerr << "Offending expr: " << C << "\n";
+ std::cerr << "Offending expr: " << *C << "\n";
assert(0 && "Constant expression not yet handled!\n");
}
}
} else if (isa<ConstantPointerNull>(C)) {
// Copy zero (null pointer) to the register.
BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(0);
- } else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
- BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addGlobalAddress(CPR->getValue());
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addGlobalAddress(GV);
} else {
- std::cerr << "Offending constant: " << C << "\n";
+ std::cerr << "Offending constant: " << *C << "\n";
assert(0 && "Type not handled yet!");
}
}
/// the current one.
///
void ISel::SelectPHINodes() {
- const TargetInstrInfo &TII = TM.getInstrInfo();
+ const TargetInstrInfo &TII = *TM.getInstrInfo();
const Function &LF = *F->getFunction(); // The LLVM function...
for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
const BasicBlock *BB = I;
// If this is a constant or GlobalValue, we may have to insert code
// into the basic block to compute it into a virtual register.
- if (isa<Constant>(Val) || isa<GlobalValue>(Val)) {
- if (isa<ConstantExpr>(Val)) {
- // Because we don't want to clobber any values which might be in
- // physical registers with the computation of this constant (which
- // might be arbitrarily complex if it is a constant expression),
- // just insert the computation at the top of the basic block.
- MachineBasicBlock::iterator PI = PredMBB->begin();
-
- // Skip over any PHI nodes though!
- while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
- ++PI;
-
- ValReg = getReg(Val, PredMBB, PI);
- } else {
- // Simple constants get emitted at the end of the basic block,
- // before any terminator instructions. We "know" that the code to
- // move a constant into a register will never clobber any flags.
- ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator());
- }
+      if (isa<Constant>(Val) && !isa<ConstantExpr>(Val)) {
+ // Simple constants get emitted at the end of the basic block,
+ // before any terminator instructions. We "know" that the code to
+ // move a constant into a register will never clobber any flags.
+ ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator());
} else {
- ValReg = getReg(Val);
+ // Because we don't want to clobber any values which might be in
+ // physical registers with the computation of this constant (which
+ // might be arbitrarily complex if it is a constant expression),
+ // just insert the computation at the top of the basic block.
+ MachineBasicBlock::iterator PI = PredMBB->begin();
+
+ // Skip over any PHI nodes though!
+ while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
+ ++PI;
+
+ ValReg = getReg(Val, PredMBB, PI);
}
// Remember that we inserted a value for this PHI for this predecessor
/// Note that this kill instruction will eventually be eliminated when
/// restrictions in the stackifier are relaxed.
///
-static bool RequiresFPRegKill(const BasicBlock *BB) {
+static bool RequiresFPRegKill(const MachineBasicBlock *MBB) {
#if 0
+  const BasicBlock *BB = MBB->getBasicBlock();
for (succ_const_iterator SI = succ_begin(BB), E = succ_end(BB); SI!=E; ++SI) {
const BasicBlock *Succ = *SI;
pred_const_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
// If we haven't found an FP register use or def in this basic block, check
// to see if any of our successors has an FP PHI node, which will cause a
// copy to be inserted into this block.
- for (succ_const_iterator SI = succ_begin(BB->getBasicBlock()),
- E = succ_end(BB->getBasicBlock()); SI != E; ++SI) {
- MachineBasicBlock *SBB = MBBMap[*SI];
+ for (MachineBasicBlock::const_succ_iterator SI = BB->succ_begin(),
+ SE = BB->succ_end(); SI != SE; ++SI) {
+ MachineBasicBlock *SBB = *SI;
for (MachineBasicBlock::iterator I = SBB->begin();
I != SBB->end() && I->getOpcode() == X86::PHI; ++I) {
if (RegMap.getRegClass(I->getOperand(0).getReg())->getSize() == 10)
UsesFPReg:
// Okay, this block uses an FP register. If the block has successors (ie,
// it's not an unwind/return), insert the FP_REG_KILL instruction.
- if (BB->getBasicBlock()->getTerminator()->getNumSuccessors() &&
- RequiresFPRegKill(BB->getBasicBlock())) {
+  if (BB->succ_size() && RequiresFPRegKill(BB)) {
BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
++NumFPKill;
}
}
+void ISel::getAddressingMode(Value *Addr, X86AddressMode &AM) {
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = 0; AM.Scale = 1; AM.IndexReg = 0; AM.Disp = 0;
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
+ if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
+ AM))
+ return;
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
+ if (CE->getOpcode() == Instruction::GetElementPtr)
+ if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
+ AM))
+ return;
+ } else if (AllocaInst *AI = dyn_castFixedAlloca(Addr)) {
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
+ return;
+ }
+
+ // If it's not foldable, reset addr mode.
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = getReg(Addr);
+ AM.Scale = 1; AM.IndexReg = 0; AM.Disp = 0;
+}
+
// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold
// it into the conditional branch or select instruction which is the only user
// of the cc instruction. This is the case if the conditional branch is the
-// only user of the setcc, and if the setcc is in the same basic block as the
-// conditional branch. We also don't handle long arguments below, so we reject
-// them here as well.
+// only user of the setcc. We also don't handle long arguments below, so we
+// reject them here as well.
//
static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) {
if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
if (SCI->hasOneUse()) {
Instruction *User = cast<Instruction>(SCI->use_back());
if ((isa<BranchInst>(User) || isa<SelectInst>(User)) &&
- SCI->getParent() == User->getParent() &&
(getClassB(SCI->getOperand(0)->getType()) != cLong ||
SCI->getOpcode() == Instruction::SetEQ ||
SCI->getOpcode() == Instruction::SetNE))
X86::SETSr, X86::SETNSr },
};
+/// emitUCOMr - In the future when we support processors before the P6, this
+/// wraps the logic for emitting an FUCOMr vs FUCOMIr.
+void ISel::emitUCOMr(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
+ unsigned LHS, unsigned RHS) {
+ if (0) { // for processors prior to the P6
+ BuildMI(*MBB, IP, X86::FUCOMr, 2).addReg(LHS).addReg(RHS);
+ BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
+ BuildMI(*MBB, IP, X86::SAHF, 1);
+ } else {
+ BuildMI(*MBB, IP, X86::FUCOMIr, 2).addReg(LHS).addReg(RHS);
+ }
+}
+
// EmitComparison - This function emits a comparison of the two operands,
// returning the extended setcc code to use.
unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
unsigned Op0r = getReg(Op0, MBB, IP);
// Special case handling of: cmp R, i
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
+ if (isa<ConstantPointerNull>(Op1)) {
+ if (OpNum < 2) // seteq/setne -> test
+ BuildMI(*MBB, IP, X86::TEST32rr, 2).addReg(Op0r).addReg(Op0r);
+ else
+ BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r).addImm(0);
+ return OpNum;
+
+ } else if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
if (Class == cByte || Class == cShort || Class == cInt) {
unsigned Op1v = CI->getRawValue();
// each, then uses a conditional move to handle the overflow case. For
// example, a setlt for long would generate code like this:
//
- // AL = lo(op1) < lo(op2) // Signedness depends on operands
- // BL = hi(op1) < hi(op2) // Always unsigned comparison
- // dest = hi(op1) == hi(op2) ? AL : BL;
+ // AL = lo(op1) < lo(op2) // Always unsigned comparison
+ // BL = hi(op1) < hi(op2) // Signedness depends on operands
+ // dest = hi(op1) == hi(op2) ? BL : AL;
//
// FIXME: This would be much better if we had hierarchical register
BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
break;
case cFP:
- if (0) { // for processors prior to the P6
- BuildMI(*MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r);
- BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
- BuildMI(*MBB, IP, X86::SAHF, 1);
- } else {
- BuildMI(*MBB, IP, X86::FpUCOMI, 2).addReg(Op0r).addReg(Op1r);
- }
+ emitUCOMr(MBB, IP, Op0r, Op1r);
break;
case cLong:
//
-  //    AL = lo(op1) < lo(op2)   // Signedness depends on operands
-  //    BL = hi(op1) < hi(op2)   // Always unsigned comparison
-  //    dest = hi(op1) == hi(op2) ? AL : BL;
+  //    AL = lo(op1) < lo(op2)   // Always unsigned comparison
+  //    BL = hi(op1) < hi(op2)   // Signedness depends on operands
+  //    dest = hi(op1) == hi(op2) ? BL : AL;
//
// FIXME: This would be much better if we had hierarchical register
/// operand, in the specified target register.
///
void ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
- bool isUnsigned = VR.Ty->isUnsigned();
+ bool isUnsigned = VR.Ty->isUnsigned() || VR.Ty == Type::BoolTy;
Value *Val = VR.Val;
const Type *Ty = VR.Ty;
// copy.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
int TheVal = CI->getRawValue() & 0xFFFFFFFF;
- BuildMI(BB, X86::MOV32ri, 1, targetReg).addImm(TheVal);
+ BuildMI(BB, X86::MOV32ri, 1, targetReg).addImm(TheVal);
return;
}
}
/// just make a fall-through (but we don't currently).
///
void ISel::visitBranchInst(BranchInst &BI) {
+ // Update machine-CFG edges
+  BB->addSuccessor(MBBMap[BI.getSuccessor(0)]);
+  if (BI.isConditional())
+    BB->addSuccessor(MBBMap[BI.getSuccessor(1)]);
+
BasicBlock *NextBB = getBlockAfter(BI.getParent()); // BB after current one
if (!BI.isConditional()) { // Unconditional branch?
if (BI.getSuccessor(0) != NextBB)
- BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
+ BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
return;
}
BuildMI(BB, X86::TEST8rr, 2).addReg(condReg).addReg(condReg);
if (BI.getSuccessor(1) == NextBB) {
if (BI.getSuccessor(0) != NextBB)
- BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0));
+ BuildMI(BB, X86::JNE, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
} else {
- BuildMI(BB, X86::JE, 1).addPCDisp(BI.getSuccessor(1));
+ BuildMI(BB, X86::JE, 1).addMBB(MBBMap[BI.getSuccessor(1)]);
if (BI.getSuccessor(0) != NextBB)
- BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
+ BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
}
return;
}
};
if (BI.getSuccessor(0) != NextBB) {
- BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(0));
+ BuildMI(BB, OpcodeTab[isSigned][OpNum], 1)
+ .addMBB(MBBMap[BI.getSuccessor(0)]);
if (BI.getSuccessor(1) != NextBB)
- BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(1));
+ BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(1)]);
} else {
// Change to the inverse condition...
if (BI.getSuccessor(1) != NextBB) {
OpNum ^= 1;
- BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(1));
+ BuildMI(BB, OpcodeTab[isSigned][OpNum], 1)
+ .addMBB(MBBMap[BI.getSuccessor(1)]);
}
}
}
unsigned ArgReg;
switch (getClassB(Args[i].Ty)) {
case cByte:
+ if (Args[i].Val && isa<ConstantBool>(Args[i].Val)) {
+ addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
+ .addImm(Args[i].Val == ConstantBool::True);
+ break;
+ }
+ // FALL THROUGH
case cShort:
if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
// Zero/Sign extend constant, then stuff into memory.
unsigned Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
X86::ESP, ArgOffset).addImm(Val);
+ } else if (Args[i].Val && isa<ConstantPointerNull>(Args[i].Val)) {
+ addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
+ X86::ESP, ArgOffset).addImm(0);
} else {
ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
}
-
/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
/// function, lowering any calls to unknown intrinsic functions into the
/// equivalent LLVM code.
case Intrinsic::frameaddress:
case Intrinsic::memcpy:
case Intrinsic::memset:
+ case Intrinsic::isunordered:
case Intrinsic::readport:
case Intrinsic::writeport:
// We directly implement these intrinsics
// On X86, memory operations are in-order. Lower this intrinsic
// into a volatile load.
Instruction *Before = CI->getPrev();
- LoadInst * LI = new LoadInst (CI->getOperand(1), "", true, CI);
- CI->replaceAllUsesWith (LI);
- BB->getInstList().erase (CI);
- if (Before) { // Move iterator to instruction after call
- I = Before; ++I;
- } else {
- I = BB->begin();
- }
+      LoadInst *LI = new LoadInst(CI->getOperand(1), "", true, CI);
+ CI->replaceAllUsesWith(LI);
+ BB->getInstList().erase(CI);
break;
}
case Intrinsic::writeio: {
// On X86, memory operations are in-order. Lower this intrinsic
// into a volatile store.
Instruction *Before = CI->getPrev();
- StoreInst * LI = new StoreInst (CI->getOperand(1),
- CI->getOperand(2), true, CI);
- CI->replaceAllUsesWith (LI);
- BB->getInstList().erase (CI);
- if (Before) { // Move iterator to instruction after call
- I = Before; ++I;
- } else {
- I = BB->begin();
- }
+      StoreInst *SI = new StoreInst(CI->getOperand(1),
+                                    CI->getOperand(2), true, CI);
+      CI->replaceAllUsesWith(SI);
+      BB->getInstList().erase(CI);
break;
}
default:
Instruction *Before = CI->getPrev();
TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
if (Before) { // Move iterator to instruction after call
- I = Before; ++I;
+ I = Before; ++I;
} else {
I = BB->begin();
}
}
-
}
void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
}
return;
+ case Intrinsic::isunordered:
+ TmpReg1 = getReg(CI.getOperand(1));
+ TmpReg2 = getReg(CI.getOperand(2));
+ emitUCOMr(BB, BB->end(), TmpReg2, TmpReg1);
+ TmpReg2 = getReg(CI);
+ BuildMI(BB, X86::SETPr, 0, TmpReg2);
+ return;
+
case Intrinsic::memcpy: {
assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
unsigned Align = 1;
unsigned DestReg = getReg(B);
MachineBasicBlock::iterator MI = BB->end();
Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1);
+ unsigned Class = getClassB(B.getType());
// Special case: op Reg, load [mem]
- if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1))
+ if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1) && Class != cLong &&
+ Op0->hasOneUse() &&
+ isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op0), B))
if (!B.swapOperands())
std::swap(Op0, Op1); // Make sure any loads are in the RHS.
- unsigned Class = getClassB(B.getType());
- if (isa<LoadInst>(Op1) && Class != cLong &&
+ if (isa<LoadInst>(Op1) && Class != cLong && Op1->hasOneUse() &&
isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op1), B)) {
unsigned Opcode;
Opcode = OpcodeTab[OperatorClass][Ty == Type::DoubleTy];
}
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), BaseReg,
- Scale, IndexReg, Disp);
-
unsigned Op0r = getReg(Op0);
- addFullAddress(BuildMI(BB, Opcode, 2, DestReg).addReg(Op0r),
- BaseReg, Scale, IndexReg, Disp);
+ if (AllocaInst *AI =
+ dyn_castFixedAlloca(cast<LoadInst>(Op1)->getOperand(0))) {
+ unsigned FI = getFixedSizedAllocaFI(AI);
+ addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), FI);
+
+ } else {
+ X86AddressMode AM;
+ getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), AM);
+
+ addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), AM);
+ }
return;
}
-  assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!");
+  assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
unsigned Opcode = Ty == Type::FloatTy ? X86::FSUBR32m : X86::FSUBR64m;
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), BaseReg,
- Scale, IndexReg, Disp);
-
unsigned Op1r = getReg(Op1);
- addFullAddress(BuildMI(BB, Opcode, 2, DestReg).addReg(Op1r),
- BaseReg, Scale, IndexReg, Disp);
+ if (AllocaInst *AI =
+ dyn_castFixedAlloca(cast<LoadInst>(Op0)->getOperand(0))) {
+ unsigned FI = getFixedSizedAllocaFI(AI);
+ addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), FI);
+ } else {
+ X86AddressMode AM;
+ getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), AM);
+
+ addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), AM);
+ }
return;
}
return;
}
- // sub 0, X -> neg X
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0))
- if (OperatorClass == 1 && CI->isNullValue()) {
- unsigned op1Reg = getReg(Op1, MBB, IP);
+ if (OperatorClass == 1) {
static unsigned const NEGTab[] = {
X86::NEG8r, X86::NEG16r, X86::NEG32r, 0, X86::NEG32r
};
- BuildMI(*MBB, IP, NEGTab[Class], 1, DestReg).addReg(op1Reg);
+
+ // sub 0, X -> neg X
+ if (CI->isNullValue()) {
+ unsigned op1Reg = getReg(Op1, MBB, IP);
+ BuildMI(*MBB, IP, NEGTab[Class], 1, DestReg).addReg(op1Reg);
- if (Class == cLong) {
- // We just emitted: Dl = neg Sl
- // Now emit : T = addc Sh, 0
- // : Dh = neg T
- unsigned T = makeAnotherReg(Type::IntTy);
- BuildMI(*MBB, IP, X86::ADC32ri, 2, T).addReg(op1Reg+1).addImm(0);
- BuildMI(*MBB, IP, X86::NEG32r, 1, DestReg+1).addReg(T);
+ if (Class == cLong) {
+ // We just emitted: Dl = neg Sl
+ // Now emit : T = addc Sh, 0
+ // : Dh = neg T
+ unsigned T = makeAnotherReg(Type::IntTy);
+ BuildMI(*MBB, IP, X86::ADC32ri, 2, T).addReg(op1Reg+1).addImm(0);
+ BuildMI(*MBB, IP, X86::NEG32r, 1, DestReg+1).addReg(T);
+ }
+ return;
+ } else if (Op1->hasOneUse() && Class != cLong) {
+ // sub C, X -> tmp = neg X; DestReg = add tmp, C. This is better
+ // than copying C into a temporary register, because of register
+        // pressure (tmp and destreg can share a register).
+ static unsigned const ADDRITab[] = {
+ X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri
+ };
+ unsigned op1Reg = getReg(Op1, MBB, IP);
+ unsigned Tmp = makeAnotherReg(Op0->getType());
+ BuildMI(*MBB, IP, NEGTab[Class], 1, Tmp).addReg(op1Reg);
+ BuildMI(*MBB, IP, ADDRITab[Class], 2,
+ DestReg).addReg(Tmp).addImm(CI->getRawValue());
+ return;
}
- return;
}
// Special case: op Reg, <const int>
// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It
// returns zero when the input is not exactly a power of two.
static unsigned ExactLog2(unsigned Val) {
- if (Val == 0) return 0;
+ if (Val == 0 || (Val & (Val-1))) return 0;
unsigned Count = 0;
while (Val != 1) {
- if (Val & 1) return 0;
Val >>= 1;
++Count;
}
unsigned op0Reg, unsigned ConstRHS) {
static const unsigned MOVrrTab[] = {X86::MOV8rr, X86::MOV16rr, X86::MOV32rr};
static const unsigned MOVriTab[] = {X86::MOV8ri, X86::MOV16ri, X86::MOV32ri};
+ static const unsigned ADDrrTab[] = {X86::ADD8rr, X86::ADD16rr, X86::ADD32rr};
+ static const unsigned NEGrTab[] = {X86::NEG8r , X86::NEG16r , X86::NEG32r };
unsigned Class = getClass(DestTy);
-
- if (ConstRHS == 0) {
+ unsigned TmpReg;
+
+ // Handle special cases here.
+ switch (ConstRHS) {
+ case -2:
+ TmpReg = makeAnotherReg(DestTy);
+ BuildMI(*MBB, IP, NEGrTab[Class], 1, TmpReg).addReg(op0Reg);
+ BuildMI(*MBB, IP, ADDrrTab[Class], 1,DestReg).addReg(TmpReg).addReg(TmpReg);
+ return;
+ case -1:
+ BuildMI(*MBB, IP, NEGrTab[Class], 1, DestReg).addReg(op0Reg);
+ return;
+ case 0:
BuildMI(*MBB, IP, MOVriTab[Class], 1, DestReg).addImm(0);
return;
- } else if (ConstRHS == 1) {
+ case 1:
BuildMI(*MBB, IP, MOVrrTab[Class], 1, DestReg).addReg(op0Reg);
return;
+ case 2:
+ BuildMI(*MBB, IP, ADDrrTab[Class], 1,DestReg).addReg(op0Reg).addReg(op0Reg);
+ return;
+ case 3:
+ case 5:
+ case 9:
+ if (Class == cInt) {
+ X86AddressMode AM;
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = op0Reg;
+ AM.Scale = ConstRHS-1;
+ AM.IndexReg = op0Reg;
+ AM.Disp = 0;
+ addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, DestReg), AM);
+ return;
+ }
+ case -3:
+ case -5:
+ case -9:
+ if (Class == cInt) {
+ TmpReg = makeAnotherReg(DestTy);
+ X86AddressMode AM;
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = op0Reg;
+ AM.Scale = -ConstRHS-1;
+ AM.IndexReg = op0Reg;
+ AM.Disp = 0;
+ addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TmpReg), AM);
+ BuildMI(*MBB, IP, NEGrTab[Class], 1, DestReg).addReg(TmpReg);
+ return;
+ }
}
// If the element size is exactly a power of 2, use a shift to get it.
switch (Class) {
default: assert(0 && "Unknown class for this function!");
case cByte:
- BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
+ BuildMI(*MBB, IP, X86::SHL8ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
return;
case cShort:
- BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
+ BuildMI(*MBB, IP, X86::SHL16ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
return;
case cInt:
BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
return;
}
}
+
+ // If the element size is a negative power of 2, use a shift/neg to get it.
+ if (unsigned Shift = ExactLog2(-ConstRHS)) {
+ TmpReg = makeAnotherReg(DestTy);
+ BuildMI(*MBB, IP, NEGrTab[Class], 1, TmpReg).addReg(op0Reg);
+ switch (Class) {
+ default: assert(0 && "Unknown class for this function!");
+ case cByte:
+ BuildMI(*MBB, IP, X86::SHL8ri,2, DestReg).addReg(TmpReg).addImm(Shift-1);
+ return;
+ case cShort:
+ BuildMI(*MBB, IP, X86::SHL16ri,2, DestReg).addReg(TmpReg).addImm(Shift-1);
+ return;
+ case cInt:
+ BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(TmpReg).addImm(Shift-1);
+ return;
+ }
+ }
if (Class == cShort) {
BuildMI(*MBB, IP, X86::IMUL16rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
}
// Most general case, emit a normal multiply...
- unsigned TmpReg = makeAnotherReg(DestTy);
+ TmpReg = makeAnotherReg(DestTy);
BuildMI(*MBB, IP, MOVriTab[Class], 1, TmpReg).addImm(ConstRHS);
// Emit a MUL to multiply the register holding the index by
-    assert(Ty == Type::FloatTy||Ty == Type::DoubleTy && "Unknown FP type!");
+    assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
+           "Unknown FP type!");
unsigned Opcode = Ty == Type::FloatTy ? X86::FMUL32m : X86::FMUL64m;
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(LI->getOperand(0), BaseReg,
- Scale, IndexReg, Disp);
-
unsigned Op0r = getReg(Op0);
- addFullAddress(BuildMI(BB, Opcode, 2, ResultReg).addReg(Op0r),
- BaseReg, Scale, IndexReg, Disp);
+ if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
+ unsigned FI = getFixedSizedAllocaFI(AI);
+ addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
+ } else {
+ X86AddressMode AM;
+ getAddressingMode(LI->getOperand(0), AM);
+
+ addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
+ }
return;
}
}
-    assert(Ty == Type::FloatTy||Ty == Type::DoubleTy && "Unknown FP type!");
+    assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
+           "Unknown FP type!");
unsigned Opcode = Ty == Type::FloatTy ? X86::FDIV32m : X86::FDIV64m;
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(LI->getOperand(0), BaseReg,
- Scale, IndexReg, Disp);
-
unsigned Op0r = getReg(Op0);
- addFullAddress(BuildMI(BB, Opcode, 2, ResultReg).addReg(Op0r),
- BaseReg, Scale, IndexReg, Disp);
+ if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
+ unsigned FI = getFixedSizedAllocaFI(AI);
+ addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
+ } else {
+ X86AddressMode AM;
+ getAddressingMode(LI->getOperand(0), AM);
+
+ addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
+ }
return;
}
-    assert(Ty == Type::FloatTy||Ty == Type::DoubleTy && "Unknown FP type!");
+    assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
+           "Unknown FP type!");
unsigned Opcode = Ty == Type::FloatTy ? X86::FDIVR32m : X86::FDIVR64m;
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(LI->getOperand(0), BaseReg,
- Scale, IndexReg, Disp);
-
unsigned Op1r = getReg(Op1);
- addFullAddress(BuildMI(BB, Opcode, 2, ResultReg).addReg(Op1r),
- BaseReg, Scale, IndexReg, Disp);
+ if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
+ unsigned FI = getFixedSizedAllocaFI(AI);
+ addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), FI);
+ } else {
+ X86AddressMode AM;
+ getAddressingMode(LI->getOperand(0), AM);
+ addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), AM);
+ }
return;
}
}
default: assert(0 && "Unknown class!");
}
- static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
static const unsigned MovOpcode[]={ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
- static const unsigned SarOpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
+ static const unsigned NEGOpcode[] = { X86::NEG8r, X86::NEG16r, X86::NEG32r };
+ static const unsigned SAROpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
+ static const unsigned SHROpcode[]={ X86::SHR8ri, X86::SHR16ri, X86::SHR32ri };
+ static const unsigned ADDOpcode[]={ X86::ADD8rr, X86::ADD16rr, X86::ADD32rr };
+
+ // Special case signed division by power of 2.
+ if (isDiv)
+ if (ConstantSInt *CI = dyn_cast<ConstantSInt>(Op1)) {
+ assert(Class != cLong && "This doesn't handle 64-bit divides!");
+ int V = CI->getValue();
+
+ if (V == 1) { // X /s 1 => X
+ unsigned Op0Reg = getReg(Op0, BB, IP);
+ BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(Op0Reg);
+ return;
+ }
+
+ if (V == -1) { // X /s -1 => -X
+ unsigned Op0Reg = getReg(Op0, BB, IP);
+ BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(Op0Reg);
+ return;
+ }
+
+ bool isNeg = false;
+ if (V < 0) { // Not a positive power of 2?
+ V = -V;
+ isNeg = true; // Maybe it's a negative power of 2.
+ }
+ if (unsigned Log = ExactLog2(V)) {
+ --Log;
+ unsigned Op0Reg = getReg(Op0, BB, IP);
+ unsigned TmpReg = makeAnotherReg(Op0->getType());
+ if (Log != 1)
+ BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg)
+ .addReg(Op0Reg).addImm(Log-1);
+ else
+ BuildMI(*BB, IP, MovOpcode[Class], 1, TmpReg).addReg(Op0Reg);
+ unsigned TmpReg2 = makeAnotherReg(Op0->getType());
+ BuildMI(*BB, IP, SHROpcode[Class], 2, TmpReg2)
+ .addReg(TmpReg).addImm(32-Log);
+ unsigned TmpReg3 = makeAnotherReg(Op0->getType());
+ BuildMI(*BB, IP, ADDOpcode[Class], 2, TmpReg3)
+ .addReg(Op0Reg).addReg(TmpReg2);
+
+ unsigned TmpReg4 = isNeg ? makeAnotherReg(Op0->getType()) : ResultReg;
+ BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg4)
+ .addReg(Op0Reg).addImm(Log);
+ if (isNeg)
+ BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(TmpReg4);
+ return;
+ }
+ }
+
+ static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
static const unsigned ClrOpcode[]={ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri };
static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
{ X86::IDIV8r, X86::IDIV16r, X86::IDIV32r, 0 }, // Signed division
};
- bool isSigned = Ty->isSigned();
unsigned Reg = Regs[Class];
unsigned ExtReg = ExtRegs[Class];
unsigned Op1Reg = getReg(Op1, BB, IP);
BuildMI(*BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
- if (isSigned) {
+ if (Ty->isSigned()) {
// Emit a sign extension instruction...
unsigned ShiftResult = makeAnotherReg(Op0->getType());
- BuildMI(*BB, IP, SarOpcode[Class], 2,ShiftResult).addReg(Op0Reg).addImm(31);
+ BuildMI(*BB, IP, SAROpcode[Class], 2,ShiftResult).addReg(Op0Reg).addImm(31);
BuildMI(*BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
+
+ // Emit the appropriate divide or remainder instruction...
+ BuildMI(*BB, IP, DivOpcode[1][Class], 1).addReg(Op1Reg);
} else {
// If unsigned, emit a zeroing instruction... (reg = 0)
BuildMI(*BB, IP, ClrOpcode[Class], 2, ExtReg).addImm(0);
- }
- // Emit the appropriate divide or remainder instruction...
- BuildMI(*BB, IP, DivOpcode[isSigned][Class], 1).addReg(Op1Reg);
+ // Emit the appropriate divide or remainder instruction...
+ BuildMI(*BB, IP, DivOpcode[0][Class], 1).addReg(Op1Reg);
+ }
// Figure out which register we want to pick the result out of...
unsigned DestReg = isDiv ? Reg : ExtReg;
}
-void ISel::getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
- unsigned &IndexReg, unsigned &Disp) {
- BaseReg = 0; Scale = 1; IndexReg = 0; Disp = 0;
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
- if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
- BaseReg, Scale, IndexReg, Disp))
- return;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
- if (CE->getOpcode() == Instruction::GetElementPtr)
- if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
- BaseReg, Scale, IndexReg, Disp))
- return;
- }
-
- // If it's not foldable, reset addr mode.
- BaseReg = getReg(Addr);
- Scale = 1; IndexReg = 0; Disp = 0;
-}
-
-
/// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
/// instruction. The load and store instructions are the only place where we
/// need to worry about the memory layout of the target machine.
case Instruction::Cast:
// If this is a cast from a signed-integer type to a floating point type,
// fold the cast here.
- if (getClass(User->getType()) == cFP &&
+ if (getClassB(User->getType()) == cFP &&
(I.getType() == Type::ShortTy || I.getType() == Type::IntTy ||
I.getType() == Type::LongTy)) {
unsigned DestReg = getReg(User);
static const unsigned Opcode[] = {
0/*BYTE*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m
};
- unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
- getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
- addFullAddress(BuildMI(BB, Opcode[Class], 5, DestReg),
- BaseReg, Scale, IndexReg, Disp);
+
+ if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(0))) {
+ unsigned FI = getFixedSizedAllocaFI(AI);
+ addFrameReference(BuildMI(BB, Opcode[Class], 4, DestReg), FI);
+ } else {
+ X86AddressMode AM;
+ getAddressingMode(I.getOperand(0), AM);
+ addFullAddress(BuildMI(BB, Opcode[Class], 4, DestReg), AM);
+ }
return;
} else {
User = 0;
// Okay, we found a user. If the load is the first operand and there is
// no second operand load, reverse the operand ordering. Note that this
// can fail for a subtract (ie, no change will be made).
+ bool Swapped = false;
if (!isa<LoadInst>(User->getOperand(1)))
- cast<BinaryOperator>(User)->swapOperands();
+ Swapped = !cast<BinaryOperator>(User)->swapOperands();
// Okay, now that everything is set up, if this load is used by the second
// operand, and if there are no instructions that invalidate the load
User->getOpcode() == Instruction::Div) &&
isSafeToFoldLoadIntoInstruction(I, *User))
return; // Eliminate the load!
- }
- }
- unsigned DestReg = getReg(I);
- unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
- getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
-
- if (Class == cLong) {
- addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg),
- BaseReg, Scale, IndexReg, Disp);
- addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1),
- BaseReg, Scale, IndexReg, Disp+4);
- return;
+ // If we swapped the operands to the instruction, but couldn't fold the
+ // load anyway, swap them back. We don't want to break add X, int
+ // folding.
+ if (Swapped) cast<BinaryOperator>(User)->swapOperands();
+ }
}
static const unsigned Opcodes[] = {
- X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD32m
+ X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD32m, X86::MOV32rm
};
unsigned Opcode = Opcodes[Class];
if (I.getType() == Type::DoubleTy) Opcode = X86::FLD64m;
- addFullAddress(BuildMI(BB, Opcode, 4, DestReg),
- BaseReg, Scale, IndexReg, Disp);
+
+ unsigned DestReg = getReg(I);
+
+ if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(0))) {
+ unsigned FI = getFixedSizedAllocaFI(AI);
+ if (Class == cLong) {
+ addFrameReference(BuildMI(BB, X86::MOV32rm, 4, DestReg), FI);
+ addFrameReference(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), FI, 4);
+ } else {
+ addFrameReference(BuildMI(BB, Opcode, 4, DestReg), FI);
+ }
+ } else {
+ X86AddressMode AM;
+ getAddressingMode(I.getOperand(0), AM);
+
+ if (Class == cLong) {
+ addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg), AM);
+ AM.Disp += 4;
+ addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), AM);
+ } else {
+ addFullAddress(BuildMI(BB, Opcode, 4, DestReg), AM);
+ }
+ }
}
/// visitStoreInst - Implement LLVM store instructions in terms of the x86 'mov'
/// instruction.
///
void ISel::visitStoreInst(StoreInst &I) {
- unsigned BaseReg, Scale, IndexReg, Disp;
- getAddressingMode(I.getOperand(1), BaseReg, Scale, IndexReg, Disp);
+ X86AddressMode AM;
+ getAddressingMode(I.getOperand(1), AM);
const Type *ValTy = I.getOperand(0)->getType();
unsigned Class = getClassB(ValTy);
if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
uint64_t Val = CI->getRawValue();
if (Class == cLong) {
- addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
- BaseReg, Scale, IndexReg, Disp).addImm(Val & ~0U);
- addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
- BaseReg, Scale, IndexReg, Disp+4).addImm(Val>>32);
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(Val & ~0U);
+ AM.Disp += 4;
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(Val>>32);
} else {
static const unsigned Opcodes[] = {
X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
};
unsigned Opcode = Opcodes[Class];
- addFullAddress(BuildMI(BB, Opcode, 5),
- BaseReg, Scale, IndexReg, Disp).addImm(Val);
+ addFullAddress(BuildMI(BB, Opcode, 5), AM).addImm(Val);
}
+ } else if (isa<ConstantPointerNull>(I.getOperand(0))) {
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(0);
} else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
- addFullAddress(BuildMI(BB, X86::MOV8mi, 5),
- BaseReg, Scale, IndexReg, Disp).addImm(CB->getValue());
- } else {
- if (Class == cLong) {
- unsigned ValReg = getReg(I.getOperand(0));
- addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
- BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
- addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
- BaseReg, Scale, IndexReg, Disp+4).addReg(ValReg+1);
+ addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CB->getValue());
+ } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) {
+ // Store constant FP values with integer instructions to avoid having to
+ // load the constants from the constant pool then do a store.
+ if (CFP->getType() == Type::FloatTy) {
+ union {
+ unsigned I;
+ float F;
+ } V;
+ V.F = CFP->getValue();
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(V.I);
} else {
- unsigned ValReg = getReg(I.getOperand(0));
- static const unsigned Opcodes[] = {
- X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
- };
- unsigned Opcode = Opcodes[Class];
- if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;
- addFullAddress(BuildMI(BB, Opcode, 1+4),
- BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
+ union {
+ uint64_t I;
+ double F;
+ } V;
+ V.F = CFP->getValue();
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm((unsigned)V.I);
+ AM.Disp += 4;
+      addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM)
+        .addImm(unsigned(V.I >> 32));
}
+
+ } else if (Class == cLong) {
+ unsigned ValReg = getReg(I.getOperand(0));
+ addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg);
+ AM.Disp += 4;
+ addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg+1);
+ } else {
+ unsigned ValReg = getReg(I.getOperand(0));
+ static const unsigned Opcodes[] = {
+ X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
+ };
+ unsigned Opcode = Opcodes[Class];
+ if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;
+
+ addFullAddress(BuildMI(BB, Opcode, 1+4), AM).addReg(ValReg);
}
}
unsigned DestClass = getClassB(CI.getType());
// Noop casts are not emitted: getReg will return the source operand as the
// register to use for any uses of the noop cast.
- if (DestClass == SrcClass)
- return;
+ if (DestClass == SrcClass) {
+ // The only detail in this plan is that casts from double -> float are
+ // truncating operations that we have to codegen through memory (despite
+ // the fact that the source/dest registers are the same class).
+ if (CI.getType() != Type::FloatTy || Op->getType() != Type::DoubleTy)
+ return;
+ }
// If this is a cast from a 32-bit integer to a Long type, and the only uses
// of the case are GEP instructions, then the cast does not need to be
{ X86::MOVZX16rr8, X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOV32rr } // u
};
- bool isUnsigned = SrcTy->isUnsigned();
+ bool isUnsigned = SrcTy->isUnsigned() || SrcTy == Type::BoolTy;
BuildMI(*BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
DestReg).addReg(SrcReg);
const Type *PromoteType = 0;
unsigned PromoteOpcode = 0;
unsigned RealDestReg = DestReg;
- switch (SrcTy->getPrimitiveID()) {
+ switch (SrcTy->getTypeID()) {
case Type::BoolTyID:
case Type::SByteTyID:
// We don't have the facilities for directly loading byte sized data from
unsigned DestReg = getReg(I);
unsigned Size;
- switch (I.getArgType()->getPrimitiveID()) {
+ switch (I.getArgType()->getTypeID()) {
default:
std::cerr << I;
assert(0 && "Error: bad type for va_next instruction!");
unsigned VAList = getReg(I.getOperand(0));
unsigned DestReg = getReg(I);
- switch (I.getType()->getPrimitiveID()) {
+ switch (I.getType()->getTypeID()) {
default:
std::cerr << I;
assert(0 && "Error: bad type for va_next instruction!");
void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
// If this GEP instruction will be folded into all of its users, we don't need
// to explicitly calculate it!
- unsigned A, B, C, D;
- if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), A,B,C,D)) {
+ X86AddressMode AM;
+ if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), AM)) {
// Check all of the users of the instruction to see if they are loads and
// stores.
bool AllWillFold = true;
///
void ISel::getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
std::vector<Value*> &GEPOps,
- std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
- unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
+ std::vector<const Type*> &GEPTypes,
+ X86AddressMode &AM) {
const TargetData &TD = TM.getTargetData();
// Clear out the state we are working with...
- BaseReg = 0; // No base register
- Scale = 1; // Unit scale
- IndexReg = 0; // No index register
- Disp = 0; // No displacement
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = 0; // No base register
+ AM.Scale = 1; // Unit scale
+ AM.IndexReg = 0; // No index register
+ AM.Disp = 0; // No displacement
// While there are GEP indexes that can be folded into the current address,
// keep processing them.
// structure is in memory. Since the structure index must be constant, we
// can get its value and use it to find the right byte offset from the
// StructLayout class's list of structure member offsets.
- Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
+ AM.Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
GEPOps.pop_back(); // Consume a GEP operand
GEPTypes.pop_back();
} else {
// If idx is a constant, fold it into the offset.
unsigned TypeSize = TD.getTypeSize(SqTy->getElementType());
if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
- Disp += TypeSize*CSI->getValue();
+ AM.Disp += TypeSize*CSI->getValue();
} else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) {
- Disp += TypeSize*CUI->getValue();
+ AM.Disp += TypeSize*CUI->getValue();
} else {
// If the index reg is already taken, we can't handle this index.
- if (IndexReg) return;
+ if (AM.IndexReg) return;
// If this is a size that we can handle, then add the index as
switch (TypeSize) {
case 1: case 2: case 4: case 8:
// These are all acceptable scales on X86.
- Scale = TypeSize;
+ AM.Scale = TypeSize;
break;
default:
// Otherwise, we can't handle this scale
CI->getOperand(0)->getType() == Type::UIntTy)
idx = CI->getOperand(0);
- IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
+ AM.IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
}
GEPOps.pop_back(); // Consume a GEP operand
}
}
- // GEPTypes is empty, which means we have a single operand left. See if we
- // can set it as the base register.
+ // GEPTypes is empty, which means we have a single operand left. Set it as
+ // the base register.
//
- // FIXME: When addressing modes are more powerful/correct, we could load
- // global addresses directly as 32-bit immediates.
- assert(BaseReg == 0);
- BaseReg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
+ assert(AM.Base.Reg == 0);
+
+ if (AllocaInst *AI = dyn_castFixedAlloca(GEPOps.back())) {
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
+ GEPOps.pop_back();
+ return;
+ }
+
+#if 0 // FIXME: TODO!
+ if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ // FIXME: When addressing modes are more powerful/correct, we could load
+ // global addresses directly as 32-bit immediates.
+ }
+#endif
+
+ AM.Base.Reg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
GEPOps.pop_back(); // Consume the last GEP operand
}
/// folded into the addressing mode of a load/store or lea instruction.
bool ISel::isGEPFoldable(MachineBasicBlock *MBB,
Value *Src, User::op_iterator IdxBegin,
- User::op_iterator IdxEnd, unsigned &BaseReg,
- unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
- if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
- Src = CPR->getValue();
+ User::op_iterator IdxEnd, X86AddressMode &AM) {
std::vector<Value*> GEPOps;
GEPOps.resize(IdxEnd-IdxBegin+1);
GEPOps[0] = Src;
std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);
- std::vector<const Type*> GEPTypes;
- GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
- gep_type_end(Src->getType(), IdxBegin, IdxEnd));
+ std::vector<const Type*>
+ GEPTypes(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
+ gep_type_end(Src->getType(), IdxBegin, IdxEnd));
MachineBasicBlock::iterator IP;
if (MBB) IP = MBB->end();
- getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);
+ getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);
// We can fold it away iff the getGEPIndex call eliminated all operands.
return GEPOps.empty();
Value *Src, User::op_iterator IdxBegin,
User::op_iterator IdxEnd, unsigned TargetReg) {
const TargetData &TD = TM.getTargetData();
- if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
- Src = CPR->getValue();
+
+ // If this is a getelementptr null, with all constant integer indices, just
+  // replace it with a single move of the constant offset into TargetReg.
+ if (isa<ConstantPointerNull>(Src)) {
+ User::op_iterator I = IdxBegin;
+ for (; I != IdxEnd; ++I)
+ if (!isa<ConstantInt>(*I))
+ break;
+ if (I == IdxEnd) { // All constant indices
+ unsigned Offset = TD.getIndexedOffset(Src->getType(),
+ std::vector<Value*>(IdxBegin, IdxEnd));
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addImm(Offset);
+ return;
+ }
+ }
std::vector<Value*> GEPOps;
GEPOps.resize(IdxEnd-IdxBegin+1);
// Keep emitting instructions until we consume the entire GEP instruction.
while (!GEPOps.empty()) {
unsigned OldSize = GEPOps.size();
- unsigned BaseReg, Scale, IndexReg, Disp;
- getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);
+ X86AddressMode AM;
+ getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);
if (GEPOps.size() != OldSize) {
// getGEPIndex consumed some of the input. Build an LEA instruction here.
unsigned NextTarget = 0;
if (!GEPOps.empty()) {
- assert(BaseReg == 0 &&
+ assert(AM.Base.Reg == 0 &&
"getGEPIndex should have left the base register open for chaining!");
- NextTarget = BaseReg = makeAnotherReg(Type::UIntTy);
+ NextTarget = AM.Base.Reg = makeAnotherReg(Type::UIntTy);
}
- if (IndexReg == 0 && Disp == 0)
- BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
+ if (AM.BaseType == X86AddressMode::RegBase &&
+ AM.IndexReg == 0 && AM.Disp == 0)
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(AM.Base.Reg);
else
- addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg),
- BaseReg, Scale, IndexReg, Disp);
+ addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg), AM);
--IP;
TargetReg = NextTarget;
} else if (GEPTypes.empty()) {
}
}
-
/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
void ISel::visitAllocaInst(AllocaInst &I) {
+ // If this is a fixed size alloca in the entry block for the function, we
+ // statically stack allocate the space, so we don't need to do anything here.
+ //
+ if (dyn_castFixedAlloca(&I)) return;
+
// Find the data size of the alloca inst's getAllocatedType.
const Type *Ty = I.getAllocatedType();
unsigned TySize = TM.getTargetData().getTypeSize(Ty);
- // If this is a fixed size alloca in the entry block for the function,
- // statically stack allocate the space.
- //
- if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(I.getArraySize())) {
- if (I.getParent() == I.getParent()->getParent()->begin()) {
- TySize *= CUI->getValue(); // Get total allocated size...
- unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);
-
- // Create a new stack object using the frame manager...
- int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
- addFrameReference(BuildMI(BB, X86::LEA32r, 5, getReg(I)), FrameIdx);
- return;
- }
- }
-
// Create a register to hold the temporary result of multiplying the type size
// constant by the variable amount.
unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);