#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
-#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicLowering.h"
#include "llvm/Pass.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/InstVisitor.h"
-
-namespace llvm {
+using namespace llvm;
/// BMI - A special BuildMI variant that takes an iterator to insert the
/// instruction at as well as a basic block. This is the version for when you
/// have a destination register in mind.

/// runOnFunction - Top level implementation of instruction selection for
/// the entire function.
///
bool runOnFunction(Function &Fn) {
+ // First pass over the function, lower any unknown intrinsic functions
+ // with the IntrinsicLowering class.
+ LowerUnknownIntrinsicFunctionCalls(Fn);
+
F = &MachineFunction::construct(&Fn, TM);
// Create all of the machine basic blocks for the function...
BB = MBBMap[&LLVM_BB];
}
+ /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
+ /// function, lowering any calls to unknown intrinsic functions into the
+ /// equivalent LLVM code.
+ void LowerUnknownIntrinsicFunctionCalls(Function &F);
+
/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
/// from the stack into virtual registers.
///
MachineBasicBlock::iterator &IP,
Value *Op0, Value *Op1, unsigned Opcode,
unsigned TargetReg);
-
+
+ /// emitShiftOperation - Common code shared between visitShiftInst and
+ /// constant expression support.
+ void emitShiftOperation(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator &IP,
+ Value *Op, Value *ShiftAmount, bool isLeftShift,
+ const Type *ResultTy, unsigned DestReg);
+
/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
CE->getOpcode(), R);
return;
+ case Instruction::Shl:
+ case Instruction::Shr:
+ emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
+ CE->getOpcode() == Instruction::Shl, CE->getType(), R);
+ return;
+
default:
std::cerr << "Offending expr: " << C << "\n";
assert(0 && "Constant expression not yet handled!\n");
///
void ISel::visitReturnInst(ReturnInst &I) {
if (I.getNumOperands() == 0) {
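+    // Mark any live x87 FP registers dead before leaving the block: FP
+    // values may not be live across control-flow edges, because the FP
+    // stackifier only tracks the x87 stack within a single basic block.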
+ BuildMI(BB, X86::FP_REG_KILL, 0);
BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
return;
}
visitInstruction(I);
}
// Emit a 'ret' instruction
+ BuildMI(BB, X86::FP_REG_KILL, 0);
BuildMI(BB, X86::RET, 0);
}
BasicBlock *NextBB = getBlockAfter(BI.getParent()); // BB after current one
if (!BI.isConditional()) { // Unconditional branch?
- if (BI.getSuccessor(0) != NextBB)
+ if (BI.getSuccessor(0) != NextBB) {
+ BuildMI(BB, X86::FP_REG_KILL, 0);
BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
+ }
return;
}
// computed some other way...
unsigned condReg = getReg(BI.getCondition());
BuildMI(BB, X86::CMPri8, 2).addReg(condReg).addZImm(0);
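+  // Note that FP_REG_KILL does not clobber EFLAGS, so the result of the
+  // CMP above still reaches the conditional jumps emitted below.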
+ BuildMI(BB, X86::FP_REG_KILL, 0);
if (BI.getSuccessor(1) == NextBB) {
if (BI.getSuccessor(0) != NextBB)
BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0));
X86::JS, X86::JNS },
};
+ BuildMI(BB, X86::FP_REG_KILL, 0);
if (BI.getSuccessor(0) != NextBB) {
BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(0));
if (BI.getSuccessor(1) != NextBB)
}
+/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
+/// function, lowering any calls to unknown intrinsic functions into the
+/// equivalent LLVM code.
+void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
+ if (CallInst *CI = dyn_cast<CallInst>(I++))
+ if (Function *F = CI->getCalledFunction())
+ switch (F->getIntrinsicID()) {
+ case Intrinsic::not_intrinsic:
+ case Intrinsic::va_start:
+ case Intrinsic::va_copy:
+ case Intrinsic::va_end:
+ // We directly implement these intrinsics
+ break;
+ default:
+ // All other intrinsic calls we must lower.
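+            // LowerIntrinsicCall splices in equivalent LLVM code and then
+            // erases CI, so capture a neighboring instruction now to
+            // rebuild a valid iterator afterwards.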
+ Instruction *Before = CI->getPrev();
+ TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
+ if (Before) { // Move iterator to instruction after call
+ I = Before; ++I;
+ } else {
+ I = BB->begin();
+ }
+ }
+
+}
+
void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
unsigned TmpReg1, TmpReg2;
switch (ID) {
return;
case Intrinsic::va_end: return; // Noop on X86
- case Intrinsic::longjmp:
- case Intrinsic::siglongjmp:
- BuildMI(BB, X86::CALLpcrel32, 1).addExternalSymbol("abort", true);
- return;
-
- case Intrinsic::setjmp:
- case Intrinsic::sigsetjmp:
- // Setjmp always returns zero...
- BuildMI(BB, X86::MOVir32, 1, getReg(CI)).addZImm(0);
- return;
- default: assert(0 && "Unknown intrinsic for X86!");
+ default: assert(0 && "Error: unknown intrinsics should have been lowered!");
}
}
static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
static const unsigned SarOpcode[]={ X86::SARir8, X86::SARir16, X86::SARir32 };
- static const unsigned ClrOpcode[]={ X86::XORrr8, X86::XORrr16, X86::XORrr32 };
+ static const unsigned ClrOpcode[]={ X86::MOVir8, X86::MOVir16, X86::MOVir32 };
static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
static const unsigned DivOpcode[][4] = {
BMI(BB, IP, SarOpcode[Class], 2, ShiftResult).addReg(Op0Reg).addZImm(31);
BMI(BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
} else {
- // If unsigned, emit a zeroing instruction... (reg = xor reg, reg)
- BMI(BB, IP, ClrOpcode[Class], 2, ExtReg).addReg(ExtReg).addReg(ExtReg);
+ // If unsigned, emit a zeroing instruction... (reg = 0)
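+      // Note: a mov of zero, unlike "xor reg, reg", does not read a
+      // register that has not been defined yet.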
+      BMI(BB, IP, ClrOpcode[Class], 1, ExtReg).addZImm(0);
}
// Emit the appropriate divide or remainder instruction...
/// because the shift amount has to be in CL, not just any old register.
///
void ISel::visitShiftInst(ShiftInst &I) {
- unsigned SrcReg = getReg(I.getOperand(0));
- unsigned DestReg = getReg(I);
- bool isLeftShift = I.getOpcode() == Instruction::Shl;
- bool isSigned = I.getType()->isSigned();
- unsigned Class = getClass(I.getType());
+  MachineBasicBlock::iterator IP = BB->end();
+  emitShiftOperation(BB, IP, I.getOperand(0), I.getOperand(1),
+                     I.getOpcode() == Instruction::Shl, I.getType(),
+                     getReg(I));
+}
+
+/// emitShiftOperation - Common code shared between visitShiftInst and
+/// constant expression support.
+void ISel::emitShiftOperation(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator &IP,
+ Value *Op, Value *ShiftAmount, bool isLeftShift,
+ const Type *ResultTy, unsigned DestReg) {
+  unsigned SrcReg = getReg(Op, MBB, IP);
+  bool isSigned = ResultTy->isSigned();
+  unsigned Class = getClass(ResultTy);
static const unsigned ConstantOperand[][4] = {
{ X86::SHRir8, X86::SHRir16, X86::SHRir32, X86::SHRDir32 }, // SHR
// If we have a constant shift, we can generate much more efficient code
// than otherwise...
//
- if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(I.getOperand(1))) {
+ if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
unsigned Amount = CUI->getValue();
if (Amount < 32) {
const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
if (isLeftShift) {
- BuildMI(BB, Opc[3], 3,
- DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addZImm(Amount);
- BuildMI(BB, Opc[2], 2, DestReg).addReg(SrcReg).addZImm(Amount);
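+        // DestHi = shld SrcHi, SrcLo, Amount  (low bits shift in from SrcLo)
+        // DestLo = shl  SrcLo, Amount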
+ BMI(MBB, IP, Opc[3], 3,
+ DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addZImm(Amount);
+ BMI(MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addZImm(Amount);
} else {
- BuildMI(BB, Opc[3], 3,
- DestReg).addReg(SrcReg ).addReg(SrcReg+1).addZImm(Amount);
- BuildMI(BB, Opc[2], 2, DestReg+1).addReg(SrcReg+1).addZImm(Amount);
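+        // DestLo = shrd SrcLo, SrcHi, Amount  (top bits shift in from SrcHi)
+        // DestHi = shr/sar SrcHi, Amount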
+ BMI(MBB, IP, Opc[3], 3,
+ DestReg).addReg(SrcReg ).addReg(SrcReg+1).addZImm(Amount);
+ BMI(MBB, IP, Opc[2], 2, DestReg+1).addReg(SrcReg+1).addZImm(Amount);
}
} else { // Shifting more than 32 bits
Amount -= 32;
if (isLeftShift) {
- BuildMI(BB, X86::SHLir32, 2,DestReg+1).addReg(SrcReg).addZImm(Amount);
- BuildMI(BB, X86::MOVir32, 1,DestReg ).addZImm(0);
+          BMI(MBB, IP, X86::SHLir32, 2,
+              DestReg+1).addReg(SrcReg).addZImm(Amount);
+          BMI(MBB, IP, X86::MOVir32, 1, DestReg).addZImm(0);
} else {
unsigned Opcode = isSigned ? X86::SARir32 : X86::SHRir32;
- BuildMI(BB, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
- BuildMI(BB, X86::MOVir32, 1, DestReg+1).addZImm(0);
+ BMI(MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
+ BMI(MBB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
}
}
} else {
// If this is a SHR of a Long, then we need to do funny sign extension
// stuff. TmpReg gets the value to use as the high-part if we are
// shifting more than 32 bits.
- BuildMI(BB, X86::SARir32, 2, TmpReg).addReg(SrcReg).addZImm(31);
+ BMI(MBB, IP, X86::SARir32, 2, TmpReg).addReg(SrcReg).addZImm(31);
} else {
// Other shifts use a fixed zero value if the shift is more than 32
// bits.
- BuildMI(BB, X86::MOVir32, 1, TmpReg).addZImm(0);
+ BMI(MBB, IP, X86::MOVir32, 1, TmpReg).addZImm(0);
}
// Initialize CL with the shift amount...
- unsigned ShiftAmount = getReg(I.getOperand(1));
- BuildMI(BB, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmount);
+ unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
+ BMI(MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);
unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
if (isLeftShift) {
// TmpReg2 = shld inHi, inLo
- BuildMI(BB, X86::SHLDrr32, 2, TmpReg2).addReg(SrcReg+1).addReg(SrcReg);
+ BMI(MBB, IP, X86::SHLDrr32, 2, TmpReg2).addReg(SrcReg+1).addReg(SrcReg);
// TmpReg3 = shl inLo, CL
- BuildMI(BB, X86::SHLrr32, 1, TmpReg3).addReg(SrcReg);
+ BMI(MBB, IP, X86::SHLrr32, 1, TmpReg3).addReg(SrcReg);
// Set the flags to indicate whether the shift was by more than 32 bits.
- BuildMI(BB, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);
+ BMI(MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);
// DestHi = (>32) ? TmpReg3 : TmpReg2;
- BuildMI(BB, X86::CMOVNErr32, 2,
+ BMI(MBB, IP, X86::CMOVNErr32, 2,
DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
// DestLo = (>32) ? TmpReg : TmpReg3;
- BuildMI(BB, X86::CMOVNErr32, 2, DestReg).addReg(TmpReg3).addReg(TmpReg);
+ BMI(MBB, IP, X86::CMOVNErr32, 2,
+ DestReg).addReg(TmpReg3).addReg(TmpReg);
} else {
// TmpReg2 = shrd inLo, inHi
- BuildMI(BB, X86::SHRDrr32, 2, TmpReg2).addReg(SrcReg).addReg(SrcReg+1);
+ BMI(MBB, IP, X86::SHRDrr32, 2, TmpReg2).addReg(SrcReg).addReg(SrcReg+1);
// TmpReg3 = s[ah]r inHi, CL
- BuildMI(BB, isSigned ? X86::SARrr32 : X86::SHRrr32, 1, TmpReg3)
+ BMI(MBB, IP, isSigned ? X86::SARrr32 : X86::SHRrr32, 1, TmpReg3)
.addReg(SrcReg+1);
// Set the flags to indicate whether the shift was by more than 32 bits.
- BuildMI(BB, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);
+ BMI(MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);
// DestLo = (>32) ? TmpReg3 : TmpReg2;
- BuildMI(BB, X86::CMOVNErr32, 2,
+ BMI(MBB, IP, X86::CMOVNErr32, 2,
DestReg).addReg(TmpReg2).addReg(TmpReg3);
// DestHi = (>32) ? TmpReg : TmpReg3;
- BuildMI(BB, X86::CMOVNErr32, 2,
+ BMI(MBB, IP, X86::CMOVNErr32, 2,
DestReg+1).addReg(TmpReg3).addReg(TmpReg);
}
}
return;
}
- if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(I.getOperand(1))) {
+ if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
// The shift amount is constant, guaranteed to be a ubyte. Get its value.
assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");
const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
- BuildMI(BB, Opc[Class], 2, DestReg).addReg(SrcReg).addZImm(CUI->getValue());
+ BMI(MBB, IP, Opc[Class], 2,
+ DestReg).addReg(SrcReg).addZImm(CUI->getValue());
} else { // The shift amount is non-constant.
- BuildMI(BB, X86::MOVrr8, 1, X86::CL).addReg(getReg(I.getOperand(1)));
+    unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
+ BMI(MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);
const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
- BuildMI(BB, Opc[Class], 1, DestReg).addReg(SrcReg);
+ BMI(MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
}
}
/// into a machine code representation in a very simple peep-hole fashion. The
/// generated code sucks but the implementation is nice and simple.
///
-FunctionPass *createX86SimpleInstructionSelector(TargetMachine &TM) {
+FunctionPass *llvm::createX86SimpleInstructionSelector(TargetMachine &TM) {
return new ISel(TM);
}
-
-} // End llvm namespace