//===-- InstSelectSimple.cpp - A simple instruction selector for x86 ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple peephole instruction selector for the x86 target
//
//===----------------------------------------------------------------------===//

#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicLowering.h"
#include "llvm/Pass.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "Support/Statistic.h"
using namespace llvm;

namespace {
  Statistic<>
  NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");

  /// TypeClass - Used by the X86 backend to group LLVM types by their basic X86
  /// representation.
  ///
  enum TypeClass {
    cByte, cShort, cInt, cFP, cLong
  };

  /// getClass - Turn a primitive type into a "class" number which is based on
  /// the size of the type, and whether or not it is floating point.
  ///
  static inline TypeClass getClass(const Type *Ty) {
    switch (Ty->getPrimitiveID()) {
    case Type::SByteTyID:
    case Type::UByteTyID:   return cByte;      // Byte operands are class #0
    case Type::ShortTyID:
    case Type::UShortTyID:  return cShort;     // Short operands are class #1
    case Type::IntTyID:
    case Type::UIntTyID:
    case Type::PointerTyID: return cInt;       // Ints and pointers are class #2

    case Type::FloatTyID:
    case Type::DoubleTyID:  return cFP;        // Floating Point is #3

    case Type::LongTyID:
    case Type::ULongTyID:   return cLong;      // Longs are class #4
    default:
      assert(0 && "Invalid type to getClass!");
      return cByte;  // not reached
    }
  }

  // getClassB - Just like getClass, but treat boolean values as bytes.
  static inline TypeClass getClassB(const Type *Ty) {
    if (Ty == Type::BoolTy) return cByte;
    return getClass(Ty);
  }

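  // Illustrative sketch (not from the original source): the class number is
  // used throughout this file to index per-size opcode tables, e.g.:
  //
  //   static const unsigned MovOpcodeTab[] =   // indexed by TypeClass
  //     { X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
  //   unsigned Opc = MovOpcodeTab[getClassB(V->getType())]; // cByte -> MOV8rr
  //
  // cFP and cLong need special handling and always get explicit cases instead.
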
  struct ISel : public FunctionPass, InstVisitor<ISel> {
    TargetMachine &TM;
    MachineFunction *F;                 // The function we are compiling into
    MachineBasicBlock *BB;              // The current MBB we are compiling
    int VarArgsFrameIndex;              // FrameIndex for start of varargs area
    int ReturnAddressIndex;             // FrameIndex for the return address

    std::map<Value*, unsigned> RegMap;  // Mapping between Vals and SSA Regs

    // MBBMap - Mapping between LLVM BB -> Machine BB
    std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;

    // AllocaMap - Mapping from fixed sized alloca instructions to the
    // FrameIndex for the alloca.
    std::map<AllocaInst*, unsigned> AllocaMap;

    ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}

    /// runOnFunction - Top level implementation of instruction selection for
    /// the entire function.
    ///
    bool runOnFunction(Function &Fn) {
      // First pass over the function, lower any unknown intrinsic functions
      // with the IntrinsicLowering class.
      LowerUnknownIntrinsicFunctionCalls(Fn);

      F = &MachineFunction::construct(&Fn, TM);

      // Create all of the machine basic blocks for the function...
      for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
        F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));

      BB = &F->front();

      // Set up a frame object for the return address.  This is used by the
      // llvm.returnaddress & llvm.frameaddress intrinsics.
      ReturnAddressIndex = F->getFrameInfo()->CreateFixedObject(4, -4);

      // Copy incoming arguments off of the stack...
      LoadArgumentsToVirtualRegs(Fn);

      // Instruction select everything except PHI nodes
      visit(Fn);

      // Select the PHI nodes
      SelectPHINodes();

      // Insert the FP_REG_KILL instructions into blocks that need them.
      InsertFPRegKills();

      RegMap.clear();
      MBBMap.clear();
      AllocaMap.clear();
      F = 0;
      // We always build a machine code representation for the function
      return true;
    }

    virtual const char *getPassName() const {
      return "X86 Simple Instruction Selection";
    }

    /// visitBasicBlock - This method is called when we are visiting a new basic
    /// block.  This simply selects the MachineBasicBlock that was created for
    /// it in runOnFunction as the current block to emit code into.  Subsequent
    /// visit* calls will be invoked for all instructions in the basic block.
    ///
    void visitBasicBlock(BasicBlock &LLVM_BB) {
      BB = MBBMap[&LLVM_BB];
    }

    /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
    /// function, lowering any calls to unknown intrinsic functions into the
    /// equivalent LLVM code.
    ///
    void LowerUnknownIntrinsicFunctionCalls(Function &F);

    /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
    /// from the stack into virtual registers.
    ///
    void LoadArgumentsToVirtualRegs(Function &F);

    /// SelectPHINodes - Insert machine code to generate phis.  This is tricky
    /// because we have to generate our sources into the source basic blocks,
    /// not the current one.
    ///
    void SelectPHINodes();

    /// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks
    /// that need them.  This only occurs due to the floating point stackifier
    /// not being aggressive enough to handle arbitrary global stackification.
    ///
    void InsertFPRegKills();

    // Visitation methods for various instructions.  These methods simply emit
    // fixed X86 code for each instruction.
    //

    // Control flow operators
    void visitReturnInst(ReturnInst &RI);
    void visitBranchInst(BranchInst &BI);

    struct ValueRecord {
      Value *Val;
      unsigned Reg;
      const Type *Ty;
      ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
      ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
    };

    void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                const std::vector<ValueRecord> &Args);
    void visitCallInst(CallInst &I);
    void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I);

    // Arithmetic operators
    void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
    void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
    void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
    void visitMul(BinaryOperator &B);

    void visitDiv(BinaryOperator &B) { visitDivRem(B); }
    void visitRem(BinaryOperator &B) { visitDivRem(B); }
    void visitDivRem(BinaryOperator &B);

    // Bitwise operators
    void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); }
    void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
    void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }

    // Comparison operators...
    void visitSetCondInst(SetCondInst &I);
    unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                            MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator MBBI);
    void visitSelectInst(SelectInst &SI);

    // Memory Instructions
    void visitLoadInst(LoadInst &I);
    void visitStoreInst(StoreInst &I);
    void visitGetElementPtrInst(GetElementPtrInst &I);
    void visitAllocaInst(AllocaInst &I);
    void visitMallocInst(MallocInst &I);
    void visitFreeInst(FreeInst &I);

    // Other operators
    void visitShiftInst(ShiftInst &I);
    void visitPHINode(PHINode &I) {}      // PHI nodes handled by second pass
    void visitCastInst(CastInst &I);
    void visitVANextInst(VANextInst &I);
    void visitVAArgInst(VAArgInst &I);

    void visitInstruction(Instruction &I) {
      std::cerr << "Cannot instruction select: " << I;
      abort();
    }

    /// promote32 - Make a value 32-bits wide, and put it somewhere.
    ///
    void promote32(unsigned targetReg, const ValueRecord &VR);

    /// getAddressingMode - Get the addressing mode to use to address the
    /// specified value.  The returned value should be used with addFullAddress.
    void getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
                           unsigned &IndexReg, unsigned &Disp);

    /// getGEPIndex - This is used to fold GEP instructions into X86 addressing
    /// modes.
    void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                     std::vector<Value*> &GEPOps,
                     std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
                     unsigned &Scale, unsigned &IndexReg, unsigned &Disp);

    /// isGEPFoldable - Return true if the specified GEP can be completely
    /// folded into the addressing mode of a load/store or lea instruction.
    bool isGEPFoldable(MachineBasicBlock *MBB,
                       Value *Src, User::op_iterator IdxBegin,
                       User::op_iterator IdxEnd, unsigned &BaseReg,
                       unsigned &Scale, unsigned &IndexReg, unsigned &Disp);

    /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
    /// constant expression GEP support.
    ///
    void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                          Value *Src, User::op_iterator IdxBegin,
                          User::op_iterator IdxEnd, unsigned TargetReg);

    /// emitCastOperation - Common code shared between visitCastInst and
    /// constant expression cast support.
    ///
    void emitCastOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                           Value *Src, const Type *DestTy, unsigned TargetReg);

    /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
    /// and constant expression support.
    ///
    void emitSimpleBinaryOperation(MachineBasicBlock *BB,
                                   MachineBasicBlock::iterator IP,
                                   Value *Op0, Value *Op1,
                                   unsigned OperatorClass, unsigned TargetReg);

    /// emitBinaryFPOperation - This method handles emission of floating point
    /// Add (0), Sub (1), Mul (2), and Div (3) operations.
    void emitBinaryFPOperation(MachineBasicBlock *BB,
                               MachineBasicBlock::iterator IP,
                               Value *Op0, Value *Op1,
                               unsigned OperatorClass, unsigned TargetReg);

    void emitMultiply(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                      Value *Op0, Value *Op1, unsigned TargetReg);

    void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                    unsigned DestReg, const Type *DestTy,
                    unsigned Op0Reg, unsigned Op1Reg);
    void doMultiplyConst(MachineBasicBlock *MBB,
                         MachineBasicBlock::iterator MBBI,
                         unsigned DestReg, const Type *DestTy,
                         unsigned Op0Reg, unsigned Op1Val);

    void emitDivRemOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator IP,
                             Value *Op0, Value *Op1, bool isDiv,
                             unsigned TargetReg);

    /// emitSetCCOperation - Common code shared between visitSetCondInst and
    /// constant expression support.
    ///
    void emitSetCCOperation(MachineBasicBlock *BB,
                            MachineBasicBlock::iterator IP,
                            Value *Op0, Value *Op1, unsigned Opcode,
                            unsigned TargetReg);

    /// emitShiftOperation - Common code shared between visitShiftInst and
    /// constant expression support.
    ///
    void emitShiftOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator IP,
                            Value *Op, Value *ShiftAmount, bool isLeftShift,
                            const Type *ResultTy, unsigned DestReg);

    /// emitSelectOperation - Common code shared between visitSelectInst and the
    /// constant expression support.
    void emitSelectOperation(MachineBasicBlock *MBB,
                             MachineBasicBlock::iterator IP,
                             Value *Cond, Value *TrueVal, Value *FalseVal,
                             unsigned DestReg);

    /// copyConstantToRegister - Output the instructions required to put the
    /// specified constant into the specified register.
    ///
    void copyConstantToRegister(MachineBasicBlock *MBB,
                                MachineBasicBlock::iterator MBBI,
                                Constant *C, unsigned Reg);

    void emitUCOMr(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                   unsigned LHS, unsigned RHS);

    /// makeAnotherReg - This method returns the next register number we haven't
    /// yet used.
    ///
    /// Long values are handled somewhat specially.  They are always allocated
    /// as pairs of 32 bit integer values.  The register number returned is the
    /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits
    /// of the long value.
    ///
    unsigned makeAnotherReg(const Type *Ty) {
      assert(dynamic_cast<const X86RegisterInfo*>(TM.getRegisterInfo()) &&
             "Current target doesn't have X86 reg info??");
      const X86RegisterInfo *MRI =
        static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
      if (Ty == Type::LongTy || Ty == Type::ULongTy) {
        const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
        // Create the lower part
        F->getSSARegMap()->createVirtualRegister(RC);
        // Create the upper part.
        return F->getSSARegMap()->createVirtualRegister(RC)-1;
      }

      // Add the mapping of regnumber => reg class to MachineFunction
      const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
      return F->getSSARegMap()->createVirtualRegister(RC);
    }

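    // A minimal illustrative sketch (hypothetical register numbers): for a
    // long value,
    //   unsigned Lo = makeAnotherReg(Type::LongTy);  // e.g. returns vreg 1024
    //   unsigned Hi = Lo + 1;                        // upper half is vreg 1025
    // All of the cLong code below depends on this Reg/Reg+1 pairing, which is
    // why the two halves are created back-to-back here.
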
    /// getReg - This method turns an LLVM value into a register number.
    ///
    unsigned getReg(Value &V) { return getReg(&V); }  // Allow references
    unsigned getReg(Value *V) {
      // Just append to the end of the current bb.
      MachineBasicBlock::iterator It = BB->end();
      return getReg(V, BB, It);
    }
    unsigned getReg(Value *V, MachineBasicBlock *MBB,
                    MachineBasicBlock::iterator IPt);

    /// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
    /// that is to be statically allocated with the initial stack frame.
    ///
    unsigned getFixedSizedAllocaFI(AllocaInst *AI);
  };
}

/// dyn_castFixedAlloca - If the specified value is a fixed size alloca
/// instruction in the entry block, return it.  Otherwise, return a null
/// pointer.
static AllocaInst *dyn_castFixedAlloca(Value *V) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    BasicBlock *BB = AI->getParent();
    if (isa<ConstantUInt>(AI->getArraySize()) &&
        BB == &BB->getParent()->front())
      return AI;
  }
  return 0;
}

/// getReg - This method turns an LLVM value into a register number.
///
unsigned ISel::getReg(Value *V, MachineBasicBlock *MBB,
                      MachineBasicBlock::iterator IPt) {
  // If this operand is a constant, emit the code to copy the constant into
  // the register here...
  if (Constant *C = dyn_cast<Constant>(V)) {
    unsigned Reg = makeAnotherReg(V->getType());
    copyConstantToRegister(MBB, IPt, C, Reg);
    return Reg;
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    unsigned Reg = makeAnotherReg(V->getType());
    // Move the address of the global into the register
    BuildMI(*MBB, IPt, X86::MOV32ri, 1, Reg).addGlobalAddress(GV);
    return Reg;
  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
    // Do not emit noop casts at all.
    if (getClassB(CI->getType()) == getClassB(CI->getOperand(0)->getType()))
      return getReg(CI->getOperand(0), MBB, IPt);
  } else if (AllocaInst *AI = dyn_castFixedAlloca(V)) {
    // If the alloca address couldn't be folded into the instruction
    // addressing, emit an explicit LEA as appropriate.
    unsigned Reg = makeAnotherReg(V->getType());
    unsigned FI = getFixedSizedAllocaFI(AI);
    addFrameReference(BuildMI(*MBB, IPt, X86::LEA32r, 4, Reg), FI);
    return Reg;
  }

  unsigned &Reg = RegMap[V];
  if (Reg == 0)
    Reg = makeAnotherReg(V->getType());
  return Reg;
}

/// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
/// that is to be statically allocated with the initial stack frame.
///
unsigned ISel::getFixedSizedAllocaFI(AllocaInst *AI) {
  // Already computed this?
  std::map<AllocaInst*, unsigned>::iterator I = AllocaMap.lower_bound(AI);
  if (I != AllocaMap.end() && I->first == AI) return I->second;

  const Type *Ty = AI->getAllocatedType();
  ConstantUInt *CUI = cast<ConstantUInt>(AI->getArraySize());
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);
  TySize *= CUI->getValue();   // Get total allocated size...
  unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);

  // Create a new stack object using the frame manager...
  int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
  AllocaMap.insert(I, std::make_pair(AI, FrameIdx));
  return FrameIdx;
}

/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator IP,
                                  Constant *C, unsigned R) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    unsigned Class = 0;
    switch (CE->getOpcode()) {
    case Instruction::GetElementPtr:
      emitGEPOperation(MBB, IP, CE->getOperand(0),
                       CE->op_begin()+1, CE->op_end(), R);
      return;
    case Instruction::Cast:
      emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R);
      return;

    case Instruction::Xor: ++Class;   // FALL THROUGH
    case Instruction::Or:  ++Class;   // FALL THROUGH
    case Instruction::And: ++Class;   // FALL THROUGH
    case Instruction::Sub: ++Class;   // FALL THROUGH
    case Instruction::Add:
      emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                                Class, R);
      return;

    case Instruction::Mul:
      emitMultiply(MBB, IP, CE->getOperand(0), CE->getOperand(1), R);
      return;

    case Instruction::Div:
    case Instruction::Rem:
      emitDivRemOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                          CE->getOpcode() == Instruction::Div, R);
      return;

    case Instruction::SetNE:
    case Instruction::SetEQ:
    case Instruction::SetLT:
    case Instruction::SetGT:
    case Instruction::SetLE:
    case Instruction::SetGE:
      emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode(), R);
      return;

    case Instruction::Shl:
    case Instruction::Shr:
      emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode() == Instruction::Shl, CE->getType(), R);
      return;

    case Instruction::Select:
      emitSelectOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                          CE->getOperand(2), R);
      return;

    default:
      std::cerr << "Offending expr: " << *CE << "\n";
      assert(0 && "Constant expression not yet handled!\n");
    }
  }

  if (C->getType()->isIntegral()) {
    unsigned Class = getClassB(C->getType());

    if (Class == cLong) {
      // Copy the value into the register pair.
      uint64_t Val = cast<ConstantInt>(C)->getRawValue();
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(Val & 0xFFFFFFFF);
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R+1).addImm(Val >> 32);
      return;
    }

    assert(Class <= cInt && "Type not handled yet!");

    static const unsigned IntegralOpcodeTab[] = {
      X86::MOV8ri, X86::MOV16ri, X86::MOV32ri
    };

    if (C->getType() == Type::BoolTy) {
      BuildMI(*MBB, IP, X86::MOV8ri, 1, R).addImm(C == ConstantBool::True);
    } else {
      ConstantInt *CI = cast<ConstantInt>(C);
      BuildMI(*MBB, IP, IntegralOpcodeTab[Class], 1, R)
        .addImm(CI->getRawValue());
    }
  } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->isExactlyValue(+0.0))
      BuildMI(*MBB, IP, X86::FLD0, 0, R);
    else if (CFP->isExactlyValue(+1.0))
      BuildMI(*MBB, IP, X86::FLD1, 0, R);
    else {
      // Otherwise we need to spill the constant to memory...
      MachineConstantPool *CP = F->getConstantPool();
      unsigned CPI = CP->getConstantPoolIndex(CFP);
      const Type *Ty = CFP->getType();

      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
             "Unknown FP type!");
      unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLD32m : X86::FLD64m;
      addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 4, R), CPI);
    }
  } else if (isa<ConstantPointerNull>(C)) {
    // Copy zero (null pointer) to the register.
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(0);
  } else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addGlobalAddress(CPR->getValue());
  } else {
    std::cerr << "Offending constant: " << *C << "\n";
    assert(0 && "Type not handled yet!");
  }
}

/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
/// the stack into virtual registers.
///
void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
  // Emit instructions to load the arguments...  On entry to a function on the
  // X86, the stack frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
  //    ...
  //
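  // Illustrative example (hypothetical signature, not from this file): for
  //   void f(short a, long b, double c)
  // the loop below assigns a -> [ESP+4], b -> [ESP+8] (two 4-byte words), and
  // c -> [ESP+16], since every argument slot is rounded up to a 4-byte
  // multiple.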
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  MachineFrameInfo *MFI = F->getFrameInfo();

  for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
    bool ArgLive = !I->use_empty();
    unsigned Reg = ArgLive ? getReg(*I) : 0;
    int FI;          // Frame object index

    switch (getClassB(I->getType())) {
    case cByte:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(1, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Reg), FI);
      }
      break;
    case cShort:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(2, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Reg), FI);
      }
      break;
    case cInt:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(4, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
      }
      break;
    case cLong:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(8, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg+1), FI, 4);
      }
      ArgOffset += 4;   // longs require 4 additional bytes
      break;
    case cFP:
      if (ArgLive) {
        unsigned Opcode;
        if (I->getType() == Type::FloatTy) {
          Opcode = X86::FLD32m;
          FI = MFI->CreateFixedObject(4, ArgOffset);
        } else {
          Opcode = X86::FLD64m;
          FI = MFI->CreateFixedObject(8, ArgOffset);
        }
        addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
      }
      if (I->getType() == Type::DoubleTy)
        ArgOffset += 4;   // doubles require 4 additional bytes
      break;
    default:
      assert(0 && "Unhandled argument type!");
    }
    ArgOffset += 4;  // Each argument takes at least 4 bytes on the stack...
  }

  // If the function takes variable number of arguments, add a frame offset for
  // the start of the first vararg value... this is used to expand
  // llvm.va_start.
  if (Fn.getFunctionType()->isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
}

/// SelectPHINodes - Insert machine code to generate phis.  This is tricky
/// because we have to generate our sources into the source basic blocks, not
/// the current one.
///
void ISel::SelectPHINodes() {
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const Function &LF = *F->getFunction();  // The LLVM function...
  for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
    const BasicBlock *BB = I;
    MachineBasicBlock &MBB = *MBBMap[I];

    // Loop over all of the PHI nodes in the LLVM basic block...
    MachineBasicBlock::iterator PHIInsertPoint = MBB.begin();
    for (BasicBlock::const_iterator I = BB->begin();
         PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) {

      // Create a new machine instr PHI node, and insert it.
      unsigned PHIReg = getReg(*PN);
      MachineInstr *PhiMI = BuildMI(MBB, PHIInsertPoint,
                                    X86::PHI, PN->getNumOperands(), PHIReg);

      MachineInstr *LongPhiMI = 0;
      if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy)
        LongPhiMI = BuildMI(MBB, PHIInsertPoint,
                            X86::PHI, PN->getNumOperands(), PHIReg+1);

      // PHIValues - Map of blocks to incoming virtual registers.  We use this
      // so that we only initialize one incoming value for a particular block,
      // even if the block has multiple entries in the PHI node.
      //
      std::map<MachineBasicBlock*, unsigned> PHIValues;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)];
        unsigned ValReg;
        std::map<MachineBasicBlock*, unsigned>::iterator EntryIt =
          PHIValues.lower_bound(PredMBB);

        if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) {
          // We already inserted an initialization of the register for this
          // predecessor.  Recycle it.
          ValReg = EntryIt->second;
        } else {
          // Get the incoming value into a virtual register.
          //
          Value *Val = PN->getIncomingValue(i);

          // If this is a constant or GlobalValue, we may have to insert code
          // into the basic block to compute it into a virtual register.
          if ((isa<Constant>(Val) && !isa<ConstantExpr>(Val)) ||
              isa<GlobalValue>(Val)) {
            // Simple constants get emitted at the end of the basic block,
            // before any terminator instructions.  We "know" that the code to
            // move a constant into a register will never clobber any flags.
            ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator());
          } else {
            // Because we don't want to clobber any values which might be in
            // physical registers with the computation of this constant (which
            // might be arbitrarily complex if it is a constant expression),
            // just insert the computation at the top of the basic block.
            MachineBasicBlock::iterator PI = PredMBB->begin();

            // Skip over any PHI nodes though!
            while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
              ++PI;

            ValReg = getReg(Val, PredMBB, PI);
          }

          // Remember that we inserted a value for this PHI for this predecessor
          PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg));
        }

        PhiMI->addRegOperand(ValReg);
        PhiMI->addMachineBasicBlockOperand(PredMBB);
        if (LongPhiMI) {
          LongPhiMI->addRegOperand(ValReg+1);
          LongPhiMI->addMachineBasicBlockOperand(PredMBB);
        }
      }

      // Now that we emitted all of the incoming values for the PHI node, make
      // sure to reposition the InsertPoint after the PHI that we just added.
      // This is needed because we might have inserted a constant into this
      // block, right after the PHIs, which is before the old insert point!
      PHIInsertPoint = LongPhiMI ? LongPhiMI : PhiMI;
      ++PHIInsertPoint;
    }
  }
}

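// For example (illustrative sketch): an LLVM "%r = phi int [%a, %BB1],
// [%b, %BB2]" becomes the machine instruction
//   %r = PHI %a_reg, mbb<BB1>, %b_reg, mbb<BB2>
// and a long phi becomes two such PHIs, one for each 32-bit half.
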
/// RequiresFPRegKill - The floating point stackifier pass cannot insert
/// compensation code on critical edges.  As such, it requires that we kill all
/// FP registers on the exit from any blocks that either ARE critical edges, or
/// branch to a block that has incoming critical edges.
///
/// Note that this kill instruction will eventually be eliminated when
/// restrictions in the stackifier are relaxed.
///
static bool RequiresFPRegKill(const MachineBasicBlock *MBB) {
  const BasicBlock *BB = MBB->getBasicBlock();
  for (succ_const_iterator SI = succ_begin(BB), E = succ_end(BB); SI!=E; ++SI) {
    const BasicBlock *Succ = *SI;
    pred_const_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
    ++PI;  // Blocks have at least one predecessor
    if (PI != PE) {  // If it has exactly one, this isn't a critical edge
      // If this block has more than one predecessor, check all of the
      // predecessors to see if they have multiple successors.  If so, then the
      // block we are analyzing needs an FPRegKill.
      for (PI = pred_begin(Succ); PI != PE; ++PI) {
        const BasicBlock *Pred = *PI;
        succ_const_iterator SI2 = succ_begin(Pred);
        ++SI2;  // There must be at least one successor of this block.
        if (SI2 != succ_end(Pred))
          return true;   // Yes, we must insert the kill on this edge.
      }
    }
  }
  // If we got this far, there is no need to insert the kill instruction.
  return false;
}

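// For example (illustrative): in the CFG sketched below, the edge BB1->BB3 is
// critical (BB1 has two successors, BB3 has two predecessors), so both BB1 and
// BB2 must kill their FP registers before branching into BB3:
//
//        BB1
//       /   \
//      v     \
//     BB2     |
//       \     |
//        v    v
//         BB3
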
// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks that
// need them.  This only occurs due to the floating point stackifier not being
// aggressive enough to handle arbitrary global stackification.
//
// Currently we insert an FP_REG_KILL instruction into each block that uses or
// defines a floating point virtual register.
//
// When the global register allocators (like linear scan) finally update live
// variable analysis, we can keep floating point values in registers across
// portions of the CFG that do not involve critical edges.  This will be a big
// win, but we are waiting on the global allocators before we can do this.
//
// With a bit of work, the floating point stackifier pass can be enhanced to
// break critical edges as needed (to make a place to put compensation code),
// but this will require some infrastructure improvements as well.
//
void ISel::InsertFPRegKills() {
  SSARegMap &RegMap = *F->getSSARegMap();

  for (MachineFunction::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I!=E; ++I)
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = I->getOperand(i);
        if (MO.isRegister() && MO.getReg()) {
          unsigned Reg = MO.getReg();
          if (MRegisterInfo::isVirtualRegister(Reg))
            if (RegMap.getRegClass(Reg)->getSize() == 10)
              goto UsesFPReg;
        }
      }
    // If we haven't found an FP register use or def in this basic block, check
    // to see if any of our successors has an FP PHI node, which will cause a
    // copy to be inserted into this block.
    for (MachineBasicBlock::const_succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      MachineBasicBlock *SBB = *SI;
      for (MachineBasicBlock::iterator I = SBB->begin();
           I != SBB->end() && I->getOpcode() == X86::PHI; ++I) {
        if (RegMap.getRegClass(I->getOperand(0).getReg())->getSize() == 10)
          goto UsesFPReg;
      }
    }
    continue;
  UsesFPReg:
    // Okay, this block uses an FP register.  If the block has successors (i.e.,
    // it's not an unwind/return), insert the FP_REG_KILL instruction.
    if (BB->succ_size() && RequiresFPRegKill(BB)) {
      BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
      ++NumFPKill;
    }
  }
}

void ISel::getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
                             unsigned &IndexReg, unsigned &Disp) {
  BaseReg = 0; Scale = 1; IndexReg = 0; Disp = 0;
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
    if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
                      BaseReg, Scale, IndexReg, Disp))
      return;
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (CE->getOpcode() == Instruction::GetElementPtr)
      if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
                        BaseReg, Scale, IndexReg, Disp))
        return;
  }

  // If it's not foldable, reset addr mode.
  BaseReg = getReg(Addr);
  Scale = 1; IndexReg = 0; Disp = 0;
}

// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold
// it into the conditional branch or select instruction which is the only user
// of the cc instruction.  This is the case if the conditional branch is the
// only user of the setcc, and if the setcc is in the same basic block as the
// conditional branch.  We also don't handle long arguments below, so we reject
// them here as well.
//
static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) {
  if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
    if (SCI->hasOneUse()) {
      Instruction *User = cast<Instruction>(SCI->use_back());
      if ((isa<BranchInst>(User) || isa<SelectInst>(User)) &&
          SCI->getParent() == User->getParent() &&
          (getClassB(SCI->getOperand(0)->getType()) != cLong ||
           SCI->getOpcode() == Instruction::SetEQ ||
           SCI->getOpcode() == Instruction::SetNE))
        return SCI;
    }
  return 0;
}

// Return a fixed numbering for setcc instructions which does not depend on the
// order of the opcodes.
//
static unsigned getSetCCNumber(unsigned Opcode) {
  switch (Opcode) {
  default: assert(0 && "Unknown setcc instruction!");
  case Instruction::SetEQ: return 0;
  case Instruction::SetNE: return 1;
  case Instruction::SetLT: return 2;
  case Instruction::SetGE: return 3;
  case Instruction::SetGT: return 4;
  case Instruction::SetLE: return 5;
  }
}

// LLVM  -> X86 signed  X86 unsigned
// -----    ----------  ------------
// seteq -> sete        sete
// setne -> setne       setne
// setlt -> setl        setb
// setge -> setge       setae
// setgt -> setg        seta
// setle -> setle       setbe
// ----
//          sets        // Used by comparison with 0 optimization
//          setns
static const unsigned SetCCOpcodeTab[2][8] = {
  { X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAEr, X86::SETAr, X86::SETBEr,
    0, 0 },
  { X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGEr, X86::SETGr, X86::SETLEr,
    X86::SETSr, X86::SETNSr },
};

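// For example (sketch): a signed "setgt" has getSetCCNumber(SetGT) == 4, so
// SetCCOpcodeTab[1][4] == X86::SETGr is emitted; the unsigned row would give
// X86::SETAr instead.  Slots 6 and 7 (sets/setns) are only reached through the
// compare-with-zero optimization in EmitComparison below.
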
/// emitUCOMr - In the future when we support processors before the P6, this
/// wraps the logic for emitting an FUCOMr vs FUCOMIr.
void ISel::emitUCOMr(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                     unsigned LHS, unsigned RHS) {
  if (0) { // for processors prior to the P6
    BuildMI(*MBB, IP, X86::FUCOMr, 2).addReg(LHS).addReg(RHS);
    BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
    BuildMI(*MBB, IP, X86::SAHF, 1);
  } else {
    BuildMI(*MBB, IP, X86::FUCOMIr, 2).addReg(LHS).addReg(RHS);
  }
}

// EmitComparison - This function emits a comparison of the two operands,
// returning the extended setcc code to use.
unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                              MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP) {
  // The arguments are already supposed to be of the same type.
  const Type *CompTy = Op0->getType();
  unsigned Class = getClassB(CompTy);
  unsigned Op0r = getReg(Op0, MBB, IP);

  // Special case handling of: cmp R, i
  if (isa<ConstantPointerNull>(Op1)) {
    if (OpNum < 2)    // seteq/setne -> test
      BuildMI(*MBB, IP, X86::TEST32rr, 2).addReg(Op0r).addReg(Op0r);
    else
      BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r).addImm(0);
    return OpNum;
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    if (Class == cByte || Class == cShort || Class == cInt) {
      unsigned Op1v = CI->getRawValue();

      // Mask off any upper bits of the constant, if there are any...
      Op1v &= (1ULL << (8 << Class)) - 1;

      // If this is a comparison against zero, emit more efficient code.  We
      // can't handle unsigned comparisons against zero unless they are == or
      // !=.  These should have been strength reduced already anyway.
      if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
        static const unsigned TESTTab[] = {
          X86::TEST8rr, X86::TEST16rr, X86::TEST32rr
        };
        BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);

        if (OpNum == 2) return 6;   // Map jl -> js
        if (OpNum == 3) return 7;   // Map jg -> jns
        return OpNum;
      }

      static const unsigned CMPTab[] = {
        X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
      };

      BuildMI(*MBB, IP, CMPTab[Class], 2).addReg(Op0r).addImm(Op1v);
      return OpNum;
    } else {
      assert(Class == cLong && "Unknown integer class!");
      unsigned LowCst = CI->getRawValue();
      unsigned HiCst = CI->getRawValue() >> 32;
      if (OpNum < 2) {    // seteq, setne
        unsigned LoTmp = Op0r;
        if (LowCst != 0) {
          LoTmp = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::XOR32ri, 2, LoTmp).addReg(Op0r).addImm(LowCst);
        }
        unsigned HiTmp = Op0r+1;
        if (HiCst != 0) {
          HiTmp = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::XOR32ri, 2, HiTmp).addReg(Op0r+1)
            .addImm(HiCst);
        }
        unsigned FinalTmp = makeAnotherReg(Type::IntTy);
        BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
        return OpNum;
      } else {
        // Emit a sequence of code which compares the high and low parts once
        // each, then uses a conditional move to handle the overflow case.  For
        // example, a setlt for long would generate code like this:
        //
        //   AL = lo(op1) < lo(op2)   // Always unsigned comparison
        //   BL = hi(op1) < hi(op2)   // Signedness depends on operands
        //   dest = hi(op1) == hi(op2) ? BL : AL;
        //
        // FIXME: This would be much better if we had hierarchical register
        // classes!  Until then, hardcode registers so that we can deal with
        // their aliases (because we don't have conditional byte moves).
        //
        BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r).addImm(LowCst);
        BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
        BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r+1).addImm(HiCst);
        BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0,
                X86::BL);
        BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
        BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
        BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
          .addReg(X86::AX);
        // NOTE: visitSetCondInst knows that the value is dumped into the BL
        // register at this point for long values...
        return OpNum;
      }
    }
  }

  // Special case handling of comparison against +/- 0.0
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op1))
    if (CFP->isExactlyValue(+0.0) || CFP->isExactlyValue(-0.0)) {
      BuildMI(*MBB, IP, X86::FTST, 1).addReg(Op0r);
      BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
      BuildMI(*MBB, IP, X86::SAHF, 1);
      return OpNum;
    }

  unsigned Op1r = getReg(Op1, MBB, IP);
  switch (Class) {
  default: assert(0 && "Unknown type class!");
    // Emit: cmp <var1>, <var2> (do the comparison).  We can
    // compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
    // 32-bit.
  case cByte:
    BuildMI(*MBB, IP, X86::CMP8rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cShort:
    BuildMI(*MBB, IP, X86::CMP16rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cInt:
    BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cFP:
    emitUCOMr(MBB, IP, Op0r, Op1r);
    break;

  case cLong:
    if (OpNum < 2) {    // seteq, setne
      unsigned LoTmp = makeAnotherReg(Type::IntTy);
      unsigned HiTmp = makeAnotherReg(Type::IntTy);
      unsigned FinalTmp = makeAnotherReg(Type::IntTy);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, LoTmp).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
      break;  // Allow the sete or setne to be generated from flags set by OR
    } else {
      // Emit a sequence of code which compares the high and low parts once
      // each, then uses a conditional move to handle the overflow case.  For
      // example, a setlt for long would generate code like this:
      //
      //   AL = lo(op1) < lo(op2)   // Always unsigned comparison
      //   BL = hi(op1) < hi(op2)   // Signedness depends on operands
      //   dest = hi(op1) == hi(op2) ? BL : AL;
      //
      // FIXME: This would be much better if we had hierarchical register
      // classes!  Until then, hardcode registers so that we can deal with their
      // aliases (because we don't have conditional byte moves).
      //
      BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
      BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
      BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
        .addReg(X86::AX);
      // NOTE: visitSetCondInst knows that the value is dumped into the BL
      // register at this point for long values...
      break;
    }
  }
  return OpNum;
}

/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
/// register, then move it to wherever the result should be.
///
void ISel::visitSetCondInst(SetCondInst &I) {
  if (canFoldSetCCIntoBranchOrSelect(&I))
    return;  // Fold this into a branch or select.

  unsigned DestReg = getReg(I);
  MachineBasicBlock::iterator MII = BB->end();
  emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),
                     DestReg);
}

/// emitSetCCOperation - Common code shared between visitSetCondInst and
/// constant expression support.
///
void ISel::emitSetCCOperation(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP,
                              Value *Op0, Value *Op1, unsigned Opcode,
                              unsigned TargetReg) {
  unsigned OpNum = getSetCCNumber(Opcode);
  OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP);

  const Type *CompTy = Op0->getType();
  unsigned CompClass = getClassB(CompTy);
  bool isSigned = CompTy->isSigned() && CompClass != cFP;

  if (CompClass != cLong || OpNum < 2) {
    // Handle normal comparisons with a setcc instruction...
    BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
  } else {
    // Handle long comparisons by copying the value which is already in BL into
    // the register we want...
    BuildMI(*MBB, IP, X86::MOV8rr, 1, TargetReg).addReg(X86::BL);
  }
}

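// For example (sketch): "%c = setlt int %a, %b" that cannot be folded into a
// branch or select emits
//   CMP32rr %a_reg, %b_reg
//   SETLr   %c_reg            // SETBr instead for an unsigned comparison
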
void ISel::visitSelectInst(SelectInst &SI) {
  unsigned DestReg = getReg(SI);
  MachineBasicBlock::iterator MII = BB->end();
  emitSelectOperation(BB, MII, SI.getCondition(), SI.getTrueValue(),
                      SI.getFalseValue(), DestReg);
}

/// emitSelectOperation - Common code shared between visitSelectInst and the
/// constant expression support.
void ISel::emitSelectOperation(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator IP,
                               Value *Cond, Value *TrueVal, Value *FalseVal,
                               unsigned DestReg) {
  unsigned SelectClass = getClassB(TrueVal->getType());

  // We don't support 8-bit conditional moves.  If we have incoming constants,
  // transform them into 16-bit constants to avoid having a run-time conversion.
  if (SelectClass == cByte) {
    if (Constant *T = dyn_cast<Constant>(TrueVal))
      TrueVal = ConstantExpr::getCast(T, Type::ShortTy);
    if (Constant *F = dyn_cast<Constant>(FalseVal))
      FalseVal = ConstantExpr::getCast(F, Type::ShortTy);
  }

  unsigned TrueReg  = getReg(TrueVal, MBB, IP);
  unsigned FalseReg = getReg(FalseVal, MBB, IP);
  if (TrueReg == FalseReg) {
    static const unsigned Opcode[] = {
      X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
    };
    BuildMI(*MBB, IP, Opcode[SelectClass], 1, DestReg).addReg(TrueReg);
    if (SelectClass == cLong)
      BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(TrueReg+1);
    return;
  }

  unsigned Opcode;
  if (SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(Cond)) {
    // We successfully folded the setcc into the select instruction.

    unsigned OpNum = getSetCCNumber(SCI->getOpcode());
    OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), MBB,
                           IP);

    const Type *CompTy = SCI->getOperand(0)->getType();
    bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

    // LLVM  -> X86 signed  X86 unsigned
    // -----    ----------  ------------
    // seteq -> cmovNE      cmovNE
    // setne -> cmovE       cmovE
    // setlt -> cmovGE      cmovAE
    // setge -> cmovL       cmovB
    // setgt -> cmovLE      cmovBE
    // setle -> cmovG       cmovA
    // ----
    //          cmovNS      // Used by comparison with 0 optimization
    //          cmovS
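    // For example (sketch): "select bool (setlt int %a, %b), %t, %f" emits
    //   CMP32rr %a, %b  ;  CMOVGE32rr DestReg(=TrueReg), FalseReg
    // The condition is inverted because the cmov keeps the true value and only
    // overwrites it with the false value when the setcc predicate fails.
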
    switch (SelectClass) {
    default: assert(0 && "Unknown value class!");
    case cFP: {
      // Annoyingly, we don't have a full set of floating point conditional
      // moves.  :(
      static const unsigned OpcodeTab[2][8] = {
        { X86::FCMOVNE, X86::FCMOVE, X86::FCMOVAE, X86::FCMOVB,
          X86::FCMOVBE, X86::FCMOVA, 0, 0 },
        { X86::FCMOVNE, X86::FCMOVE, 0, 0, 0, 0, 0, 0 },
      };
      Opcode = OpcodeTab[isSigned][OpNum];

      // If opcode == 0, we hit a case that we don't support.  Output a setcc
      // and compare the result against zero.
      if (Opcode == 0) {
        unsigned CompClass = getClassB(CompTy);
        unsigned CondReg;
        if (CompClass != cLong || OpNum < 2) {
          CondReg = makeAnotherReg(Type::BoolTy);
          // Handle normal comparisons with a setcc instruction...
          BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, CondReg);
        } else {
          // Long comparisons end up in the BL register.
          CondReg = X86::BL;
        }

        BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
        Opcode = X86::FCMOVE;
      }
      break;
    }
    case cByte:
    case cShort: {
      static const unsigned OpcodeTab[2][8] = {
        { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVAE16rr, X86::CMOVB16rr,
          X86::CMOVBE16rr, X86::CMOVA16rr, 0, 0 },
        { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVGE16rr, X86::CMOVL16rr,
          X86::CMOVLE16rr, X86::CMOVG16rr, X86::CMOVNS16rr, X86::CMOVS16rr },
      };
      Opcode = OpcodeTab[isSigned][OpNum];
      break;
    }
    case cInt:
    case cLong: {
      static const unsigned OpcodeTab[2][8] = {
        { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVAE32rr, X86::CMOVB32rr,
          X86::CMOVBE32rr, X86::CMOVA32rr, 0, 0 },
        { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVGE32rr, X86::CMOVL32rr,
          X86::CMOVLE32rr, X86::CMOVG32rr, X86::CMOVNS32rr, X86::CMOVS32rr },
      };
      Opcode = OpcodeTab[isSigned][OpNum];
      break;
    }
    }
  } else {
    // Get the value being branched on, and use it to set the condition codes.
    unsigned CondReg = getReg(Cond, MBB, IP);
    BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
    switch (SelectClass) {
    default: assert(0 && "Unknown value class!");
    case cFP:    Opcode = X86::FCMOVE;     break;
    case cByte:
    case cShort: Opcode = X86::CMOVE16rr;  break;
    case cInt:
    case cLong:  Opcode = X86::CMOVE32rr;  break;
    }
  }

  unsigned RealDestReg = DestReg;

  // Annoyingly enough, X86 doesn't HAVE 8-bit conditional moves.  Because of
  // this, we have to promote the incoming values to 16 bits, perform a 16-bit
  // cmove, then truncate the result.
  if (SelectClass == cByte) {
    DestReg = makeAnotherReg(Type::ShortTy);
    if (getClassB(TrueVal->getType()) == cByte) {
      // Promote the true value, by storing it into AL, and reading from AX.
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::AL).addReg(TrueReg);
      BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::AH).addImm(0);
      TrueReg = makeAnotherReg(Type::ShortTy);
      BuildMI(*MBB, IP, X86::MOV16rr, 1, TrueReg).addReg(X86::AX);
    }
    if (getClassB(FalseVal->getType()) == cByte) {
      // Promote the false value, by storing it into CL, and reading from CX.
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(FalseReg);
      BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::CH).addImm(0);
      FalseReg = makeAnotherReg(Type::ShortTy);
      BuildMI(*MBB, IP, X86::MOV16rr, 1, FalseReg).addReg(X86::CX);
    }
  }

  BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(TrueReg).addReg(FalseReg);

  switch (SelectClass) {
  case cByte:
    // We did the computation with 16-bit registers.  Truncate back to our
    // result by copying into AX then copying out AL.
    BuildMI(*MBB, IP, X86::MOV16rr, 1, X86::AX).addReg(DestReg);
    BuildMI(*MBB, IP, X86::MOV8rr, 1, RealDestReg).addReg(X86::AL);
    break;
  case cLong:
    // Move the upper half of the value as well.
    BuildMI(*MBB, IP, Opcode, 2, DestReg+1).addReg(TrueReg+1)
      .addReg(FalseReg+1);
    break;
  }
}

/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
/// operand, in the specified target register.
///
void ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
  bool isUnsigned = VR.Ty->isUnsigned() || VR.Ty == Type::BoolTy;

  Value *Val = VR.Val;
  const Type *Ty = VR.Ty;
  if (Val) {
    if (Constant *C = dyn_cast<Constant>(Val)) {
      Val = ConstantExpr::getCast(C, Type::IntTy);
      Ty = Type::IntTy;
    }
  }

  // If this is a simple constant, just emit a MOVri directly to avoid the
  // copy.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    int TheVal = CI->getRawValue() & 0xFFFFFFFF;
    BuildMI(BB, X86::MOV32ri, 1, targetReg).addImm(TheVal);
    return;
  }

  // Make sure we have the register number for this value...
  unsigned Reg = Val ? getReg(Val) : VR.Reg;

  switch (getClassB(Ty)) {
  case cByte:
    // Extend value into target register (8->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr8, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr8, 1, targetReg).addReg(Reg);
    break;
  case cShort:
    // Extend value into target register (16->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr16, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr16, 1, targetReg).addReg(Reg);
    break;
  case cInt:
    // Move value into target register (32->32)
    BuildMI(BB, X86::MOV32rr, 1, targetReg).addReg(Reg);
    break;
  default:
    assert(0 && "Unpromotable operand class in promote32");
  }
}

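// For example (sketch): promote32(X86::EAX, ValueRecord(V)) emits
//   MOVSX32rr16 EAX, <reg of V>   for a signed short V, and
//   MOVZX32rr8  EAX, <reg of V>   for a ubyte or bool V.
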
/// 'ret' instruction - Here we are interested in meeting the x86 ABI.  As such,
/// we have the following possibilities:
///
///   ret void: No return value, simply emit a 'ret' instruction
///   ret sbyte, ubyte : Extend value into EAX and return
///   ret short, ushort: Extend value into EAX and return
///   ret int, uint    : Move value into EAX and return
///   ret pointer      : Move value into EAX and return
///   ret long, ulong  : Move value into EAX/EDX and return
///   ret float/double : Top of FP stack
///
void ISel::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    BuildMI(BB, X86::RET, 0);  // Just emit a 'ret' instruction
    return;
  }

  Value *RetVal = I.getOperand(0);
  switch (getClassB(RetVal->getType())) {
  case cByte:   // integral return values: extend or move into EAX and return
  case cShort:
  case cInt:
    promote32(X86::EAX, ValueRecord(RetVal));
    // Declare that EAX is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
    break;
  case cFP: {                   // Floats & Doubles: Return in ST(0)
    unsigned RetReg = getReg(RetVal);
    BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg);
    // Declare that top-of-stack is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
    break;
  }
  case cLong: {
    unsigned RetReg = getReg(RetVal);
    BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(RetReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RetReg+1);
    // Declare that EAX & EDX are live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
      .addReg(X86::ESP);
    break;
  }
  default:
    visitInstruction(I);
  }
  // Emit a 'ret' instruction
  BuildMI(BB, X86::RET, 0);
}

// getBlockAfter - Return the basic block which occurs lexically after the
// specified one.
static inline BasicBlock *getBlockAfter(BasicBlock *BB) {
  Function::iterator I = BB; ++I;  // Get iterator to next block
  return I != BB->getParent()->end() ? &*I : 0;
}

/// visitBranchInst - Handle conditional and unconditional branches here.  Note
/// that since code layout is frozen at this point, if we are trying to jump to
/// a block that is the immediate successor of the current block, we can just
/// make a fall-through rather than an explicit jump.
///
void ISel::visitBranchInst(BranchInst &BI) {
  // Update machine-CFG edges
  BB->addSuccessor(MBBMap[BI.getSuccessor(0)]);
  if (BI.isConditional())
    BB->addSuccessor(MBBMap[BI.getSuccessor(1)]);

  BasicBlock *NextBB = getBlockAfter(BI.getParent());  // BB after current one

  if (!BI.isConditional()) {  // Unconditional branch?
    if (BI.getSuccessor(0) != NextBB)
      BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
    return;
  }

  // See if we can fold the setcc into the branch itself...
  SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(BI.getCondition());
  if (SCI == 0) {
    // Nope, cannot fold setcc into this branch.  Emit a branch on a condition
    // computed some other way...
    unsigned condReg = getReg(BI.getCondition());
    BuildMI(BB, X86::TEST8rr, 2).addReg(condReg).addReg(condReg);
    if (BI.getSuccessor(1) == NextBB) {
      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JNE, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
    } else {
      BuildMI(BB, X86::JE, 1).addMBB(MBBMap[BI.getSuccessor(1)]);

      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
    }
    return;
  }

  unsigned OpNum = getSetCCNumber(SCI->getOpcode());
  MachineBasicBlock::iterator MII = BB->end();
  OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB,
                         MII);

  const Type *CompTy = SCI->getOperand(0)->getType();
  bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

  // LLVM  -> X86 signed  X86 unsigned
  // -----    ----------  ------------
  // seteq -> je          je
  // setne -> jne         jne
  // setlt -> jl          jb
  // setge -> jge         jae
  // setgt -> jg          ja
  // setle -> jle         jbe
  // ----
  //          js          // Used by comparison with 0 optimization
  //          jns

  static const unsigned OpcodeTab[2][8] = {
    { X86::JE, X86::JNE, X86::JB, X86::JAE, X86::JA, X86::JBE, 0, 0 },
    { X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE,
      X86::JS, X86::JNS },
  };

  if (BI.getSuccessor(0) != NextBB) {
    BuildMI(BB, OpcodeTab[isSigned][OpNum], 1)
      .addMBB(MBBMap[BI.getSuccessor(0)]);
    if (BI.getSuccessor(1) != NextBB)
      BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(1)]);
  } else {
    // Change to the inverse condition...
    if (BI.getSuccessor(1) != NextBB) {
      OpNum ^= 1;
      BuildMI(BB, OpcodeTab[isSigned][OpNum], 1)
        .addMBB(MBBMap[BI.getSuccessor(1)]);
    }
  }
}

/// doCall - This emits an abstract call instruction, setting up the arguments
/// and the return value as appropriate.  For the actual function call itself,
/// it inserts the specified CallMI instruction into the stream.
///
void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                  const std::vector<ValueRecord> &Args) {
  // Count how many bytes are to be pushed on the stack...
  unsigned NumBytes = 0;

  if (!Args.empty()) {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getClassB(Args[i].Ty)) {
      case cByte: case cShort: case cInt:
        NumBytes += 4; break;
      case cLong:
        NumBytes += 8; break;
      case cFP:
        NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8;
        break;
      default: assert(0 && "Unknown class!");
      }

    // Adjust the stack pointer for the new arguments...
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(NumBytes);

    // Arguments go on the stack in reverse order, as specified by the ABI.
    unsigned ArgOffset = 0;
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      unsigned ArgReg;
      switch (getClassB(Args[i].Ty)) {
      case cByte:
        if (Args[i].Val && isa<ConstantBool>(Args[i].Val)) {
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
            .addImm(Args[i].Val == ConstantBool::True);
          break;
        }
        // FALL THROUGH
      case cShort:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          // Zero/Sign extend constant, then stuff into memory.
          ConstantInt *Val = cast<ConstantInt>(Args[i].Val);
          Val = cast<ConstantInt>(ConstantExpr::getCast(Val, Type::IntTy));
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
            .addImm(Val->getRawValue() & 0xFFFFFFFF);
        } else {
          // Promote arg to 32 bits wide into a temporary register...
          ArgReg = makeAnotherReg(Type::UIntTy);
          promote32(ArgReg, Args[i]);
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        }
        break;
      case cInt:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          unsigned Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(Val);
        } else if (Args[i].Val && isa<ConstantPointerNull>(Args[i].Val)) {
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(0);
        } else {
          ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        }
        break;
      case cLong:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          uint64_t Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(Val & ~0U);
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset+4).addImm(Val >> 32ULL);
        } else {
          ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset+4).addReg(ArgReg+1);
        }
        ArgOffset += 4;      // 8 byte entry, not 4.
        break;

      case cFP:
        ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
        if (Args[i].Ty == Type::FloatTy) {
          addRegOffset(BuildMI(BB, X86::FST32m, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        } else {
          assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
          addRegOffset(BuildMI(BB, X86::FST64m, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          ArgOffset += 4;    // 8 byte entry, not 4.
        }
        break;

      default: assert(0 && "Unknown class!");
      }
      ArgOffset += 4;
    }
  } else {
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(0);
  }

  BB->push_back(CallMI);

  BuildMI(BB, X86::ADJCALLSTACKUP, 1).addImm(NumBytes);

  // If there is a return value, scavenge the result from the location the call
  // leaves it in...
  //
  if (Ret.Ty != Type::VoidTy) {
    unsigned DestClass = getClassB(Ret.Ty);
    switch (DestClass) {
    case cByte:
    case cShort:
    case cInt: {
      // Integral results are in %eax, or the appropriate portion
      // thereof.
      static const unsigned regRegMove[] = {
        X86::MOV8rr, X86::MOV16rr, X86::MOV32rr
      };
      static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
      BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
      break;
    }
    case cFP:     // Floating-point return values live in %ST(0)
      BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
      break;
    case cLong:   // Long values are left in EDX:EAX
      BuildMI(BB, X86::MOV32rr, 1, Ret.Reg).addReg(X86::EAX);
      BuildMI(BB, X86::MOV32rr, 1, Ret.Reg+1).addReg(X86::EDX);
      break;
    default: assert(0 && "Unknown class!");
    }
  }
}

/// visitCallInst - Push args on stack and do a procedure call instruction.
void ISel::visitCallInst(CallInst &CI) {
  MachineInstr *TheCall;
  if (Function *F = CI.getCalledFunction()) {
    // Is it an intrinsic function call?
    if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) {
      visitIntrinsicCall(ID, CI);  // Special intrinsics are not handled here
      return;
    }

    // Emit a CALL instruction with PC-relative displacement.
    TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
  } else {  // Emit an indirect call...
    unsigned Reg = getReg(CI.getCalledValue());
    TheCall = BuildMI(X86::CALL32r, 1).addReg(Reg);
  }

  std::vector<ValueRecord> Args;
  for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
    Args.push_back(ValueRecord(CI.getOperand(i)));

  unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
  doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
}

/// dyncastIsNan - Return the operand of an isnan operation if this is an isnan.
///
static Value *dyncastIsNan(Value *V) {
  if (CallInst *CI = dyn_cast<CallInst>(V))
    if (Function *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::isnan)
        return CI->getOperand(1);
  return 0;
}

/// isOnlyUsedByUnorderedComparisons - Return true if this value is only used by
/// or's whose operands are all calls to the isnan predicate.
static bool isOnlyUsedByUnorderedComparisons(Value *V) {
  assert(dyncastIsNan(V) && "The value isn't an isnan call!");

  // Check all uses, which will be or's of isnans if this predicate is true.
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
    Instruction *I = cast<Instruction>(*UI);
    if (I->getOpcode() != Instruction::Or) return false;
    if (I->getOperand(0) != V && !dyncastIsNan(I->getOperand(0))) return false;
    if (I->getOperand(1) != V && !dyncastIsNan(I->getOperand(1))) return false;
  }

  return true;
}

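// For example (illustrative LLVM 1.x-style snippet): the pattern recognized
// above is the expansion of "isunordered(x, y)":
//   %n1 = call bool %llvm.isnan(double %x)
//   %n2 = call bool %llvm.isnan(double %y)
//   %u  = or bool %n1, %n2
// In that case each isnan is folded into the FP compare and never materialized
// with a separate SETPr (see Intrinsic::isnan in visitIntrinsicCall below).
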
1648 /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
1649 /// function, lowering any calls to unknown intrinsic functions into the
1650 /// equivalent LLVM code.
1652 void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
1653 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
1654 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
1655 if (CallInst *CI = dyn_cast<CallInst>(I++))
1656 if (Function *F = CI->getCalledFunction())
1657 switch (F->getIntrinsicID()) {
1658 case Intrinsic::not_intrinsic:
1659 case Intrinsic::vastart:
1660 case Intrinsic::vacopy:
1661 case Intrinsic::vaend:
1662 case Intrinsic::returnaddress:
1663 case Intrinsic::frameaddress:
1664 case Intrinsic::memcpy:
1665 case Intrinsic::memset:
1666 case Intrinsic::isnan:
1667 case Intrinsic::readport:
1668 case Intrinsic::writeport:
1669 // We directly implement these intrinsics
1671 case Intrinsic::readio: {
1672 // On X86, memory operations are in-order. Lower this intrinsic
1673 // into a volatile load.
1674 Instruction *Before = CI->getPrev();
1675 LoadInst * LI = new LoadInst(CI->getOperand(1), "", true, CI);
1676 CI->replaceAllUsesWith(LI);
1677 BB->getInstList().erase(CI);
1680 case Intrinsic::writeio: {
1681 // On X86, memory operations are in-order. Lower this intrinsic
1682 // into a volatile store.
1683 Instruction *Before = CI->getPrev();
1684 StoreInst *SI = new StoreInst(CI->getOperand(1),
1685 CI->getOperand(2), true, CI);
1686 CI->replaceAllUsesWith(SI);
1687 BB->getInstList().erase(CI);
1690 default:
1691 // All other intrinsic calls we must lower.
1692 Instruction *Before = CI->getPrev();
1693 TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
1694 if (Before) { // Move iterator to instruction after call
1702 void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
1703 unsigned TmpReg1, TmpReg2;
1704 switch (ID) {
1705 case Intrinsic::vastart:
1706 // Get the address of the first vararg value...
1707 TmpReg1 = getReg(CI);
1708 addFrameReference(BuildMI(BB, X86::LEA32r, 5, TmpReg1), VarArgsFrameIndex);
1711 case Intrinsic::vacopy:
1712 TmpReg1 = getReg(CI);
1713 TmpReg2 = getReg(CI.getOperand(1));
1714 BuildMI(BB, X86::MOV32rr, 1, TmpReg1).addReg(TmpReg2);
1716 case Intrinsic::vaend: return; // Noop on X86
1718 case Intrinsic::returnaddress:
1719 case Intrinsic::frameaddress:
1720 TmpReg1 = getReg(CI);
1721 if (cast<Constant>(CI.getOperand(1))->isNullValue()) {
1722 if (ID == Intrinsic::returnaddress) {
1723 // Just load the return address
1724 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, TmpReg1),
1725 ReturnAddressIndex);
1726 } else {
1727 addFrameReference(BuildMI(BB, X86::LEA32r, 4, TmpReg1),
1728 ReturnAddressIndex, -4);
1729 }
1730 } else {
1731 // Values other than zero are not implemented yet.
1732 BuildMI(BB, X86::MOV32ri, 1, TmpReg1).addImm(0);
1736 case Intrinsic::isnan:
1737 // If this is only used by 'isunordered' style comparisons, don't emit it.
1738 if (isOnlyUsedByUnorderedComparisons(&CI)) return;
1739 TmpReg1 = getReg(CI.getOperand(1));
1740 emitUCOMr(BB, BB->end(), TmpReg1, TmpReg1);
1741 TmpReg2 = getReg(CI);
1742 BuildMI(BB, X86::SETPr, 0, TmpReg2);
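// A FP compare of a register against itself comes back "unordered" exactly
// when the value is a NaN, and an unordered compare sets the parity flag, so
// SETP materializes the isnan result as a boolean byte.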
1745 case Intrinsic::memcpy: {
1746 assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
1747 unsigned Align = 1;
1748 if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
1749 Align = AlignC->getRawValue();
1750 if (Align == 0) Align = 1;
1753 // Turn the byte count into # iterations
1754 unsigned CountReg;
1755 unsigned Opcode;
1756 switch (Align & 3) {
1757 case 2: // WORD aligned
1758 if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
1759 CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
1761 CountReg = makeAnotherReg(Type::IntTy);
1762 unsigned ByteReg = getReg(CI.getOperand(3));
1763 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
1765 Opcode = X86::REP_MOVSW;
1767 case 0: // DWORD aligned
1768 if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
1769 CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
1771 CountReg = makeAnotherReg(Type::IntTy);
1772 unsigned ByteReg = getReg(CI.getOperand(3));
1773 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
1775 Opcode = X86::REP_MOVSD;
1777 default: // BYTE aligned
1778 CountReg = getReg(CI.getOperand(3));
1779 Opcode = X86::REP_MOVSB;
1783 // No matter what the alignment is, we put the source in ESI, the
1784 // destination in EDI, and the count in ECX.
1785 TmpReg1 = getReg(CI.getOperand(1));
1786 TmpReg2 = getReg(CI.getOperand(2));
1787 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
1788 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
1789 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
1790 BuildMI(BB, Opcode, 0);
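// REP MOVSB/MOVSW/MOVSD copies ECX elements (not bytes) from [ESI] to [EDI],
// which is why the byte count was scaled above: e.g. a 64-byte, dword-aligned
// copy becomes REP_MOVSD with ECX == 16.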
1793 case Intrinsic::memset: {
1794 assert(CI.getNumOperands() == 5 && "Illegal llvm.memset call!");
1795 unsigned Align = 1;
1796 if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
1797 Align = AlignC->getRawValue();
1798 if (Align == 0) Align = 1;
1801 // Turn the byte count into # iterations
1802 unsigned CountReg;
1803 unsigned Opcode;
1804 if (ConstantInt *ValC = dyn_cast<ConstantInt>(CI.getOperand(2))) {
1805 unsigned Val = ValC->getRawValue() & 255;
1807 // If the value is a constant, then we can potentially use larger copies.
1808 switch (Align & 3) {
1809 case 2: // WORD aligned
1810 if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
1811 CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
1813 CountReg = makeAnotherReg(Type::IntTy);
1814 unsigned ByteReg = getReg(CI.getOperand(3));
1815 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
1817 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
1818 Opcode = X86::REP_STOSW;
1820 case 0: // DWORD aligned
1821 if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
1822 CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
1824 CountReg = makeAnotherReg(Type::IntTy);
1825 unsigned ByteReg = getReg(CI.getOperand(3));
1826 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
1828 Val = (Val << 8) | Val;
1829 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
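// The byte is replicated across EAX (e.g. Val == 0xAB yields 0xABABABAB), so
// each dword store writes four copies of the fill byte.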
1830 Opcode = X86::REP_STOSD;
1832 default: // BYTE aligned
1833 CountReg = getReg(CI.getOperand(3));
1834 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
1835 Opcode = X86::REP_STOSB;
1839 // If it's not a constant value we are storing, just fall back. We could
1840 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
1841 unsigned ValReg = getReg(CI.getOperand(2));
1842 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
1843 CountReg = getReg(CI.getOperand(3));
1844 Opcode = X86::REP_STOSB;
1847 // No matter what the alignment is, we put the destination in EDI and the
1848 // count in ECX; the fill value is already in AL/AX/EAX.
1849 TmpReg1 = getReg(CI.getOperand(1));
1851 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
1852 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
1853 BuildMI(BB, Opcode, 0);
1857 case Intrinsic::readport: {
1858 // First, determine that the size of the operand falls within the acceptable
1859 // range for this architecture.
1861 if (getClassB(CI.getOperand(1)->getType()) != cShort) {
1862 std::cerr << "llvm.readport: Address size is not 16 bits\n";
1866 // Now, move the I/O port address into the DX register and use the IN
1867 // instruction to get the input data.
1869 unsigned Class = getClass(CI.getCalledFunction()->getReturnType());
1870 unsigned DestReg = getReg(CI);
1872 // If the port is a single-byte constant, use the immediate form.
1873 if (ConstantInt *C = dyn_cast<ConstantInt>(CI.getOperand(1)))
1874 if ((C->getRawValue() & 255) == C->getRawValue()) {
1875 switch (Class) {
1876 case cByte:
1877 BuildMI(BB, X86::IN8ri, 1).addImm((unsigned char)C->getRawValue());
1878 BuildMI(BB, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
1879 return;
1880 case cShort:
1881 BuildMI(BB, X86::IN16ri, 1).addImm((unsigned char)C->getRawValue());
1882 BuildMI(BB, X86::MOV16rr, 1, DestReg).addReg(X86::AX);
1883 return;
1884 case cInt:
1885 BuildMI(BB, X86::IN32ri, 1).addImm((unsigned char)C->getRawValue());
1886 BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);
1891 unsigned Reg = getReg(CI.getOperand(1));
1892 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
1893 switch (Class) {
1894 case cByte:
1895 BuildMI(BB, X86::IN8rr, 0);
1896 BuildMI(BB, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
1897 return;
1898 case cShort:
1899 BuildMI(BB, X86::IN16rr, 0);
1900 BuildMI(BB, X86::MOV16rr, 1, DestReg).addReg(X86::AX);
1901 return;
1902 case cInt:
1903 BuildMI(BB, X86::IN32rr, 0);
1904 BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);
1905 return;
1906 default:
1907 std::cerr << "Cannot do input on this data type\n";
1913 case Intrinsic::writeport: {
1914 // First, determine that the size of the operand falls within the
1915 // acceptable range for this architecture.
1916 if (getClass(CI.getOperand(2)->getType()) != cShort) {
1917 std::cerr << "llvm.writeport: Address size is not 16 bits\n";
1921 unsigned Class = getClassB(CI.getOperand(1)->getType());
1922 unsigned ValReg = getReg(CI.getOperand(1));
1923 switch (Class) {
1924 case cByte:
1925 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
1926 break;
1927 case cShort:
1928 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(ValReg);
1929 break;
1930 case cInt:
1931 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(ValReg);
1932 break;
1933 default:
1934 std::cerr << "llvm.writeport: invalid data type for X86 target\n";
1939 // If the port is a single-byte constant, use the immediate form.
1940 if (ConstantInt *C = dyn_cast<ConstantInt>(CI.getOperand(2)))
1941 if ((C->getRawValue() & 255) == C->getRawValue()) {
1942 static const unsigned O[] = { X86::OUT8ir, X86::OUT16ir, X86::OUT32ir };
1943 BuildMI(BB, O[Class], 1).addImm((unsigned char)C->getRawValue());
1947 // Otherwise, move the I/O port address into the DX register and the value
1948 // to write into the AL/AX/EAX register.
1949 static const unsigned Opc[] = { X86::OUT8rr, X86::OUT16rr, X86::OUT32rr };
1950 unsigned Reg = getReg(CI.getOperand(2));
1951 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
1952 BuildMI(BB, Opc[Class], 0);
1956 default: assert(0 && "Error: unknown intrinsics should have been lowered!");
1960 static bool isSafeToFoldLoadIntoInstruction(LoadInst &LI, Instruction &User) {
1961 if (LI.getParent() != User.getParent())
1963 BasicBlock::iterator It = &LI;
1964 // Check all of the instructions between the load and the user. We should
1965 // really use alias analysis here, but for now we just do something simple.
1966 for (++It; It != BasicBlock::iterator(&User); ++It) {
1967 switch (It->getOpcode()) {
1968 case Instruction::Free:
1969 case Instruction::Store:
1970 case Instruction::Call:
1971 case Instruction::Invoke:
1972 return false; // Cannot fold the load across these instructions.
1973 case Instruction::Load:
1974 if (cast<LoadInst>(It)->isVolatile() && LI.isVolatile())
1982 /// visitSimpleBinary - Implement simple binary operators for integral types...
1983 /// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
1984 /// Xor.
1986 void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
1987 unsigned DestReg = getReg(B);
1988 MachineBasicBlock::iterator MI = BB->end();
1989 Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1);
1990 unsigned Class = getClassB(B.getType());
1992 // Special case: op Reg, load [mem]
1993 if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1) && Class != cLong &&
1994 isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op0), B))
1995 if (!B.swapOperands())
1996 std::swap(Op0, Op1); // Make sure any loads are in the RHS.
1998 if (isa<LoadInst>(Op1) && Class != cLong &&
1999 isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op1), B)) {
2001 unsigned Opcode;
2002 if (Class != cFP) {
2003 static const unsigned OpcodeTab[][3] = {
2004 // Arithmetic operators
2005 { X86::ADD8rm, X86::ADD16rm, X86::ADD32rm }, // ADD
2006 { X86::SUB8rm, X86::SUB16rm, X86::SUB32rm }, // SUB
2008 // Bitwise operators
2009 { X86::AND8rm, X86::AND16rm, X86::AND32rm }, // AND
2010 { X86:: OR8rm, X86:: OR16rm, X86:: OR32rm }, // OR
2011 { X86::XOR8rm, X86::XOR16rm, X86::XOR32rm }, // XOR
2013 Opcode = OpcodeTab[OperatorClass][Class];
2014 } else {
2015 static const unsigned OpcodeTab[][2] = {
2016 { X86::FADD32m, X86::FADD64m }, // ADD
2017 { X86::FSUB32m, X86::FSUB64m }, // SUB
2019 const Type *Ty = Op0->getType();
2020 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2021 Opcode = OpcodeTab[OperatorClass][Ty == Type::DoubleTy];
2024 unsigned Op0r = getReg(Op0);
2025 if (AllocaInst *AI =
2026 dyn_castFixedAlloca(cast<LoadInst>(Op1)->getOperand(0))) {
2027 unsigned FI = getFixedSizedAllocaFI(AI);
2028 addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), FI);
2031 unsigned BaseReg, Scale, IndexReg, Disp;
2032 getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), BaseReg,
2033 Scale, IndexReg, Disp);
2035 addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r),
2036 BaseReg, Scale, IndexReg, Disp);
2041 // If this is a floating point subtract, check to see if we can fold the
2042 // first operand in.
2043 if (Class == cFP && OperatorClass == 1 &&
2044 isa<LoadInst>(Op0) &&
2045 isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op0), B)) {
2046 const Type *Ty = Op0->getType();
2047 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2048 unsigned Opcode = Ty == Type::FloatTy ? X86::FSUBR32m : X86::FSUBR64m;
2050 unsigned Op1r = getReg(Op1);
2051 if (AllocaInst *AI =
2052 dyn_castFixedAlloca(cast<LoadInst>(Op0)->getOperand(0))) {
2053 unsigned FI = getFixedSizedAllocaFI(AI);
2054 addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), FI);
2056 unsigned BaseReg, Scale, IndexReg, Disp;
2057 getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), BaseReg,
2058 Scale, IndexReg, Disp);
2060 addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r),
2061 BaseReg, Scale, IndexReg, Disp);
2066 emitSimpleBinaryOperation(BB, MI, Op0, Op1, OperatorClass, DestReg);
2070 /// emitBinaryFPOperation - This method handles emission of floating point
2071 /// Add (0), Sub (1), Mul (2), and Div (3) operations.
2072 void ISel::emitBinaryFPOperation(MachineBasicBlock *BB,
2073 MachineBasicBlock::iterator IP,
2074 Value *Op0, Value *Op1,
2075 unsigned OperatorClass, unsigned DestReg) {
2077 // Special case: op Reg, <const fp>
2078 if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1))
2079 if (!Op1C->isExactlyValue(+0.0) && !Op1C->isExactlyValue(+1.0)) {
2080 // Create a constant pool entry for this constant.
2081 MachineConstantPool *CP = F->getConstantPool();
2082 unsigned CPI = CP->getConstantPoolIndex(Op1C);
2083 const Type *Ty = Op1->getType();
2085 static const unsigned OpcodeTab[][4] = {
2086 { X86::FADD32m, X86::FSUB32m, X86::FMUL32m, X86::FDIV32m }, // Float
2087 { X86::FADD64m, X86::FSUB64m, X86::FMUL64m, X86::FDIV64m }, // Double
2090 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2091 unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass];
2092 unsigned Op0r = getReg(Op0, BB, IP);
2093 addConstantPoolReference(BuildMI(*BB, IP, Opcode, 5,
2094 DestReg).addReg(Op0r), CPI);
2098 // Special case: R1 = op <const fp>, R2
2099 if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0))
2100 if (CFP->isExactlyValue(-0.0) && OperatorClass == 1) {
2102 unsigned op1Reg = getReg(Op1, BB, IP);
2103 BuildMI(*BB, IP, X86::FCHS, 1, DestReg).addReg(op1Reg);
2105 } else if (!CFP->isExactlyValue(+0.0) && !CFP->isExactlyValue(+1.0)) {
2106 // R1 = op CST, R2 --> R1 = opr R2, CST
2108 // Create a constant pool entry for this constant.
2109 MachineConstantPool *CP = F->getConstantPool();
2110 unsigned CPI = CP->getConstantPoolIndex(CFP);
2111 const Type *Ty = CFP->getType();
2113 static const unsigned OpcodeTab[][4] = {
2114 { X86::FADD32m, X86::FSUBR32m, X86::FMUL32m, X86::FDIVR32m }, // Float
2115 { X86::FADD64m, X86::FSUBR64m, X86::FMUL64m, X86::FDIVR64m }, // Double
2118 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2119 unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass];
2120 unsigned Op1r = getReg(Op1, BB, IP);
2121 addConstantPoolReference(BuildMI(*BB, IP, Opcode, 5,
2122 DestReg).addReg(Op1r), CPI);
2127 static const unsigned OpcodeTab[4] = {
2128 X86::FpADD, X86::FpSUB, X86::FpMUL, X86::FpDIV
2131 unsigned Opcode = OpcodeTab[OperatorClass];
2132 unsigned Op0r = getReg(Op0, BB, IP);
2133 unsigned Op1r = getReg(Op1, BB, IP);
2134 BuildMI(*BB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
2137 /// emitSimpleBinaryOperation - Implement simple binary operators for integral
2138 /// types... OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for
2139 /// Or, 4 for Xor. This is the common code shared between visitSimpleBinary
2140 /// and constant expression support.
2144 void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
2145 MachineBasicBlock::iterator IP,
2146 Value *Op0, Value *Op1,
2147 unsigned OperatorClass, unsigned DestReg) {
2148 unsigned Class = getClassB(Op0->getType());
2150 if (Class == cFP) {
2151 assert(OperatorClass < 2 && "No logical ops for FP!");
2152 emitBinaryFPOperation(MBB, IP, Op0, Op1, OperatorClass, DestReg);
2156 if (Op0->getType() == Type::BoolTy) {
2157 if (OperatorClass == 3)
2158 // If this is an or of two isnan's, emit an FP comparison directly instead
2159 // of or'ing two isnan's together.
2160 if (Value *LHS = dyncastIsNan(Op0))
2161 if (Value *RHS = dyncastIsNan(Op1)) {
2162 unsigned Op0Reg = getReg(RHS, MBB, IP), Op1Reg = getReg(LHS, MBB, IP);
2163 emitUCOMr(MBB, IP, Op0Reg, Op1Reg);
2164 BuildMI(*MBB, IP, X86::SETPr, 0, DestReg);
2170 // sub 0, X -> neg X
2171 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0))
2172 if (OperatorClass == 1 && CI->isNullValue()) {
2173 unsigned op1Reg = getReg(Op1, MBB, IP);
2174 static unsigned const NEGTab[] = {
2175 X86::NEG8r, X86::NEG16r, X86::NEG32r, 0, X86::NEG32r
2177 BuildMI(*MBB, IP, NEGTab[Class], 1, DestReg).addReg(op1Reg);
2179 if (Class == cLong) {
2180 // We just emitted: Dl = neg Sl
2181 // Now emit : T = addc Sh, 0
2183 unsigned T = makeAnotherReg(Type::IntTy);
2184 BuildMI(*MBB, IP, X86::ADC32ri, 2, T).addReg(op1Reg+1).addImm(0);
2185 BuildMI(*MBB, IP, X86::NEG32r, 1, DestReg+1).addReg(T);
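// This works because -(2^32*Sh + Sl) == 2^32 * -(Sh + borrow) + (-Sl), where
// borrow is the carry flag the low-word NEG sets whenever Sl is non-zero.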
2190 // Special case: op Reg, <const int>
2191 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
2192 unsigned Op0r = getReg(Op0, MBB, IP);
2194 // xor X, -1 -> not X
2195 if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
2196 static unsigned const NOTTab[] = {
2197 X86::NOT8r, X86::NOT16r, X86::NOT32r, 0, X86::NOT32r
2199 BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
2200 if (Class == cLong) // Invert the top part too
2201 BuildMI(*MBB, IP, X86::NOT32r, 1, DestReg+1).addReg(Op0r+1);
2205 // add X, -1 -> dec X
2206 if (OperatorClass == 0 && Op1C->isAllOnesValue() && Class != cLong) {
2207 // Note that we can't use dec for 64-bit decrements, because it does not
2208 // set the carry flag!
2209 static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
2210 BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
2214 // add X, 1 -> inc X
2215 if (OperatorClass == 0 && Op1C->equalsInt(1) && Class != cLong) {
2216 // Note that we can't use inc for 64-bit increments, because it does not
2217 // set the carry flag!
2218 static unsigned const INCTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
2219 BuildMI(*MBB, IP, INCTab[Class], 1, DestReg).addReg(Op0r);
2223 static const unsigned OpcodeTab[][5] = {
2224 // Arithmetic operators
2225 { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri }, // ADD
2226 { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, X86::SUB32ri }, // SUB
2228 // Bitwise operators
2229 { X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, X86::AND32ri }, // AND
2230 { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri, 0, X86::OR32ri }, // OR
2231 { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, X86::XOR32ri }, // XOR
2234 unsigned Opcode = OpcodeTab[OperatorClass][Class];
2235 unsigned Op1l = cast<ConstantInt>(Op1C)->getRawValue();
2237 if (Class != cLong) {
2238 BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
2242 // If this is a long value and the high or low bits have a special
2243 // property, emit some special cases.
2244 unsigned Op1h = cast<ConstantInt>(Op1C)->getRawValue() >> 32LL;
2246 // If the constant is zero in the low 32-bits, just copy the low part
2247 // across and apply the normal 32-bit operation to the high parts. There
2248 // will be no carry or borrow into the top.
2249 if (Op1l == 0) {
2250 if (OperatorClass != 2) // All but and...
2251 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0r);
2252 else
2253 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
2254 BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg+1)
2255 .addReg(Op0r+1).addImm(Op1h);
2259 // If this is a logical operation and the top 32-bits are zero, just
2260 // operate on the lower 32.
2261 if (Op1h == 0 && OperatorClass > 1) {
2262 BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg)
2263 .addReg(Op0r).addImm(Op1l);
2264 if (OperatorClass != 2) // All but and
2265 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(Op0r+1);
2266 else
2267 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
2271 // TODO: We could handle lots of other special cases here, such as AND'ing
2272 // with 0xFFFFFFFF00000000 -> noop, etc.
2274 // Otherwise, code generate the full operation with a constant.
2275 static const unsigned TopTab[] = {
2276 X86::ADC32ri, X86::SBB32ri, X86::AND32ri, X86::OR32ri, X86::XOR32ri
2279 BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
2280 BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1)
2281 .addReg(Op0r+1).addImm(Op1h);
2285 // Finally, handle the general case now.
2286 static const unsigned OpcodeTab[][5] = {
2287 // Arithmetic operators
2288 { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, 0, X86::ADD32rr }, // ADD
2289 { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, 0, X86::SUB32rr }, // SUB
2291 // Bitwise operators
2292 { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, X86::AND32rr }, // AND
2293 { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0, X86:: OR32rr }, // OR
2294 { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, X86::XOR32rr }, // XOR
2297 unsigned Opcode = OpcodeTab[OperatorClass][Class];
2298 unsigned Op0r = getReg(Op0, MBB, IP);
2299 unsigned Op1r = getReg(Op1, MBB, IP);
2300 BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
2302 if (Class == cLong) { // Handle the upper 32 bits of long values...
2303 static const unsigned TopTab[] = {
2304 X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
2306 BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
2307 DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
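// In other words, a 64-bit add or subtract is the classic instruction pair:
// ADD/SUB on the low words followed by ADC/SBB on the high words so the carry
// or borrow propagates. The bitwise operators simply apply to each half.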
2311 /// doMultiply - Emit appropriate instructions to multiply together the
2312 /// registers op0Reg and op1Reg, and put the result in DestReg. The type of the
2313 /// result should be given as DestTy.
2315 void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
2316 unsigned DestReg, const Type *DestTy,
2317 unsigned op0Reg, unsigned op1Reg) {
2318 unsigned Class = getClass(DestTy);
2319 switch (Class) {
2320 case cInt:
2321 case cShort:
2322 BuildMI(*MBB, MBBI, Class == cInt ? X86::IMUL32rr:X86::IMUL16rr, 2, DestReg)
2323 .addReg(op0Reg).addReg(op1Reg);
2324 return;
2325 case cByte:
2326 // Must use the MUL instruction, which forces use of AL...
2327 BuildMI(*MBB, MBBI, X86::MOV8rr, 1, X86::AL).addReg(op0Reg);
2328 BuildMI(*MBB, MBBI, X86::MUL8r, 1).addReg(op1Reg);
2329 BuildMI(*MBB, MBBI, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
2332 case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
2336 // ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It
2337 // returns zero when the input is not exactly a power of two.
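// For example, ExactLog2(8) == 4 and ExactLog2(1) == 1; callers therefore
// shift by (result - 1) to scale by the original power of two.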
2338 static unsigned ExactLog2(unsigned Val) {
2339 if (Val == 0 || (Val & (Val-1))) return 0;
2349 /// doMultiplyConst - This function is specialized to efficiently codegen an 8,
2350 /// 16, or 32-bit integer multiply by a constant.
2351 void ISel::doMultiplyConst(MachineBasicBlock *MBB,
2352 MachineBasicBlock::iterator IP,
2353 unsigned DestReg, const Type *DestTy,
2354 unsigned op0Reg, unsigned ConstRHS) {
2355 static const unsigned MOVrrTab[] = {X86::MOV8rr, X86::MOV16rr, X86::MOV32rr};
2356 static const unsigned MOVriTab[] = {X86::MOV8ri, X86::MOV16ri, X86::MOV32ri};
2357 static const unsigned ADDrrTab[] = {X86::ADD8rr, X86::ADD16rr, X86::ADD32rr};
2359 unsigned Class = getClass(DestTy);
2361 // Handle special cases here.
2362 switch (ConstRHS) {
2363 case 0:
2364 BuildMI(*MBB, IP, MOVriTab[Class], 1, DestReg).addImm(0);
2365 return;
2366 case 1:
2367 BuildMI(*MBB, IP, MOVrrTab[Class], 1, DestReg).addReg(op0Reg);
2368 return;
2369 case 2:
2370 BuildMI(*MBB, IP, ADDrrTab[Class], 1,DestReg).addReg(op0Reg).addReg(op0Reg);
2371 return;
2372 case 3:
2373 case 5:
2374 case 9:
2375 if (Class == cInt) {
2376 addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, DestReg),
2377 op0Reg, ConstRHS-1, op0Reg, 0);
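// LEA computes base + scale*index + disp without touching the flags, so with
// base == index == op0Reg and scale == ConstRHS-1 this single instruction
// yields op0Reg*ConstRHS. The hardware supports scales of 2, 4 and 8, hence
// exactly the multipliers 3, 5 and 9.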
2382 // If the element size is exactly a power of 2, use a shift to get it.
2383 if (unsigned Shift = ExactLog2(ConstRHS)) {
2384 switch (Class) {
2385 default: assert(0 && "Unknown class for this function!");
2386 case cByte:
2387 BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
2388 return;
2389 case cShort:
2390 BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
2391 return;
2392 case cInt:
2393 BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
2398 if (Class == cShort) {
2399 BuildMI(*MBB, IP, X86::IMUL16rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
2401 } else if (Class == cInt) {
2402 BuildMI(*MBB, IP, X86::IMUL32rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
2406 // Most general case, emit a normal multiply...
2407 unsigned TmpReg = makeAnotherReg(DestTy);
2408 BuildMI(*MBB, IP, MOVriTab[Class], 1, TmpReg).addImm(ConstRHS);
2410 // Emit a MUL to multiply op0Reg by the constant now held in TmpReg,
2411 // putting the result in DestReg.
2412 doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
2415 /// visitMul - Multiplies are not simple binary operators because they must deal
2416 /// with the EAX register explicitly.
2418 void ISel::visitMul(BinaryOperator &I) {
2419 unsigned ResultReg = getReg(I);
2421 Value *Op0 = I.getOperand(0);
2422 Value *Op1 = I.getOperand(1);
2424 // Fold loads into floating point multiplies.
2425 if (getClass(Op0->getType()) == cFP) {
2426 if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1))
2427 if (!I.swapOperands())
2428 std::swap(Op0, Op1); // Make sure any loads are in the RHS.
2429 if (LoadInst *LI = dyn_cast<LoadInst>(Op1))
2430 if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
2431 const Type *Ty = Op0->getType();
2432 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2433 unsigned Opcode = Ty == Type::FloatTy ? X86::FMUL32m : X86::FMUL64m;
2435 unsigned Op0r = getReg(Op0);
2436 if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
2437 unsigned FI = getFixedSizedAllocaFI(AI);
2438 addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
2440 unsigned BaseReg, Scale, IndexReg, Disp;
2441 getAddressingMode(LI->getOperand(0), BaseReg,
2442 Scale, IndexReg, Disp);
2444 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r),
2445 BaseReg, Scale, IndexReg, Disp);
2451 MachineBasicBlock::iterator IP = BB->end();
2452 emitMultiply(BB, IP, Op0, Op1, ResultReg);
2455 void ISel::emitMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
2456 Value *Op0, Value *Op1, unsigned DestReg) {
2457 MachineBasicBlock &BB = *MBB;
2458 TypeClass Class = getClass(Op0->getType());
2460 // Simple scalar multiply?
2461 unsigned Op0Reg = getReg(Op0, &BB, IP);
2462 switch (Class) {
2463 case cByte:
2464 case cShort:
2465 case cInt:
2466 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2467 unsigned Val = (unsigned)CI->getRawValue(); // Isn't a 64-bit constant
2468 doMultiplyConst(&BB, IP, DestReg, Op0->getType(), Op0Reg, Val);
2470 unsigned Op1Reg = getReg(Op1, &BB, IP);
2471 doMultiply(&BB, IP, DestReg, Op1->getType(), Op0Reg, Op1Reg);
2472 }
2473 return;
2474 case cFP:
2475 emitBinaryFPOperation(MBB, IP, Op0, Op1, 2, DestReg);
2476 return;
2480 case cLong:
2481 // Long value. We have to do things the hard way...
2482 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2483 unsigned CLow = CI->getRawValue();
2484 unsigned CHi = CI->getRawValue() >> 32;
2486 if (CLow == 0) {
2487 // If the low part of the constant is all zeros, things are simple.
2488 BuildMI(BB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
2489 doMultiplyConst(&BB, IP, DestReg+1, Type::UIntTy, Op0Reg, CHi);
2493 // Multiply the two low parts... capturing carry into EDX
2494 unsigned OverflowReg = 0;
2495 if (CLow == 1) {
2496 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0Reg);
2497 } else {
2498 unsigned Op1RegL = makeAnotherReg(Type::UIntTy);
2499 OverflowReg = makeAnotherReg(Type::UIntTy);
2500 BuildMI(BB, IP, X86::MOV32ri, 1, Op1RegL).addImm(CLow);
2501 BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
2502 BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1RegL); // AL*BL
2504 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
2505 BuildMI(BB, IP, X86::MOV32rr, 1,
2506 OverflowReg).addReg(X86::EDX); // AL*BL >> 32
2509 unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
2510 doMultiplyConst(&BB, IP, AHBLReg, Type::UIntTy, Op0Reg+1, CLow);
2512 unsigned AHBLplusOverflowReg;
2513 if (OverflowReg) {
2514 AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
2515 BuildMI(BB, IP, X86::ADD32rr, 2, // AH*BL+(AL*BL >> 32)
2516 AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
2517 } else {
2518 AHBLplusOverflowReg = AHBLReg;
2519 }
2521 if (CHi == 0) {
2522 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(AHBLplusOverflowReg);
2523 } else {
2524 unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
2525 doMultiplyConst(&BB, IP, ALBHReg, Type::UIntTy, Op0Reg, CHi);
2527 BuildMI(BB, IP, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
2528 DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
2533 // General 64x64 multiply
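// Writing A = 2^32*AH + AL and B = 2^32*BH + BL, the low 64 bits of A*B are
// AL*BL + 2^32*(AH*BL + AL*BH); the AH*BH term only influences bits >= 64 and
// is discarded, so one widening MUL plus two IMULs suffice here.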
2535 unsigned Op1Reg = getReg(Op1, &BB, IP);
2536 // Multiply the two low parts... capturing carry into EDX
2537 BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
2538 BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1Reg); // AL*BL
2540 unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
2541 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
2542 BuildMI(BB, IP, X86::MOV32rr, 1,
2543 OverflowReg).addReg(X86::EDX); // AL*BL >> 32
2545 unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
2546 BuildMI(BB, IP, X86::IMUL32rr, 2,
2547 AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
2549 unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
2550 BuildMI(BB, IP, X86::ADD32rr, 2, // AH*BL+(AL*BL >> 32)
2551 AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
2553 unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
2554 BuildMI(BB, IP, X86::IMUL32rr, 2,
2555 ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
2557 BuildMI(BB, IP, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
2558 DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
2562 /// visitDivRem - Handle division and remainder instructions... these
2563 /// instructions both require the same instructions to be generated; they just
2564 /// select the result from a different register. Note that both of these
2565 /// instructions work differently for signed and unsigned operands.
2567 void ISel::visitDivRem(BinaryOperator &I) {
2568 unsigned ResultReg = getReg(I);
2569 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2571 // Fold loads into floating point divides.
2572 if (getClass(Op0->getType()) == cFP) {
2573 if (LoadInst *LI = dyn_cast<LoadInst>(Op1))
2574 if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
2575 const Type *Ty = Op0->getType();
2576 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2577 unsigned Opcode = Ty == Type::FloatTy ? X86::FDIV32m : X86::FDIV64m;
2579 unsigned Op0r = getReg(Op0);
2580 if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
2581 unsigned FI = getFixedSizedAllocaFI(AI);
2582 addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
2584 unsigned BaseReg, Scale, IndexReg, Disp;
2585 getAddressingMode(LI->getOperand(0), BaseReg,
2586 Scale, IndexReg, Disp);
2588 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r),
2589 BaseReg, Scale, IndexReg, Disp);
2594 if (LoadInst *LI = dyn_cast<LoadInst>(Op0))
2595 if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
2596 const Type *Ty = Op0->getType();
2597 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2598 unsigned Opcode = Ty == Type::FloatTy ? X86::FDIVR32m : X86::FDIVR64m;
2600 unsigned Op1r = getReg(Op1);
2601 if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
2602 unsigned FI = getFixedSizedAllocaFI(AI);
2603 addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), FI);
2605 unsigned BaseReg, Scale, IndexReg, Disp;
2606 getAddressingMode(LI->getOperand(0), BaseReg, Scale, IndexReg, Disp);
2607 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r),
2608 BaseReg, Scale, IndexReg, Disp);
2615 MachineBasicBlock::iterator IP = BB->end();
2616 emitDivRemOperation(BB, IP, Op0, Op1,
2617 I.getOpcode() == Instruction::Div, ResultReg);
2620 void ISel::emitDivRemOperation(MachineBasicBlock *BB,
2621 MachineBasicBlock::iterator IP,
2622 Value *Op0, Value *Op1, bool isDiv,
2623 unsigned ResultReg) {
2624 const Type *Ty = Op0->getType();
2625 unsigned Class = getClass(Ty);
2626 switch (Class) {
2627 case cFP: // Floating point divide
2629 emitBinaryFPOperation(BB, IP, Op0, Op1, 3, ResultReg);
2631 } else { // Floating point remainder...
2632 unsigned Op0Reg = getReg(Op0, BB, IP);
2633 unsigned Op1Reg = getReg(Op1, BB, IP);
2634 MachineInstr *TheCall =
2635 BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
2636 std::vector<ValueRecord> Args;
2637 Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy));
2638 Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy));
2639 doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args);
2643 static const char *FnName[] =
2644 { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" };
2645 unsigned Op0Reg = getReg(Op0, BB, IP);
2646 unsigned Op1Reg = getReg(Op1, BB, IP);
2647 unsigned NameIdx = Ty->isUnsigned()*2 + isDiv;
2648 MachineInstr *TheCall =
2649 BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);
2651 std::vector<ValueRecord> Args;
2652 Args.push_back(ValueRecord(Op0Reg, Type::LongTy));
2653 Args.push_back(ValueRecord(Op1Reg, Type::LongTy));
2654 doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args);
2657 case cByte: case cShort: case cInt:
2658 break; // Small integrals, handled below...
2659 default: assert(0 && "Unknown class!");
2662 static const unsigned MovOpcode[]={ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
2663 static const unsigned NEGOpcode[] = { X86::NEG8r, X86::NEG16r, X86::NEG32r };
2664 static const unsigned SAROpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
2665 static const unsigned SHROpcode[]={ X86::SHR8ri, X86::SHR16ri, X86::SHR32ri };
2666 static const unsigned ADDOpcode[]={ X86::ADD8rr, X86::ADD16rr, X86::ADD32rr };
2668 // Special case signed division by power of 2.
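// Signed division must round toward zero, while SAR alone rounds toward
// negative infinity. The sequence below therefore adds 2^k - 1 (built by
// broadcasting the sign bit) to negative dividends before shifting right by k.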
2670 if (ConstantSInt *CI = dyn_cast<ConstantSInt>(Op1)) {
2671 assert(Class != cLong && "This doesn't handle 64-bit divides!");
2672 int V = CI->getValue();
2674 if (V == 1) { // X /s 1 => X
2675 unsigned Op0Reg = getReg(Op0, BB, IP);
2676 BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(Op0Reg);
2680 if (V == -1) { // X /s -1 => -X
2681 unsigned Op0Reg = getReg(Op0, BB, IP);
2682 BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(Op0Reg);
2686 bool isNeg = false;
2687 if (V < 0) { // Not a positive power of 2?
2688 V = -V;
2689 isNeg = true; // Maybe it's a negative power of 2.
2690 }
2691 if (unsigned Log = ExactLog2(V)) {
2692 --Log; // ExactLog2(2^k) returns k+1; normalize to the exponent k.
2693 unsigned Op0Reg = getReg(Op0, BB, IP);
2694 unsigned TmpReg = makeAnotherReg(Op0->getType());
2695 if (Log != 1)
2696 BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg)
2697 .addReg(Op0Reg).addImm(Log-1);
2698 else
2699 BuildMI(*BB, IP, MovOpcode[Class], 1, TmpReg).addReg(Op0Reg);
2700 unsigned TmpReg2 = makeAnotherReg(Op0->getType());
2701 BuildMI(*BB, IP, SHROpcode[Class], 2, TmpReg2)
2702 .addReg(TmpReg).addImm(32-Log);
2703 unsigned TmpReg3 = makeAnotherReg(Op0->getType());
2704 BuildMI(*BB, IP, ADDOpcode[Class], 2, TmpReg3)
2705 .addReg(Op0Reg).addReg(TmpReg2);
2707 unsigned TmpReg4 = isNeg ? makeAnotherReg(Op0->getType()) : ResultReg;
2708 BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg4)
2709 .addReg(TmpReg3).addImm(Log); // Shift the adjusted value, not Op0Reg.
2710 if (isNeg)
2711 BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(TmpReg4);
2716 static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
2717 static const unsigned ClrOpcode[]={ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri };
2718 static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
2720 static const unsigned DivOpcode[][4] = {
2721 { X86::DIV8r , X86::DIV16r , X86::DIV32r , 0 }, // Unsigned division
2722 { X86::IDIV8r, X86::IDIV16r, X86::IDIV32r, 0 }, // Signed division
2725 unsigned Reg = Regs[Class];
2726 unsigned ExtReg = ExtRegs[Class];
2728 // Put the first operand into one of the A registers...
2729 unsigned Op0Reg = getReg(Op0, BB, IP);
2730 unsigned Op1Reg = getReg(Op1, BB, IP);
2731 BuildMI(*BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
2733 if (Ty->isSigned()) {
2734 // Emit a sign extension instruction...
2735 unsigned ShiftResult = makeAnotherReg(Op0->getType());
2736 BuildMI(*BB, IP, SAROpcode[Class], 2,ShiftResult).addReg(Op0Reg).addImm(31);
2737 BuildMI(*BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
2739 // Emit the appropriate divide or remainder instruction...
2740 BuildMI(*BB, IP, DivOpcode[1][Class], 1).addReg(Op1Reg);
2741 } else {
2742 // If unsigned, emit a zeroing instruction... (reg = 0)
2743 BuildMI(*BB, IP, ClrOpcode[Class], 2, ExtReg).addImm(0);
2745 // Emit the appropriate divide or remainder instruction...
2746 BuildMI(*BB, IP, DivOpcode[0][Class], 1).addReg(Op1Reg);
2749 // Figure out which register we want to pick the result out of...
2750 unsigned DestReg = isDiv ? Reg : ExtReg;
2752 // Put the result into the destination register...
2753 BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
2757 /// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here
2758 /// for constant immediate shift values, and for constant immediate
2759 /// shift values equal to 1. Even the general case is sort of special,
2760 /// because the shift amount has to be in CL, not just any old register.
2762 void ISel::visitShiftInst(ShiftInst &I) {
2763 MachineBasicBlock::iterator IP = BB->end ();
2764 emitShiftOperation (BB, IP, I.getOperand (0), I.getOperand (1),
2765 I.getOpcode () == Instruction::Shl, I.getType (),
2769 /// emitShiftOperation - Common code shared between visitShiftInst and
2770 /// constant expression support.
2771 void ISel::emitShiftOperation(MachineBasicBlock *MBB,
2772 MachineBasicBlock::iterator IP,
2773 Value *Op, Value *ShiftAmount, bool isLeftShift,
2774 const Type *ResultTy, unsigned DestReg) {
2775 unsigned SrcReg = getReg (Op, MBB, IP);
2776 bool isSigned = ResultTy->isSigned ();
2777 unsigned Class = getClass (ResultTy);
2779 static const unsigned ConstantOperand[][4] = {
2780 { X86::SHR8ri, X86::SHR16ri, X86::SHR32ri, X86::SHRD32rri8 }, // SHR
2781 { X86::SAR8ri, X86::SAR16ri, X86::SAR32ri, X86::SHRD32rri8 }, // SAR
2782 { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SHL
2783 { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SAL = SHL
2786 static const unsigned NonConstantOperand[][4] = {
2787 { X86::SHR8rCL, X86::SHR16rCL, X86::SHR32rCL }, // SHR
2788 { X86::SAR8rCL, X86::SAR16rCL, X86::SAR32rCL }, // SAR
2789 { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SHL
2790 { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SAL = SHL
2793 // Longs, as usual, are handled specially...
2794 if (Class == cLong) {
2795 // If we have a constant shift, we can generate much more efficient code
2796 // than otherwise...
2798 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
2799 unsigned Amount = CUI->getValue();
2800 if (Amount < 32) {
2801 const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
2802 if (isLeftShift) {
2803 BuildMI(*MBB, IP, Opc[3], 3,
2804 DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addImm(Amount);
2805 BuildMI(*MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addImm(Amount);
2806 } else {
2807 BuildMI(*MBB, IP, Opc[3], 3,
2808 DestReg).addReg(SrcReg ).addReg(SrcReg+1).addImm(Amount);
2809 BuildMI(*MBB, IP, Opc[2],2,DestReg+1).addReg(SrcReg+1).addImm(Amount);
2810 }
2811 } else { // Shifting by 32 bits or more
2812 Amount -= 32;
2813 if (isLeftShift) {
2814 if (Amount != 0) {
2815 BuildMI(*MBB, IP, X86::SHL32ri, 2,
2816 DestReg + 1).addReg(SrcReg).addImm(Amount);
2817 } else {
2818 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg);
2819 }
2820 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
2821 } else { // Right shift by 32 bits or more.
2822 if (Amount != 0) {
2823 BuildMI(*MBB, IP, isSigned ? X86::SAR32ri : X86::SHR32ri, 2,
2824 DestReg).addReg(SrcReg+1).addImm(Amount);
2825 } else {
2826 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg+1);
2827 }
2828 if (isSigned) // A signed shift by 32+ bits must sign-fill the high half.
2829 BuildMI(*MBB, IP, X86::SAR32ri, 2, DestReg+1).addReg(SrcReg+1).addImm(31);
2830 else
2831 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
2832 unsigned TmpReg = makeAnotherReg(Type::IntTy);
2834 if (!isLeftShift && isSigned) {
2835 // If this is a SHR of a Long, then we need to do funny sign extension
2836 // stuff. TmpReg gets the value to use as the high-part if we are
2837 // shifting more than 32 bits.
2838 BuildMI(*MBB, IP, X86::SAR32ri, 2, TmpReg).addReg(SrcReg).addImm(31);
2840 // Other shifts use a fixed zero value if the shift is by 32 bits or
2841 // more.
2842 BuildMI(*MBB, IP, X86::MOV32ri, 1, TmpReg).addImm(0);
2845 // Initialize CL with the shift amount...
2846 unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
2847 BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
2849 unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
2850 unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
2852 // TmpReg2 = shld inHi, inLo
2853 BuildMI(*MBB, IP, X86::SHLD32rrCL,2,TmpReg2).addReg(SrcReg+1)
2855 // TmpReg3 = shl inLo, CL
2856 BuildMI(*MBB, IP, X86::SHL32rCL, 1, TmpReg3).addReg(SrcReg);
2858 // Set the flags to indicate whether the shift was by more than 32 bits.
2859 BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
2861 // DestHi = (CL >= 32) ? TmpReg3 : TmpReg2;
2862 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
2863 DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
2864 // DestLo = (CL >= 32) ? TmpReg : TmpReg3;
2865 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
2866 DestReg).addReg(TmpReg3).addReg(TmpReg);
2868 // TmpReg2 = shrd inLo, inHi
2869 BuildMI(*MBB, IP, X86::SHRD32rrCL,2,TmpReg2).addReg(SrcReg)
2871 // TmpReg3 = s[ah]r inHi, CL
2872 BuildMI(*MBB, IP, isSigned ? X86::SAR32rCL : X86::SHR32rCL, 1, TmpReg3)
2875 // Set the flags to indicate whether the shift was by more than 32 bits.
2876 BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
2878 // DestLo = (CL >= 32) ? TmpReg3 : TmpReg2;
2879 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
2880 DestReg).addReg(TmpReg2).addReg(TmpReg3);
2882 // DestHi = (CL >= 32) ? TmpReg : TmpReg3;
2883 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
2884 DestReg+1).addReg(TmpReg3).addReg(TmpReg);
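// Summary of the variable-amount case: SHLD/SHRD produce the correct result
// for CL < 32 by carrying bits across the word boundary, while TEST CL,32
// plus the CMOVNEs select the CL >= 32 result, where the other half (already
// shifted by CL mod 32 by the hardware) moves across and the vacated half
// receives the fill value prepared in TmpReg.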
2890 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
2891 // The shift amount is constant, guaranteed to be a ubyte. Get its value.
2892 assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");
2894 const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
2895 BuildMI(*MBB, IP, Opc[Class], 2,
2896 DestReg).addReg(SrcReg).addImm(CUI->getValue());
2897 } else { // The shift amount is non-constant.
2898 unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP);
2899 BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
2901 const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
2902 BuildMI(*MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
2907 /// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
2908 /// instruction. The load and store instructions are the only place where we
2909 /// need to worry about the memory layout of the target machine.
2911 void ISel::visitLoadInst(LoadInst &I) {
2912 // Check to see if this load instruction is going to be folded into a binary
2913 // instruction, like add. If so, we don't want to emit it. Wouldn't a real
2914 // pattern matching instruction selector be nice?
2915 unsigned Class = getClassB(I.getType());
2916 if (I.hasOneUse()) {
2917 Instruction *User = cast<Instruction>(I.use_back());
2918 switch (User->getOpcode()) {
2919 case Instruction::Cast:
2920 // If this is a cast from a signed-integer type to a floating point type,
2921 // fold the cast here.
2922 if (getClassB(User->getType()) == cFP &&
2923 (I.getType() == Type::ShortTy || I.getType() == Type::IntTy ||
2924 I.getType() == Type::LongTy)) {
2925 unsigned DestReg = getReg(User);
2926 static const unsigned Opcode[] = {
2927 0/*BYTE*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m
2930 if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(0))) {
2931 unsigned FI = getFixedSizedAllocaFI(AI);
2932 addFrameReference(BuildMI(BB, Opcode[Class], 4, DestReg), FI);
2934 unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
2935 getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
2936 addFullAddress(BuildMI(BB, Opcode[Class], 4, DestReg),
2937 BaseReg, Scale, IndexReg, Disp);
2945 case Instruction::Add:
2946 case Instruction::Sub:
2947 case Instruction::And:
2948 case Instruction::Or:
2949 case Instruction::Xor:
2950 if (Class == cLong) User = 0; // Long folding is not implemented.
2951 break;
2952 case Instruction::Mul:
2953 case Instruction::Div:
2954 if (Class != cFP) User = 0;
2955 break; // Folding only implemented for floating point.
2956 default: User = 0; break;
2959 if (User) {
2960 // Okay, we found a user. If the load is the first operand and there is
2961 // no second operand load, reverse the operand ordering. Note that this
2962 // can fail for a subtract (ie, no change will be made).
2963 if (!isa<LoadInst>(User->getOperand(1)))
2964 cast<BinaryOperator>(User)->swapOperands();
2966 // Okay, now that everything is set up, if this load is used by the second
2967 // operand, and if there are no instructions that invalidate the load
2968 // before the binary operator, eliminate the load.
2969 if (User->getOperand(1) == &I &&
2970 isSafeToFoldLoadIntoInstruction(I, *User))
2971 return; // Eliminate the load!
2973 // If this is a floating point sub or div, we won't be able to swap the
2974 // operands, but we will still be able to eliminate the load.
2975 if (Class == cFP && User->getOperand(0) == &I &&
2976 !isa<LoadInst>(User->getOperand(1)) &&
2977 (User->getOpcode() == Instruction::Sub ||
2978 User->getOpcode() == Instruction::Div) &&
2979 isSafeToFoldLoadIntoInstruction(I, *User))
2980 return; // Eliminate the load!
2984 static const unsigned Opcodes[] = {
2985 X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD32m, X86::MOV32rm
2987 unsigned Opcode = Opcodes[Class];
2988 if (I.getType() == Type::DoubleTy) Opcode = X86::FLD64m;
2990 unsigned DestReg = getReg(I);
2992 if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(0))) {
2993 unsigned FI = getFixedSizedAllocaFI(AI);
2994 if (Class == cLong) {
2995 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, DestReg), FI);
2996 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), FI, 4);
2998 addFrameReference(BuildMI(BB, Opcode, 4, DestReg), FI);
3001 unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
3002 getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
3004 if (Class == cLong) {
3005 addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg),
3006 BaseReg, Scale, IndexReg, Disp);
3007 addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1),
3008 BaseReg, Scale, IndexReg, Disp+4);
3010 addFullAddress(BuildMI(BB, Opcode, 4, DestReg),
3011 BaseReg, Scale, IndexReg, Disp);
3016 /// visitStoreInst - Implement LLVM store instructions in terms of the x86
3017 /// 'mov' instruction.
3019 void ISel::visitStoreInst(StoreInst &I) {
3020 unsigned BaseReg = ~0U, Scale = ~0U, IndexReg = ~0U, Disp = ~0U;
3021 unsigned AllocaFrameIdx = ~0U;
3023 if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(1)))
3024 AllocaFrameIdx = getFixedSizedAllocaFI(AI);
3025 else
3026 getAddressingMode(I.getOperand(1), BaseReg, Scale, IndexReg, Disp);
3028 const Type *ValTy = I.getOperand(0)->getType();
3029 unsigned Class = getClassB(ValTy);
3031 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
3032 uint64_t Val = CI->getRawValue();
3033 if (Class == cLong) {
3034 if (AllocaFrameIdx != ~0U) {
3035 addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
3036 AllocaFrameIdx).addImm(Val & ~0U);
3037 addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
3038 AllocaFrameIdx, 4).addImm(Val>>32);
3040 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3041 BaseReg, Scale, IndexReg, Disp).addImm(Val & ~0U);
3042 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3043 BaseReg, Scale, IndexReg, Disp+4).addImm(Val>>32);
3046 static const unsigned Opcodes[] = {
3047 X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
3049 unsigned Opcode = Opcodes[Class];
3050 if (AllocaFrameIdx != ~0U)
3051 addFrameReference(BuildMI(BB, Opcode, 5), AllocaFrameIdx).addImm(Val);
3053 addFullAddress(BuildMI(BB, Opcode, 5),
3054 BaseReg, Scale, IndexReg, Disp).addImm(Val);
3056 } else if (isa<ConstantPointerNull>(I.getOperand(0))) {
3057 if (AllocaFrameIdx != ~0U)
3058 addFrameReference(BuildMI(BB, X86::MOV32mi, 5), AllocaFrameIdx).addImm(0);
3060 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3061 BaseReg, Scale, IndexReg, Disp).addImm(0);
3063 } else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
3064 if (AllocaFrameIdx != ~0U)
3065 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
3066 AllocaFrameIdx).addImm(CB->getValue());
3068 addFullAddress(BuildMI(BB, X86::MOV8mi, 5),
3069 BaseReg, Scale, IndexReg, Disp).addImm(CB->getValue());
3070 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) {
3071 // Store constant FP values with integer instructions to avoid having to
3072 // load the constants from the constant pool then do a store.
3073 if (CFP->getType() == Type::FloatTy) {
3074 union {
3075 unsigned I;
3076 float F;
3077 } V;
3078 V.F = CFP->getValue();
3079 if (AllocaFrameIdx != ~0U)
3080 addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
3081 AllocaFrameIdx).addImm(V.I);
3083 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3084 BaseReg, Scale, IndexReg, Disp).addImm(V.I);
3085 } else {
3086 union {
3087 uint64_t I;
3088 double F;
3089 } V;
3090 V.F = CFP->getValue();
3091 if (AllocaFrameIdx != ~0U) {
3092 addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
3093 AllocaFrameIdx).addImm((unsigned)V.I);
3094 addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
3095 AllocaFrameIdx, 4).addImm(unsigned(V.I >> 32));
3097 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3098 BaseReg, Scale, IndexReg, Disp).addImm((unsigned)V.I);
3099 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3100 BaseReg, Scale, IndexReg, Disp+4).addImm(
3101 unsigned(V.I >> 32));
3105 } else if (Class == cLong) {
3106 unsigned ValReg = getReg(I.getOperand(0));
3107 if (AllocaFrameIdx != ~0U) {
3108 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
3109 AllocaFrameIdx).addReg(ValReg);
3110 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
3111 AllocaFrameIdx, 4).addReg(ValReg+1);
3113 addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
3114 BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
3115 addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
3116 BaseReg, Scale, IndexReg, Disp+4).addReg(ValReg+1);
3119 unsigned ValReg = getReg(I.getOperand(0));
3120 static const unsigned Opcodes[] = {
3121 X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
3123 unsigned Opcode = Opcodes[Class];
3124 if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;
3126 if (AllocaFrameIdx != ~0U)
3127 addFrameReference(BuildMI(BB, Opcode, 5), AllocaFrameIdx).addReg(ValReg);
3129 addFullAddress(BuildMI(BB, Opcode, 1+4),
3130 BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
3135 /// visitCastInst - Here we have various kinds of copying with or without sign
3136 /// extension going on.
3138 void ISel::visitCastInst(CastInst &CI) {
3139 Value *Op = CI.getOperand(0);
3141 unsigned SrcClass = getClassB(Op->getType());
3142 unsigned DestClass = getClassB(CI.getType());
3143 // Noop casts are not emitted: getReg will return the source operand as the
3144 // register to use for any uses of the noop cast.
3145 if (DestClass == SrcClass)
3146 return;
3148 // If this is a cast from a 32-bit integer to a Long type, and the only uses
3150 // of the cast are GEP instructions, then the cast does not need to be
3150 // generated explicitly, it will be folded into the GEP.
3151 if (DestClass == cLong && SrcClass == cInt) {
3152 bool AllUsesAreGEPs = true;
3153 for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I)
3154 if (!isa<GetElementPtrInst>(*I)) {
3155 AllUsesAreGEPs = false;
3159 // No need to codegen this cast if all users are getelementptr instrs...
3160 if (AllUsesAreGEPs) return;
3163 // If this cast converts a load from a short, int, or long integer to a FP
3164 // value, we will have folded this cast away.
3165 if (DestClass == cFP && isa<LoadInst>(Op) && Op->hasOneUse() &&
3166 (Op->getType() == Type::ShortTy || Op->getType() == Type::IntTy ||
3167 Op->getType() == Type::LongTy))
3171 unsigned DestReg = getReg(CI);
3172 MachineBasicBlock::iterator MI = BB->end();
3173 emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
3176 /// emitCastOperation - Common code shared between visitCastInst and constant
3177 /// expression cast support.
3179 void ISel::emitCastOperation(MachineBasicBlock *BB,
3180 MachineBasicBlock::iterator IP,
3181 Value *Src, const Type *DestTy,
3183 const Type *SrcTy = Src->getType();
3184 unsigned SrcClass = getClassB(SrcTy);
3185 unsigned DestClass = getClassB(DestTy);
3186 unsigned SrcReg = getReg(Src, BB, IP);
3188 // Implement casts to bool by using compare on the operand followed by set if
3189 // not zero on the result.
3190 if (DestTy == Type::BoolTy) {
3191 switch (SrcClass) {
3192 case cByte:
3193 BuildMI(*BB, IP, X86::TEST8rr, 2).addReg(SrcReg).addReg(SrcReg);
3194 break;
3195 case cShort:
3196 BuildMI(*BB, IP, X86::TEST16rr, 2).addReg(SrcReg).addReg(SrcReg);
3197 break;
3198 case cInt:
3199 BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg).addReg(SrcReg);
3200 break;
3201 case cLong: {
3202 unsigned TmpReg = makeAnotherReg(Type::IntTy);
3203 BuildMI(*BB, IP, X86::OR32rr, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
3204 break;
3205 }
3206 case cFP:
3207 BuildMI(*BB, IP, X86::FTST, 1).addReg(SrcReg);
3208 BuildMI(*BB, IP, X86::FNSTSW8r, 0);
3209 BuildMI(*BB, IP, X86::SAHF, 1);
3213 // If the zero flag is not set, then the value is true; set the byte to
3214 // true.
3215 BuildMI(*BB, IP, X86::SETNEr, 1, DestReg);
3219 static const unsigned RegRegMove[] = {
3220 X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
3223 // Implement casts between values of the same type class (as determined by
3224 // getClass) by using a register-to-register move.
3225 if (SrcClass == DestClass) {
3226 if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
3227 BuildMI(*BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
3228 } else if (SrcClass == cFP) {
3229 if (SrcTy == Type::FloatTy) { // float -> double
3230 assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
3231 BuildMI(*BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
3232 } else { // double -> float
3233 assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
3234 "Unknown cFP member!");
3235 // Truncate from double to float by storing to memory as a 32-bit float,
3236 // then reading it back.
3237 unsigned FltAlign = TM.getTargetData().getFloatAlignment();
3238 int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
3239 addFrameReference(BuildMI(*BB, IP, X86::FST32m, 5), FrameIdx).addReg(SrcReg);
3240 addFrameReference(BuildMI(*BB, IP, X86::FLD32m, 5, DestReg), FrameIdx);
3242 } else if (SrcClass == cLong) {
3243 BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
3244 BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg+1);
3246 assert(0 && "Cannot handle this type of cast instruction!");
3252 // Handle cast of SMALLER int to LARGER int using a move with sign extension
3253 // or zero extension, depending on whether the source type was signed.
3254 if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) &&
3255 SrcClass < DestClass) {
3256 bool isLong = DestClass == cLong;
3257 if (isLong) DestClass = cInt;
3259 static const unsigned Opc[][4] = {
3260 { X86::MOVSX16rr8, X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOV32rr }, // s
3261 { X86::MOVZX16rr8, X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOV32rr } // u
3264 bool isUnsigned = SrcTy->isUnsigned() || SrcTy == Type::BoolTy;
3265 BuildMI(*BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
3266 DestReg).addReg(SrcReg);
3268 if (isLong) { // Handle upper 32 bits as appropriate...
3269 if (isUnsigned) // Zero out top bits...
3270 BuildMI(*BB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
3271 else // Sign extend bottom half...
3272 BuildMI(*BB, IP, X86::SAR32ri, 2, DestReg+1).addReg(DestReg).addImm(31);
3277 // Special case long -> int ...
3278 if (SrcClass == cLong && DestClass == cInt) {
3279 BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
3283 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by a
3284 // move out of AX or AL.
3285 if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
3286 && SrcClass > DestClass) {
3287 static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
3288 BuildMI(*BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
3289 BuildMI(*BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
  // Handle casts from integer to floating point now...
  if (DestClass == cFP) {
    // Promote the integer to a type supported by FLD.  We do this because there
    // are no unsigned FLD instructions, so we must promote an unsigned value to
    // a larger signed value, then use FLD on the larger value.
    //
    const Type *PromoteType = 0;
    unsigned PromoteOpcode = 0;
    unsigned RealDestReg = DestReg;
    switch (SrcTy->getPrimitiveID()) {
    case Type::BoolTyID:
    case Type::SByteTyID:
      // We don't have the facilities for directly loading byte sized data from
      // memory (even signed).  Promote it to 16 bits.
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVSX16rr8;
      break;
    case Type::UByteTyID:
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVZX16rr8;
      break;
    case Type::UShortTyID:
      PromoteType = Type::IntTy;
      PromoteOpcode = X86::MOVZX32rr16;
      break;
    case Type::UIntTyID: {
      // Make a 64 bit temporary... and zero out the top of it...
      unsigned TmpReg = makeAnotherReg(Type::LongTy);
      BuildMI(*BB, IP, X86::MOV32rr, 1, TmpReg).addReg(SrcReg);
      BuildMI(*BB, IP, X86::MOV32ri, 1, TmpReg+1).addImm(0);
      SrcTy = Type::LongTy;
      SrcClass = cLong;
      SrcReg = TmpReg;
      break;
    }
    case Type::ULongTyID:
      // Don't fild into the real destination; unsigned long sources get a
      // fixup add into RealDestReg below.
      DestReg = makeAnotherReg(Type::DoubleTy);
      break;
    default:  // No promotion needed...
      break;
    }

    if (PromoteType) {
      unsigned TmpReg = makeAnotherReg(PromoteType);
      BuildMI(*BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
      SrcTy = PromoteType;
      SrcClass = getClass(PromoteType);
      SrcReg = TmpReg;
    }
    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());

    if (SrcClass == cLong) {
      addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
                        FrameIdx).addReg(SrcReg);
      addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
                        FrameIdx, 4).addReg(SrcReg+1);
    } else {
      static const unsigned Op1[] = { X86::MOV8mr, X86::MOV16mr, X86::MOV32mr };
      addFrameReference(BuildMI(*BB, IP, Op1[SrcClass], 5),
                        FrameIdx).addReg(SrcReg);
    }

    static const unsigned Op2[] =
      { 0/*byte*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m };
    addFrameReference(BuildMI(*BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
    // We need special handling for unsigned 64-bit integer sources.  If the
    // input number has the "sign bit" set, then we loaded it incorrectly as a
    // negative 64-bit number.  In this case, add an offset value.
    if (SrcTy == Type::ULongTy) {
      // Emit a test instruction to see if the dynamic input value was signed.
      BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg+1).addReg(SrcReg+1);

      // If the sign bit is set, get a pointer to an offset, otherwise get a
      // pointer to a zero.
      MachineConstantPool *CP = F->getConstantPool();
      unsigned Zero = makeAnotherReg(Type::IntTy);
      Constant *Null = Constant::getNullValue(Type::UIntTy);
      addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Zero),
                               CP->getConstantPoolIndex(Null));
      unsigned Offset = makeAnotherReg(Type::IntTy);
      Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
      addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Offset),
                               CP->getConstantPoolIndex(OffsetCst));
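      // Note: 0x5f800000 is the IEEE single-precision bit pattern for 2^64.
      // FILD64m interpreted our unsigned source as a signed value, so when the
      // top bit was set we really loaded (value - 2^64); conditionally adding
      // 2^64 back (selected below with CMOVS) restores the intended value.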
      unsigned Addr = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);

      // Load the constant for an add.  FIXME: this could make an 'fadd' that
      // reads directly from memory, but we don't support these yet.
      unsigned ConstReg = makeAnotherReg(Type::DoubleTy);
      addDirectMem(BuildMI(*BB, IP, X86::FLD32m, 4, ConstReg), Addr);

      BuildMI(*BB, IP, X86::FpADD, 2, RealDestReg)
        .addReg(ConstReg).addReg(DestReg);
    }

    return;
  }
  // Handle casts from floating point to integer now...
  if (SrcClass == cFP) {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    //
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(*BB, IP, X86::FNSTCW16m, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
    addFrameReference(BuildMI(*BB, IP, X86::MOV8rm, 4, HighPartOfCW),
                      CWFrameIdx, 1);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, IP, X86::MOV8mi, 5),
                      CWFrameIdx, 1).addImm(12);

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);

    // Restore the memory image of the control word to its original value.
    addFrameReference(BuildMI(*BB, IP, X86::MOV8mr, 5),
                      CWFrameIdx, 1).addReg(HighPartOfCW);
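    // Why 12: the rounding-control field occupies bits 11:10 of the FP control
    // word, i.e. bits 3:2 of its high byte.  Writing 12 (binary 1100) sets
    // RC=11, "round toward zero", which is the semantics C requires for
    // float->int casts.  (It also clears the precision-control bits.)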
    // We don't have the facilities for directly storing byte sized data to
    // memory.  Promote it to 16 bits.  We also must promote unsigned values to
    // larger classes because we only have signed FP stores.
    unsigned StoreClass = DestClass;
    const Type *StoreTy = DestTy;
    if (StoreClass == cByte || DestTy->isUnsigned())
      switch (StoreClass) {
      case cByte:  StoreTy = Type::ShortTy; StoreClass = cShort; break;
      case cShort: StoreTy = Type::IntTy;   StoreClass = cInt;   break;
      case cInt:   StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      // The following treatment of cLong may not be perfectly right,
      // but it survives chains of casts of the form
      // double->ulong->double.
      case cLong:  StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      default: assert(0 && "Unknown store class!");
      }
    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());

    static const unsigned Op1[] =
      { 0, X86::FIST16m, X86::FIST32m, 0, X86::FISTP64m };
    addFrameReference(BuildMI(*BB, IP, Op1[StoreClass], 5),
                      FrameIdx).addReg(SrcReg);

    if (DestClass == cLong) {
      addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg), FrameIdx);
      addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg+1),
                        FrameIdx, 4);
    } else {
      static const unsigned Op2[] = { X86::MOV8rm, X86::MOV16rm, X86::MOV32rm };
      addFrameReference(BuildMI(*BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
    }

    // Reload the original control word now...
    addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
    return;
  }
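  // End-to-end (illustrative): "cast double %D to int" therefore becomes
  // roughly: save the FP control word, force round-toward-zero, FIST the
  // value into a stack slot, MOV the slot into the destination register, and
  // finally restore the original control word.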
  // Anything we haven't handled already, we can't (yet) handle at all.
  assert(0 && "Unhandled cast instruction!");
}
/// visitVANextInst - Implement the va_next instruction...
///
void ISel::visitVANextInst(VANextInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  unsigned Size;
  switch (I.getArgType()->getPrimitiveID()) {
  default:
    assert(0 && "Error: bad type for va_next instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    Size = 4;
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
  case Type::DoubleTyID:
    Size = 8;
    break;
  }

  // Increment the VAList pointer...
  BuildMI(BB, X86::ADD32ri, 2, DestReg).addReg(VAList).addImm(Size);
}
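// On 32-bit x86 the variadic arguments live in 4-byte stack slots, so va_next
// just bumps the va_list pointer by the promoted size of the argument: 4
// bytes for ints and pointers, 8 for long and double (two slots each).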
void ISel::visitVAArgInst(VAArgInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  switch (I.getType()->getPrimitiveID()) {
  default:
    assert(0 && "Error: bad type for va_arg instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
    addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
    addRegOffset(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), VAList, 4);
    break;
  case Type::DoubleTyID:
    addDirectMem(BuildMI(BB, X86::FLD64m, 4, DestReg), VAList);
    break;
  }
}
/// visitGetElementPtrInst - instruction-select GEP instructions
///
void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
  // If this GEP instruction will be folded into all of its users, we don't
  // need to explicitly calculate it!
  unsigned A, B, C, D;
  if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), A,B,C,D)) {
    // Check all of the users of the instruction to see if they are loads and
    // stores.  A GEP used as the *stored value* of a store (operand 0) cannot
    // be folded; only one used as the address can.
    bool AllWillFold = true;
    for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI)
      if (cast<Instruction>(*UI)->getOpcode() != Instruction::Load)
        if (cast<Instruction>(*UI)->getOpcode() != Instruction::Store ||
            cast<Instruction>(*UI)->getOperand(0) == &I) {
          AllWillFold = false;
          break;
        }

    // If the instruction is foldable, and will be folded into all users, don't
    // emit it!
    if (AllWillFold) return;
  }

  unsigned outputReg = getReg(I);
  emitGEPOperation(BB, BB->end(), I.getOperand(0),
                   I.op_begin()+1, I.op_end(), outputReg);
}
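// For example (illustrative): if "%P = getelementptr int* %A, uint 2" is used
// only by "%V = load int* %P", no code is emitted for the GEP at all; the
// load is selected as a single move with the 8-byte offset folded into its
// addressing mode, e.g. "movl 8(%A_reg), %V_reg".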
/// getGEPIndex - Inspect the getelementptr operands specified with GEPOps and
/// GEPTypes (the derived types being stepped through at each level).  On
/// return from this function, if some indexes of the instruction are
/// representable as an X86 lea instruction, the machine operands are put into
/// the BaseReg/Scale/IndexReg/Disp outputs and the consumed indexes are
/// popped from the GEPOps/GEPTypes lists.  If this produces an addressing
/// mode that only partially consumes the input, the BaseReg input of the
/// addressing mode must be left free so the remainder can be chained into it.
///
/// Note that there is one fewer entry in GEPTypes than there is in GEPOps.
///
void ISel::getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                       std::vector<Value*> &GEPOps,
                       std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
                       unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
  const TargetData &TD = TM.getTargetData();

  // Clear out the state we are working with...
  BaseReg = 0;    // No base register
  Scale = 1;      // Unit scale
  IndexReg = 0;   // No index register
  Disp = 0;       // No displacement
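  // An x86 memory operand has the form Base + Index*Scale + Disp (e.g. the
  // address in "leal 8(%eax,%ecx,4), %edx").  The loop below greedily folds
  // trailing GEP indexes into those four components until one is already
  // taken.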
  // While there are GEP indexes that can be folded into the current address,
  // keep processing them.
  while (!GEPTypes.empty()) {
    if (const StructType *StTy = dyn_cast<StructType>(GEPTypes.back())) {
      // It's a struct access.  CUI is the index into the structure,
      // which names the field.  This index must have unsigned type.
      const ConstantUInt *CUI = cast<ConstantUInt>(GEPOps.back());

      // Use the TargetData structure to pick out what the layout of the
      // structure is in memory.  Since the structure index must be constant, we
      // can get its value and use it to find the right byte offset from the
      // StructLayout class's list of structure member offsets.
      Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();

      // idx is the index into the array.  Unlike with structure
      // indices, we may not know its actual value at code-generation
      // time.

      // If idx is a constant, fold it into the offset.
      unsigned TypeSize = TD.getTypeSize(SqTy->getElementType());
      if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
        Disp += TypeSize*CSI->getValue();
      } else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) {
        Disp += TypeSize*CUI->getValue();
      } else {
        // If the index reg is already taken, we can't handle this index.
        if (IndexReg) return;

        // If this is a size that we can handle, then add the index as a
        // scaled value.
        switch (TypeSize) {
        case 1: case 2: case 4: case 8:
          // These are all acceptable scales on X86.
          Scale = TypeSize;
          break;
        default:
          // Otherwise, we can't handle this scale.
          return;
        }

        // Many GEP instructions use a [cast (int/uint) to LongTy] as their
        // index; strip the cast and use the narrower operand directly.
        if (CastInst *CI = dyn_cast<CastInst>(idx))
          if (CI->getOperand(0)->getType() == Type::IntTy ||
              CI->getOperand(0)->getType() == Type::UIntTy)
            idx = CI->getOperand(0);

        IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
      }

      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    }
  }
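  // Worked example (illustrative): for "getelementptr {int, [10 x sbyte]}* %P,
  // long 0, ubyte 1, int %i", the struct index contributes Disp = 4, the
  // array index becomes IndexReg = <reg for %i> with Scale = 1, and %P is
  // left over to become the base register below.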
  // GEPTypes is empty, which means we have a single operand left.  Set it as
  // the base register.
  //
  assert(BaseReg == 0);

#if 0   // FIXME: TODO!
  if (AllocaInst *AI = dyn_castFixedAlloca(V)) {
    // FIXME: When we can add FrameIndex values as the first operand, we can
    // make GEP's of allocas MUCH more efficient!
    unsigned FI = getFixedSizedAllocaFI(AI);
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // FIXME: When addressing modes are more powerful/correct, we could load
    // global addresses directly as 32-bit immediates.
  }
#endif

  BaseReg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
  GEPOps.pop_back();        // Consume the last GEP operand
}
/// isGEPFoldable - Return true if the specified GEP can be completely
/// folded into the addressing mode of a load/store or lea instruction.
bool ISel::isGEPFoldable(MachineBasicBlock *MBB,
                         Value *Src, User::op_iterator IdxBegin,
                         User::op_iterator IdxEnd, unsigned &BaseReg,
                         unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
  if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
    Src = CPR->getValue();

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*>
    GEPTypes(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
             gep_type_end(Src->getType(), IdxBegin, IdxEnd));

  MachineBasicBlock::iterator IP;
  if (MBB) IP = MBB->end();
  getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);

  // We can fold it away iff the getGEPIndex call eliminated all operands.
  return GEPOps.empty();
}
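// Note that callers may pass a null MBB as a dry run: getGEPIndex then plugs
// the dummy value 1 in for any register it would have needed, so foldability
// can be tested without emitting any code.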
void ISel::emitGEPOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator IP,
                            Value *Src, User::op_iterator IdxBegin,
                            User::op_iterator IdxEnd, unsigned TargetReg) {
  const TargetData &TD = TM.getTargetData();
  if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
    Src = CPR->getValue();

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*> GEPTypes;
  GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
                  gep_type_end(Src->getType(), IdxBegin, IdxEnd));
  // Keep emitting instructions until we consume the entire GEP instruction.
  while (!GEPOps.empty()) {
    unsigned OldSize = GEPOps.size();
    unsigned BaseReg, Scale, IndexReg, Disp;
    getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);

    if (GEPOps.size() != OldSize) {
      // getGEPIndex consumed some of the input.  Build an LEA instruction here.
      unsigned NextTarget = 0;
      if (!GEPOps.empty()) {
        assert(BaseReg == 0 &&
           "getGEPIndex should have left the base register open for chaining!");
        NextTarget = BaseReg = makeAnotherReg(Type::UIntTy);
      }

      if (IndexReg == 0 && Disp == 0)
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
      else
        addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg),
                       BaseReg, Scale, IndexReg, Disp);
      --IP;            // Insert the next instruction before this one.
      TargetReg = NextTarget;
    } else if (GEPTypes.empty()) {
      // The getGEPIndex operation didn't want to build an LEA.  Check to see if
      // all operands are consumed but the base pointer.  If so, just load it
      // into the register.
      if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps[0])) {
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(GV);
      } else {
        unsigned BaseReg = getReg(GEPOps[0], MBB, IP);
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
      }
      break;                // we are now done
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();

      // Many GEP instructions use a [cast (int/uint) to LongTy] as their
      // operand on X86.  Handle this case directly now...
      if (CastInst *CI = dyn_cast<CastInst>(idx))
        if (CI->getOperand(0)->getType() == Type::IntTy ||
            CI->getOperand(0)->getType() == Type::UIntTy)
          idx = CI->getOperand(0);

      // We want to add BaseReg to (idxReg * sizeof ElementType).  First, we
      // must find the size of the pointed-to type (Not coincidentally, the next
      // type is the type of the elements in the array).
      const Type *ElTy = SqTy->getElementType();
      unsigned elementSize = TD.getTypeSize(ElTy);

      // If idxReg is a constant, we don't need to perform the multiply!
      if (ConstantInt *CSI = dyn_cast<ConstantInt>(idx)) {
        if (!CSI->isNullValue()) {
          unsigned Offset = elementSize*CSI->getRawValue();
          unsigned Reg = makeAnotherReg(Type::UIntTy);
          BuildMI(*MBB, IP, X86::ADD32ri, 2, TargetReg)
                .addReg(Reg).addImm(Offset);
          --IP;            // Insert the next instruction before this one.
          TargetReg = Reg; // Codegen the rest of the GEP into this
        }
      } else if (elementSize == 1) {
        // If the element size is 1, we don't have to multiply, just add.
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2,TargetReg).addReg(Reg).addReg(idxReg);
        --IP;            // Insert the next instruction before this one.
        TargetReg = Reg; // Codegen the rest of the GEP into this
      } else {
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned OffsetReg = makeAnotherReg(Type::UIntTy);

        // Make sure we can back the iterator up to point to the first
        // instruction emitted.
        MachineBasicBlock::iterator BeforeIt = IP;
        if (IP == MBB->begin())
          BeforeIt = MBB->end();
        else
          --BeforeIt;
        doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);

        // Emit an ADD to add OffsetReg to the basePtr.
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2, TargetReg)
                  .addReg(Reg).addReg(OffsetReg);

        // Step to the first instruction of the multiply.
        if (BeforeIt == MBB->end())
          IP = MBB->begin();
        else
          IP = ++BeforeIt;

        TargetReg = Reg; // Codegen the rest of the GEP into this
      }
    }
  }
}
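// Note (illustrative): doMultiplyConst, defined earlier in this file,
// strength-reduces the multiply where it can, so a power-of-two element size
// such as 4 typically becomes a left shift by 2 rather than a real multiply.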
/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
void ISel::visitAllocaInst(AllocaInst &I) {
  // If this is a fixed size alloca in the entry block for the function, we
  // statically stack allocate the space, so we don't need to do anything here.
  //
  if (dyn_castFixedAlloca(&I)) return;

  // Find the data size of the alloca inst's getAllocatedType.
  const Type *Ty = I.getAllocatedType();
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);

  // Create a register to hold the temporary result of multiplying the type
  // size constant by the variable amount.
  unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
  unsigned SrcReg1 = getReg(I.getArraySize());

  // TotalSizeReg = mul <numelements>, <TypeSize>
  MachineBasicBlock::iterator MBBI = BB->end();
  doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);

  // AddedSize = add <TotalSizeReg>, 15
  unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ADD32ri, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15);

  // AlignedSize = and <AddedSize>, ~15
  unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::AND32ri, 2, AlignedSize).addReg(AddedSizeReg).addImm(~15);
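  // (size + 15) & ~15 rounds the byte count up to the next multiple of 16;
  // e.g. a request for 20 bytes becomes (20+15) & ~15 = 32, keeping ESP
  // 16-byte aligned after the subtraction below.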
  // Subtract size from stack pointer, thereby allocating some space.
  BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);

  // Put a pointer to the space into the result register, by copying
  // the stack pointer.
  BuildMI(BB, X86::MOV32rr, 1, getReg(I)).addReg(X86::ESP);

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  F->getFrameInfo()->CreateVariableSizedObject();
}
/// visitMallocInst - Malloc instructions are code generated into direct calls
/// to the library malloc.
///
void ISel::visitMallocInst(MallocInst &I) {
  unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
  unsigned Arg;

  if (ConstantUInt *C = dyn_cast<ConstantUInt>(I.getOperand(0))) {
    Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
  } else {
    Arg = makeAnotherReg(Type::UIntTy);
    unsigned Op0Reg = getReg(I.getOperand(0));
    MachineBasicBlock::iterator MBBI = BB->end();
    doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
  }

  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(Arg, Type::UIntTy));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("malloc", true);
  doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args);
}
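// For example (illustrative): "%M = malloc int, uint 10" folds the size
// computation to the constant 40 and emits a call to malloc with 40 as the
// argument, while a variable element count is multiplied at run time via
// doMultiplyConst before the call.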
/// visitFreeInst - Free instructions are code gen'd to call the free libc
/// function.
///
void ISel::visitFreeInst(FreeInst &I) {
  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(I.getOperand(0)));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("free", true);
  doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}
/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peephole fashion.  The
/// generated code sucks but the implementation is nice and simple.
///
FunctionPass *llvm::createX86SimpleInstructionSelector(TargetMachine &TM) {
  return new ISel(TM);
}