//===-- InstSelectSimple.cpp - A simple instruction selector for x86 -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple peephole instruction selector for the x86 target
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicLowering.h"
#include "llvm/Pass.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/CFG.h"
#include "Support/Statistic.h"
using namespace llvm;

namespace {
  Statistic<>
  NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");

  struct ISel : public FunctionPass, InstVisitor<ISel> {
    TargetMachine &TM;
    MachineFunction *F;                 // The function we are compiling into
    MachineBasicBlock *BB;              // The current MBB we are compiling
    int VarArgsFrameIndex;              // FrameIndex for start of varargs area
    int ReturnAddressIndex;             // FrameIndex for the return address

    std::map<Value*, unsigned> RegMap;  // Mapping between Val's and SSA Regs

    // MBBMap - Mapping between LLVM BB -> Machine BB
    std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;

    ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}

    /// runOnFunction - Top level implementation of instruction selection for
    /// the entire function.
    ///
    bool runOnFunction(Function &Fn) {
      // First pass over the function, lower any unknown intrinsic functions
      // with the IntrinsicLowering class.
      LowerUnknownIntrinsicFunctionCalls(Fn);

      F = &MachineFunction::construct(&Fn, TM);

      // Create all of the machine basic blocks for the function...
      for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
        F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));

      BB = &F->front();

      // Set up a frame object for the return address.  This is used by the
      // llvm.returnaddress & llvm.frameaddress intrinsics.
      ReturnAddressIndex = F->getFrameInfo()->CreateFixedObject(4, -4);

      // Copy incoming arguments off of the stack...
      LoadArgumentsToVirtualRegs(Fn);

      // Instruction select everything except PHI nodes
      visit(Fn);

      // Select the PHI nodes
      SelectPHINodes();

      // Insert the FP_REG_KILL instructions into blocks that need them.
      InsertFPRegKills();

      RegMap.clear();
      MBBMap.clear();
      F = 0;
      // We always build a machine code representation for the function
      return true;
    }

    virtual const char *getPassName() const {
      return "X86 Simple Instruction Selection";
    }

    /// visitBasicBlock - This method is called when we are visiting a new basic
    /// block.  This simply switches the current code emission point over to
    /// the MachineBasicBlock created for this block in runOnFunction.
    /// Subsequent visit* methods for instructions will be invoked for all
    /// instructions in the basic block.
    ///
    void visitBasicBlock(BasicBlock &LLVM_BB) {
      BB = MBBMap[&LLVM_BB];
    }

    /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
    /// function, lowering any calls to unknown intrinsic functions into the
    /// equivalent LLVM code.
    ///
    void LowerUnknownIntrinsicFunctionCalls(Function &F);

    /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
    /// from the stack into virtual registers.
    ///
    void LoadArgumentsToVirtualRegs(Function &F);

    /// SelectPHINodes - Insert machine code to generate phis.  This is tricky
    /// because we have to generate our sources into the source basic blocks,
    /// not the current one.
    ///
    void SelectPHINodes();

    /// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks
    /// that need them.  This only occurs due to the floating point stackifier
    /// not being aggressive enough to handle arbitrary global stackification.
    ///
    void InsertFPRegKills();

    // Visitation methods for various instructions.  These methods simply emit
    // fixed X86 code for each instruction.
    //

    // Control flow operators
    void visitReturnInst(ReturnInst &RI);
    void visitBranchInst(BranchInst &BI);

    struct ValueRecord {
      Value *Val;
      unsigned Reg;
      const Type *Ty;
      ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
      ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
    };
    void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                const std::vector<ValueRecord> &Args);
    void visitCallInst(CallInst &I);
    void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I);

    // Arithmetic operators
    void visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass);
    void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
    void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
    void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                    unsigned DestReg, const Type *DestTy,
                    unsigned Op0Reg, unsigned Op1Reg);
    void doMultiplyConst(MachineBasicBlock *MBB,
                         MachineBasicBlock::iterator MBBI,
                         unsigned DestReg, const Type *DestTy,
                         unsigned Op0Reg, unsigned Op1Val);
    void visitMul(BinaryOperator &B);

    void visitDiv(BinaryOperator &B) { visitDivRem(B); }
    void visitRem(BinaryOperator &B) { visitDivRem(B); }
    void visitDivRem(BinaryOperator &B);

    // Bitwise operators
    void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); }
    void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
    void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }

    // Comparison operators...
    void visitSetCondInst(SetCondInst &I);
    unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                            MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator MBBI);
    void visitSelectInst(SelectInst &SI);

    // Memory Instructions
    void visitLoadInst(LoadInst &I);
    void visitStoreInst(StoreInst &I);
    void visitGetElementPtrInst(GetElementPtrInst &I);
    void visitAllocaInst(AllocaInst &I);
    void visitMallocInst(MallocInst &I);
    void visitFreeInst(FreeInst &I);

    // Other operators
    void visitShiftInst(ShiftInst &I);
    void visitPHINode(PHINode &I) {}      // PHI nodes handled by second pass
    void visitCastInst(CastInst &I);
    void visitVANextInst(VANextInst &I);
    void visitVAArgInst(VAArgInst &I);

    void visitInstruction(Instruction &I) {
      std::cerr << "Cannot instruction select: " << I;
      abort();
    }

    /// promote32 - Make a value 32-bits wide, and put it somewhere.
    ///
    void promote32(unsigned targetReg, const ValueRecord &VR);

    /// getAddressingMode - Get the addressing mode to use to address the
    /// specified value.  The returned value should be used with addFullAddress.
    void getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
                           unsigned &IndexReg, unsigned &Disp);

    /// getGEPIndex - This is used to fold GEP instructions into X86 addressing
    /// modes.
    void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                     std::vector<Value*> &GEPOps,
                     std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
                     unsigned &Scale, unsigned &IndexReg, unsigned &Disp);

    /// isGEPFoldable - Return true if the specified GEP can be completely
    /// folded into the addressing mode of a load/store or lea instruction.
    bool isGEPFoldable(MachineBasicBlock *MBB,
                       Value *Src, User::op_iterator IdxBegin,
                       User::op_iterator IdxEnd, unsigned &BaseReg,
                       unsigned &Scale, unsigned &IndexReg, unsigned &Disp);

    /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
    /// constant expression GEP support.
    ///
    void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                          Value *Src, User::op_iterator IdxBegin,
                          User::op_iterator IdxEnd, unsigned TargetReg);

    /// emitCastOperation - Common code shared between visitCastInst and
    /// constant expression cast support.
    ///
    void emitCastOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                           Value *Src, const Type *DestTy, unsigned TargetReg);

    /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
    /// and constant expression support.
    ///
    void emitSimpleBinaryOperation(MachineBasicBlock *BB,
                                   MachineBasicBlock::iterator IP,
                                   Value *Op0, Value *Op1,
                                   unsigned OperatorClass, unsigned TargetReg);

    void emitDivRemOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator IP,
                             unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
                             const Type *Ty, unsigned TargetReg);

    /// emitSetCCOperation - Common code shared between visitSetCondInst and
    /// constant expression support.
    ///
    void emitSetCCOperation(MachineBasicBlock *BB,
                            MachineBasicBlock::iterator IP,
                            Value *Op0, Value *Op1, unsigned Opcode,
                            unsigned TargetReg);

    /// emitShiftOperation - Common code shared between visitShiftInst and
    /// constant expression support.
    ///
    void emitShiftOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator IP,
                            Value *Op, Value *ShiftAmount, bool isLeftShift,
                            const Type *ResultTy, unsigned DestReg);

    /// emitSelectOperation - Common code shared between visitSelectInst and the
    /// constant expression support.
    void emitSelectOperation(MachineBasicBlock *MBB,
                             MachineBasicBlock::iterator IP,
                             Value *Cond, Value *TrueVal, Value *FalseVal,
                             unsigned DestReg);

    /// copyConstantToRegister - Output the instructions required to put the
    /// specified constant into the specified register.
    ///
    void copyConstantToRegister(MachineBasicBlock *MBB,
                                MachineBasicBlock::iterator MBBI,
                                Constant *C, unsigned Reg);

    /// makeAnotherReg - This method returns the next register number we haven't
    /// yet used.
    ///
    /// Long values are handled somewhat specially.  They are always allocated
    /// as pairs of 32 bit integer values.  The register number returned is the
    /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits
    /// of the long value.
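    ///
    /// For example, copyConstantToRegister below materializes a 64-bit
    /// constant by writing the low word into the returned register R and the
    /// high word into R+1.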
    ///
    unsigned makeAnotherReg(const Type *Ty) {
      assert(dynamic_cast<const X86RegisterInfo*>(TM.getRegisterInfo()) &&
             "Current target doesn't have X86 reg info??");
      const X86RegisterInfo *MRI =
        static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
      if (Ty == Type::LongTy || Ty == Type::ULongTy) {
        const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
        // Create the lower part
        F->getSSARegMap()->createVirtualRegister(RC);
        // Create the upper part.
        return F->getSSARegMap()->createVirtualRegister(RC)-1;
      }

      // Add the mapping of regnumber => reg class to MachineFunction
      const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
      return F->getSSARegMap()->createVirtualRegister(RC);
    }

    /// getReg - This method turns an LLVM value into a register number.  This
    /// is guaranteed to produce the same register number for a particular value
    /// every time it is queried.
    ///
    unsigned getReg(Value &V) { return getReg(&V); }  // Allow references
    unsigned getReg(Value *V) {
      // Just append to the end of the current bb.
      MachineBasicBlock::iterator It = BB->end();
      return getReg(V, BB, It);
    }
    unsigned getReg(Value *V, MachineBasicBlock *MBB,
                    MachineBasicBlock::iterator IPt) {
      unsigned &Reg = RegMap[V];
      if (Reg == 0) {
        Reg = makeAnotherReg(V->getType());
        RegMap[V] = Reg;
      }

      // If this operand is a constant, emit the code to copy the constant into
      // the register here...
      //
      if (Constant *C = dyn_cast<Constant>(V)) {
        copyConstantToRegister(MBB, IPt, C, Reg);
        RegMap.erase(V);  // Assign a new name to this constant if ref'd again
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
        // Move the address of the global into the register
        BuildMI(*MBB, IPt, X86::MOV32ri, 1, Reg).addGlobalAddress(GV);
        RegMap.erase(V);  // Assign a new name to this address if ref'd again
      }

      return Reg;
    }
  };
}

/// TypeClass - Used by the X86 backend to group LLVM types by their basic X86
/// representation.
///
enum TypeClass {
  cByte, cShort, cInt, cFP, cLong
};

/// getClass - Turn a primitive type into a "class" number which is based on the
/// size of the type, and whether or not it is floating point.
///
static inline TypeClass getClass(const Type *Ty) {
  switch (Ty->getPrimitiveID()) {
  case Type::SByteTyID:
  case Type::UByteTyID:   return cByte;      // Byte operands are class #0
  case Type::ShortTyID:
  case Type::UShortTyID:  return cShort;     // Short operands are class #1
  case Type::IntTyID:
  case Type::UIntTyID:
  case Type::PointerTyID: return cInt;       // Ints and pointers are class #2
  case Type::FloatTyID:
  case Type::DoubleTyID:  return cFP;        // Floating Point is #3
  case Type::LongTyID:
  case Type::ULongTyID:   return cLong;      // Longs are class #4
  default:
    assert(0 && "Invalid type to getClass!");
    return cByte;  // not reached
  }
}

// getClassB - Just like getClass, but treat boolean values as bytes.
static inline TypeClass getClassB(const Type *Ty) {
  if (Ty == Type::BoolTy) return cByte;
  return getClass(Ty);
}

/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator IP,
                                  Constant *C, unsigned R) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    unsigned Class = 0;
    switch (CE->getOpcode()) {
    case Instruction::GetElementPtr:
      emitGEPOperation(MBB, IP, CE->getOperand(0),
                       CE->op_begin()+1, CE->op_end(), R);
      return;
    case Instruction::Cast:
      emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R);
      return;
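
    // The fall-through chain below accumulates the operator class expected by
    // emitSimpleBinaryOperation: 0 = Add, 1 = Sub, 2 = And, 3 = Or, 4 = Xor.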
    case Instruction::Xor: ++Class;   // FALL THROUGH
    case Instruction::Or:  ++Class;   // FALL THROUGH
    case Instruction::And: ++Class;   // FALL THROUGH
    case Instruction::Sub: ++Class;   // FALL THROUGH
    case Instruction::Add:
      emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                                Class, R);
      return;

    case Instruction::Mul: {
      unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
      unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
      doMultiply(MBB, IP, R, CE->getType(), Op0Reg, Op1Reg);
      return;
    }
    case Instruction::Div:
    case Instruction::Rem: {
      unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
      unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
      emitDivRemOperation(MBB, IP, Op0Reg, Op1Reg,
                          CE->getOpcode() == Instruction::Div,
                          CE->getType(), R);
      return;
    }

    case Instruction::SetNE:
    case Instruction::SetEQ:
    case Instruction::SetLT:
    case Instruction::SetGT:
    case Instruction::SetLE:
    case Instruction::SetGE:
      emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode(), R);
      return;

    case Instruction::Shl:
    case Instruction::Shr:
      emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode() == Instruction::Shl, CE->getType(), R);
      return;

    case Instruction::Select:
      emitSelectOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                          CE->getOperand(2), R);
      return;

    default:
      std::cerr << "Offending expr: " << *C << "\n";
      assert(0 && "Constant expression not yet handled!\n");
    }
  }

  if (C->getType()->isIntegral()) {
    unsigned Class = getClassB(C->getType());

    if (Class == cLong) {
      // Copy the value into the register pair.
      uint64_t Val = cast<ConstantInt>(C)->getRawValue();
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(Val & 0xFFFFFFFF);
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R+1).addImm(Val >> 32);
      return;
    }

    assert(Class <= cInt && "Type not handled yet!");

    static const unsigned IntegralOpcodeTab[] = {
      X86::MOV8ri, X86::MOV16ri, X86::MOV32ri
    };

    if (C->getType() == Type::BoolTy) {
      BuildMI(*MBB, IP, X86::MOV8ri, 1, R).addImm(C == ConstantBool::True);
    } else {
      ConstantInt *CI = cast<ConstantInt>(C);
      BuildMI(*MBB, IP, IntegralOpcodeTab[Class],1,R).addImm(CI->getRawValue());
    }
  } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->isExactlyValue(+0.0))
      BuildMI(*MBB, IP, X86::FLD0, 0, R);
    else if (CFP->isExactlyValue(+1.0))
      BuildMI(*MBB, IP, X86::FLD1, 0, R);
    else {
      // Otherwise we need to spill the constant to memory...
      MachineConstantPool *CP = F->getConstantPool();
      unsigned CPI = CP->getConstantPoolIndex(CFP);
      const Type *Ty = CFP->getType();

      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
             "Unknown FP type!");
      unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLD32m : X86::FLD64m;
      addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 4, R), CPI);
    }
  } else if (isa<ConstantPointerNull>(C)) {
    // Copy zero (null pointer) to the register.
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(0);
  } else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addGlobalAddress(CPR->getValue());
  } else {
    std::cerr << "Offending constant: " << *C << "\n";
    assert(0 && "Type not handled yet!");
  }
}

/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
/// the stack into virtual registers.
///
void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
  // Emit instructions to load the arguments...  On entry to a function on the
  // X86, the stack frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
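  //
  // For example, for f(int a, long b), a is loaded from [ESP+4] and the two
  // 32-bit halves of b from [ESP+8] and [ESP+12].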
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  MachineFrameInfo *MFI = F->getFrameInfo();

  for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
    unsigned Reg = getReg(*I);

    int FI;          // Frame object index
    switch (getClassB(I->getType())) {
    case cByte:
      FI = MFI->CreateFixedObject(1, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Reg), FI);
      break;
    case cShort:
      FI = MFI->CreateFixedObject(2, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Reg), FI);
      break;
    case cInt:
      FI = MFI->CreateFixedObject(4, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
      break;
    case cLong:
      FI = MFI->CreateFixedObject(8, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
      addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg+1), FI, 4);
      ArgOffset += 4;   // longs require 4 additional bytes
      break;
    case cFP:
      unsigned Opcode;
      if (I->getType() == Type::FloatTy) {
        Opcode = X86::FLD32m;
        FI = MFI->CreateFixedObject(4, ArgOffset);
      } else {
        Opcode = X86::FLD64m;
        FI = MFI->CreateFixedObject(8, ArgOffset);
        ArgOffset += 4;   // doubles require 4 additional bytes
      }
      addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
      break;
    default:
      assert(0 && "Unhandled argument type!");
    }
    ArgOffset += 4;  // Each argument takes at least 4 bytes on the stack...
  }

  // If the function takes a variable number of arguments, add a frame offset
  // for the start of the first vararg value... this is used to expand
  // llvm.va_start.
  if (Fn.getFunctionType()->isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
}

/// SelectPHINodes - Insert machine code to generate phis.  This is tricky
/// because we have to generate our sources into the source basic blocks, not
/// the current one.
///
void ISel::SelectPHINodes() {
  const TargetInstrInfo &TII = TM.getInstrInfo();
  const Function &LF = *F->getFunction();  // The LLVM function...
  for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
    const BasicBlock *BB = I;
    MachineBasicBlock &MBB = *MBBMap[I];

    // Loop over all of the PHI nodes in the LLVM basic block...
    MachineBasicBlock::iterator PHIInsertPoint = MBB.begin();
    for (BasicBlock::const_iterator I = BB->begin();
         PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) {

      // Create a new machine instr PHI node, and insert it.
      unsigned PHIReg = getReg(*PN);
      MachineInstr *PhiMI = BuildMI(MBB, PHIInsertPoint,
                                    X86::PHI, PN->getNumOperands(), PHIReg);

      MachineInstr *LongPhiMI = 0;
      if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy)
        LongPhiMI = BuildMI(MBB, PHIInsertPoint,
                            X86::PHI, PN->getNumOperands(), PHIReg+1);

      // PHIValues - Map of blocks to incoming virtual registers.  We use this
      // so that we only initialize one incoming value for a particular block,
      // even if the block has multiple entries in the PHI node.
      //
      std::map<MachineBasicBlock*, unsigned> PHIValues;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)];
        unsigned ValReg;
        std::map<MachineBasicBlock*, unsigned>::iterator EntryIt =
          PHIValues.lower_bound(PredMBB);

        if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) {
          // We already inserted an initialization of the register for this
          // predecessor.  Recycle it.
          ValReg = EntryIt->second;
        } else {
          // Get the incoming value into a virtual register.
          //
          Value *Val = PN->getIncomingValue(i);

          // If this is a constant or GlobalValue, we may have to insert code
          // into the basic block to compute it into a virtual register.
          if (isa<Constant>(Val) || isa<GlobalValue>(Val)) {
            if (isa<ConstantExpr>(Val)) {
              // Because we don't want to clobber any values which might be in
              // physical registers with the computation of this constant (which
              // might be arbitrarily complex if it is a constant expression),
              // just insert the computation at the top of the basic block.
              MachineBasicBlock::iterator PI = PredMBB->begin();

              // Skip over any PHI nodes though!
              while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
                ++PI;

              ValReg = getReg(Val, PredMBB, PI);
            } else {
              // Simple constants get emitted at the end of the basic block,
              // before any terminator instructions.  We "know" that the code to
              // move a constant into a register will never clobber any flags.
              ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator());
            }
          } else {
            ValReg = getReg(Val);
          }

          // Remember that we inserted a value for this PHI for this predecessor
          PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg));
        }

        PhiMI->addRegOperand(ValReg);
        PhiMI->addMachineBasicBlockOperand(PredMBB);
        if (LongPhiMI) {
          LongPhiMI->addRegOperand(ValReg+1);
          LongPhiMI->addMachineBasicBlockOperand(PredMBB);
        }
      }

      // Now that we emitted all of the incoming values for the PHI node, make
      // sure to reposition the InsertPoint after the PHI that we just added.
      // This is needed because we might have inserted a constant into this
      // block, right after the PHI's which is before the old insert point!
      PHIInsertPoint = LongPhiMI ? LongPhiMI : PhiMI;
      ++PHIInsertPoint;
    }
  }
}

/// RequiresFPRegKill - The floating point stackifier pass cannot insert
/// compensation code on critical edges.  As such, it requires that we kill all
/// FP registers on the exit from any blocks that either ARE critical edges, or
/// branch to a block that has incoming critical edges.
///
/// Note that this kill instruction will eventually be eliminated when
/// restrictions in the stackifier are relaxed.
///
static bool RequiresFPRegKill(const BasicBlock *BB) {
  for (succ_const_iterator SI = succ_begin(BB), E = succ_end(BB); SI!=E; ++SI) {
    const BasicBlock *Succ = *SI;
    pred_const_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
    ++PI;  // Blocks have at least one predecessor
    if (PI != PE) {  // If it has exactly one predecessor, it isn't a crit edge
      // If this block has more than one predecessor, check all of the
      // predecessors to see if they have multiple successors.  If so, then the
      // block we are analyzing needs an FPRegKill.
      for (PI = pred_begin(Succ); PI != PE; ++PI) {
        const BasicBlock *Pred = *PI;
        succ_const_iterator SI2 = succ_begin(Pred);
        ++SI2;  // There must be at least one successor of this block.
        if (SI2 != succ_end(Pred))
          return true;   // Yes, we must insert the kill on this edge.
      }
    }
  }
  // If we got this far, there is no need to insert the kill instruction.
  return false;
}

// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks that
// need them.  This only occurs due to the floating point stackifier not being
// aggressive enough to handle arbitrary global stackification.
//
// Currently we insert an FP_REG_KILL instruction into each block that uses or
// defines a floating point virtual register.
//
// When the global register allocators (like linear scan) finally update live
// variable analysis, we can keep floating point values in registers across
// portions of the CFG that do not involve critical edges.  This will be a big
// win, but we are waiting on the global allocators before we can do this.
//
// With a bit of work, the floating point stackifier pass can be enhanced to
// break critical edges as needed (to make a place to put compensation code),
// but this will require some infrastructure improvements as well.
//
void ISel::InsertFPRegKills() {
  SSARegMap &RegMap = *F->getSSARegMap();

  for (MachineFunction::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I!=E; ++I)
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = I->getOperand(i);
        if (MO.isRegister() && MO.getReg()) {
          unsigned Reg = MO.getReg();
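          // A register class size of 10 bytes identifies the 80-bit X86
          // floating point stack registers, the only ten-byte register class
          // in this backend.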
          if (MRegisterInfo::isVirtualRegister(Reg))
            if (RegMap.getRegClass(Reg)->getSize() == 10)
              goto UsesFPReg;
        }
      }
    // If we haven't found an FP register use or def in this basic block, check
    // to see if any of our successors has an FP PHI node, which will cause a
    // copy to be inserted into this block.
    for (succ_const_iterator SI = succ_begin(BB->getBasicBlock()),
         E = succ_end(BB->getBasicBlock()); SI != E; ++SI) {
      MachineBasicBlock *SBB = MBBMap[*SI];
      for (MachineBasicBlock::iterator I = SBB->begin();
           I != SBB->end() && I->getOpcode() == X86::PHI; ++I) {
        if (RegMap.getRegClass(I->getOperand(0).getReg())->getSize() == 10)
          goto UsesFPReg;
      }
    }
    continue;
  UsesFPReg:
    // Okay, this block uses an FP register.  If the block has successors (i.e.,
    // it's not an unwind/return), insert the FP_REG_KILL instruction.
    if (BB->getBasicBlock()->getTerminator()->getNumSuccessors() &&
        RequiresFPRegKill(BB->getBasicBlock())) {
      BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
      ++NumFPKill;
    }
  }
}

// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold
// it into the conditional branch or select instruction which is the only user
// of the cc instruction.  This is the case if the conditional branch is the
// only user of the setcc, and if the setcc is in the same basic block as the
// conditional branch.  We also don't handle long arguments below, so we reject
// them here as well.
//
static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) {
  if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
    if (SCI->hasOneUse()) {
      Instruction *User = cast<Instruction>(SCI->use_back());
      if ((isa<BranchInst>(User) || isa<SelectInst>(User)) &&
          SCI->getParent() == User->getParent() &&
          (getClassB(SCI->getOperand(0)->getType()) != cLong ||
           SCI->getOpcode() == Instruction::SetEQ ||
           SCI->getOpcode() == Instruction::SetNE))
        return SCI;
    }
  return 0;
}

// Return a fixed numbering for setcc instructions which does not depend on the
// order of the opcodes.
//
static unsigned getSetCCNumber(unsigned Opcode) {
  switch (Opcode) {
  default: assert(0 && "Unknown setcc instruction!");
  case Instruction::SetEQ: return 0;
  case Instruction::SetNE: return 1;
  case Instruction::SetLT: return 2;
  case Instruction::SetGE: return 3;
  case Instruction::SetGT: return 4;
  case Instruction::SetLE: return 5;
  }
}

// LLVM  -> X86 signed  X86 unsigned
// -----    ----------  ------------
// seteq -> sete        sete
// setne -> setne       setne
// setlt -> setl        setb
// setge -> setge       setae
// setgt -> setg        seta
// setle -> setle       setbe
// ----
// sets                 // Used by comparison with 0 optimization
// setns
static const unsigned SetCCOpcodeTab[2][8] = {
  { X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAEr, X86::SETAr, X86::SETBEr,
    0, 0 },
  { X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGEr, X86::SETGr, X86::SETLEr,
    X86::SETSr, X86::SETNSr },
};
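
// The table is indexed as SetCCOpcodeTab[isSigned][OpNum]: row 0 holds the
// unsigned setcc opcodes, row 1 the signed ones, and slots 6/7 hold the
// SETS/SETNS opcodes produced by the compare-with-zero optimization below.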

// EmitComparison - This function emits a comparison of the two operands,
// returning the extended setcc code to use.
unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                              MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP) {
  // The arguments are already supposed to be of the same type.
  const Type *CompTy = Op0->getType();
  unsigned Class = getClassB(CompTy);
  unsigned Op0r = getReg(Op0, MBB, IP);

  // Special case handling of: cmp R, i
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    if (Class == cByte || Class == cShort || Class == cInt) {
      unsigned Op1v = CI->getRawValue();

      // Mask off any upper bits of the constant, if there are any...
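      // ((8 << Class) is the operand width in bits: 8 for cByte, 16 for
      // cShort, 32 for cInt.)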
      Op1v &= (1ULL << (8 << Class)) - 1;

      // If this is a comparison against zero, emit more efficient code.  We
      // can't handle unsigned comparisons against zero unless they are == or
      // !=.  These should have been strength reduced already anyway.
      if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
        static const unsigned TESTTab[] = {
          X86::TEST8rr, X86::TEST16rr, X86::TEST32rr
        };
        BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);

        if (OpNum == 2) return 6;   // Map jl -> js
        if (OpNum == 3) return 7;   // Map jg -> jns
        return OpNum;
      }

      static const unsigned CMPTab[] = {
        X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
      };

      BuildMI(*MBB, IP, CMPTab[Class], 2).addReg(Op0r).addImm(Op1v);
      return OpNum;
    } else {
      assert(Class == cLong && "Unknown integer class!");
      unsigned LowCst = CI->getRawValue();
      unsigned HiCst = CI->getRawValue() >> 32;
      if (OpNum < 2) {    // seteq, setne
        unsigned LoTmp = Op0r;
        if (LowCst != 0) {
          LoTmp = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::XOR32ri, 2, LoTmp).addReg(Op0r).addImm(LowCst);
        }
        unsigned HiTmp = Op0r+1;
        if (HiCst != 0) {
          HiTmp = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::XOR32ri, 2,HiTmp).addReg(Op0r+1).addImm(HiCst);
        }
        unsigned FinalTmp = makeAnotherReg(Type::IntTy);
        BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
        return OpNum;
      } else {
        // Emit a sequence of code which compares the high and low parts once
        // each, then uses a conditional move to handle the overflow case.  For
        // example, a setlt for long would generate code like this:
        //
        //  AL = lo(op1) < lo(op2)   // Signedness depends on operands
        //  BL = hi(op1) < hi(op2)   // Always unsigned comparison
        //  dest = hi(op1) == hi(op2) ? AL : BL;
        //
        // FIXME: This would be much better if we had hierarchical register
        // classes!  Until then, hardcode registers so that we can deal with
        // their aliases (because we don't have conditional byte moves).
        //
        BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r).addImm(LowCst);
        BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
        BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r+1).addImm(HiCst);
        BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0,X86::BL);
        BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
        BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
        BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
                                                     .addReg(X86::AX);
        // NOTE: visitSetCondInst knows that the value is dumped into the BL
        // register at this point for long values...
        return OpNum;
      }
    }
  }

  // Special case handling of comparison against +/- 0.0
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op1))
    if (CFP->isExactlyValue(+0.0) || CFP->isExactlyValue(-0.0)) {
      BuildMI(*MBB, IP, X86::FTST, 1).addReg(Op0r);
      BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
      BuildMI(*MBB, IP, X86::SAHF, 1);
      return OpNum;
    }

  unsigned Op1r = getReg(Op1, MBB, IP);
  switch (Class) {
  default: assert(0 && "Unknown type class!");
    // Emit: cmp <var1>, <var2> (do the comparison).  We can
    // compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
    // 32-bit.
  case cByte:
    BuildMI(*MBB, IP, X86::CMP8rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cShort:
    BuildMI(*MBB, IP, X86::CMP16rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cInt:
    BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cFP:
    BuildMI(*MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r);
    BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
    BuildMI(*MBB, IP, X86::SAHF, 1);
    break;

  case cLong:
    if (OpNum < 2) {    // seteq, setne
      unsigned LoTmp = makeAnotherReg(Type::IntTy);
      unsigned HiTmp = makeAnotherReg(Type::IntTy);
      unsigned FinalTmp = makeAnotherReg(Type::IntTy);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, LoTmp).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
      break;   // Allow the sete or setne to be generated from flags set by OR
    } else {
      // Emit a sequence of code which compares the high and low parts once
      // each, then uses a conditional move to handle the overflow case.  For
      // example, a setlt for long would generate code like this:
      //
      //  AL = lo(op1) < lo(op2)   // Signedness depends on operands
      //  BL = hi(op1) < hi(op2)   // Always unsigned comparison
      //  dest = hi(op1) == hi(op2) ? AL : BL;
      //
      // FIXME: This would be much better if we had hierarchical register
      // classes!  Until then, hardcode registers so that we can deal with their
      // aliases (because we don't have conditional byte moves).
      //
      BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
      BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
      BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
                                                   .addReg(X86::AX);
      // NOTE: visitSetCondInst knows that the value is dumped into the BL
      // register at this point for long values...
      break;
    }
  }
  return OpNum;
}

/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
/// register, then move it to wherever the result should be.
///
void ISel::visitSetCondInst(SetCondInst &I) {
  if (canFoldSetCCIntoBranchOrSelect(&I))
    return;   // Fold this into a branch or select.

  unsigned DestReg = getReg(I);
  MachineBasicBlock::iterator MII = BB->end();
  emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),
                     DestReg);
}

/// emitSetCCOperation - Common code shared between visitSetCondInst and
/// constant expression support.
///
void ISel::emitSetCCOperation(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP,
                              Value *Op0, Value *Op1, unsigned Opcode,
                              unsigned TargetReg) {
  unsigned OpNum = getSetCCNumber(Opcode);
  OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP);

  const Type *CompTy = Op0->getType();
  unsigned CompClass = getClassB(CompTy);
  bool isSigned = CompTy->isSigned() && CompClass != cFP;

  if (CompClass != cLong || OpNum < 2) {
    // Handle normal comparisons with a setcc instruction...
    BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
  } else {
    // Handle long comparisons by copying the value which is already in BL into
    // the register we want...
    BuildMI(*MBB, IP, X86::MOV8rr, 1, TargetReg).addReg(X86::BL);
  }
}

void ISel::visitSelectInst(SelectInst &SI) {
  unsigned DestReg = getReg(SI);
  MachineBasicBlock::iterator MII = BB->end();
  emitSelectOperation(BB, MII, SI.getCondition(), SI.getTrueValue(),
                      SI.getFalseValue(), DestReg);
}

/// emitSelectOperation - Common code shared between visitSelectInst and the
/// constant expression support.
void ISel::emitSelectOperation(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator IP,
                               Value *Cond, Value *TrueVal, Value *FalseVal,
                               unsigned DestReg) {
  unsigned SelectClass = getClassB(TrueVal->getType());

  // We don't support 8-bit conditional moves.  If we have incoming constants,
  // transform them into 16-bit constants to avoid having a run-time conversion.
  if (SelectClass == cByte) {
    if (Constant *T = dyn_cast<Constant>(TrueVal))
      TrueVal = ConstantExpr::getCast(T, Type::ShortTy);
    if (Constant *F = dyn_cast<Constant>(FalseVal))
      FalseVal = ConstantExpr::getCast(F, Type::ShortTy);
  }

  unsigned Opcode;
  if (SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(Cond)) {
    // We successfully folded the setcc into the select instruction.

    unsigned OpNum = getSetCCNumber(SCI->getOpcode());
    OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), MBB,
                           IP);

    const Type *CompTy = SCI->getOperand(0)->getType();
    bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

    // LLVM  -> X86 signed  X86 unsigned
    // -----    ----------  ------------
    // seteq -> cmovNE      cmovNE
    // setne -> cmovE       cmovE
    // setlt -> cmovGE      cmovAE
    // setge -> cmovL       cmovB
    // setgt -> cmovLE      cmovBE
    // setle -> cmovG       cmovA
    // ----
    // cmovNS               // Used by comparison with 0 optimization
    // cmovS

    switch (SelectClass) {
    default: assert(0 && "Unknown value class!");
    case cFP: {
      // Annoyingly, we don't have a full set of floating point conditional
      // moves.  :(
      static const unsigned OpcodeTab[2][8] = {
        { X86::FCMOVNE, X86::FCMOVE, X86::FCMOVAE, X86::FCMOVB,
          X86::FCMOVBE, X86::FCMOVA, 0, 0 },
        { X86::FCMOVNE, X86::FCMOVE, 0, 0, 0, 0, 0, 0 },
      };
      Opcode = OpcodeTab[isSigned][OpNum];

      // If opcode == 0, we hit a case that we don't support.  Output a setcc
      // and compare the result against zero.
      if (Opcode == 0) {
        unsigned CompClass = getClassB(CompTy);
        unsigned CondReg;
        if (CompClass != cLong || OpNum < 2) {
          CondReg = makeAnotherReg(Type::BoolTy);
          // Handle normal comparisons with a setcc instruction...
          BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, CondReg);
        } else {
          // Long comparisons end up in the BL register.
          CondReg = X86::BL;
        }

        BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
        Opcode = X86::FCMOVE;
      }
      break;
    }
    case cByte:
    case cShort: {
      static const unsigned OpcodeTab[2][8] = {
        { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVAE16rr, X86::CMOVB16rr,
          X86::CMOVBE16rr, X86::CMOVA16rr, 0, 0 },
        { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVGE16rr, X86::CMOVL16rr,
          X86::CMOVLE16rr, X86::CMOVG16rr, X86::CMOVNS16rr, X86::CMOVS16rr },
      };
      Opcode = OpcodeTab[isSigned][OpNum];
      break;
    }
    case cInt:
    case cLong: {
      static const unsigned OpcodeTab[2][8] = {
        { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVAE32rr, X86::CMOVB32rr,
          X86::CMOVBE32rr, X86::CMOVA32rr, 0, 0 },
        { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVGE32rr, X86::CMOVL32rr,
          X86::CMOVLE32rr, X86::CMOVG32rr, X86::CMOVNS32rr, X86::CMOVS32rr },
      };
      Opcode = OpcodeTab[isSigned][OpNum];
      break;
    }
    }
  } else {
    // Get the value being branched on, and use it to set the condition codes.
    unsigned CondReg = getReg(Cond, MBB, IP);
    BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
    switch (SelectClass) {
    default: assert(0 && "Unknown value class!");
    case cFP:    Opcode = X86::FCMOVE; break;
    case cByte:
    case cShort: Opcode = X86::CMOVE16rr; break;
    case cInt:
    case cLong:  Opcode = X86::CMOVE32rr; break;
    }
  }

  unsigned TrueReg = getReg(TrueVal, MBB, IP);
  unsigned FalseReg = getReg(FalseVal, MBB, IP);
  unsigned RealDestReg = DestReg;

  // Annoyingly enough, X86 doesn't HAVE 8-bit conditional moves.  Because of
  // this, we have to promote the incoming values to 16 bits, perform a 16-bit
  // cmove, then truncate the result.
  if (SelectClass == cByte) {
    DestReg = makeAnotherReg(Type::ShortTy);
    if (getClassB(TrueVal->getType()) == cByte) {
      // Promote the true value, by storing it into AL, and reading from AX.
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::AL).addReg(TrueReg);
      BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::AH).addImm(0);
      TrueReg = makeAnotherReg(Type::ShortTy);
      BuildMI(*MBB, IP, X86::MOV16rr, 1, TrueReg).addReg(X86::AX);
    }
    if (getClassB(FalseVal->getType()) == cByte) {
      // Promote the false value, by storing it into CL, and reading from CX.
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(FalseReg);
      BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::CH).addImm(0);
      FalseReg = makeAnotherReg(Type::ShortTy);
      BuildMI(*MBB, IP, X86::MOV16rr, 1, FalseReg).addReg(X86::CX);
    }
  }

  BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(TrueReg).addReg(FalseReg);

  switch (SelectClass) {
  case cByte:
    // We did the computation with 16-bit registers.  Truncate back to our
    // result by copying into AX then copying out AL.
    BuildMI(*MBB, IP, X86::MOV16rr, 1, X86::AX).addReg(DestReg);
    BuildMI(*MBB, IP, X86::MOV8rr, 1, RealDestReg).addReg(X86::AL);
    break;
  case cLong:
    // Move the upper half of the value as well.
    BuildMI(*MBB, IP, Opcode, 2,DestReg+1).addReg(TrueReg+1).addReg(FalseReg+1);
    break;
  }
}

/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
/// operand, in the specified target register.
///
void ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
  bool isUnsigned = VR.Ty->isUnsigned();

  Value *Val = VR.Val;
  const Type *Ty = VR.Ty;
  if (Val) {
    if (Constant *C = dyn_cast<Constant>(Val)) {
      Val = ConstantExpr::getCast(C, Type::IntTy);
      Ty = Type::IntTy;
    }

    // If this is a simple constant, just emit a MOVri directly to avoid the
    // copy/cast sequence.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      int TheVal = CI->getRawValue() & 0xFFFFFFFF;
      BuildMI(BB, X86::MOV32ri, 1, targetReg).addImm(TheVal);
      return;
    }
  }

  // Make sure we have the register number for this value...
  unsigned Reg = Val ? getReg(Val) : VR.Reg;

  switch (getClassB(Ty)) {
  case cByte:
    // Extend value into target register (8->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr8, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr8, 1, targetReg).addReg(Reg);
    break;
  case cShort:
    // Extend value into target register (16->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr16, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr16, 1, targetReg).addReg(Reg);
    break;
  case cInt:
    // Move value into target register (32->32)
    BuildMI(BB, X86::MOV32rr, 1, targetReg).addReg(Reg);
    break;
  default:
    assert(0 && "Unpromotable operand class in promote32");
  }
}

/// 'ret' instruction - Here we are interested in meeting the x86 ABI.  As such,
/// we have the following possibilities:
///
///   ret void: No return value, simply emit a 'ret' instruction
///   ret sbyte, ubyte : Extend value into EAX and return
///   ret short, ushort: Extend value into EAX and return
///   ret int, uint    : Move value into EAX and return
///   ret pointer      : Move value into EAX and return
///   ret long, ulong  : Move value into EAX/EDX and return
///   ret float/double : Top of FP stack
///
void ISel::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    BuildMI(BB, X86::RET, 0);  // Just emit a 'ret' instruction
    return;
  }

  Value *RetVal = I.getOperand(0);
  switch (getClassB(RetVal->getType())) {
  case cByte:   // integral return values: extend or move into EAX and return
  case cShort:
  case cInt:
    promote32(X86::EAX, ValueRecord(RetVal));
    // Declare that EAX is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
    break;
  case cFP: {   // Floats & Doubles: Return in ST(0)
    unsigned RetReg = getReg(RetVal);
    BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg);
    // Declare that top-of-stack is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
    break;
  }
  case cLong: {
    unsigned RetReg = getReg(RetVal);
    BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(RetReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RetReg+1);
    // Declare that EAX & EDX are live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
      .addReg(X86::ESP);
    break;
  }
  default:
    visitInstruction(I);
  }
  // Emit a 'ret' instruction
  BuildMI(BB, X86::RET, 0);
}

// getBlockAfter - Return the basic block which occurs lexically after the
// specified one.
static inline BasicBlock *getBlockAfter(BasicBlock *BB) {
  Function::iterator I = BB; ++I;  // Get iterator to next block
  return I != BB->getParent()->end() ? &*I : 0;
}

/// visitBranchInst - Handle conditional and unconditional branches here.  Note
/// that since code layout is frozen at this point, if we are trying to jump
/// to a block that is the immediate successor of the current block, we can
/// just make a fall-through (but we don't currently).
///
void ISel::visitBranchInst(BranchInst &BI) {
  BasicBlock *NextBB = getBlockAfter(BI.getParent());  // BB after current one

  if (!BI.isConditional()) {   // Unconditional branch?
    if (BI.getSuccessor(0) != NextBB)
      BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
    return;
  }

  // See if we can fold the setcc into the branch itself...
  SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(BI.getCondition());
  if (SCI == 0) {
    // Nope, cannot fold setcc into this branch.  Emit a branch on a condition
    // computed some other way...
    unsigned condReg = getReg(BI.getCondition());
    BuildMI(BB, X86::TEST8rr, 2).addReg(condReg).addReg(condReg);
    if (BI.getSuccessor(1) == NextBB) {
      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0));
    } else {
      BuildMI(BB, X86::JE, 1).addPCDisp(BI.getSuccessor(1));

      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
    }
    return;
  }

  unsigned OpNum = getSetCCNumber(SCI->getOpcode());
  MachineBasicBlock::iterator MII = BB->end();
  OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB,MII);

  const Type *CompTy = SCI->getOperand(0)->getType();
  bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

  // LLVM  -> X86 signed  X86 unsigned
  // -----    ----------  ------------
  // seteq -> je          je
  // setne -> jne         jne
  // setlt -> jl          jb
  // setge -> jge         jae
  // setgt -> jg          ja
  // setle -> jle         jbe
  // ----
  // js                   // Used by comparison with 0 optimization
  // jns

  static const unsigned OpcodeTab[2][8] = {
    { X86::JE, X86::JNE, X86::JB, X86::JAE, X86::JA, X86::JBE, 0, 0 },
    { X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE,
      X86::JS, X86::JNS },
  };

  if (BI.getSuccessor(0) != NextBB) {
    BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(0));
    if (BI.getSuccessor(1) != NextBB)
      BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(1));
  } else {
    // Change to the inverse condition...
    if (BI.getSuccessor(1) != NextBB) {
      OpNum ^= 1;   // Flip the low bit: eq<->ne, lt<->ge, gt<->le
      BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(1));
    }
  }
}

/// doCall - This emits an abstract call instruction, setting up the arguments
/// and the return value as appropriate.  For the actual function call itself,
/// it inserts the specified CallMI instruction into the stream.
///
void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                  const std::vector<ValueRecord> &Args) {
  // Count how many bytes are to be pushed on the stack...
  unsigned NumBytes = 0;

  if (!Args.empty()) {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getClassB(Args[i].Ty)) {
      case cByte: case cShort: case cInt:
        NumBytes += 4; break;
      case cLong:
        NumBytes += 8; break;
      case cFP:
        NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8;
        break;
      default: assert(0 && "Unknown class!");
      }

    // Adjust the stack pointer for the new arguments...
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(NumBytes);

    // Arguments go on the stack in reverse order, as specified by the ABI.
    unsigned ArgOffset = 0;
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      unsigned ArgReg;
      switch (getClassB(Args[i].Ty)) {
      case cByte:
      case cShort:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          // Zero/Sign extend constant, then stuff into memory.
          ConstantInt *Val = cast<ConstantInt>(Args[i].Val);
          Val = cast<ConstantInt>(ConstantExpr::getCast(Val, Type::IntTy));
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
            .addImm(Val->getRawValue() & 0xFFFFFFFF);
        } else {
          // Promote arg to 32 bits wide into a temporary register...
          ArgReg = makeAnotherReg(Type::UIntTy);
          promote32(ArgReg, Args[i]);
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        }
        break;
      case cInt:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          unsigned Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(Val);
        } else {
          ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        }
        break;
      case cLong:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          uint64_t Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(Val & ~0U);
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset+4).addImm(Val >> 32ULL);
        } else {
          ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset+4).addReg(ArgReg+1);
        }
        ArgOffset += 4;   // 8 byte entry, not 4.
        break;
      case cFP:
        ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
        if (Args[i].Ty == Type::FloatTy) {
          addRegOffset(BuildMI(BB, X86::FST32m, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        } else {
          assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
          addRegOffset(BuildMI(BB, X86::FST64m, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          ArgOffset += 4;   // 8 byte entry, not 4.
        }
        break;

      default: assert(0 && "Unknown class!");
      }
      ArgOffset += 4;   // Each argument takes at least 4 bytes on the stack
    }
  } else {
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(0);
  }

  BB->push_back(CallMI);

  BuildMI(BB, X86::ADJCALLSTACKUP, 1).addImm(NumBytes);
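
  // ADJCALLSTACKDOWN/ADJCALLSTACKUP are call-frame pseudo instructions; a
  // later pass (the prolog/epilog inserter) rewrites them into the actual
  // ESP adjustments around the call.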

  // If there is a return value, scavenge the result from the location the call
  // leaves it in...
  //
  if (Ret.Ty != Type::VoidTy) {
    unsigned DestClass = getClassB(Ret.Ty);
    switch (DestClass) {
    case cByte:
    case cShort:
    case cInt: {
      // Integral results are in %eax, or the appropriate portion
      // thereof.
      static const unsigned regRegMove[] = {
        X86::MOV8rr, X86::MOV16rr, X86::MOV32rr
      };
      static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
      BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
      break;
    }
    case cFP:     // Floating-point return values live in %ST(0)
      BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
      break;
    case cLong:   // Long values are left in EDX:EAX
      BuildMI(BB, X86::MOV32rr, 1, Ret.Reg).addReg(X86::EAX);
      BuildMI(BB, X86::MOV32rr, 1, Ret.Reg+1).addReg(X86::EDX);
      break;
    default: assert(0 && "Unknown class!");
    }
  }
}

/// visitCallInst - Push args on stack and do a procedure call instruction.
void ISel::visitCallInst(CallInst &CI) {
  MachineInstr *TheCall;
  if (Function *F = CI.getCalledFunction()) {
    // Is it an intrinsic function call?
    if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) {
      visitIntrinsicCall(ID, CI);   // Special intrinsics are not handled here
      return;
    }

    // Emit a CALL instruction with PC-relative displacement.
    TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
  } else {  // Emit an indirect call...
    unsigned Reg = getReg(CI.getCalledValue());
    TheCall = BuildMI(X86::CALL32r, 1).addReg(Reg);
  }

  std::vector<ValueRecord> Args;
  for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
    Args.push_back(ValueRecord(CI.getOperand(i)));

  unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
  doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
}

/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
/// function, lowering any calls to unknown intrinsic functions into the
/// equivalent LLVM code.
///
void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
      if (CallInst *CI = dyn_cast<CallInst>(I++))
        if (Function *F = CI->getCalledFunction())
          switch (F->getIntrinsicID()) {
          case Intrinsic::not_intrinsic:
          case Intrinsic::vastart:
          case Intrinsic::vacopy:
          case Intrinsic::vaend:
          case Intrinsic::returnaddress:
          case Intrinsic::frameaddress:
          case Intrinsic::memcpy:
          case Intrinsic::memset:
            // We directly implement these intrinsics
            break;
          default: {
            // All other intrinsic calls we must lower.
            Instruction *Before = CI->getPrev();
            TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
            if (Before) {   // Move iterator to instruction after call
              I = Before; ++I;
            } else {
              I = BB->begin();
            }
          }
          }
}

void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
  unsigned TmpReg1, TmpReg2;
  switch (ID) {
  case Intrinsic::vastart:
    // Get the address of the first vararg value...
    TmpReg1 = getReg(CI);
    addFrameReference(BuildMI(BB, X86::LEA32r, 5, TmpReg1), VarArgsFrameIndex);
    return;

  case Intrinsic::vacopy:
    TmpReg1 = getReg(CI);
    TmpReg2 = getReg(CI.getOperand(1));
    BuildMI(BB, X86::MOV32rr, 1, TmpReg1).addReg(TmpReg2);
    return;
  case Intrinsic::vaend: return;    // Noop on X86

  case Intrinsic::returnaddress:
  case Intrinsic::frameaddress:
    TmpReg1 = getReg(CI);
    if (cast<Constant>(CI.getOperand(1))->isNullValue()) {
      if (ID == Intrinsic::returnaddress) {
        // Just load the return address
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, TmpReg1),
                          ReturnAddressIndex);
      } else {
        addFrameReference(BuildMI(BB, X86::LEA32r, 4, TmpReg1),
                          ReturnAddressIndex, -4);
      }
    } else {
      // Values other than zero are not implemented yet.
      BuildMI(BB, X86::MOV32ri, 1, TmpReg1).addImm(0);
    }
    return;

  case Intrinsic::memcpy: {
    assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
    unsigned Align = 1;
    if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
      Align = AlignC->getRawValue();
      if (Align == 0) Align = 1;
    }

    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    switch (Align & 3) {
    case 2:   // WORD aligned
      if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
        CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
      } else {
        CountReg = makeAnotherReg(Type::IntTy);
        unsigned ByteReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
      }
      Opcode = X86::REP_MOVSW;
      break;
    case 0:   // DWORD aligned
      if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
        CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
      } else {
        CountReg = makeAnotherReg(Type::IntTy);
        unsigned ByteReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
      }
      Opcode = X86::REP_MOVSD;
      break;
    default:  // BYTE aligned
      CountReg = getReg(CI.getOperand(3));
      Opcode = X86::REP_MOVSB;
      break;
    }

    // No matter what the alignment is, we put the source in ESI, the
    // destination in EDI, and the count in ECX.
    TmpReg1 = getReg(CI.getOperand(1));
    TmpReg2 = getReg(CI.getOperand(2));
    BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
    BuildMI(BB, Opcode, 0);
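    // The single REP_MOVS{B,W,D} instruction above copies ECX
    // byte/word/dword units from [ESI] to [EDI].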
    return;
  }
  case Intrinsic::memset: {
    assert(CI.getNumOperands() == 5 && "Illegal llvm.memset call!");
    unsigned Align = 1;
    if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
      Align = AlignC->getRawValue();
      if (Align == 0) Align = 1;
    }

    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    if (ConstantInt *ValC = dyn_cast<ConstantInt>(CI.getOperand(2))) {
      unsigned Val = ValC->getRawValue() & 255;

      // If the value is a constant, then we can potentially use larger copies.
      switch (Align & 3) {
      case 2:   // WORD aligned
        if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
          CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
        } else {
          CountReg = makeAnotherReg(Type::IntTy);
          unsigned ByteReg = getReg(CI.getOperand(3));
          BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
        }
        BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
        Opcode = X86::REP_STOSW;
        break;
      case 0:   // DWORD aligned
        if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
          CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
        } else {
          CountReg = makeAnotherReg(Type::IntTy);
          unsigned ByteReg = getReg(CI.getOperand(3));
          BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
        }
        Val = (Val << 8) | Val;
        BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
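        // Val was first splatted into both bytes of a 16-bit value; the
        // 32-bit immediate above replicates it across all four bytes of EAX.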
        Opcode = X86::REP_STOSD;
        break;
      default:  // BYTE aligned
        CountReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
        Opcode = X86::REP_STOSB;
        break;
      }
    } else {
      // If it's not a constant value we are storing, just fall back.  We could
      // try to be clever to form 16 bit and 32 bit values, but we don't yet.
      unsigned ValReg = getReg(CI.getOperand(2));
      BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
      CountReg = getReg(CI.getOperand(3));
      Opcode = X86::REP_STOSB;
    }

    // No matter what the alignment is, we put the fill value in AL/AX/EAX, the
    // destination in EDI, and the count in ECX.
    TmpReg1 = getReg(CI.getOperand(1));
    //TmpReg2 = getReg(CI.getOperand(2));
    BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, Opcode, 0);
    return;
  }

  default: assert(0 && "Error: unknown intrinsics should have been lowered!");
  }
}
static bool isSafeToFoldLoadIntoInstruction(LoadInst &LI, Instruction &User) {
  if (LI.getParent() != User.getParent())
    return false;
  BasicBlock::iterator It = &LI;
  // Check all of the instructions between the load and the user.  We should
  // really use alias analysis here, but for now we just do something simple.
  for (++It; It != BasicBlock::iterator(&User); ++It) {
    switch (It->getOpcode()) {
    case Instruction::Free:
    case Instruction::Store:
    case Instruction::Call:
    case Instruction::Invoke:
      return false;   // These may all write memory.
    }
  }
  return true;
}
/// visitSimpleBinary - Implement simple binary operators for integral types...
/// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
/// Xor.
///
void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
  unsigned DestReg = getReg(B);
  MachineBasicBlock::iterator MI = BB->end();
  Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1);

  // Special case: op Reg, load [mem]
  if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1))
    if (!B.swapOperands())
      std::swap(Op0, Op1);  // Make sure any loads are in the RHS.
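  // e.g. "%t = load int* %P ... %r = add int %x, %t" can then be selected as
  // a single ADD32rm that reads [%P] directly, provided nothing between the
  // load and the add may write memory.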
  unsigned Class = getClassB(B.getType());
  if (isa<LoadInst>(Op1) && Class < cFP &&
      isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op1), B)) {

    static const unsigned OpcodeTab[][3] = {
      // Arithmetic operators
      { X86::ADD8rm, X86::ADD16rm, X86::ADD32rm },  // ADD
      { X86::SUB8rm, X86::SUB16rm, X86::SUB32rm },  // SUB

      // Bitwise operators
      { X86::AND8rm, X86::AND16rm, X86::AND32rm },  // AND
      { X86:: OR8rm, X86:: OR16rm, X86:: OR32rm },  // OR
      { X86::XOR8rm, X86::XOR16rm, X86::XOR32rm },  // XOR
    };

    assert(Class < cFP && "General code handles 64-bit integer types!");
    unsigned Opcode = OpcodeTab[OperatorClass][Class];

    unsigned BaseReg, Scale, IndexReg, Disp;
    getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), BaseReg,
                      Scale, IndexReg, Disp);

    unsigned Op0r = getReg(Op0);
    addFullAddress(BuildMI(BB, Opcode, 2, DestReg).addReg(Op0r),
                   BaseReg, Scale, IndexReg, Disp);
    return;
  }
  emitSimpleBinaryOperation(BB, MI, Op0, Op1, OperatorClass, DestReg);
}

/// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
/// and constant expression support.  OperatorClass is one of: 0 for Add, 1 for
/// Sub, 2 for And, 3 for Or, 4 for Xor.
///
void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
                                     MachineBasicBlock::iterator IP,
                                     Value *Op0, Value *Op1,
                                     unsigned OperatorClass, unsigned DestReg) {
  unsigned Class = getClassB(Op0->getType());

  // sub 0, X -> neg X
  if (OperatorClass == 1) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0)) {
      if (CI->isNullValue()) {
        unsigned op1Reg = getReg(Op1, MBB, IP);
        static unsigned const NEGTab[] = {
          X86::NEG8r, X86::NEG16r, X86::NEG32r, 0, X86::NEG32r
        };
        BuildMI(*MBB, IP, NEGTab[Class], 1, DestReg).addReg(op1Reg);

        if (Class == cLong) {
          // We just emitted: Dl = neg Sl
          // Now emit       : T  = addc Sh, 0
          //                : Dh = neg T
          unsigned T = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::ADC32ri, 2, T).addReg(op1Reg+1).addImm(0);
          BuildMI(*MBB, IP, X86::NEG32r, 1, DestReg+1).addReg(T);
        }
        return;
      }
    } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0)) {
      if (CFP->isExactlyValue(-0.0)) {   // sub -0.0, X -> fchs X
        unsigned op1Reg = getReg(Op1, MBB, IP);
        BuildMI(*MBB, IP, X86::FCHS, 1, DestReg).addReg(op1Reg);
        return;
      }
    }
  }
  // Special case: op Reg, <const>
  if (isa<ConstantInt>(Op1)) {
    ConstantInt *Op1C = cast<ConstantInt>(Op1);
    unsigned Op0r = getReg(Op0, MBB, IP);

    // xor X, -1 -> not X
    if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
      static unsigned const NOTTab[] = {
        X86::NOT8r, X86::NOT16r, X86::NOT32r, 0, X86::NOT32r
      };
      BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
      if (Class == cLong)  // Invert the top part too
        BuildMI(*MBB, IP, X86::NOT32r, 1, DestReg+1).addReg(Op0r+1);
      return;
    }

    // add X, -1 -> dec X
    if (OperatorClass == 0 && Op1C->isAllOnesValue() && Class != cLong) {
      // Note that we can't use dec for 64-bit decrements, because it does not
      // set the carry flag!
      static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
      BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
      return;
    }

    // add X, 1 -> inc X
    if (OperatorClass == 0 && Op1C->equalsInt(1) && Class != cLong) {
      // Note that we can't use inc for 64-bit increments, because it does not
      // set the carry flag!
      static unsigned const INCTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
      BuildMI(*MBB, IP, INCTab[Class], 1, DestReg).addReg(Op0r);
      return;
    }
    static const unsigned OpcodeTab[][5] = {
      // Arithmetic operators
      { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri },  // ADD
      { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, X86::SUB32ri },  // SUB

      // Bitwise operators
      { X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, X86::AND32ri },  // AND
      { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri, 0, X86:: OR32ri },  // OR
      { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, X86::XOR32ri },  // XOR
    };

    unsigned Opcode = OpcodeTab[OperatorClass][Class];
    unsigned Op1l = cast<ConstantInt>(Op1C)->getRawValue();

    if (Class != cLong) {
      BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
      return;
    }

    // If this is a long value and the high or low bits have a special
    // property, emit some special cases.
    unsigned Op1h = cast<ConstantInt>(Op1C)->getRawValue() >> 32LL;

    // If the constant is zero in the low 32-bits, just copy the low part
    // across and apply the normal 32-bit operation to the high parts.  There
    // will be no carry or borrow into the top.
    if (Op1l == 0) {
      if (OperatorClass != 2) // All but and...
        BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0r);
      else
        BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
      BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg+1)
        .addReg(Op0r+1).addImm(Op1h);
      return;
    }

    // If this is a logical operation and the top 32-bits are zero, just
    // operate on the lower 32.
    if (Op1h == 0 && OperatorClass > 1) {
      BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg)
        .addReg(Op0r).addImm(Op1l);
      if (OperatorClass != 2)  // All but and
        BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(Op0r+1);
      else
        BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
      return;
    }

    // TODO: We could handle lots of other special cases here, such as AND'ing
    // with 0xFFFFFFFF00000000 -> noop, etc.

    // Otherwise, code generate the full operation with a constant.
    static const unsigned TopTab[] = {
      X86::ADC32ri, X86::SBB32ri, X86::AND32ri, X86::OR32ri, X86::XOR32ri
    };

    BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
    BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1)
      .addReg(Op0r+1).addImm(Op1h);
    return;
  }
  // Finally, handle the general case now.
  static const unsigned OpcodeTab[][5] = {
    // Arithmetic operators
    { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, X86::FpADD, X86::ADD32rr },// ADD
    { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB, X86::SUB32rr },// SUB

    // Bitwise operators
    { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, X86::AND32rr },  // AND
    { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0, X86:: OR32rr },  // OR
    { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, X86::XOR32rr },  // XOR
  };

  unsigned Opcode = OpcodeTab[OperatorClass][Class];
  assert(Opcode && "Floating point arguments to logical inst?");
  unsigned Op0r = getReg(Op0, MBB, IP);
  unsigned Op1r = getReg(Op1, MBB, IP);
  BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);

  if (Class == cLong) {        // Handle the upper 32 bits of long values...
    static const unsigned TopTab[] = {
      X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
    };
    BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
            DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
  }
}
/// doMultiply - Emit appropriate instructions to multiply together the
/// registers op0Reg and op1Reg, and put the result in DestReg.  The type of the
/// result should be given as DestTy.
///
void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                      unsigned DestReg, const Type *DestTy,
                      unsigned op0Reg, unsigned op1Reg) {
  unsigned Class = getClass(DestTy);
  switch (Class) {
  case cFP:              // Floating point multiply
    BuildMI(*MBB, MBBI, X86::FpMUL, 2, DestReg).addReg(op0Reg).addReg(op1Reg);
    return;
  case cInt:
  case cShort:
    BuildMI(*MBB, MBBI, Class == cInt ? X86::IMUL32rr:X86::IMUL16rr, 2, DestReg)
      .addReg(op0Reg).addReg(op1Reg);
    return;
  case cByte:
    // Must use the MUL instruction, which forces use of AL...
    BuildMI(*MBB, MBBI, X86::MOV8rr, 1, X86::AL).addReg(op0Reg);
    BuildMI(*MBB, MBBI, X86::MUL8r, 1).addReg(op1Reg);
    BuildMI(*MBB, MBBI, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
    return;
  default:
  case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
  }
}

// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N.  It
// returns zero when the input is not exactly a power of two.
static unsigned ExactLog2(unsigned Val) {
  if (Val == 0) return 0;
  unsigned Count = 0;
  while (Val != 1) {
    if (Val & 1) return 0;
    Val >>= 1;
    ++Count;
  }
  return Count+1;
}
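// For example, ExactLog2(8) == 4, which is why the caller below shifts by
// (Shift-1) when turning a multiply by a power of two into an SHL.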
/// doMultiplyConst - This function is specialized to efficiently codegen an
/// 8, 16, or 32 bit integer multiply by a constant.
void ISel::doMultiplyConst(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator IP,
                           unsigned DestReg, const Type *DestTy,
                           unsigned op0Reg, unsigned ConstRHS) {
  static const unsigned MOVrrTab[] = {X86::MOV8rr, X86::MOV16rr, X86::MOV32rr};
  static const unsigned MOVriTab[] = {X86::MOV8ri, X86::MOV16ri, X86::MOV32ri};

  unsigned Class = getClass(DestTy);

  if (ConstRHS == 0) {
    BuildMI(*MBB, IP, MOVriTab[Class], 1, DestReg).addImm(0);
    return;
  } else if (ConstRHS == 1) {
    BuildMI(*MBB, IP, MOVrrTab[Class], 1, DestReg).addReg(op0Reg);
    return;
  }

  // If the element size is exactly a power of 2, use a shift to get it.
  if (unsigned Shift = ExactLog2(ConstRHS)) {
    switch (Class) {
    default: assert(0 && "Unknown class for this function!");
    case cByte:
    case cShort:
    case cInt:
      // A 32-bit shift is fine for all three classes: a left shift never
      // propagates high garbage into the low 8/16 bits that matter.
      BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
      return;
    }
  }

  if (Class == cShort) {
    BuildMI(*MBB, IP, X86::IMUL16rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
    return;
  } else if (Class == cInt) {
    BuildMI(*MBB, IP, X86::IMUL32rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
    return;
  }

  // Most general case, emit a normal multiply...
  unsigned TmpReg = makeAnotherReg(DestTy);
  BuildMI(*MBB, IP, MOVriTab[Class], 1, TmpReg).addImm(ConstRHS);

  // Emit a MUL to multiply the register holding the index by
  // elementSize, putting the result in DestReg.
  doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
}
/// visitMul - Multiplies are not simple binary operators because they must deal
/// with the EAX register explicitly.
///
void ISel::visitMul(BinaryOperator &I) {
  unsigned Op0Reg  = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  // Simple scalar multiply?
  if (getClass(I.getType()) != cLong) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1))) {
      unsigned Val = (unsigned)CI->getRawValue(); // Cannot be 64-bit constant
      MachineBasicBlock::iterator MBBI = BB->end();
      doMultiplyConst(BB, MBBI, DestReg, I.getType(), Op0Reg, Val);
    } else {
      unsigned Op1Reg  = getReg(I.getOperand(1));
      MachineBasicBlock::iterator MBBI = BB->end();
      doMultiply(BB, MBBI, DestReg, I.getType(), Op0Reg, Op1Reg);
    }
  } else {
    // Long value.  We have to do things the hard way...
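    // The product of two 64-bit values (AH:AL) * (BH:BL), keeping only the
    // low 64 bits, is:
    //   Lo = low32 (AL*BL)
    //   Hi = high32(AL*BL) + low32(AH*BL) + low32(AL*BH)
    // The AH*BH term only affects bits 64 and up, so it is never computed.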
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1))) {
      unsigned CLow = CI->getRawValue();
      unsigned CHi  = CI->getRawValue() >> 32;

      if (CLow == 0) {
        // If the low part of the constant is all zeros, things are simple.
        BuildMI(BB, X86::MOV32ri, 1, DestReg).addImm(0);
        doMultiplyConst(BB, BB->end(), DestReg+1, Type::UIntTy, Op0Reg, CHi);
        return;
      }

      // Multiply the two low parts... capturing carry into EDX
      unsigned OverflowReg = 0;
      if (CLow == 1) {
        BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(Op0Reg);
      } else {
        unsigned Op1RegL = makeAnotherReg(Type::UIntTy);
        OverflowReg = makeAnotherReg(Type::UIntTy);
        BuildMI(BB, X86::MOV32ri, 1, Op1RegL).addImm(CLow);
        BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
        BuildMI(BB, X86::MUL32r, 1).addReg(Op1RegL);  // AL*BL

        BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);      // AL*BL
        BuildMI(BB, X86::MOV32rr, 1,OverflowReg).addReg(X86::EDX);// AL*BL >> 32
      }

      unsigned AHBLReg = makeAnotherReg(Type::UIntTy);   // AH*BL
      doMultiplyConst(BB, BB->end(), AHBLReg, Type::UIntTy, Op0Reg+1, CLow);

      unsigned AHBLplusOverflowReg;
      if (OverflowReg) {
        AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
        BuildMI(BB, X86::ADD32rr, 2,                // AH*BL+(AL*BL >> 32)
                AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
      } else {
        AHBLplusOverflowReg = AHBLReg;
      }

      if (CHi == 0) {
        BuildMI(BB, X86::MOV32rr, 1, DestReg+1).addReg(AHBLplusOverflowReg);
      } else {
        unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
        doMultiplyConst(BB, BB->end(), ALBHReg, Type::UIntTy, Op0Reg, CHi);

        BuildMI(BB, X86::ADD32rr, 2,      // AL*BH + AH*BL + (AL*BL >> 32)
                DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
      }
      return;
    }
    unsigned Op1Reg  = getReg(I.getOperand(1));
    // Multiply the two low parts... capturing carry into EDX
    BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
    BuildMI(BB, X86::MUL32r, 1).addReg(Op1Reg);  // AL*BL

    unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
    BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);     // AL*BL
    BuildMI(BB, X86::MOV32rr, 1, OverflowReg).addReg(X86::EDX); // AL*BL >> 32

    MachineBasicBlock::iterator MBBI = BB->end();
    unsigned AHBLReg = makeAnotherReg(Type::UIntTy);   // AH*BL
    BuildMI(*BB, MBBI, X86::IMUL32rr, 2,
            AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);

    unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
    BuildMI(*BB, MBBI, X86::ADD32rr, 2,                // AH*BL+(AL*BL >> 32)
            AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);

    unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
    BuildMI(*BB, MBBI, X86::IMUL32rr, 2,
            ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);

    BuildMI(*BB, MBBI, X86::ADD32rr, 2,         // AL*BH + AH*BL + (AL*BL >> 32)
            DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
  }
}
/// visitDivRem - Handle division and remainder instructions... these
/// instructions both require the same instructions to be generated, they just
/// select the result from a different register.  Note that both of these
/// instructions work differently for signed and unsigned operands.
///
void ISel::visitDivRem(BinaryOperator &I) {
  unsigned Op0Reg = getReg(I.getOperand(0));
  unsigned Op1Reg = getReg(I.getOperand(1));
  unsigned ResultReg = getReg(I);

  MachineBasicBlock::iterator IP = BB->end();
  emitDivRemOperation(BB, IP, Op0Reg, Op1Reg, I.getOpcode() == Instruction::Div,
                      I.getType(), ResultReg);
}

void ISel::emitDivRemOperation(MachineBasicBlock *BB,
                               MachineBasicBlock::iterator IP,
                               unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
                               const Type *Ty, unsigned ResultReg) {
  unsigned Class = getClass(Ty);
  switch (Class) {
  case cFP:              // Floating point divide
    if (isDiv) {
      BuildMI(*BB, IP, X86::FpDIV, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
    } else {             // Floating point remainder...
      MachineInstr *TheCall =
        BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
      std::vector<ValueRecord> Args;
      Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy));
      Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy));
      doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args);
    }
    return;
  case cLong: {
    // 64-bit div/rem are lowered to runtime library calls.
    static const char *FnName[] =
      { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" };

    unsigned NameIdx = Ty->isUnsigned()*2 + isDiv;
    MachineInstr *TheCall =
      BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);

    std::vector<ValueRecord> Args;
    Args.push_back(ValueRecord(Op0Reg, Type::LongTy));
    Args.push_back(ValueRecord(Op1Reg, Type::LongTy));
    doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args);
    return;
  }
  case cByte: case cShort: case cInt:
    break;       // Small integrals, handled below...
  default: assert(0 && "Unknown class!");
  }

  static const unsigned Regs[]     ={ X86::AL    , X86::AX     , X86::EAX     };
  static const unsigned MovOpcode[]={ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
  static const unsigned SarOpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
  static const unsigned ClrOpcode[]={ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri };
  static const unsigned ExtRegs[]  ={ X86::AH    , X86::DX     , X86::EDX     };

  static const unsigned DivOpcode[][4] = {
    { X86::DIV8r , X86::DIV16r , X86::DIV32r , 0 },  // Unsigned division
    { X86::IDIV8r, X86::IDIV16r, X86::IDIV32r, 0 },  // Signed division
  };

  bool isSigned   = Ty->isSigned();
  unsigned Reg    = Regs[Class];
  unsigned ExtReg = ExtRegs[Class];

  // Put the first operand into one of the A registers...
  BuildMI(*BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
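  // x86 divide instructions take the dividend in AH:AL, DX:AX, or EDX:EAX.
  // For a signed divide the extension register must hold the sign bits
  // (SAR by 31 produces the 0/-1 mask); for an unsigned divide it must
  // simply be zero.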
  if (isSigned) {
    // Emit a sign extension instruction...
    unsigned ShiftResult = makeAnotherReg(Ty);
    BuildMI(*BB, IP, SarOpcode[Class], 2,ShiftResult).addReg(Op0Reg).addImm(31);
    BuildMI(*BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
  } else {
    // If unsigned, emit a zeroing instruction... (reg = 0)
    BuildMI(*BB, IP, ClrOpcode[Class], 2, ExtReg).addImm(0);
  }

  // Emit the appropriate divide or remainder instruction...
  BuildMI(*BB, IP, DivOpcode[isSigned][Class], 1).addReg(Op1Reg);

  // Figure out which register we want to pick the result out of...
  unsigned DestReg = isDiv ? Reg : ExtReg;

  // Put the result into the destination register...
  BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
}
/// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here
/// for constant immediate shift values, and for constant immediate
/// shift values equal to 1. Even the general case is sort of special,
/// because the shift amount has to be in CL, not just any old register.
///
void ISel::visitShiftInst(ShiftInst &I) {
  MachineBasicBlock::iterator IP = BB->end();
  emitShiftOperation(BB, IP, I.getOperand(0), I.getOperand(1),
                     I.getOpcode() == Instruction::Shl, I.getType(),
                     getReg(I));
}

/// emitShiftOperation - Common code shared between visitShiftInst and
/// constant expression support.
void ISel::emitShiftOperation(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP,
                              Value *Op, Value *ShiftAmount, bool isLeftShift,
                              const Type *ResultTy, unsigned DestReg) {
  unsigned SrcReg = getReg(Op, MBB, IP);
  bool isSigned = ResultTy->isSigned();
  unsigned Class = getClass(ResultTy);

  static const unsigned ConstantOperand[][4] = {
    { X86::SHR8ri, X86::SHR16ri, X86::SHR32ri, X86::SHRD32rri8 },  // SHR
    { X86::SAR8ri, X86::SAR16ri, X86::SAR32ri, X86::SHRD32rri8 },  // SAR
    { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 },  // SHL
    { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 },  // SAL = SHL
  };

  static const unsigned NonConstantOperand[][4] = {
    { X86::SHR8rCL, X86::SHR16rCL, X86::SHR32rCL },  // SHR
    { X86::SAR8rCL, X86::SAR16rCL, X86::SAR32rCL },  // SAR
    { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL },  // SHL
    { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL },  // SAL = SHL
  };
  // Longs, as usual, are handled specially...
  if (Class == cLong) {
    // If we have a constant shift, we can generate much more efficient code
    // than otherwise...
    //
    if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
      unsigned Amount = CUI->getValue();
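      // A constant shift of a long either stays within one word (Amount < 32,
      // handled with SHLD/SHRD plus a plain shift) or crosses it; e.g.
      // "shl long %x, ubyte 40" makes the high word lo(x) << 8 and zeroes
      // the low word.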
      if (Amount < 32) {
        const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
        if (isLeftShift) {
          BuildMI(*MBB, IP, Opc[3], 3,
              DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addImm(Amount);
          BuildMI(*MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addImm(Amount);
        } else {
          BuildMI(*MBB, IP, Opc[3], 3,
              DestReg).addReg(SrcReg).addReg(SrcReg+1).addImm(Amount);
          BuildMI(*MBB, IP, Opc[2],2,DestReg+1).addReg(SrcReg+1).addImm(Amount);
        }
      } else {                 // Shifting more than 32 bits
        Amount -= 32;
        if (isLeftShift) {
          if (Amount != 0) {
            BuildMI(*MBB, IP, X86::SHL32ri, 2,
                    DestReg + 1).addReg(SrcReg).addImm(Amount);
          } else {
            BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg);
          }
          BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
        } else {
          if (Amount != 0) {
            BuildMI(*MBB, IP, isSigned ? X86::SAR32ri : X86::SHR32ri, 2,
                    DestReg).addReg(SrcReg+1).addImm(Amount);
          } else {
            BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg+1);
          }
          BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
        }
      }
    } else {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);

      if (!isLeftShift && isSigned) {
        // If this is a SHR of a Long, then we need to do funny sign extension
        // stuff.  TmpReg gets the value to use as the high-part if we are
        // shifting more than 32 bits.  (The sign mask comes from the high
        // word, SrcReg+1.)
        BuildMI(*MBB, IP, X86::SAR32ri, 2, TmpReg).addReg(SrcReg+1).addImm(31);
      } else {
        // Other shifts use a fixed zero value if the shift is more than 32
        // bits.
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TmpReg).addImm(0);
      }

      // Initialize CL with the shift amount...
      unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
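      // The rCL shift forms and SHLD/SHRD only see CL mod 32, so for counts
      // in 32..63 the results computed below land in the wrong halves.
      // "TEST CL, 32" checks bit 5 of the amount, and the CMOVNEs that
      // follow select the corrected half when the count is 32 or more.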
      unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
      unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
      if (isLeftShift) {
        // TmpReg2 = shld inHi, inLo
        BuildMI(*MBB, IP, X86::SHLD32rrCL,2,TmpReg2).addReg(SrcReg+1)
                                                    .addReg(SrcReg);
        // TmpReg3 = shl  inLo, CL
        BuildMI(*MBB, IP, X86::SHL32rCL, 1, TmpReg3).addReg(SrcReg);

        // Set the flags to indicate whether the shift was by more than 32 bits.
        BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);

        // DestHi = (>=32) ? TmpReg3 : TmpReg2;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
        // DestLo = (>=32) ? TmpReg : TmpReg3;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg).addReg(TmpReg3).addReg(TmpReg);
      } else {
        // TmpReg2 = shrd inLo, inHi
        BuildMI(*MBB, IP, X86::SHRD32rrCL,2,TmpReg2).addReg(SrcReg)
                                                    .addReg(SrcReg+1);
        // TmpReg3 = s[ah]r  inHi, CL
        BuildMI(*MBB, IP, isSigned ? X86::SAR32rCL : X86::SHR32rCL, 1, TmpReg3)
                       .addReg(SrcReg+1);

        // Set the flags to indicate whether the shift was by more than 32 bits.
        BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);

        // DestLo = (>=32) ? TmpReg3 : TmpReg2;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg).addReg(TmpReg2).addReg(TmpReg3);

        // DestHi = (>=32) ? TmpReg : TmpReg3;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg+1).addReg(TmpReg3).addReg(TmpReg);
      }
    }
    return;
  }
  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
    // The shift amount is constant, guaranteed to be a ubyte. Get its value.
    assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");

    const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
    BuildMI(*MBB, IP, Opc[Class], 2,
            DestReg).addReg(SrcReg).addImm(CUI->getValue());
  } else {                  // The shift amount is non-constant.
    unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
    BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);

    const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
    BuildMI(*MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
  }
}
void ISel::getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
                             unsigned &IndexReg, unsigned &Disp) {
  BaseReg = 0; Scale = 1; IndexReg = 0; Disp = 0;
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
    if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
                      BaseReg, Scale, IndexReg, Disp))
      return;
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (CE->getOpcode() == Instruction::GetElementPtr)
      if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
                        BaseReg, Scale, IndexReg, Disp))
        return;
  }

  // If it's not foldable, reset the addressing mode to the simplest form:
  // just a base register.
  BaseReg = getReg(Addr);
  Scale = 1; IndexReg = 0; Disp = 0;
}
/// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
/// instruction.  The load and store instructions are the only place where we
/// need to worry about the memory layout of the target machine.
///
void ISel::visitLoadInst(LoadInst &I) {
  // Check to see if this load instruction is going to be folded into a binary
  // instruction, like add.  If so, we don't want to emit it.  Wouldn't a real
  // pattern matching instruction selector be nice?
  if (I.hasOneUse() && getClassB(I.getType()) < cFP) {
    Instruction *User = cast<Instruction>(I.use_back());
    switch (User->getOpcode()) {
    default: User = 0; break;
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      break;
    }

    if (User) {
      // Okay, we found a user.  If the load is the first operand and there is
      // no second operand load, reverse the operand ordering.  Note that this
      // can fail for a subtract (ie, no change will be made).
      if (!isa<LoadInst>(User->getOperand(1)))
        cast<BinaryOperator>(User)->swapOperands();

      // Okay, now that everything is set up, if this load is used by the
      // second operand, and if there are no instructions that invalidate the
      // load before the binary operator, eliminate the load.
      if (User->getOperand(1) == &I &&
          isSafeToFoldLoadIntoInstruction(I, *User))
        return;   // Eliminate the load!
    }
  }
  unsigned DestReg = getReg(I);
  unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
  getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);

  unsigned Class = getClassB(I.getType());
  if (Class == cLong) {
    addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg),
                   BaseReg, Scale, IndexReg, Disp);
    addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1),
                   BaseReg, Scale, IndexReg, Disp+4);
    return;
  }

  static const unsigned Opcodes[] = {
    X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD32m
  };
  unsigned Opcode = Opcodes[Class];
  if (I.getType() == Type::DoubleTy) Opcode = X86::FLD64m;
  addFullAddress(BuildMI(BB, Opcode, 4, DestReg),
                 BaseReg, Scale, IndexReg, Disp);
}
/// visitStoreInst - Implement LLVM store instructions in terms of the x86
/// 'mov' instruction.
///
void ISel::visitStoreInst(StoreInst &I) {
  unsigned BaseReg, Scale, IndexReg, Disp;
  getAddressingMode(I.getOperand(1), BaseReg, Scale, IndexReg, Disp);

  const Type *ValTy = I.getOperand(0)->getType();
  unsigned Class = getClassB(ValTy);

  if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
    uint64_t Val = CI->getRawValue();
    if (Class == cLong) {
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
                     BaseReg, Scale, IndexReg, Disp).addImm(Val & ~0U);
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
                     BaseReg, Scale, IndexReg, Disp+4).addImm(Val>>32);
    } else {
      static const unsigned Opcodes[] = {
        X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
      };
      unsigned Opcode = Opcodes[Class];
      addFullAddress(BuildMI(BB, Opcode, 5),
                     BaseReg, Scale, IndexReg, Disp).addImm(Val);
    }
  } else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
    addFullAddress(BuildMI(BB, X86::MOV8mi, 5),
                   BaseReg, Scale, IndexReg, Disp).addImm(CB->getValue());
  } else {
    if (Class == cLong) {
      unsigned ValReg = getReg(I.getOperand(0));
      addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
                     BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
      addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
                     BaseReg, Scale, IndexReg, Disp+4).addReg(ValReg+1);
    } else {
      unsigned ValReg = getReg(I.getOperand(0));
      static const unsigned Opcodes[] = {
        X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
      };
      unsigned Opcode = Opcodes[Class];
      if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;
      addFullAddress(BuildMI(BB, Opcode, 1+4),
                     BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
    }
  }
}
/// visitCastInst - Here we have various kinds of copying with or without sign
/// extension going on.
///
void ISel::visitCastInst(CastInst &CI) {
  Value *Op = CI.getOperand(0);
  // If this is a cast from a 32-bit integer to a Long type, and the only uses
  // of the cast are GEP instructions, then the cast does not need to be
  // generated explicitly, it will be folded into the GEP.
  if (CI.getType() == Type::LongTy &&
      (Op->getType() == Type::IntTy || Op->getType() == Type::UIntTy)) {
    bool AllUsesAreGEPs = true;
    for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I)
      if (!isa<GetElementPtrInst>(*I)) {
        AllUsesAreGEPs = false;
        break;
      }

    // No need to codegen this cast if all users are getelementptr instrs...
    if (AllUsesAreGEPs) return;
  }

  unsigned DestReg = getReg(CI);
  MachineBasicBlock::iterator MI = BB->end();
  emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
}
/// emitCastOperation - Common code shared between visitCastInst and constant
/// expression cast support.
///
void ISel::emitCastOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator IP,
                             Value *Src, const Type *DestTy,
                             unsigned DestReg) {
  unsigned SrcReg = getReg(Src, BB, IP);
  const Type *SrcTy = Src->getType();
  unsigned SrcClass = getClassB(SrcTy);
  unsigned DestClass = getClassB(DestTy);

  // Implement casts to bool by using compare on the operand followed by set if
  // not zero on the result.
  if (DestTy == Type::BoolTy) {
    switch (SrcClass) {
    case cByte:
      BuildMI(*BB, IP, X86::TEST8rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cShort:
      BuildMI(*BB, IP, X86::TEST16rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cInt:
      BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cLong: {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::OR32rr, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
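      // ORing the two halves sets ZF only when the entire 64-bit value is
      // zero, which is exactly the condition the SETNE below tests.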
      break;
    }
    case cFP:
      BuildMI(*BB, IP, X86::FTST, 1).addReg(SrcReg);
      BuildMI(*BB, IP, X86::FNSTSW8r, 0);
      BuildMI(*BB, IP, X86::SAHF, 1);
      break;
    }

    // If the zero flag is not set, then the value is true, set the byte to
    // true.
    BuildMI(*BB, IP, X86::SETNEr, 1, DestReg);
    return;
  }

  static const unsigned RegRegMove[] = {
    X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
  };

  // Implement casts between values of the same type class (as determined by
  // getClass) by using a register-to-register move.
  if (SrcClass == DestClass) {
    if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
      BuildMI(*BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
    } else if (SrcClass == cFP) {
      if (SrcTy == Type::FloatTy) {  // float -> double
        assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
        BuildMI(*BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
      } else {                       // double -> float
        assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
               "Unknown cFP member!");
        // Truncate from double to float by storing to memory as a float,
        // then reading it back.
        unsigned FltAlign = TM.getTargetData().getFloatAlignment();
        int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
        addFrameReference(BuildMI(*BB, IP, X86::FST32m, 5),
                          FrameIdx).addReg(SrcReg);
        addFrameReference(BuildMI(*BB, IP, X86::FLD32m, 5, DestReg), FrameIdx);
      }
    } else if (SrcClass == cLong) {
      BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
      BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg+1);
    } else {
      assert(0 && "Cannot handle this type of cast instruction!");
    }
    return;
  }
  // Handle cast of SMALLER int to LARGER int using a move with sign extension
  // or zero extension, depending on whether the source type was signed.
  if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) &&
      SrcClass < DestClass) {
    bool isLong = DestClass == cLong;
    if (isLong) DestClass = cInt;

    static const unsigned Opc[][4] = {
      { X86::MOVSX16rr8, X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOV32rr }, // s
      { X86::MOVZX16rr8, X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOV32rr }  // u
    };

    bool isUnsigned = SrcTy->isUnsigned();
    BuildMI(*BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
            DestReg).addReg(SrcReg);

    if (isLong) {          // Handle upper 32 bits as appropriate...
      if (isUnsigned)      // Zero out top bits...
        BuildMI(*BB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
      else                 // Sign extend bottom half...
        BuildMI(*BB, IP, X86::SAR32ri, 2, DestReg+1).addReg(DestReg).addImm(31);
    }
    return;
  }

  // Special case long -> int ...
  if (SrcClass == cLong && DestClass == cInt) {
    BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
    return;
  }
  // Handle cast of LARGER int to SMALLER int using a move to EAX followed by a
  // move out of AX or AL.
  if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
      && SrcClass > DestClass) {
    static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
    BuildMI(*BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
    BuildMI(*BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
    return;
  }
  // Handle casts from integer to floating point now...
  if (DestClass == cFP) {
    // Promote the integer to a type supported by FLD.  We do this because
    // there are no unsigned FLD instructions, so we must promote an unsigned
    // value to a larger signed value, then use FLD on the larger value.
    //
    const Type *PromoteType = 0;
    unsigned PromoteOpcode = 0;
    unsigned RealDestReg = DestReg;
    switch (SrcTy->getPrimitiveID()) {
    case Type::BoolTyID:
    case Type::SByteTyID:
      // We don't have the facilities for directly loading byte sized data from
      // memory (even signed).  Promote it to 16 bits.
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVSX16rr8;
      break;
    case Type::UByteTyID:
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVZX16rr8;
      break;
    case Type::UShortTyID:
      PromoteType = Type::IntTy;
      PromoteOpcode = X86::MOVZX32rr16;
      break;
    case Type::UIntTyID: {
      // Make a 64 bit temporary... and zero out the top of it...
      unsigned TmpReg = makeAnotherReg(Type::LongTy);
      BuildMI(*BB, IP, X86::MOV32rr, 1, TmpReg).addReg(SrcReg);
      BuildMI(*BB, IP, X86::MOV32ri, 1, TmpReg+1).addImm(0);
      SrcTy = Type::LongTy;
      SrcClass = cLong;
      SrcReg = TmpReg;
      break;
    }
    case Type::ULongTyID:
      // Don't fild into the real destination; it is patched up afterwards.
      DestReg = makeAnotherReg(Type::DoubleTy);
      break;
    default:  // No promotion needed...
      break;
    }

    if (PromoteType) {
      unsigned TmpReg = makeAnotherReg(PromoteType);
      BuildMI(*BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
      SrcTy = PromoteType;
      SrcClass = getClass(PromoteType);
      SrcReg = TmpReg;
    }
    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());

    if (SrcClass == cLong) {
      addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
                        FrameIdx).addReg(SrcReg);
      addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
                        FrameIdx, 4).addReg(SrcReg+1);
    } else {
      static const unsigned Op1[] = { X86::MOV8mr, X86::MOV16mr, X86::MOV32mr };
      addFrameReference(BuildMI(*BB, IP, Op1[SrcClass], 5),
                        FrameIdx).addReg(SrcReg);
    }

    static const unsigned Op2[] =
      { 0/*byte*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m };
    addFrameReference(BuildMI(*BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
    // We need special handling for unsigned 64-bit integer sources.  If the
    // input number has the "sign bit" set, then we loaded it incorrectly as a
    // negative 64-bit number.  In this case, add an offset value.
    if (SrcTy == Type::ULongTy) {
      // Emit a test instruction to see if the dynamic input value was signed.
      BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg+1).addReg(SrcReg+1);

      // If the sign bit is set, get a pointer to an offset, otherwise get a
      // pointer to a zero.
      MachineConstantPool *CP = F->getConstantPool();
      unsigned Zero = makeAnotherReg(Type::IntTy);
      Constant *Null = Constant::getNullValue(Type::UIntTy);
      addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Zero),
                               CP->getConstantPoolIndex(Null));
      unsigned Offset = makeAnotherReg(Type::IntTy);
      Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
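      // 0x5f800000 is the IEEE-754 single-precision bit pattern for 2^64.
      // Adding it (as a float) compensates for FILD having interpreted the
      // unsigned input as a negative signed 64-bit integer.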
      addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Offset),
                               CP->getConstantPoolIndex(OffsetCst));
      unsigned Addr = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);

      // Load the constant for an add.  FIXME: this could make an 'fadd' that
      // reads directly from memory, but we don't support these yet.
      unsigned ConstReg = makeAnotherReg(Type::DoubleTy);
      addDirectMem(BuildMI(*BB, IP, X86::FLD32m, 4, ConstReg), Addr);

      BuildMI(*BB, IP, X86::FpADD, 2, RealDestReg)
        .addReg(ConstReg).addReg(DestReg);
    }

    return;
  }
  // Handle casts from floating point to integer now...
  if (SrcClass == cFP) {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    //
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(*BB, IP, X86::FNSTCW16m, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
    addFrameReference(BuildMI(*BB, IP, X86::MOV8rm, 4, HighPartOfCW),
                      CWFrameIdx, 1);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, IP, X86::MOV8mi, 5),
                      CWFrameIdx, 1).addImm(12);
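    // Writing 12 (0b1100) into the high byte sets the rounding-control field
    // (bits 10-11 of the control word) to 11b, i.e. truncate toward zero,
    // which is what C's float-to-integer conversion requires.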
    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, IP, X86::MOV8mr, 5),
                      CWFrameIdx, 1).addReg(HighPartOfCW);

    // We don't have the facilities for directly storing byte sized data to
    // memory.  Promote it to 16 bits.  We also must promote unsigned values to
    // larger classes because we only have signed FP stores.
    unsigned StoreClass  = DestClass;
    const Type *StoreTy  = DestTy;
    if (StoreClass == cByte || DestTy->isUnsigned())
      switch (StoreClass) {
      case cByte:  StoreTy = Type::ShortTy; StoreClass = cShort; break;
      case cShort: StoreTy = Type::IntTy;   StoreClass = cInt;   break;
      case cInt:   StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      // The following treatment of cLong may not be perfectly right,
      // but it survives chains of casts of the form
      // double->ulong->double.
      case cLong:  StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      default: assert(0 && "Unknown store class!");
      }
    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());

    static const unsigned Op1[] =
      { 0, X86::FIST16m, X86::FIST32m, 0, X86::FISTP64m };
    addFrameReference(BuildMI(*BB, IP, Op1[StoreClass], 5),
                      FrameIdx).addReg(SrcReg);

    if (DestClass == cLong) {
      addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg), FrameIdx);
      addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg+1),
                        FrameIdx, 4);
    } else {
      static const unsigned Op2[] = { X86::MOV8rm, X86::MOV16rm, X86::MOV32rm };
      addFrameReference(BuildMI(*BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
    }

    // Reload the original control word now...
    addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
    return;
  }

  // Anything we haven't handled already, we can't (yet) handle at all.
  assert(0 && "Unhandled cast instruction!");
}
/// visitVANextInst - Implement the va_next instruction...
///
void ISel::visitVANextInst(VANextInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  unsigned Size;
  switch (I.getArgType()->getPrimitiveID()) {
  default:
    assert(0 && "Error: bad type for va_next instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    Size = 4;
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
  case Type::DoubleTyID:
    Size = 8;
    break;
  }

  // Increment the VAList pointer...
  BuildMI(BB, X86::ADD32ri, 2, DestReg).addReg(VAList).addImm(Size);
}

void ISel::visitVAArgInst(VAArgInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  switch (I.getType()->getPrimitiveID()) {
  default:
    assert(0 && "Error: bad type for va_arg instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
    addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
    addRegOffset(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), VAList, 4);
    break;
  case Type::DoubleTyID:
    addDirectMem(BuildMI(BB, X86::FLD64m, 4, DestReg), VAList);
    break;
  }
}
/// visitGetElementPtrInst - instruction-select GEP instructions
///
void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
  // If this GEP instruction will be folded into all of its users, we don't
  // need to explicitly calculate it!
  unsigned A, B, C, D;
  if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), A,B,C,D)) {
    // Check all of the users of the instruction to see if they are loads and
    // stores.
    bool AllWillFold = true;
    for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI)
      if (cast<Instruction>(*UI)->getOpcode() != Instruction::Load)
        if (cast<Instruction>(*UI)->getOpcode() != Instruction::Store ||
            cast<Instruction>(*UI)->getOperand(0) == &I) {
          AllWillFold = false;
          break;
        }

    // If the instruction is foldable, and will be folded into all users, don't
    // emit it!
    if (AllWillFold) return;
  }
  unsigned outputReg = getReg(I);
  emitGEPOperation(BB, BB->end(), I.getOperand(0),
                   I.op_begin()+1, I.op_end(), outputReg);
}
/// getGEPIndex - Inspect the getelementptr operands specified with GEPOps and
/// GEPTypes (the derived types being stepped through at each level).  On return
/// from this function, if some indexes of the instruction are representable as
/// an X86 lea instruction, the machine operands are put into the addressing
/// mode and the consumed indexes are popped from the GEPOps/GEPTypes lists;
/// anything that cannot be handled is left in place.  If this returns an
/// addressing mode that only partially consumes the input, the BaseReg input of
/// the addressing mode must be left free.
///
/// Note that there is one fewer entry in GEPTypes than there is in GEPOps.
///
void ISel::getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                       std::vector<Value*> &GEPOps,
                       std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
                       unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
  const TargetData &TD = TM.getTargetData();

  // Clear out the state we are working with...
  BaseReg = 0;    // No base register
  Scale = 1;      // Unit scale
  IndexReg = 0;   // No index register
  Disp = 0;       // No displacement

  // While there are GEP indexes that can be folded into the current address,
  // keep processing them.
  while (!GEPTypes.empty()) {
    if (const StructType *StTy = dyn_cast<StructType>(GEPTypes.back())) {
      // It's a struct access.  CUI is the index into the structure,
      // which names the field.  This index must have unsigned type.
      const ConstantUInt *CUI = cast<ConstantUInt>(GEPOps.back());

      // Use the TargetData structure to pick out what the layout of the
      // structure is in memory.  Since the structure index must be constant, we
      // can get its value and use it to find the right byte offset from the
      // StructLayout class's list of structure member offsets.
      Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
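      // e.g. for "getelementptr {int, int}* %P, long 0, ubyte 1" the field
      // index contributes the field's byte offset (4 here) directly to Disp.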
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();

      // idx is the index into the array.  Unlike with structure
      // indices, we may not know its actual value at code-generation
      // time.

      // If idx is a constant, fold it into the offset.
      unsigned TypeSize = TD.getTypeSize(SqTy->getElementType());
      if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
        Disp += TypeSize*CSI->getValue();
      } else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) {
        Disp += TypeSize*CUI->getValue();
      } else {
        // If the index reg is already taken, we can't handle this index.
        if (IndexReg) return;

        // If this is a size that we can handle, then add the index as a
        // scaled value.
        switch (TypeSize) {
        case 1: case 2: case 4: case 8:
          // These are all acceptable scales on X86.
          Scale = TypeSize;
          break;
        default:
          // Otherwise, we can't handle this scale
          return;
        }

        // Look through a [cast (int/uint) to LongTy] index so the index
        // register can be a plain 32-bit value.
        if (CastInst *CI = dyn_cast<CastInst>(idx))
          if (CI->getOperand(0)->getType() == Type::IntTy ||
              CI->getOperand(0)->getType() == Type::UIntTy)
            idx = CI->getOperand(0);

        IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
      }

      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    }
  }

  // GEPTypes is empty, which means we have a single operand left.  See if we
  // can set it as the base register.
  //
  // FIXME: When addressing modes are more powerful/correct, we could load
  // global addresses directly as 32-bit immediates.
  assert(BaseReg == 0);
  BaseReg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
  GEPOps.pop_back();        // Consume the last GEP operand
}
/// isGEPFoldable - Return true if the specified GEP can be completely
/// folded into the addressing mode of a load/store or lea instruction.
bool ISel::isGEPFoldable(MachineBasicBlock *MBB,
                         Value *Src, User::op_iterator IdxBegin,
                         User::op_iterator IdxEnd, unsigned &BaseReg,
                         unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
  if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
    Src = CPR->getValue();

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*> GEPTypes;
  GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
                  gep_type_end(Src->getType(), IdxBegin, IdxEnd));

  MachineBasicBlock::iterator IP;
  if (MBB) IP = MBB->end();
  getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);

  // We can fold it away iff the getGEPIndex call eliminated all operands.
  return GEPOps.empty();
}
void ISel::emitGEPOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator IP,
                            Value *Src, User::op_iterator IdxBegin,
                            User::op_iterator IdxEnd, unsigned TargetReg) {
  const TargetData &TD = TM.getTargetData();
  if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
    Src = CPR->getValue();

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*> GEPTypes;
  GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
                  gep_type_end(Src->getType(), IdxBegin, IdxEnd));

  // Keep emitting instructions until we consume the entire GEP instruction.
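  // Note that codegen proceeds somewhat backwards here: when a step cannot
  // be folded, its ADD is emitted first and the insertion point is then
  // backed up (--IP), so the code computing the ADD's operands is inserted
  // in front of it.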
  while (!GEPOps.empty()) {
    unsigned OldSize = GEPOps.size();
    unsigned BaseReg, Scale, IndexReg, Disp;
    getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);

    if (GEPOps.size() != OldSize) {
      // getGEPIndex consumed some of the input.  Build an LEA instruction here.
      unsigned NextTarget = 0;
      if (!GEPOps.empty()) {
        assert(BaseReg == 0 &&
           "getGEPIndex should have left the base register open for chaining!");
        NextTarget = BaseReg = makeAnotherReg(Type::UIntTy);
      }

      if (IndexReg == 0 && Disp == 0)
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
      else
        addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg),
                       BaseReg, Scale, IndexReg, Disp);

      TargetReg = NextTarget;
    } else if (GEPTypes.empty()) {
      // The getGEPIndex operation didn't want to build an LEA.  Check to see if
      // all operands are consumed but the base pointer.  If so, just load it
      // into the register.
      if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps[0])) {
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(GV);
      } else {
        unsigned BaseReg = getReg(GEPOps[0], MBB, IP);
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
      }
      break;                // we are now done
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();

      // Many GEP instructions use a [cast (int/uint) to LongTy] as their
      // operand on X86.  Handle this case directly now...
      if (CastInst *CI = dyn_cast<CastInst>(idx))
        if (CI->getOperand(0)->getType() == Type::IntTy ||
            CI->getOperand(0)->getType() == Type::UIntTy)
          idx = CI->getOperand(0);

      // We want to add BaseReg to (idxReg * sizeof ElementType).  First, we
      // must find the size of the pointed-to type (Not coincidentally, the next
      // type is the type of the elements in the array).
      const Type *ElTy = SqTy->getElementType();
      unsigned elementSize = TD.getTypeSize(ElTy);

      // If idxReg is a constant, we don't need to perform the multiply!
      if (ConstantInt *CSI = dyn_cast<ConstantInt>(idx)) {
        if (!CSI->isNullValue()) {
          unsigned Offset = elementSize*CSI->getRawValue();
          unsigned Reg = makeAnotherReg(Type::UIntTy);
          BuildMI(*MBB, IP, X86::ADD32ri, 2, TargetReg)
            .addReg(Reg).addImm(Offset);
          --IP;            // Insert the next instruction before this one.
          TargetReg = Reg; // Codegen the rest of the GEP into this
        }
      } else if (elementSize == 1) {
        // If the element size is 1, we don't have to multiply, just add
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2,TargetReg).addReg(Reg).addReg(idxReg);
        --IP;            // Insert the next instruction before this one.
        TargetReg = Reg; // Codegen the rest of the GEP into this
      } else {
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned OffsetReg = makeAnotherReg(Type::UIntTy);

        // Make sure we can back the iterator up to point to the first
        // instruction emitted.
        MachineBasicBlock::iterator BeforeIt = IP;
        if (IP == MBB->begin())
          BeforeIt = MBB->end();
        else
          --BeforeIt;
        doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);

        // Emit an ADD to add OffsetReg to the basePtr.
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2, TargetReg)
          .addReg(Reg).addReg(OffsetReg);

        // Step to the first instruction of the multiply.
        if (BeforeIt == MBB->end())
          IP = MBB->begin();
        else
          IP = ++BeforeIt;

        TargetReg = Reg; // Codegen the rest of the GEP into this
      }
    }
  }
}
/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
void ISel::visitAllocaInst(AllocaInst &I) {
  // Find the data size of the alloca inst's getAllocatedType.
  const Type *Ty = I.getAllocatedType();
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);

  // If this is a fixed size alloca in the entry block for the function,
  // statically stack allocate the space.
  //
  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(I.getArraySize())) {
    if (I.getParent() == I.getParent()->getParent()->begin()) {
      TySize *= CUI->getValue();   // Get total allocated size...
      unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);

      // Create a new stack object using the frame manager...
      int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
      addFrameReference(BuildMI(BB, X86::LEA32r, 5, getReg(I)), FrameIdx);
      return;
    }
  }

  // Create a register to hold the temporary result of multiplying the type
  // size constant by the variable amount.
  unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
  unsigned SrcReg1 = getReg(I.getArraySize());

  // TotalSizeReg = mul <numelements>, <TypeSize>
  MachineBasicBlock::iterator MBBI = BB->end();
  doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);

  // AddedSize = add <TotalSizeReg>, 15
  unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ADD32ri, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15);

  // AlignedSize = and <AddedSize>, ~15
  unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::AND32ri, 2, AlignedSize).addReg(AddedSizeReg).addImm(~15);
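  // Adding 15 and masking with ~15 rounds the dynamic size up to a multiple
  // of 16, presumably so ESP stays 16-byte aligned after the subtraction
  // below.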
  // Subtract size from stack pointer, thereby allocating some space.
  BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);

  // Put a pointer to the space into the result register, by copying
  // the stack pointer.
  BuildMI(BB, X86::MOV32rr, 1, getReg(I)).addReg(X86::ESP);

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  F->getFrameInfo()->CreateVariableSizedObject();
}
/// visitMallocInst - Malloc instructions are code generated into direct calls
/// to the library malloc.
///
void ISel::visitMallocInst(MallocInst &I) {
  unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
  unsigned Arg;

  if (ConstantUInt *C = dyn_cast<ConstantUInt>(I.getOperand(0))) {
    Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
  } else {
    Arg = makeAnotherReg(Type::UIntTy);
    unsigned Op0Reg = getReg(I.getOperand(0));
    MachineBasicBlock::iterator MBBI = BB->end();
    doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
  }

  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(Arg, Type::UIntTy));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("malloc", true);
  doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args);
}
/// visitFreeInst - Free instructions are code gen'd to call the free libc
/// function.
///
void ISel::visitFreeInst(FreeInst &I) {
  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(I.getOperand(0)));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("free", true);
  doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}

/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peep-hole fashion.  The
/// generated code sucks but the implementation is nice and simple.
///
FunctionPass *llvm::createX86SimpleInstructionSelector(TargetMachine &TM) {
  return new ISel(TM);
}