1 //===-- InstSelectSimple.cpp - A simple instruction selector for x86 ------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a simple peephole instruction selector for the x86 target
12 //===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86InstrInfo.h"
17 #include "llvm/Constants.h"
18 #include "llvm/DerivedTypes.h"
19 #include "llvm/Function.h"
20 #include "llvm/Instructions.h"
21 #include "llvm/IntrinsicLowering.h"
22 #include "llvm/Pass.h"
23 #include "llvm/CodeGen/MachineConstantPool.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/SSARegMap.h"
28 #include "llvm/Target/MRegisterInfo.h"
29 #include "llvm/Target/TargetMachine.h"
30 #include "llvm/Support/InstVisitor.h"
31 #include "llvm/Support/CFG.h"
36 /// BMI - A special BuildMI variant that takes an iterator to insert the
37 /// instruction at as well as a basic block. This is the version for when you
38 /// have a destination register in mind.
39 inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
40 MachineBasicBlock::iterator &I,
41 int Opcode, unsigned NumOperands,
43 assert(I >= MBB->begin() && I <= MBB->end() && "Bad iterator!");
44 MachineInstr *MI = new MachineInstr(Opcode, NumOperands+1, true, true);
45 I = MBB->insert(I, MI)+1;
46 return MachineInstrBuilder(MI).addReg(DestReg, MOTy::Def);
49 /// BMI - A special BuildMI variant that takes an iterator to insert the
50 /// instruction at as well as a basic block.
51 inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
52 MachineBasicBlock::iterator &I,
53 int Opcode, unsigned NumOperands) {
54 assert(I >= MBB->begin() && I <= MBB->end() && "Bad iterator!");
55 MachineInstr *MI = new MachineInstr(Opcode, NumOperands, true, true);
56 I = MBB->insert(I, MI)+1;
57 return MachineInstrBuilder(MI);
62 struct ISel : public FunctionPass, InstVisitor<ISel> {
64 MachineFunction *F; // The function we are compiling into
65 MachineBasicBlock *BB; // The current MBB we are compiling
66 int VarArgsFrameIndex; // FrameIndex for start of varargs area
68 std::map<Value*, unsigned> RegMap; // Mapping between Val's and SSA Regs
70 // MBBMap - Mapping between LLVM BB -> Machine BB
71 std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;
73 ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}
75 /// runOnFunction - Top level implementation of instruction selection for
76 /// the entire function.
78 bool runOnFunction(Function &Fn) {
79 // First pass over the function, lower any unknown intrinsic functions
80 // with the IntrinsicLowering class.
81 LowerUnknownIntrinsicFunctionCalls(Fn);
83 F = &MachineFunction::construct(&Fn, TM);
85 // Create all of the machine basic blocks for the function...
86 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
87 F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));
91 // Copy incoming arguments off of the stack...
92 LoadArgumentsToVirtualRegs(Fn);
94 // Instruction select everything except PHI nodes
97 // Select the PHI nodes
103 // We always build a machine code representation for the function
107 virtual const char *getPassName() const {
108 return "X86 Simple Instruction Selection";
111 /// visitBasicBlock - This method is called when we are visiting a new basic
112 /// block. This simply creates a new MachineBasicBlock to emit code into
113 /// and adds it to the current MachineFunction. Subsequent visit* for
114 /// instructions will be invoked for all instructions in the basic block.
116 void visitBasicBlock(BasicBlock &LLVM_BB) {
117 BB = MBBMap[&LLVM_BB];
120 /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
121 /// function, lowering any calls to unknown intrinsic functions into the
122 /// equivalent LLVM code.
123 void LowerUnknownIntrinsicFunctionCalls(Function &F);
125 /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
126 /// from the stack into virtual registers.
128 void LoadArgumentsToVirtualRegs(Function &F);
130 /// SelectPHINodes - Insert machine code to generate phis. This is tricky
131 /// because we have to generate our sources into the source basic blocks,
132 /// not the current one.
134 void SelectPHINodes();
136 // Visitation methods for various instructions. These methods simply emit
137 // fixed X86 code for each instruction.
140 // Control flow operators
141 void visitReturnInst(ReturnInst &RI);
142 void visitBranchInst(BranchInst &BI);
148 ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
149 ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
151 void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
152 const std::vector<ValueRecord> &Args);
153 void visitCallInst(CallInst &I);
154 void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I);
156 // Arithmetic operators
157 void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
158 void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
159 void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
160 void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator &MBBI,
161 unsigned DestReg, const Type *DestTy,
162 unsigned Op0Reg, unsigned Op1Reg);
163 void doMultiplyConst(MachineBasicBlock *MBB,
164 MachineBasicBlock::iterator &MBBI,
165 unsigned DestReg, const Type *DestTy,
166 unsigned Op0Reg, unsigned Op1Val);
167 void visitMul(BinaryOperator &B);
169 void visitDiv(BinaryOperator &B) { visitDivRem(B); }
170 void visitRem(BinaryOperator &B) { visitDivRem(B); }
171 void visitDivRem(BinaryOperator &B);
174 void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); }
175 void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
176 void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }
178 // Comparison operators...
179 void visitSetCondInst(SetCondInst &I);
180 unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
181 MachineBasicBlock *MBB,
182 MachineBasicBlock::iterator &MBBI);
184 // Memory Instructions
185 void visitLoadInst(LoadInst &I);
186 void visitStoreInst(StoreInst &I);
187 void visitGetElementPtrInst(GetElementPtrInst &I);
188 void visitAllocaInst(AllocaInst &I);
189 void visitMallocInst(MallocInst &I);
190 void visitFreeInst(FreeInst &I);
193 void visitShiftInst(ShiftInst &I);
194 void visitPHINode(PHINode &I) {} // PHI nodes handled by second pass
195 void visitCastInst(CastInst &I);
196 void visitVANextInst(VANextInst &I);
197 void visitVAArgInst(VAArgInst &I);
199 void visitInstruction(Instruction &I) {
200 std::cerr << "Cannot instruction select: " << I;
204 /// promote32 - Make a value 32-bits wide, and put it somewhere.
206 void promote32(unsigned targetReg, const ValueRecord &VR);
208 /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
209 /// constant expression GEP support.
211 void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator&IP,
212 Value *Src, User::op_iterator IdxBegin,
213 User::op_iterator IdxEnd, unsigned TargetReg);
215 /// emitCastOperation - Common code shared between visitCastInst and
216 /// constant expression cast support.
217 void emitCastOperation(MachineBasicBlock *BB,MachineBasicBlock::iterator&IP,
218 Value *Src, const Type *DestTy, unsigned TargetReg);
220 /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
221 /// and constant expression support.
222 void emitSimpleBinaryOperation(MachineBasicBlock *BB,
223 MachineBasicBlock::iterator &IP,
224 Value *Op0, Value *Op1,
225 unsigned OperatorClass, unsigned TargetReg);
227 void emitDivRemOperation(MachineBasicBlock *BB,
228 MachineBasicBlock::iterator &IP,
229 unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
230 const Type *Ty, unsigned TargetReg);
232 /// emitSetCCOperation - Common code shared between visitSetCondInst and
233 /// constant expression support.
234 void emitSetCCOperation(MachineBasicBlock *BB,
235 MachineBasicBlock::iterator &IP,
236 Value *Op0, Value *Op1, unsigned Opcode,
239 /// emitShiftOperation - Common code shared between visitShiftInst and
240 /// constant expression support.
241 void emitShiftOperation(MachineBasicBlock *MBB,
242 MachineBasicBlock::iterator &IP,
243 Value *Op, Value *ShiftAmount, bool isLeftShift,
244 const Type *ResultTy, unsigned DestReg);
247 /// copyConstantToRegister - Output the instructions required to put the
248 /// specified constant into the specified register.
250 void copyConstantToRegister(MachineBasicBlock *MBB,
251 MachineBasicBlock::iterator &MBBI,
252 Constant *C, unsigned Reg);
254 /// makeAnotherReg - This method returns the next register number we haven't
257 /// Long values are handled somewhat specially. They are always allocated
258 /// as pairs of 32 bit integer values. The register number returned is the
259 /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits
260 /// of the long value.
262 unsigned makeAnotherReg(const Type *Ty) {
263 assert(dynamic_cast<const X86RegisterInfo*>(TM.getRegisterInfo()) &&
264 "Current target doesn't have X86 reg info??");
265 const X86RegisterInfo *MRI =
266 static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
267 if (Ty == Type::LongTy || Ty == Type::ULongTy) {
268 const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
269 // Create the lower part
270 F->getSSARegMap()->createVirtualRegister(RC);
271 // Create the upper part.
272 return F->getSSARegMap()->createVirtualRegister(RC)-1;
275 // Add the mapping of regnumber => reg class to MachineFunction
276 const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
277 return F->getSSARegMap()->createVirtualRegister(RC);
280 /// getReg - This method turns an LLVM value into a register number. This
281 /// is guaranteed to produce the same register number for a particular value
282 /// every time it is queried.
284 unsigned getReg(Value &V) { return getReg(&V); } // Allow references
285 unsigned getReg(Value *V) {
286 // Just append to the end of the current bb.
287 MachineBasicBlock::iterator It = BB->end();
288 return getReg(V, BB, It);
290 unsigned getReg(Value *V, MachineBasicBlock *MBB,
291 MachineBasicBlock::iterator &IPt) {
292 unsigned &Reg = RegMap[V];
294 Reg = makeAnotherReg(V->getType());
298 // If this operand is a constant, emit the code to copy the constant into
299 // the register here...
301 if (Constant *C = dyn_cast<Constant>(V)) {
302 copyConstantToRegister(MBB, IPt, C, Reg);
303 RegMap.erase(V); // Assign a new name to this constant if ref'd again
304 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
305 // Move the address of the global into the register
306 BMI(MBB, IPt, X86::MOVir32, 1, Reg).addGlobalAddress(GV);
307 RegMap.erase(V); // Assign a new name to this address if ref'd again
315 /// TypeClass - Used by the X86 backend to group LLVM types by their basic X86
319 cByte, cShort, cInt, cFP, cLong
322 /// getClass - Turn a primitive type into a "class" number which is based on the
323 /// size of the type, and whether or not it is floating point.
325 static inline TypeClass getClass(const Type *Ty) {
326 switch (Ty->getPrimitiveID()) {
327 case Type::SByteTyID:
328 case Type::UByteTyID: return cByte; // Byte operands are class #0
329 case Type::ShortTyID:
330 case Type::UShortTyID: return cShort; // Short operands are class #1
333 case Type::PointerTyID: return cInt; // Int's and pointers are class #2
335 case Type::FloatTyID:
336 case Type::DoubleTyID: return cFP; // Floating Point is #3
339 case Type::ULongTyID: return cLong; // Longs are class #4
341 assert(0 && "Invalid type to getClass!");
342 return cByte; // not reached
346 // getClassB - Just like getClass, but treat boolean values as bytes.
347 static inline TypeClass getClassB(const Type *Ty) {
348 if (Ty == Type::BoolTy) return cByte;
353 /// copyConstantToRegister - Output the instructions required to put the
354 /// specified constant into the specified register.
356 void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
357 MachineBasicBlock::iterator &IP,
358 Constant *C, unsigned R) {
359 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
361 switch (CE->getOpcode()) {
362 case Instruction::GetElementPtr:
363 emitGEPOperation(MBB, IP, CE->getOperand(0),
364 CE->op_begin()+1, CE->op_end(), R);
366 case Instruction::Cast:
367 emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R);
370 case Instruction::Xor: ++Class; // FALL THROUGH
371 case Instruction::Or: ++Class; // FALL THROUGH
372 case Instruction::And: ++Class; // FALL THROUGH
373 case Instruction::Sub: ++Class; // FALL THROUGH
374 case Instruction::Add:
375 emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
379 case Instruction::Mul: {
380 unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
381 unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
382 doMultiply(MBB, IP, R, CE->getType(), Op0Reg, Op1Reg);
385 case Instruction::Div:
386 case Instruction::Rem: {
387 unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
388 unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
389 emitDivRemOperation(MBB, IP, Op0Reg, Op1Reg,
390 CE->getOpcode() == Instruction::Div,
395 case Instruction::SetNE:
396 case Instruction::SetEQ:
397 case Instruction::SetLT:
398 case Instruction::SetGT:
399 case Instruction::SetLE:
400 case Instruction::SetGE:
401 emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
405 case Instruction::Shl:
406 case Instruction::Shr:
407 emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
408 CE->getOpcode() == Instruction::Shl, CE->getType(), R);
412 std::cerr << "Offending expr: " << C << "\n";
413 assert(0 && "Constant expression not yet handled!\n");
417 if (C->getType()->isIntegral()) {
418 unsigned Class = getClassB(C->getType());
420 if (Class == cLong) {
421 // Copy the value into the register pair.
422 uint64_t Val = cast<ConstantInt>(C)->getRawValue();
423 BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(Val & 0xFFFFFFFF);
424 BMI(MBB, IP, X86::MOVir32, 1, R+1).addZImm(Val >> 32);
428 assert(Class <= cInt && "Type not handled yet!");
430 static const unsigned IntegralOpcodeTab[] = {
431 X86::MOVir8, X86::MOVir16, X86::MOVir32
434 if (C->getType() == Type::BoolTy) {
435 BMI(MBB, IP, X86::MOVir8, 1, R).addZImm(C == ConstantBool::True);
437 ConstantInt *CI = cast<ConstantInt>(C);
438 BMI(MBB, IP, IntegralOpcodeTab[Class], 1, R).addZImm(CI->getRawValue());
440 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
441 if (CFP->isExactlyValue(+0.0))
442 BMI(MBB, IP, X86::FLD0, 0, R);
443 else if (CFP->isExactlyValue(+1.0))
444 BMI(MBB, IP, X86::FLD1, 0, R);
446 // Otherwise we need to spill the constant to memory...
447 MachineConstantPool *CP = F->getConstantPool();
448 unsigned CPI = CP->getConstantPoolIndex(CFP);
449 const Type *Ty = CFP->getType();
451 assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!");
452 unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLDr32 : X86::FLDr64;
453 addConstantPoolReference(BMI(MBB, IP, LoadOpcode, 4, R), CPI);
456 } else if (isa<ConstantPointerNull>(C)) {
457 // Copy zero (null pointer) to the register.
458 BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(0);
459 } else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
460 unsigned SrcReg = getReg(CPR->getValue(), MBB, IP);
461 BMI(MBB, IP, X86::MOVrr32, 1, R).addReg(SrcReg);
463 std::cerr << "Offending constant: " << C << "\n";
464 assert(0 && "Type not handled yet!");
468 /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
469 /// the stack into virtual registers.
471 void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
472 // Emit instructions to load the arguments... On entry to a function on the
473 // X86, the stack frame looks like this:
475 // [ESP] -- return address
476 // [ESP + 4] -- first argument (leftmost lexically)
477 // [ESP + 8] -- second argument, if first argument is four bytes in size
480 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
481 MachineFrameInfo *MFI = F->getFrameInfo();
483 for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
484 unsigned Reg = getReg(*I);
486 int FI; // Frame object index
487 switch (getClassB(I->getType())) {
489 FI = MFI->CreateFixedObject(1, ArgOffset);
490 addFrameReference(BuildMI(BB, X86::MOVmr8, 4, Reg), FI);
493 FI = MFI->CreateFixedObject(2, ArgOffset);
494 addFrameReference(BuildMI(BB, X86::MOVmr16, 4, Reg), FI);
497 FI = MFI->CreateFixedObject(4, ArgOffset);
498 addFrameReference(BuildMI(BB, X86::MOVmr32, 4, Reg), FI);
501 FI = MFI->CreateFixedObject(8, ArgOffset);
502 addFrameReference(BuildMI(BB, X86::MOVmr32, 4, Reg), FI);
503 addFrameReference(BuildMI(BB, X86::MOVmr32, 4, Reg+1), FI, 4);
504 ArgOffset += 4; // longs require 4 additional bytes
508 if (I->getType() == Type::FloatTy) {
509 Opcode = X86::FLDr32;
510 FI = MFI->CreateFixedObject(4, ArgOffset);
512 Opcode = X86::FLDr64;
513 FI = MFI->CreateFixedObject(8, ArgOffset);
514 ArgOffset += 4; // doubles require 4 additional bytes
516 addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
519 assert(0 && "Unhandled argument type!");
521 ArgOffset += 4; // Each argument takes at least 4 bytes on the stack...
524 // If the function takes variable number of arguments, add a frame offset for
525 // the start of the first vararg value... this is used to expand
527 if (Fn.getFunctionType()->isVarArg())
528 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
532 /// SelectPHINodes - Insert machine code to generate phis. This is tricky
533 /// because we have to generate our sources into the source basic blocks, not
536 void ISel::SelectPHINodes() {
537 const TargetInstrInfo &TII = TM.getInstrInfo();
538 const Function &LF = *F->getFunction(); // The LLVM function...
539 for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
540 const BasicBlock *BB = I;
541 MachineBasicBlock *MBB = MBBMap[I];
543 // Loop over all of the PHI nodes in the LLVM basic block...
544 unsigned NumPHIs = 0;
545 for (BasicBlock::const_iterator I = BB->begin();
546 PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) {
548 // Create a new machine instr PHI node, and insert it.
549 unsigned PHIReg = getReg(*PN);
550 MachineInstr *PhiMI = BuildMI(X86::PHI, PN->getNumOperands(), PHIReg);
551 MBB->insert(MBB->begin()+NumPHIs++, PhiMI);
553 MachineInstr *LongPhiMI = 0;
554 if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy) {
555 LongPhiMI = BuildMI(X86::PHI, PN->getNumOperands(), PHIReg+1);
556 MBB->insert(MBB->begin()+NumPHIs++, LongPhiMI);
559 // PHIValues - Map of blocks to incoming virtual registers. We use this
560 // so that we only initialize one incoming value for a particular block,
561 // even if the block has multiple entries in the PHI node.
563 std::map<MachineBasicBlock*, unsigned> PHIValues;
565 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
566 MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)];
568 std::map<MachineBasicBlock*, unsigned>::iterator EntryIt =
569 PHIValues.lower_bound(PredMBB);
571 if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) {
572 // We already inserted an initialization of the register for this
573 // predecessor. Recycle it.
574 ValReg = EntryIt->second;
577 // Get the incoming value into a virtual register.
579 Value *Val = PN->getIncomingValue(i);
581 // If this is a constant or GlobalValue, we may have to insert code
582 // into the basic block to compute it into a virtual register.
583 if (isa<Constant>(Val) || isa<GlobalValue>(Val)) {
584 // Because we don't want to clobber any values which might be in
585 // physical registers with the computation of this constant (which
586 // might be arbitrarily complex if it is a constant expression),
587 // just insert the computation at the top of the basic block.
588 MachineBasicBlock::iterator PI = PredMBB->begin();
590 // Skip over any PHI nodes though!
591 while (PI != PredMBB->end() && (*PI)->getOpcode() == X86::PHI)
594 ValReg = getReg(Val, PredMBB, PI);
596 ValReg = getReg(Val);
599 // Remember that we inserted a value for this PHI for this predecessor
600 PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg));
603 PhiMI->addRegOperand(ValReg);
604 PhiMI->addMachineBasicBlockOperand(PredMBB);
606 LongPhiMI->addRegOperand(ValReg+1);
607 LongPhiMI->addMachineBasicBlockOperand(PredMBB);
614 // canFoldSetCCIntoBranch - Return the setcc instruction if we can fold it into
615 // the conditional branch instruction which is the only user of the cc
616 // instruction. This is the case if the conditional branch is the only user of
617 // the setcc, and if the setcc is in the same basic block as the conditional
618 // branch. We also don't handle long arguments below, so we reject them here as
621 static SetCondInst *canFoldSetCCIntoBranch(Value *V) {
622 if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
623 if (SCI->hasOneUse() && isa<BranchInst>(SCI->use_back()) &&
624 SCI->getParent() == cast<BranchInst>(SCI->use_back())->getParent()) {
625 const Type *Ty = SCI->getOperand(0)->getType();
626 if (Ty != Type::LongTy && Ty != Type::ULongTy)
632 // Return a fixed numbering for setcc instructions which does not depend on the
633 // order of the opcodes.
635 static unsigned getSetCCNumber(unsigned Opcode) {
637 default: assert(0 && "Unknown setcc instruction!");
638 case Instruction::SetEQ: return 0;
639 case Instruction::SetNE: return 1;
640 case Instruction::SetLT: return 2;
641 case Instruction::SetGE: return 3;
642 case Instruction::SetGT: return 4;
643 case Instruction::SetLE: return 5;
647 // LLVM -> X86 signed X86 unsigned
648 // ----- ---------- ------------
649 // seteq -> sete sete
650 // setne -> setne setne
651 // setlt -> setl setb
652 // setge -> setge setae
653 // setgt -> setg seta
654 // setle -> setle setbe
656 // sets // Used by comparison with 0 optimization
658 static const unsigned SetCCOpcodeTab[2][8] = {
659 { X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAEr, X86::SETAr, X86::SETBEr,
661 { X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGEr, X86::SETGr, X86::SETLEr,
662 X86::SETSr, X86::SETNSr },
665 // EmitComparison - This function emits a comparison of the two operands,
666 // returning the extended setcc code to use.
667 unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
668 MachineBasicBlock *MBB,
669 MachineBasicBlock::iterator &IP) {
670 // The arguments are already supposed to be of the same type.
671 const Type *CompTy = Op0->getType();
672 unsigned Class = getClassB(CompTy);
673 unsigned Op0r = getReg(Op0, MBB, IP);
675 // Special case handling of: cmp R, i
676 if (Class == cByte || Class == cShort || Class == cInt)
677 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
678 uint64_t Op1v = cast<ConstantInt>(CI)->getRawValue();
680 // Mask off any upper bits of the constant, if there are any...
681 Op1v &= (1ULL << (8 << Class)) - 1;
683 // If this is a comparison against zero, emit more efficient code. We
684 // can't handle unsigned comparisons against zero unless they are == or
685 // !=. These should have been strength reduced already anyway.
686 if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
687 static const unsigned TESTTab[] = {
688 X86::TESTrr8, X86::TESTrr16, X86::TESTrr32
690 BMI(MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);
692 if (OpNum == 2) return 6; // Map jl -> js
693 if (OpNum == 3) return 7; // Map jg -> jns
697 static const unsigned CMPTab[] = {
698 X86::CMPri8, X86::CMPri16, X86::CMPri32
701 BMI(MBB, IP, CMPTab[Class], 2).addReg(Op0r).addZImm(Op1v);
705 unsigned Op1r = getReg(Op1, MBB, IP);
707 default: assert(0 && "Unknown type class!");
708 // Emit: cmp <var1>, <var2> (do the comparison). We can
709 // compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
712 BMI(MBB, IP, X86::CMPrr8, 2).addReg(Op0r).addReg(Op1r);
715 BMI(MBB, IP, X86::CMPrr16, 2).addReg(Op0r).addReg(Op1r);
718 BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r);
721 BMI(MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r);
722 BMI(MBB, IP, X86::FNSTSWr8, 0);
723 BMI(MBB, IP, X86::SAHF, 1);
727 if (OpNum < 2) { // seteq, setne
728 unsigned LoTmp = makeAnotherReg(Type::IntTy);
729 unsigned HiTmp = makeAnotherReg(Type::IntTy);
730 unsigned FinalTmp = makeAnotherReg(Type::IntTy);
731 BMI(MBB, IP, X86::XORrr32, 2, LoTmp).addReg(Op0r).addReg(Op1r);
732 BMI(MBB, IP, X86::XORrr32, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
733 BMI(MBB, IP, X86::ORrr32, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
734 break; // Allow the sete or setne to be generated from flags set by OR
736 // Emit a sequence of code which compares the high and low parts once
737 // each, then uses a conditional move to handle the overflow case. For
738 // example, a setlt for long would generate code like this:
740 // AL = lo(op1) < lo(op2) // Signedness depends on operands
741 // BL = hi(op1) < hi(op2) // Always unsigned comparison
742 // dest = hi(op1) == hi(op2) ? AL : BL;
745 // FIXME: This would be much better if we had hierarchical register
746 // classes! Until then, hardcode registers so that we can deal with their
747 // aliases (because we don't have conditional byte moves).
749 BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r);
750 BMI(MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
751 BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r+1).addReg(Op1r+1);
752 BMI(MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
753 BMI(MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
754 BMI(MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
755 BMI(MBB, IP, X86::CMOVErr16, 2, X86::BX).addReg(X86::BX).addReg(X86::AX);
756 // NOTE: visitSetCondInst knows that the value is dumped into the BL
757 // register at this point for long values...
765 /// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
766 /// register, then move it to wherever the result should be.
768 void ISel::visitSetCondInst(SetCondInst &I) {
769 if (canFoldSetCCIntoBranch(&I)) return; // Fold this into a branch...
771 unsigned DestReg = getReg(I);
772 MachineBasicBlock::iterator MII = BB->end();
773 emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),
777 /// emitSetCCOperation - Common code shared between visitSetCondInst and
778 /// constant expression support.
779 void ISel::emitSetCCOperation(MachineBasicBlock *MBB,
780 MachineBasicBlock::iterator &IP,
781 Value *Op0, Value *Op1, unsigned Opcode,
782 unsigned TargetReg) {
783 unsigned OpNum = getSetCCNumber(Opcode);
784 OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP);
786 const Type *CompTy = Op0->getType();
787 unsigned CompClass = getClassB(CompTy);
788 bool isSigned = CompTy->isSigned() && CompClass != cFP;
790 if (CompClass != cLong || OpNum < 2) {
791 // Handle normal comparisons with a setcc instruction...
792 BMI(MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
794 // Handle long comparisons by copying the value which is already in BL into
795 // the register we want...
796 BMI(MBB, IP, X86::MOVrr8, 1, TargetReg).addReg(X86::BL);
803 /// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
804 /// operand, in the specified target register.
805 void ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
806 bool isUnsigned = VR.Ty->isUnsigned();
808 // Make sure we have the register number for this value...
809 unsigned Reg = VR.Val ? getReg(VR.Val) : VR.Reg;
811 switch (getClassB(VR.Ty)) {
813 // Extend value into target register (8->32)
815 BuildMI(BB, X86::MOVZXr32r8, 1, targetReg).addReg(Reg);
817 BuildMI(BB, X86::MOVSXr32r8, 1, targetReg).addReg(Reg);
820 // Extend value into target register (16->32)
822 BuildMI(BB, X86::MOVZXr32r16, 1, targetReg).addReg(Reg);
824 BuildMI(BB, X86::MOVSXr32r16, 1, targetReg).addReg(Reg);
827 // Move value into target register (32->32)
828 BuildMI(BB, X86::MOVrr32, 1, targetReg).addReg(Reg);
831 assert(0 && "Unpromotable operand class in promote32");
835 /// 'ret' instruction - Here we are interested in meeting the x86 ABI. As such,
836 /// we have the following possibilities:
838 /// ret void: No return value, simply emit a 'ret' instruction
839 /// ret sbyte, ubyte : Extend value into EAX and return
840 /// ret short, ushort: Extend value into EAX and return
841 /// ret int, uint : Move value into EAX and return
842 /// ret pointer : Move value into EAX and return
843 /// ret long, ulong : Move value into EAX/EDX and return
844 /// ret float/double : Top of FP stack
846 void ISel::visitReturnInst(ReturnInst &I) {
847 if (I.getNumOperands() == 0) {
849 BuildMI(BB, X86::FP_REG_KILL, 0);
851 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
855 Value *RetVal = I.getOperand(0);
856 unsigned RetReg = getReg(RetVal);
857 switch (getClassB(RetVal->getType())) {
858 case cByte: // integral return values: extend or move into EAX and return
861 promote32(X86::EAX, ValueRecord(RetReg, RetVal->getType()));
862 // Declare that EAX is live on exit
863 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
865 case cFP: // Floats & Doubles: Return in ST(0)
866 BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg);
867 // Declare that top-of-stack is live on exit
868 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
871 BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(RetReg);
872 BuildMI(BB, X86::MOVrr32, 1, X86::EDX).addReg(RetReg+1);
873 // Declare that EAX & EDX are live on exit
874 BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
880 // Emit a 'ret' instruction
882 BuildMI(BB, X86::FP_REG_KILL, 0);
884 BuildMI(BB, X86::RET, 0);
887 // getBlockAfter - Return the basic block which occurs lexically after the
889 static inline BasicBlock *getBlockAfter(BasicBlock *BB) {
890 Function::iterator I = BB; ++I; // Get iterator to next block
891 return I != BB->getParent()->end() ? &*I : 0;
894 /// RequiresFPRegKill - The floating point stackifier pass cannot insert
895 /// compensation code on critical edges. As such, it requires that we kill all
896 /// FP registers on the exit from any blocks that either ARE critical edges, or
897 /// branch to a block that has incoming critical edges.
899 /// Note that this kill instruction will eventually be eliminated when
900 /// restrictions in the stackifier are relaxed.
902 static bool RequiresFPRegKill(const BasicBlock *BB) {
904 for (succ_const_iterator SI = succ_begin(BB), E = succ_end(BB); SI!=E; ++SI) {
905 const BasicBlock *Succ = *SI;
906 pred_const_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
907 ++PI; // Block have at least one predecessory
908 if (PI != PE) { // If it has exactly one, this isn't crit edge
909 // If this block has more than one predecessor, check all of the
910 // predecessors to see if they have multiple successors. If so, then the
911 // block we are analyzing needs an FPRegKill.
912 for (PI = pred_begin(Succ); PI != PE; ++PI) {
913 const BasicBlock *Pred = *PI;
914 succ_const_iterator SI2 = succ_begin(Pred);
915 ++SI2; // There must be at least one successor of this block.
916 if (SI2 != succ_end(Pred))
917 return true; // Yes, we must insert the kill on this edge.
921 // If we got this far, there is no need to insert the kill instruction.
928 /// visitBranchInst - Handle conditional and unconditional branches here. Note
929 /// that since code layout is frozen at this point, that if we are trying to
930 /// jump to a block that is the immediate successor of the current block, we can
931 /// just make a fall-through (but we don't currently).
933 void ISel::visitBranchInst(BranchInst &BI) {
934 BasicBlock *NextBB = getBlockAfter(BI.getParent()); // BB after current one
936 if (!BI.isConditional()) { // Unconditional branch?
937 if (RequiresFPRegKill(BI.getParent()))
938 BuildMI(BB, X86::FP_REG_KILL, 0);
939 if (BI.getSuccessor(0) != NextBB)
940 BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
944 // See if we can fold the setcc into the branch itself...
945 SetCondInst *SCI = canFoldSetCCIntoBranch(BI.getCondition());
947 // Nope, cannot fold setcc into this branch. Emit a branch on a condition
948 // computed some other way...
949 unsigned condReg = getReg(BI.getCondition());
950 BuildMI(BB, X86::CMPri8, 2).addReg(condReg).addZImm(0);
951 if (RequiresFPRegKill(BI.getParent()))
952 BuildMI(BB, X86::FP_REG_KILL, 0);
953 if (BI.getSuccessor(1) == NextBB) {
954 if (BI.getSuccessor(0) != NextBB)
955 BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0));
957 BuildMI(BB, X86::JE, 1).addPCDisp(BI.getSuccessor(1));
959 if (BI.getSuccessor(0) != NextBB)
960 BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
965 unsigned OpNum = getSetCCNumber(SCI->getOpcode());
966 MachineBasicBlock::iterator MII = BB->end();
967 OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB,MII);
969 const Type *CompTy = SCI->getOperand(0)->getType();
970 bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;
973 // LLVM -> X86 signed X86 unsigned
974 // ----- ---------- ------------
982 // js // Used by comparison with 0 optimization
985 static const unsigned OpcodeTab[2][8] = {
986 { X86::JE, X86::JNE, X86::JB, X86::JAE, X86::JA, X86::JBE, 0, 0 },
987 { X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE,
991 if (RequiresFPRegKill(BI.getParent()))
992 BuildMI(BB, X86::FP_REG_KILL, 0);
993 if (BI.getSuccessor(0) != NextBB) {
994 BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(0));
995 if (BI.getSuccessor(1) != NextBB)
996 BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(1));
998 // Change to the inverse condition...
999 if (BI.getSuccessor(1) != NextBB) {
1001 BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(1));
1007 /// doCall - This emits an abstract call instruction, setting up the arguments
1008 /// and the return value as appropriate. For the actual function call itself,
1009 /// it inserts the specified CallMI instruction into the stream.
1011 void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
1012 const std::vector<ValueRecord> &Args) {
1014 // Count how many bytes are to be pushed on the stack...
1015 unsigned NumBytes = 0;
1017 if (!Args.empty()) {
1018 for (unsigned i = 0, e = Args.size(); i != e; ++i)
1019 switch (getClassB(Args[i].Ty)) {
1020 case cByte: case cShort: case cInt:
1021 NumBytes += 4; break;
1023 NumBytes += 8; break;
1025 NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8;
1027 default: assert(0 && "Unknown class!");
1030 // Adjust the stack pointer for the new arguments...
1031 BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addZImm(NumBytes);
1033 // Arguments go on the stack in reverse order, as specified by the ABI.
1034 unsigned ArgOffset = 0;
1035 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
1036 unsigned ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
1037 switch (getClassB(Args[i].Ty)) {
1040 // Promote arg to 32 bits wide into a temporary register...
1041 unsigned R = makeAnotherReg(Type::UIntTy);
1042 promote32(R, Args[i]);
1043 addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
1044 X86::ESP, ArgOffset).addReg(R);
1048 addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
1049 X86::ESP, ArgOffset).addReg(ArgReg);
1052 addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
1053 X86::ESP, ArgOffset).addReg(ArgReg);
1054 addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
1055 X86::ESP, ArgOffset+4).addReg(ArgReg+1);
1056 ArgOffset += 4; // 8 byte entry, not 4.
1060 if (Args[i].Ty == Type::FloatTy) {
1061 addRegOffset(BuildMI(BB, X86::FSTr32, 5),
1062 X86::ESP, ArgOffset).addReg(ArgReg);
1064 assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
1065 addRegOffset(BuildMI(BB, X86::FSTr64, 5),
1066 X86::ESP, ArgOffset).addReg(ArgReg);
1067 ArgOffset += 4; // 8 byte entry, not 4.
1071 default: assert(0 && "Unknown class!");
1076 BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addZImm(0);
1079 BB->push_back(CallMI);
1081 BuildMI(BB, X86::ADJCALLSTACKUP, 1).addZImm(NumBytes);
1083 // If there is a return value, scavenge the result from the location the call
1086 if (Ret.Ty != Type::VoidTy) {
1087 unsigned DestClass = getClassB(Ret.Ty);
1088 switch (DestClass) {
1092 // Integral results are in %eax, or the appropriate portion
1094 static const unsigned regRegMove[] = {
1095 X86::MOVrr8, X86::MOVrr16, X86::MOVrr32
1097 static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
1098 BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
1101 case cFP: // Floating-point return values live in %ST(0)
1102 BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
1104 case cLong: // Long values are left in EDX:EAX
1105 BuildMI(BB, X86::MOVrr32, 1, Ret.Reg).addReg(X86::EAX);
1106 BuildMI(BB, X86::MOVrr32, 1, Ret.Reg+1).addReg(X86::EDX);
1108 default: assert(0 && "Unknown class!");
1114 /// visitCallInst - Push args on stack and do a procedure call instruction.
1115 void ISel::visitCallInst(CallInst &CI) {
1116 MachineInstr *TheCall;
1117 if (Function *F = CI.getCalledFunction()) {
1118 // Is it an intrinsic function call?
1119 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) {
1120 visitIntrinsicCall(ID, CI); // Special intrinsics are not handled here
1124 // Emit a CALL instruction with PC-relative displacement.
1125 TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
1126 } else { // Emit an indirect call...
1127 unsigned Reg = getReg(CI.getCalledValue());
1128 TheCall = BuildMI(X86::CALLr32, 1).addReg(Reg);
1131 std::vector<ValueRecord> Args;
1132 for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
1133 Args.push_back(ValueRecord(CI.getOperand(i)));
1135 unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
1136 doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
1140 /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
1141 /// function, lowering any calls to unknown intrinsic functions into the
1142 /// equivalent LLVM code.
1143 void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
1144 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
1145 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
1146 if (CallInst *CI = dyn_cast<CallInst>(I++))
1147 if (Function *F = CI->getCalledFunction())
1148 switch (F->getIntrinsicID()) {
1149 case Intrinsic::not_intrinsic:
1150 case Intrinsic::va_start:
1151 case Intrinsic::va_copy:
1152 case Intrinsic::va_end:
1153 // We directly implement these intrinsics
1156 // All other intrinsic calls we must lower.
1157 Instruction *Before = CI->getPrev();
1158 TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
1159 if (Before) { // Move iterator to instruction after call
1168 void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
1169 unsigned TmpReg1, TmpReg2;
1171 case Intrinsic::va_start:
1172 // Get the address of the first vararg value...
1173 TmpReg1 = getReg(CI);
1174 addFrameReference(BuildMI(BB, X86::LEAr32, 5, TmpReg1), VarArgsFrameIndex);
1177 case Intrinsic::va_copy:
1178 TmpReg1 = getReg(CI);
1179 TmpReg2 = getReg(CI.getOperand(1));
1180 BuildMI(BB, X86::MOVrr32, 1, TmpReg1).addReg(TmpReg2);
1182 case Intrinsic::va_end: return; // Noop on X86
1184 default: assert(0 && "Error: unknown intrinsics should have been lowered!");
1189 /// visitSimpleBinary - Implement simple binary operators for integral types...
1190 /// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
1192 void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
1193 unsigned DestReg = getReg(B);
1194 MachineBasicBlock::iterator MI = BB->end();
1195 emitSimpleBinaryOperation(BB, MI, B.getOperand(0), B.getOperand(1),
1196 OperatorClass, DestReg);
1199 /// emitSimpleBinaryOperation - Implement simple binary operators for integral
1200 /// types... OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for
1203 /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
1204 /// and constant expression support.
1206 void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
1207 MachineBasicBlock::iterator &IP,
1208 Value *Op0, Value *Op1,
1209 unsigned OperatorClass, unsigned DestReg) {
1210 unsigned Class = getClassB(Op0->getType());
1212 // sub 0, X -> neg X
1213 if (OperatorClass == 1 && Class != cLong)
1214 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0)) {
1215 if (CI->isNullValue()) {
1216 unsigned op1Reg = getReg(Op1, MBB, IP);
1218 default: assert(0 && "Unknown class for this function!");
1220 BMI(MBB, IP, X86::NEGr8, 1, DestReg).addReg(op1Reg);
1223 BMI(MBB, IP, X86::NEGr16, 1, DestReg).addReg(op1Reg);
1226 BMI(MBB, IP, X86::NEGr32, 1, DestReg).addReg(op1Reg);
1230 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0))
1231 if (CFP->isExactlyValue(-0.0)) {
1233 unsigned op1Reg = getReg(Op1, MBB, IP);
1234 BMI(MBB, IP, X86::FCHS, 1, DestReg).addReg(op1Reg);
1238 if (!isa<ConstantInt>(Op1) || Class == cLong) {
1239 static const unsigned OpcodeTab[][4] = {
1240 // Arithmetic operators
1241 { X86::ADDrr8, X86::ADDrr16, X86::ADDrr32, X86::FpADD }, // ADD
1242 { X86::SUBrr8, X86::SUBrr16, X86::SUBrr32, X86::FpSUB }, // SUB
1244 // Bitwise operators
1245 { X86::ANDrr8, X86::ANDrr16, X86::ANDrr32, 0 }, // AND
1246 { X86:: ORrr8, X86:: ORrr16, X86:: ORrr32, 0 }, // OR
1247 { X86::XORrr8, X86::XORrr16, X86::XORrr32, 0 }, // XOR
1250 bool isLong = false;
1251 if (Class == cLong) {
1253 Class = cInt; // Bottom 32 bits are handled just like ints
1256 unsigned Opcode = OpcodeTab[OperatorClass][Class];
1257 assert(Opcode && "Floating point arguments to logical inst?");
1258 unsigned Op0r = getReg(Op0, MBB, IP);
1259 unsigned Op1r = getReg(Op1, MBB, IP);
1260 BMI(MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
1262 if (isLong) { // Handle the upper 32 bits of long values...
1263 static const unsigned TopTab[] = {
1264 X86::ADCrr32, X86::SBBrr32, X86::ANDrr32, X86::ORrr32, X86::XORrr32
1266 BMI(MBB, IP, TopTab[OperatorClass], 2,
1267 DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
1272 // Special case: op Reg, <const>
1273 ConstantInt *Op1C = cast<ConstantInt>(Op1);
1274 unsigned Op0r = getReg(Op0, MBB, IP);
1276 // xor X, -1 -> not X
1277 if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
1278 static unsigned const NOTTab[] = { X86::NOTr8, X86::NOTr16, X86::NOTr32 };
1279 BMI(MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
1283 // add X, -1 -> dec X
1284 if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
1285 static unsigned const DECTab[] = { X86::DECr8, X86::DECr16, X86::DECr32 };
1286 BMI(MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
1290 // add X, 1 -> inc X
1291 if (OperatorClass == 0 && Op1C->equalsInt(1)) {
1292 static unsigned const DECTab[] = { X86::INCr8, X86::INCr16, X86::INCr32 };
1293 BMI(MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
1297 static const unsigned OpcodeTab[][3] = {
1298 // Arithmetic operators
1299 { X86::ADDri8, X86::ADDri16, X86::ADDri32 }, // ADD
1300 { X86::SUBri8, X86::SUBri16, X86::SUBri32 }, // SUB
1302 // Bitwise operators
1303 { X86::ANDri8, X86::ANDri16, X86::ANDri32 }, // AND
1304 { X86:: ORri8, X86:: ORri16, X86:: ORri32 }, // OR
1305 { X86::XORri8, X86::XORri16, X86::XORri32 }, // XOR
1308 assert(Class < 3 && "General code handles 64-bit integer types!");
1309 unsigned Opcode = OpcodeTab[OperatorClass][Class];
1310 uint64_t Op1v = cast<ConstantInt>(Op1C)->getRawValue();
1312 // Mask off any upper bits of the constant, if there are any...
1313 Op1v &= (1ULL << (8 << Class)) - 1;
1314 BMI(MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addZImm(Op1v);
1317 /// doMultiply - Emit appropriate instructions to multiply together the
1318 /// registers op0Reg and op1Reg, and put the result in DestReg. The type of the
1319 /// result should be given as DestTy.
1321 void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator &MBBI,
1322 unsigned DestReg, const Type *DestTy,
1323 unsigned op0Reg, unsigned op1Reg) {
1324 unsigned Class = getClass(DestTy);
1326 case cFP: // Floating point multiply
1327 BMI(BB, MBBI, X86::FpMUL, 2, DestReg).addReg(op0Reg).addReg(op1Reg);
1331 BMI(BB, MBBI, Class == cInt ? X86::IMULrr32 : X86::IMULrr16, 2, DestReg)
1332 .addReg(op0Reg).addReg(op1Reg);
1335 // Must use the MUL instruction, which forces use of AL...
1336 BMI(MBB, MBBI, X86::MOVrr8, 1, X86::AL).addReg(op0Reg);
1337 BMI(MBB, MBBI, X86::MULr8, 1).addReg(op1Reg);
1338 BMI(MBB, MBBI, X86::MOVrr8, 1, DestReg).addReg(X86::AL);
1341 case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
1345 // ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It
1346 // returns zero when the input is not exactly a power of two.
1347 static unsigned ExactLog2(unsigned Val) {
1348 if (Val == 0) return 0;
1351 if (Val & 1) return 0;
1358 void ISel::doMultiplyConst(MachineBasicBlock *MBB,
1359 MachineBasicBlock::iterator &IP,
1360 unsigned DestReg, const Type *DestTy,
1361 unsigned op0Reg, unsigned ConstRHS) {
1362 unsigned Class = getClass(DestTy);
1364 // If the element size is exactly a power of 2, use a shift to get it.
1365 if (unsigned Shift = ExactLog2(ConstRHS)) {
1367 default: assert(0 && "Unknown class for this function!");
1369 BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
1372 BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
1375 BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
1380 if (Class == cShort) {
1381 BMI(MBB, IP, X86::IMULri16, 2, DestReg).addReg(op0Reg).addZImm(ConstRHS);
1383 } else if (Class == cInt) {
1384 BMI(MBB, IP, X86::IMULri32, 2, DestReg).addReg(op0Reg).addZImm(ConstRHS);
1388 // Most general case, emit a normal multiply...
1389 static const unsigned MOVirTab[] = {
1390 X86::MOVir8, X86::MOVir16, X86::MOVir32
1393 unsigned TmpReg = makeAnotherReg(DestTy);
1394 BMI(MBB, IP, MOVirTab[Class], 1, TmpReg).addZImm(ConstRHS);
1396 // Emit a MUL to multiply the register holding the index by
1397 // elementSize, putting the result in OffsetReg.
1398 doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
1401 /// visitMul - Multiplies are not simple binary operators because they must deal
1402 /// with the EAX register explicitly.
1404 void ISel::visitMul(BinaryOperator &I) {
1405 unsigned Op0Reg = getReg(I.getOperand(0));
1406 unsigned DestReg = getReg(I);
1408 // Simple scalar multiply?
1409 if (I.getType() != Type::LongTy && I.getType() != Type::ULongTy) {
1410 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1))) {
1411 unsigned Val = (unsigned)CI->getRawValue(); // Cannot be 64-bit constant
1412 MachineBasicBlock::iterator MBBI = BB->end();
1413 doMultiplyConst(BB, MBBI, DestReg, I.getType(), Op0Reg, Val);
1415 unsigned Op1Reg = getReg(I.getOperand(1));
1416 MachineBasicBlock::iterator MBBI = BB->end();
1417 doMultiply(BB, MBBI, DestReg, I.getType(), Op0Reg, Op1Reg);
1420 unsigned Op1Reg = getReg(I.getOperand(1));
1422 // Long value. We have to do things the hard way...
1423 // Multiply the two low parts... capturing carry into EDX
1424 BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(Op0Reg);
1425 BuildMI(BB, X86::MULr32, 1).addReg(Op1Reg); // AL*BL
1427 unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
1428 BuildMI(BB, X86::MOVrr32, 1, DestReg).addReg(X86::EAX); // AL*BL
1429 BuildMI(BB, X86::MOVrr32, 1, OverflowReg).addReg(X86::EDX); // AL*BL >> 32
1431 MachineBasicBlock::iterator MBBI = BB->end();
1432 unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
1433 BMI(BB, MBBI, X86::IMULrr32, 2, AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
1435 unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
1436 BuildMI(BB, X86::ADDrr32, 2, // AH*BL+(AL*BL >> 32)
1437 AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
1440 unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
1441 BMI(BB, MBBI, X86::IMULrr32, 2, ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
1443 BuildMI(BB, X86::ADDrr32, 2, // AL*BH + AH*BL + (AL*BL >> 32)
1444 DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
1449 /// visitDivRem - Handle division and remainder instructions... these
1450 /// instruction both require the same instructions to be generated, they just
1451 /// select the result from a different register. Note that both of these
1452 /// instructions work differently for signed and unsigned operands.
1454 void ISel::visitDivRem(BinaryOperator &I) {
1455 unsigned Op0Reg = getReg(I.getOperand(0));
1456 unsigned Op1Reg = getReg(I.getOperand(1));
1457 unsigned ResultReg = getReg(I);
1459 MachineBasicBlock::iterator IP = BB->end();
1460 emitDivRemOperation(BB, IP, Op0Reg, Op1Reg, I.getOpcode() == Instruction::Div,
1461 I.getType(), ResultReg);
1464 void ISel::emitDivRemOperation(MachineBasicBlock *BB,
1465 MachineBasicBlock::iterator &IP,
1466 unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
1467 const Type *Ty, unsigned ResultReg) {
1468 unsigned Class = getClass(Ty);
1470 case cFP: // Floating point divide
1472 BMI(BB, IP, X86::FpDIV, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
1473 } else { // Floating point remainder...
1474 MachineInstr *TheCall =
1475 BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
1476 std::vector<ValueRecord> Args;
1477 Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy));
1478 Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy));
1479 doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args);
1483 static const char *FnName[] =
1484 { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" };
1486 unsigned NameIdx = Ty->isUnsigned()*2 + isDiv;
1487 MachineInstr *TheCall =
1488 BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);
1490 std::vector<ValueRecord> Args;
1491 Args.push_back(ValueRecord(Op0Reg, Type::LongTy));
1492 Args.push_back(ValueRecord(Op1Reg, Type::LongTy));
1493 doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args);
1496 case cByte: case cShort: case cInt:
1497 break; // Small integrals, handled below...
1498 default: assert(0 && "Unknown class!");
1501 static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
1502 static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
1503 static const unsigned SarOpcode[]={ X86::SARir8, X86::SARir16, X86::SARir32 };
1504 static const unsigned ClrOpcode[]={ X86::MOVir8, X86::MOVir16, X86::MOVir32 };
1505 static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
1507 static const unsigned DivOpcode[][4] = {
1508 { X86::DIVr8 , X86::DIVr16 , X86::DIVr32 , 0 }, // Unsigned division
1509 { X86::IDIVr8, X86::IDIVr16, X86::IDIVr32, 0 }, // Signed division
1512 bool isSigned = Ty->isSigned();
1513 unsigned Reg = Regs[Class];
1514 unsigned ExtReg = ExtRegs[Class];
1516 // Put the first operand into one of the A registers...
1517 BMI(BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
1520 // Emit a sign extension instruction...
1521 unsigned ShiftResult = makeAnotherReg(Ty);
1522 BMI(BB, IP, SarOpcode[Class], 2, ShiftResult).addReg(Op0Reg).addZImm(31);
1523 BMI(BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
1525 // If unsigned, emit a zeroing instruction... (reg = 0)
1526 BMI(BB, IP, ClrOpcode[Class], 2, ExtReg).addZImm(0);
1529 // Emit the appropriate divide or remainder instruction...
1530 BMI(BB, IP, DivOpcode[isSigned][Class], 1).addReg(Op1Reg);
1532 // Figure out which register we want to pick the result out of...
1533 unsigned DestReg = isDiv ? Reg : ExtReg;
1535 // Put the result into the destination register...
1536 BMI(BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
1540 /// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here
1541 /// for constant immediate shift values, and for constant immediate
1542 /// shift values equal to 1. Even the general case is sort of special,
1543 /// because the shift amount has to be in CL, not just any old register.
1545 void ISel::visitShiftInst(ShiftInst &I) {
1546 MachineBasicBlock::iterator IP = BB->end ();
1547 emitShiftOperation (BB, IP, I.getOperand (0), I.getOperand (1),
1548 I.getOpcode () == Instruction::Shl, I.getType (),
1552 /// emitShiftOperation - Common code shared between visitShiftInst and
1553 /// constant expression support.
1554 void ISel::emitShiftOperation(MachineBasicBlock *MBB,
1555 MachineBasicBlock::iterator &IP,
1556 Value *Op, Value *ShiftAmount, bool isLeftShift,
1557 const Type *ResultTy, unsigned DestReg) {
1558 unsigned SrcReg = getReg (Op, MBB, IP);
1559 bool isSigned = ResultTy->isSigned ();
1560 unsigned Class = getClass (ResultTy);
1562 static const unsigned ConstantOperand[][4] = {
1563 { X86::SHRir8, X86::SHRir16, X86::SHRir32, X86::SHRDir32 }, // SHR
1564 { X86::SARir8, X86::SARir16, X86::SARir32, X86::SHRDir32 }, // SAR
1565 { X86::SHLir8, X86::SHLir16, X86::SHLir32, X86::SHLDir32 }, // SHL
1566 { X86::SHLir8, X86::SHLir16, X86::SHLir32, X86::SHLDir32 }, // SAL = SHL
1569 static const unsigned NonConstantOperand[][4] = {
1570 { X86::SHRrr8, X86::SHRrr16, X86::SHRrr32 }, // SHR
1571 { X86::SARrr8, X86::SARrr16, X86::SARrr32 }, // SAR
1572 { X86::SHLrr8, X86::SHLrr16, X86::SHLrr32 }, // SHL
1573 { X86::SHLrr8, X86::SHLrr16, X86::SHLrr32 }, // SAL = SHL
  // Longs, as usual, are handled specially...
  if (Class == cLong) {
    // If we have a constant shift, we can generate much more efficient code
    // than otherwise...
    //
    if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
      unsigned Amount = CUI->getValue();
      if (Amount < 32) {
        const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
        if (isLeftShift) {
          BMI(MBB, IP, Opc[3], 3,
              DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addZImm(Amount);
          BMI(MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addZImm(Amount);
        } else {
          BMI(MBB, IP, Opc[3], 3,
              DestReg).addReg(SrcReg).addReg(SrcReg+1).addZImm(Amount);
          BMI(MBB, IP, Opc[2], 2, DestReg+1).addReg(SrcReg+1).addZImm(Amount);
        }
      } else {                 // Shifting more than 32 bits
        Amount -= 32;
        if (isLeftShift) {
          BMI(MBB, IP, X86::SHLir32, 2,
              DestReg + 1).addReg(SrcReg).addZImm(Amount);
          BMI(MBB, IP, X86::MOVir32, 1,
              DestReg).addZImm(0);
        } else {
          unsigned Opcode = isSigned ? X86::SARir32 : X86::SHRir32;
          BMI(MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
          BMI(MBB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
        }
      }
    } else {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);

      if (!isLeftShift && isSigned) {
        // If this is a SHR of a Long, then we need to do funny sign extension
        // stuff.  TmpReg gets the value to use as the high-part if we are
        // shifting more than 32 bits.
        BMI(MBB, IP, X86::SARir32, 2, TmpReg).addReg(SrcReg).addZImm(31);
      } else {
        // Other shifts use a fixed zero value if the shift is more than 32
        // bits.
        BMI(MBB, IP, X86::MOVir32, 1, TmpReg).addZImm(0);
      }

      // Initialize CL with the shift amount...
      unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
      BMI(MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);

      unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
      unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
      if (isLeftShift) {
        // TmpReg2 = shld inHi, inLo
        BMI(MBB, IP, X86::SHLDrr32, 2, TmpReg2).addReg(SrcReg+1).addReg(SrcReg);
        // TmpReg3 = shl  inLo, CL
        BMI(MBB, IP, X86::SHLrr32, 1, TmpReg3).addReg(SrcReg);

        // Set the flags to indicate whether the shift was by more than 32 bits.
        BMI(MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);

        // DestHi = (>32) ? TmpReg3 : TmpReg2;
        BMI(MBB, IP, X86::CMOVNErr32, 2,
            DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
        // DestLo = (>32) ? TmpReg : TmpReg3;
        BMI(MBB, IP, X86::CMOVNErr32, 2,
            DestReg).addReg(TmpReg3).addReg(TmpReg);
      } else {
        // TmpReg2 = shrd inLo, inHi
        BMI(MBB, IP, X86::SHRDrr32, 2, TmpReg2).addReg(SrcReg).addReg(SrcReg+1);
        // TmpReg3 = s[ah]r  inHi, CL
        BMI(MBB, IP, isSigned ? X86::SARrr32 : X86::SHRrr32, 1, TmpReg3)
                       .addReg(SrcReg+1);

        // Set the flags to indicate whether the shift was by more than 32 bits.
        BMI(MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);

        // DestLo = (>32) ? TmpReg3 : TmpReg2;
        BMI(MBB, IP, X86::CMOVNErr32, 2,
            DestReg).addReg(TmpReg2).addReg(TmpReg3);

        // DestHi = (>32) ? TmpReg : TmpReg3;
        BMI(MBB, IP, X86::CMOVNErr32, 2,
            DestReg+1).addReg(TmpReg3).addReg(TmpReg);
      }
    }
    return;
  }

  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
    // The shift amount is constant, guaranteed to be a ubyte. Get its value.
    assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");

    const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
    BMI(MBB, IP, Opc[Class], 2,
        DestReg).addReg(SrcReg).addZImm(CUI->getValue());
  } else {                  // The shift amount is non-constant.
    unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
    BMI(MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);

    const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
    BMI(MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
  }
}
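// Worked example (illustrative only): a constant 64-bit left shift such as
// "shl long %X, ubyte 5" takes the Amount < 32 path above and emits roughly:
//     DestHi = shld SrcHi, SrcLo, 5    ; high word, with low-word bits shifted in
//     DestLo = shl  SrcLo, 5           ; low word
// A non-constant shift amount instead goes through CL, computing both the
// "< 32" and ">= 32" candidates and choosing between them with TEST/CMOVNE.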
/// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
/// instruction.  The load and store instructions are the only place where we
/// need to worry about the memory layout of the target machine.
///
void ISel::visitLoadInst(LoadInst &I) {
  unsigned SrcAddrReg = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  unsigned Class = getClassB(I.getType());

  if (Class == cLong) {
    addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), SrcAddrReg);
    addRegOffset(BuildMI(BB, X86::MOVmr32, 4, DestReg+1), SrcAddrReg, 4);
    return;
  }

  static const unsigned Opcodes[] = {
    X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, X86::FLDr32
  };
  unsigned Opcode = Opcodes[Class];
  if (I.getType() == Type::DoubleTy) Opcode = X86::FLDr64;
  addDirectMem(BuildMI(BB, Opcode, 4, DestReg), SrcAddrReg);
}
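// For example, "load long* %P" is selected into two 32-bit loads, from [P]
// and [P+4], while "load double* %P" becomes a single FLDr64.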
/// visitStoreInst - Implement LLVM store instructions in terms of the x86 'mov'
/// instruction.
///
void ISel::visitStoreInst(StoreInst &I) {
  unsigned ValReg     = getReg(I.getOperand(0));
  unsigned AddressReg = getReg(I.getOperand(1));

  const Type *ValTy = I.getOperand(0)->getType();
  unsigned Class = getClassB(ValTy);

  if (Class == cLong) {
    addDirectMem(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg).addReg(ValReg);
    addRegOffset(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg,4).addReg(ValReg+1);
    return;
  }

  static const unsigned Opcodes[] = {
    X86::MOVrm8, X86::MOVrm16, X86::MOVrm32, X86::FSTr32
  };
  unsigned Opcode = Opcodes[Class];
  if (ValTy == Type::DoubleTy) Opcode = X86::FSTr64;
  addDirectMem(BuildMI(BB, Opcode, 1+4), AddressReg).addReg(ValReg);
}
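// Symmetrically, "store long %V, long* %P" becomes two 32-bit stores (to [P]
// and [P+4]), and a double store uses FSTr64.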
/// visitCastInst - Here we have various kinds of copying with or without
/// sign extension going on.
void ISel::visitCastInst(CastInst &CI) {
  Value *Op = CI.getOperand(0);
  // If this is a cast from a 32-bit integer to a Long type, and the only uses
  // of the cast are GEP instructions, then the cast does not need to be
  // generated explicitly, it will be folded into the GEP.
  if (CI.getType() == Type::LongTy &&
      (Op->getType() == Type::IntTy || Op->getType() == Type::UIntTy)) {
    bool AllUsesAreGEPs = true;
    for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I)
      if (!isa<GetElementPtrInst>(*I)) {
        AllUsesAreGEPs = false;
        break;
      }

    // No need to codegen this cast if all users are getelementptr instrs...
    if (AllUsesAreGEPs) return;
  }

  unsigned DestReg = getReg(CI);
  MachineBasicBlock::iterator MI = BB->end();
  emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
}
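// For example, given "%L = cast int %i to long" whose only use is
// "getelementptr sbyte* %P, long %L", no code is emitted here at all;
// emitGEPOperation below looks through the cast and indexes directly with
// the original 32-bit value.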
/// emitCastOperation - Common code shared between visitCastInst and
/// constant expression cast support.
void ISel::emitCastOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator &IP,
                             Value *Src, const Type *DestTy,
                             unsigned DestReg) {
  unsigned SrcReg = getReg(Src, BB, IP);
  const Type *SrcTy = Src->getType();
  unsigned SrcClass = getClassB(SrcTy);
  unsigned DestClass = getClassB(DestTy);

  // Implement casts to bool by using compare on the operand followed by set if
  // not zero on the result.
  if (DestTy == Type::BoolTy) {
    switch (SrcClass) {
    case cByte:
      BMI(BB, IP, X86::TESTrr8, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cShort:
      BMI(BB, IP, X86::TESTrr16, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cInt:
      BMI(BB, IP, X86::TESTrr32, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cLong: {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);
      BMI(BB, IP, X86::ORrr32, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
      break;
    }
    case cFP:
      assert(0 && "FIXME: implement cast FP to bool");
    }

    // If the zero flag is not set, then the value is true, set the byte to
    // true.
    BMI(BB, IP, X86::SETNEr, 1, DestReg);
    return;
  }
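  // In other words, a cast to bool is selected as "test reg, reg" (or an OR
  // of the two halves for a long) followed by a SETNE into the destination
  // byte register.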
  static const unsigned RegRegMove[] = {
    X86::MOVrr8, X86::MOVrr16, X86::MOVrr32, X86::FpMOV, X86::MOVrr32
  };

  // Implement casts between values of the same type class (as determined by
  // getClass) by using a register-to-register move.
  if (SrcClass == DestClass) {
    if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
      BMI(BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
    } else if (SrcClass == cFP) {
      if (SrcTy == Type::FloatTy) {  // float -> double
        assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
        BMI(BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
      } else {                       // double -> float
        assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
               "Unknown cFP member!");
        // Truncate from double to float by storing to memory as a 32-bit
        // float, then reading it back.
        unsigned FltAlign = TM.getTargetData().getFloatAlignment();
        int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
        addFrameReference(BMI(BB, IP, X86::FSTr32, 5), FrameIdx).addReg(SrcReg);
        addFrameReference(BMI(BB, IP, X86::FLDr32, 5, DestReg), FrameIdx);
      }
    } else if (SrcClass == cLong) {
      BMI(BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
      BMI(BB, IP, X86::MOVrr32, 1, DestReg+1).addReg(SrcReg+1);
    } else {
      assert(0 && "Cannot handle this type of cast instruction!");
    }
    return;
  }
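  // Note that double -> float is the only same-class cast that needs a round
  // trip through memory: the value is rounded by storing it with FSTr32 into
  // a 4-byte stack slot and reloading it with FLDr32.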
  // Handle cast of SMALLER int to LARGER int using a move with sign extension
  // or zero extension, depending on whether the source type was signed.
  if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) &&
      SrcClass < DestClass) {
    bool isLong = DestClass == cLong;
    if (isLong) DestClass = cInt;

    static const unsigned Opc[][4] = {
      { X86::MOVSXr16r8, X86::MOVSXr32r8, X86::MOVSXr32r16, X86::MOVrr32 }, // s
      { X86::MOVZXr16r8, X86::MOVZXr32r8, X86::MOVZXr32r16, X86::MOVrr32 }  // u
    };

    bool isUnsigned = SrcTy->isUnsigned();
    BMI(BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
        DestReg).addReg(SrcReg);

    if (isLong) {  // Handle upper 32 bits as appropriate...
      if (isUnsigned)     // Zero out top bits...
        BMI(BB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
      else                // Sign extend bottom half...
        BMI(BB, IP, X86::SARir32, 2, DestReg+1).addReg(DestReg).addZImm(31);
    }
    return;
  }

  // Special case long -> int ...
  if (SrcClass == cLong && DestClass == cInt) {
    BMI(BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
    return;
  }

  // Handle cast of LARGER int to SMALLER int using a move to EAX followed by a
  // move out of AX or AL.
  if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
      && SrcClass > DestClass) {
    static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
    BMI(BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
    BMI(BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
    return;
  }
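  // For example, "cast int %X to ubyte" is selected as a 32-bit move of %X
  // into EAX followed by an 8-bit move of AL into the destination register.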
  // Handle casts from integer to floating point now...
  if (DestClass == cFP) {
    // Promote the integer to a type supported by FLD.  We do this because there
    // are no unsigned FLD instructions, so we must promote an unsigned value to
    // a larger signed value, then use FLD on the larger value.
    //
    const Type *PromoteType = 0;
    unsigned PromoteOpcode = 0;
    switch (SrcTy->getPrimitiveID()) {
    case Type::BoolTyID:
    case Type::SByteTyID:
      // We don't have the facilities for directly loading byte sized data from
      // memory (even signed).  Promote it to 16 bits.
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVSXr16r8;
      break;
    case Type::UByteTyID:
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVZXr16r8;
      break;
    case Type::UShortTyID:
      PromoteType = Type::IntTy;
      PromoteOpcode = X86::MOVZXr32r16;
      break;
    case Type::UIntTyID: {
      // Make a 64 bit temporary... and zero out the top of it...
      unsigned TmpReg = makeAnotherReg(Type::LongTy);
      BMI(BB, IP, X86::MOVrr32, 1, TmpReg).addReg(SrcReg);
      BMI(BB, IP, X86::MOVir32, 1, TmpReg+1).addZImm(0);
      SrcTy = Type::LongTy;
      SrcClass = cLong;
      SrcReg = TmpReg;
      break;
    }
    case Type::ULongTyID:
      assert(0 && "FIXME: not implemented: cast ulong X to fp type!");
    default:  // No promotion needed...
      break;
    }

    if (PromoteType) {
      unsigned TmpReg = makeAnotherReg(PromoteType);
      BMI(BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
      SrcTy = PromoteType;
      SrcClass = getClass(PromoteType);
      SrcReg = TmpReg;
    }

    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());

    if (SrcClass == cLong) {
      addFrameReference(BMI(BB, IP, X86::MOVrm32, 5), FrameIdx).addReg(SrcReg);
      addFrameReference(BMI(BB, IP, X86::MOVrm32, 5),
                        FrameIdx, 4).addReg(SrcReg+1);
    } else {
      static const unsigned Op1[] = { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32 };
      addFrameReference(BMI(BB, IP, Op1[SrcClass], 5), FrameIdx).addReg(SrcReg);
    }

    static const unsigned Op2[] =
      { 0/*byte*/, X86::FILDr16, X86::FILDr32, 0/*FP*/, X86::FILDr64 };
    addFrameReference(BMI(BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
    return;
  }
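  // For example, "cast int %X to double" spills %X to a 4-byte stack slot and
  // reloads it with FILDr32, while "cast uint %X to double" first widens %X
  // into a zero-extended 64-bit temporary so the (signed-only) FILDr64 sees a
  // non-negative value.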
  // Handle casts from floating point to integer now...
  if (SrcClass == cFP) {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    //
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BMI(BB, IP, X86::FNSTCWm16, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
    addFrameReference(BMI(BB, IP, X86::MOVmr8, 4, HighPartOfCW), CWFrameIdx, 1);

    // Set the high part to be round to zero...
    addFrameReference(BMI(BB, IP, X86::MOVim8, 5), CWFrameIdx, 1).addZImm(12);

    // Reload the modified control word now...
    addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BMI(BB, IP, X86::MOVrm8, 5),
                      CWFrameIdx, 1).addReg(HighPartOfCW);

    // We don't have the facilities for directly storing byte sized data to
    // memory.  Promote it to 16 bits.  We also must promote unsigned values to
    // larger classes because we only have signed FP stores.
    unsigned StoreClass = DestClass;
    const Type *StoreTy = DestTy;
    if (StoreClass == cByte || DestTy->isUnsigned())
      switch (StoreClass) {
      case cByte:  StoreTy = Type::ShortTy; StoreClass = cShort; break;
      case cShort: StoreTy = Type::IntTy;   StoreClass = cInt;   break;
      case cInt:   StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      // The following treatment of cLong may not be perfectly right,
      // but it survives chains of casts of the form
      // double->ulong->double.
      case cLong:  StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      default: assert(0 && "Unknown store class!");
      }

    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());

    static const unsigned Op1[] =
      { 0, X86::FISTr16, X86::FISTr32, 0, X86::FISTPr64 };
    addFrameReference(BMI(BB, IP, Op1[StoreClass], 5), FrameIdx).addReg(SrcReg);

    if (DestClass == cLong) {
      addFrameReference(BMI(BB, IP, X86::MOVmr32, 4, DestReg), FrameIdx);
      addFrameReference(BMI(BB, IP, X86::MOVmr32, 4, DestReg+1), FrameIdx, 4);
    } else {
      static const unsigned Op2[] = { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32 };
      addFrameReference(BMI(BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
    }

    // Reload the original control word now...
    addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
    return;
  }
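  // For example, "cast double %X to int" is therefore selected as: save the
  // FP control word, force round-toward-zero, FISTr32 into a stack slot,
  // reload the result with MOVmr32, and restore the original control word.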
  // Anything we haven't handled already, we can't (yet) handle at all.
  assert(0 && "Unhandled cast instruction!");
}
/// visitVANextInst - Implement the va_next instruction...
///
void ISel::visitVANextInst(VANextInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  unsigned Size;
  switch (I.getArgType()->getPrimitiveID()) {
  default:
    assert(0 && "Error: bad type for va_next instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    Size = 4;
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
  case Type::DoubleTyID:
    Size = 8;
    break;
  }

  // Increment the VAList pointer...
  BuildMI(BB, X86::ADDri32, 2, DestReg).addReg(VAList).addZImm(Size);
}
void ISel::visitVAArgInst(VAArgInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  switch (I.getType()->getPrimitiveID()) {
  default:
    assert(0 && "Error: bad type for va_arg instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), VAList);
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
    addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), VAList);
    addRegOffset(BuildMI(BB, X86::MOVmr32, 4, DestReg+1), VAList, 4);
    break;
  case Type::DoubleTyID:
    addDirectMem(BuildMI(BB, X86::FLDr64, 4, DestReg), VAList);
    break;
  }
}
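// For example, a va_arg of type double loads the eight bytes at the va_list
// pointer with FLDr64; the matching va_next then advances the pointer by the
// 4- or 8-byte size computed above.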
void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
  unsigned outputReg = getReg(I);
  MachineBasicBlock::iterator MI = BB->end();
  emitGEPOperation(BB, MI, I.getOperand(0),
                   I.op_begin()+1, I.op_end(), outputReg);
}
void ISel::emitGEPOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator &IP,
                            Value *Src, User::op_iterator IdxBegin,
                            User::op_iterator IdxEnd, unsigned TargetReg) {
  const TargetData &TD = TM.getTargetData();
  const Type *Ty = Src->getType();
  unsigned BaseReg = getReg(Src, MBB, IP);

  // GEPs have zero or more indices; we must perform a struct access
  // or array access for each one.
  for (GetElementPtrInst::op_iterator oi = IdxBegin,
         oe = IdxEnd; oi != oe; ++oi) {
    Value *idx = *oi;
    unsigned NextReg = BaseReg;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      // It's a struct access.  idx is the index into the structure,
      // which names the field. This index must have ubyte type.
      const ConstantUInt *CUI = cast<ConstantUInt>(idx);
      assert(CUI->getType() == Type::UByteTy
              && "Funny-looking structure index in GEP");
      // Use the TargetData structure to pick out what the layout of
      // the structure is in memory.  Since the structure index must
      // be constant, we can get its value and use it to find the
      // right byte offset from the StructLayout class's list of
      // structure member offsets.
      unsigned idxValue = CUI->getValue();
      unsigned FieldOff = TD.getStructLayout(StTy)->MemberOffsets[idxValue];
      if (FieldOff) {
        NextReg = makeAnotherReg(Type::UIntTy);
        // Emit an ADD to add FieldOff to the basePtr.
        BMI(MBB, IP, X86::ADDri32, 2,NextReg).addReg(BaseReg).addZImm(FieldOff);
      }
      // The next type is the member of the structure selected by the
      // index.
      Ty = StTy->getElementTypes()[idxValue];
    } else if (const SequentialType *SqTy = cast<SequentialType>(Ty)) {
      // It's an array or pointer access: [ArraySize x ElementType].

      // idx is the index into the array.  Unlike with structure
      // indices, we may not know its actual value at code-generation
      // time.
      assert(idx->getType() == Type::LongTy && "Bad GEP array index!");

      // Most GEP instructions use a [cast (int/uint) to LongTy] as their
      // operand on X86.  Handle this case directly now...
      if (CastInst *CI = dyn_cast<CastInst>(idx))
        if (CI->getOperand(0)->getType() == Type::IntTy ||
            CI->getOperand(0)->getType() == Type::UIntTy)
          idx = CI->getOperand(0);

      // We want to add BaseReg to (idxReg * sizeof ElementType).  First, we
      // must find the size of the pointed-to type (Not coincidentally, the next
      // type is the type of the elements in the array).
      Ty = SqTy->getElementType();
      unsigned elementSize = TD.getTypeSize(Ty);

      // If idxReg is a constant, we don't need to perform the multiply!
      if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
        if (!CSI->isNullValue()) {
          unsigned Offset = elementSize*CSI->getValue();
          NextReg = makeAnotherReg(Type::UIntTy);
          BMI(MBB, IP, X86::ADDri32, 2,NextReg).addReg(BaseReg).addZImm(Offset);
        }
      } else if (elementSize == 1) {
        // If the element size is 1, we don't have to multiply, just add
        unsigned idxReg = getReg(idx, MBB, IP);
        NextReg = makeAnotherReg(Type::UIntTy);
        BMI(MBB, IP, X86::ADDrr32, 2, NextReg).addReg(BaseReg).addReg(idxReg);
      } else {
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned OffsetReg = makeAnotherReg(Type::UIntTy);

        doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);

        // Emit an ADD to add OffsetReg to the basePtr.
        NextReg = makeAnotherReg(Type::UIntTy);
        BMI(MBB, IP, X86::ADDrr32, 2,NextReg).addReg(BaseReg).addReg(OffsetReg);
      }
    }

    // Now that we are here, further indices refer to subtypes of this
    // one, so we don't need to worry about BaseReg itself, anymore.
    BaseReg = NextReg;
  }

  // After we have processed all the indices, the result is left in
  // BaseReg.  Move it to the register where we were expected to
  // put the answer.  A 32-bit move should do it, because we are in
  // 32-bit (ILP32) mode.
  BMI(MBB, IP, X86::MOVrr32, 1, TargetReg).addReg(BaseReg);
}
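// Worked example (illustrative only): for
//   getelementptr {int, [10 x sbyte]}* %P, long 0, ubyte 1, long %i
// the loop above emits roughly:
//   R1 = add P, 4          ; the leading 0 index adds nothing; field 1 of the
//                          ; struct starts at byte offset 4
//   R2 = add R1, %i        ; sbyte elements have size 1, so no multiply
//   TargetReg = R2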
/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
void ISel::visitAllocaInst(AllocaInst &I) {
  // Find the data size of the alloca inst's getAllocatedType.
  const Type *Ty = I.getAllocatedType();
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);

  // If this is a fixed size alloca in the entry block for the function,
  // statically stack allocate the space.
  //
  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(I.getArraySize())) {
    if (I.getParent() == I.getParent()->getParent()->begin()) {
      TySize *= CUI->getValue();   // Get total allocated size...
      unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);

      // Create a new stack object using the frame manager...
      int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
      addFrameReference(BuildMI(BB, X86::LEAr32, 5, getReg(I)), FrameIdx);
      return;
    }
  }

  // Create a register to hold the temporary result of multiplying the type size
  // constant by the variable amount.
  unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
  unsigned SrcReg1 = getReg(I.getArraySize());

  // TotalSizeReg = mul <numelements>, <TypeSize>
  MachineBasicBlock::iterator MBBI = BB->end();
  doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);

  // AddedSize = add <TotalSizeReg>, 15
  unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ADDri32, 2, AddedSizeReg).addReg(TotalSizeReg).addZImm(15);

  // AlignedSize = and <AddedSize>, ~15
  unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ANDri32, 2, AlignedSize).addReg(AddedSizeReg).addZImm(~15);

  // Subtract size from stack pointer, thereby allocating some space.
  BuildMI(BB, X86::SUBrr32, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);

  // Put a pointer to the space into the result register, by copying
  // the stack pointer.
  BuildMI(BB, X86::MOVrr32, 1, getReg(I)).addReg(X86::ESP);

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  F->getFrameInfo()->CreateVariableSizedObject();
}
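// For example, a variable-sized "alloca int, uint %n" outside the entry block
// is selected roughly as:
//   TotalSize   = %n * 4
//   AlignedSize = (TotalSize + 15) & ~15     ; round up to a multiple of 16
//   ESP         = ESP - AlignedSize
//   Result      = ESP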
/// visitMallocInst - Malloc instructions are code generated into direct calls
/// to the library malloc.
///
void ISel::visitMallocInst(MallocInst &I) {
  unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
  unsigned Arg;

  if (ConstantUInt *C = dyn_cast<ConstantUInt>(I.getOperand(0))) {
    Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
  } else {
    Arg = makeAnotherReg(Type::UIntTy);
    unsigned Op0Reg = getReg(I.getOperand(0));
    MachineBasicBlock::iterator MBBI = BB->end();
    doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
  }

  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(Arg, Type::UIntTy));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("malloc", true);
  doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args);
}
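// For example, "malloc int, uint 10" folds the size computation down to the
// constant 40 and emits "call malloc" with that single argument; a
// non-constant element count is multiplied by the element size at run time.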
/// visitFreeInst - Free instructions are code gen'd to call the free libc
/// function.
///
void ISel::visitFreeInst(FreeInst &I) {
  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(I.getOperand(0)));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("free", true);
  doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}
/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peep-hole fashion.  The
/// generated code sucks but the implementation is nice and simple.
///
FunctionPass *llvm::createX86SimpleInstructionSelector(TargetMachine &TM) {
  return new ISel(TM);
}