//===-- X86ISelSimple.cpp - A simple instruction selector for x86 ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple peephole instruction selector for the x86 target.
//
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

namespace {
  Statistic<>
  NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
  /// TypeClass - Used by the X86 backend to group LLVM types by their basic
  /// X86 register class.
  enum TypeClass {
    cByte, cShort, cInt, cFP, cLong
  };
}

/// getClass - Turn a primitive type into a "class" number which is based on the
/// size of the type, and whether or not it is floating point.
///
static inline TypeClass getClass(const Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::SByteTyID:
  case Type::UByteTyID:   return cByte;      // Byte operands are class #0
  case Type::ShortTyID:
  case Type::UShortTyID:  return cShort;     // Short operands are class #1
  case Type::IntTyID:
  case Type::UIntTyID:
  case Type::PointerTyID: return cInt;       // Ints and pointers are class #2

  case Type::FloatTyID:
  case Type::DoubleTyID:  return cFP;        // Floating Point is #3

  case Type::LongTyID:
  case Type::ULongTyID:   return cLong;      // Longs are class #4
  default:
    assert(0 && "Invalid type to getClass!");
    return cByte;  // not reached
  }
}
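
// Example mapping: sbyte/ubyte -> cByte (1 byte), short/ushort -> cShort
// (2 bytes), int/uint and pointers -> cInt (4 bytes), float/double -> cFP
// (x87 stack), long/ulong -> cLong (a pair of 32-bit registers).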

// getClassB - Just like getClass, but treat boolean values as bytes.
static inline TypeClass getClassB(const Type *Ty) {
  if (Ty == Type::BoolTy) return cByte;
  return getClass(Ty);
}

namespace {
  struct X86ISel : public FunctionPass, InstVisitor<X86ISel> {
    TargetMachine &TM;
    MachineFunction *F;                 // The function we are compiling into
    MachineBasicBlock *BB;              // The current MBB we are compiling
    int VarArgsFrameIndex;              // FrameIndex for start of varargs area
    int ReturnAddressIndex;             // FrameIndex for the return address

    std::map<Value*, unsigned> RegMap;  // Mapping between Vals and SSA Regs

    // MBBMap - Mapping between LLVM BB -> Machine BB
    std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;

    // AllocaMap - Mapping from fixed sized alloca instructions to the
    // FrameIndex for the alloca.
    std::map<AllocaInst*, unsigned> AllocaMap;

    X86ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}

    /// runOnFunction - Top level implementation of instruction selection for
    /// the entire function.
    ///
    bool runOnFunction(Function &Fn) {
      // Lazily create a stack slot for the return address if needed.
      ReturnAddressIndex = 0;

      // First pass over the function, lower any unknown intrinsic functions
      // with the IntrinsicLowering class.
      LowerUnknownIntrinsicFunctionCalls(Fn);

      F = &MachineFunction::construct(&Fn, TM);

      // Create all of the machine basic blocks for the function...
      for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
        F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));

      BB = &F->front();

      // Copy incoming arguments off of the stack...
      LoadArgumentsToVirtualRegs(Fn);

      // If this is main, emit special code.
      if (Fn.hasExternalLinkage() && Fn.getName() == "main")
        EmitSpecialCodeForMain();

      // Instruction select everything except PHI nodes.
      visit(Fn);

      // Select the PHI nodes.
      SelectPHINodes();

      // Insert the FP_REG_KILL instructions into blocks that need them.
      InsertFPRegKills();

      RegMap.clear();
      MBBMap.clear();
      AllocaMap.clear();
      F = 0;
      // We always build a machine code representation for the function.
      return true;
    }

    virtual const char *getPassName() const {
      return "X86 Simple Instruction Selection";
    }

    /// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
    /// the main function.
    void EmitSpecialCodeForMain();

    /// visitBasicBlock - This method is called when we are visiting a new basic
    /// block.  It makes the MachineBasicBlock that was created for the LLVM
    /// block in runOnFunction the current block that code is emitted into.
    /// Subsequent visit* calls will then be invoked for all instructions in
    /// the basic block.
    ///
    void visitBasicBlock(BasicBlock &LLVM_BB) {
      BB = MBBMap[&LLVM_BB];
    }

    /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
    /// function, lowering any calls to unknown intrinsic functions into the
    /// equivalent LLVM code.
    ///
    void LowerUnknownIntrinsicFunctionCalls(Function &F);

    /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
    /// from the stack into virtual registers.
    ///
    void LoadArgumentsToVirtualRegs(Function &F);

    /// SelectPHINodes - Insert machine code to generate phis.  This is tricky
    /// because we have to generate our sources into the source basic blocks,
    /// not the current one.
    ///
    void SelectPHINodes();

    /// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks
    /// that need them.  This only occurs due to the floating point stackifier
    /// not being aggressive enough to handle arbitrary global stackification.
    ///
    void InsertFPRegKills();

    // Visitation methods for various instructions.  These methods simply emit
    // fixed X86 code for each instruction.
    //

    // Control flow operators
    void visitReturnInst(ReturnInst &RI);
    void visitBranchInst(BranchInst &BI);
    void visitUnreachableInst(UnreachableInst &UI) {}

    struct ValueRecord {
      Value *Val;
      unsigned Reg;
      const Type *Ty;
      ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
      ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
    };

    void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                const std::vector<ValueRecord> &Args);
    void visitCallInst(CallInst &I);
    void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I);

    // Arithmetic operators
    void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
    void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
    void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
    void visitMul(BinaryOperator &B);

    void visitDiv(BinaryOperator &B) { visitDivRem(B); }
    void visitRem(BinaryOperator &B) { visitDivRem(B); }
    void visitDivRem(BinaryOperator &B);

    // Bitwise operators
    void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); }
    void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
    void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }

    // Comparison operators...
    void visitSetCondInst(SetCondInst &I);
    unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                            MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator MBBI);
    void visitSelectInst(SelectInst &SI);
224 // Memory Instructions
225 void visitLoadInst(LoadInst &I);
226 void visitStoreInst(StoreInst &I);
227 void visitGetElementPtrInst(GetElementPtrInst &I);
228 void visitAllocaInst(AllocaInst &I);
229 void visitMallocInst(MallocInst &I);
230 void visitFreeInst(FreeInst &I);
233 void visitShiftInst(ShiftInst &I);
234 void visitPHINode(PHINode &I) {} // PHI nodes handled by second pass
235 void visitCastInst(CastInst &I);
236 void visitVAArgInst(VAArgInst &I);
238 void visitInstruction(Instruction &I) {
239 std::cerr << "Cannot instruction select: " << I;

    /// promote32 - Make a value 32-bits wide, and put it somewhere.
    ///
    void promote32(unsigned targetReg, const ValueRecord &VR);

    /// getAddressingMode - Get the addressing mode to use to address the
    /// specified value.  The returned value should be used with addFullAddress.
    void getAddressingMode(Value *Addr, X86AddressMode &AM);

    /// getGEPIndex - This is used to fold GEP instructions into X86 addressing
    /// modes.
    void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                     std::vector<Value*> &GEPOps,
                     std::vector<const Type*> &GEPTypes,
                     X86AddressMode &AM);

    /// isGEPFoldable - Return true if the specified GEP can be completely
    /// folded into the addressing mode of a load/store or lea instruction.
    bool isGEPFoldable(MachineBasicBlock *MBB,
                       Value *Src, User::op_iterator IdxBegin,
                       User::op_iterator IdxEnd, X86AddressMode &AM);

    /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
    /// constant expression GEP support.
    ///
    void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                          Value *Src, User::op_iterator IdxBegin,
                          User::op_iterator IdxEnd, unsigned TargetReg);

    /// emitCastOperation - Common code shared between visitCastInst and
    /// constant expression cast support.
    ///
    void emitCastOperation(MachineBasicBlock *BB,MachineBasicBlock::iterator IP,
                           Value *Src, const Type *DestTy, unsigned TargetReg);

    /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
    /// and constant expression support.
    ///
    void emitSimpleBinaryOperation(MachineBasicBlock *BB,
                                   MachineBasicBlock::iterator IP,
                                   Value *Op0, Value *Op1,
                                   unsigned OperatorClass, unsigned TargetReg);

    /// emitBinaryFPOperation - This method handles emission of floating point
    /// Add (0), Sub (1), Mul (2), and Div (3) operations.
    void emitBinaryFPOperation(MachineBasicBlock *BB,
                               MachineBasicBlock::iterator IP,
                               Value *Op0, Value *Op1,
                               unsigned OperatorClass, unsigned TargetReg);

    void emitMultiply(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                      Value *Op0, Value *Op1, unsigned TargetReg);

    void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                    unsigned DestReg, const Type *DestTy,
                    unsigned Op0Reg, unsigned Op1Reg);
    void doMultiplyConst(MachineBasicBlock *MBB,
                         MachineBasicBlock::iterator MBBI,
                         unsigned DestReg, const Type *DestTy,
                         unsigned Op0Reg, unsigned Op1Val);

    void emitDivRemOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator IP,
                             Value *Op0, Value *Op1, bool isDiv,
                             unsigned TargetReg);

    /// emitSetCCOperation - Common code shared between visitSetCondInst and
    /// constant expression support.
    ///
    void emitSetCCOperation(MachineBasicBlock *BB,
                            MachineBasicBlock::iterator IP,
                            Value *Op0, Value *Op1, unsigned Opcode,
                            unsigned TargetReg);

    /// emitShiftOperation - Common code shared between visitShiftInst and
    /// constant expression support.
    ///
    void emitShiftOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator IP,
                            Value *Op, Value *ShiftAmount, bool isLeftShift,
                            const Type *ResultTy, unsigned DestReg);

    // Emit code for a 'SHLD DestReg, Op0, Op1, Amt' operation, where Amt is a
    // constant.
    void doSHLDConst(MachineBasicBlock *MBB,
                     MachineBasicBlock::iterator MBBI,
                     unsigned DestReg, unsigned Op0Reg, unsigned Op1Reg,
                     unsigned Amt);

    /// emitSelectOperation - Common code shared between visitSelectInst and the
    /// constant expression support.
    void emitSelectOperation(MachineBasicBlock *MBB,
                             MachineBasicBlock::iterator IP,
                             Value *Cond, Value *TrueVal, Value *FalseVal,
                             unsigned DestReg);

    /// copyConstantToRegister - Output the instructions required to put the
    /// specified constant into the specified register.
    ///
    void copyConstantToRegister(MachineBasicBlock *MBB,
                                MachineBasicBlock::iterator MBBI,
                                Constant *C, unsigned Reg);

    void emitUCOMr(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                   unsigned LHS, unsigned RHS);

    /// makeAnotherReg - This method returns the next register number we haven't
    /// yet used.
    ///
    /// Long values are handled somewhat specially.  They are always allocated
    /// as pairs of 32 bit integer values.  The register number returned is the
    /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits
    /// of the long value.
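    ///
    /// For example (an illustration of the convention, not code from this
    /// file):
    ///   unsigned Lo = makeAnotherReg(Type::LongTy);  // names bits 0-31
    ///   unsigned Hi = Lo + 1;                        // names bits 32-63
    ///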
    unsigned makeAnotherReg(const Type *Ty) {
      assert(dynamic_cast<const X86RegisterInfo*>(TM.getRegisterInfo()) &&
             "Current target doesn't have X86 reg info??");
      const X86RegisterInfo *MRI =
        static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
      if (Ty == Type::LongTy || Ty == Type::ULongTy) {
        const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
        // Create the lower part.
        F->getSSARegMap()->createVirtualRegister(RC);
        // Create the upper part.
        return F->getSSARegMap()->createVirtualRegister(RC)-1;
      }

      // Add the mapping of regnumber => reg class to MachineFunction
      const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
      return F->getSSARegMap()->createVirtualRegister(RC);
    }

    /// getReg - This method turns an LLVM value into a register number.
    ///
    unsigned getReg(Value &V) { return getReg(&V); }  // Allow references
    unsigned getReg(Value *V) {
      // Just append to the end of the current bb.
      MachineBasicBlock::iterator It = BB->end();
      return getReg(V, BB, It);
    }
    unsigned getReg(Value *V, MachineBasicBlock *MBB,
                    MachineBasicBlock::iterator IPt);

    /// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
    /// that is to be statically allocated with the initial stack frame
    /// adjustment.
    unsigned getFixedSizedAllocaFI(AllocaInst *AI);
  };
}

/// dyn_castFixedAlloca - If the specified value is a fixed size alloca
/// instruction in the entry block, return it.  Otherwise, return a null
/// pointer.
static AllocaInst *dyn_castFixedAlloca(Value *V) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    BasicBlock *BB = AI->getParent();
    if (isa<ConstantUInt>(AI->getArraySize()) &&
        BB == &BB->getParent()->front())
      return AI;
  }
  return 0;
}

/// getReg - This method turns an LLVM value into a register number.
///
unsigned X86ISel::getReg(Value *V, MachineBasicBlock *MBB,
                         MachineBasicBlock::iterator IPt) {
  // If this operand is a constant, emit the code to copy the constant into
  // the register here...
  if (Constant *C = dyn_cast<Constant>(V)) {
    unsigned Reg = makeAnotherReg(V->getType());
    copyConstantToRegister(MBB, IPt, C, Reg);
    return Reg;
  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
    // Do not emit noop casts at all, unless it's a double -> float cast.
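    // (A double -> float cast must round and therefore emits code; any other
    // cast whose operand is already in the same register class is a pure
    // no-op at this level, so we simply reuse the operand's register.)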
    if (getClassB(CI->getType()) == getClassB(CI->getOperand(0)->getType()) &&
        (CI->getType() != Type::FloatTy ||
         CI->getOperand(0)->getType() != Type::DoubleTy))
      return getReg(CI->getOperand(0), MBB, IPt);
  } else if (AllocaInst *AI = dyn_castFixedAlloca(V)) {
    // If the alloca address couldn't be folded into the instruction
    // addressing, emit an explicit LEA as appropriate.
    unsigned Reg = makeAnotherReg(V->getType());
    unsigned FI = getFixedSizedAllocaFI(AI);
    addFrameReference(BuildMI(*MBB, IPt, X86::LEA32r, 4, Reg), FI);
    return Reg;
  }

  unsigned &Reg = RegMap[V];
  if (Reg == 0) {
    Reg = makeAnotherReg(V->getType());
    RegMap[V] = Reg;
  }

  return Reg;
}

/// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
/// that is to be statically allocated with the initial stack frame
/// adjustment.
unsigned X86ISel::getFixedSizedAllocaFI(AllocaInst *AI) {
  // Already computed this?
  std::map<AllocaInst*, unsigned>::iterator I = AllocaMap.lower_bound(AI);
  if (I != AllocaMap.end() && I->first == AI) return I->second;

  const Type *Ty = AI->getAllocatedType();
  ConstantUInt *CUI = cast<ConstantUInt>(AI->getArraySize());
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);
  TySize *= CUI->getValue();   // Get total allocated size...
  unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);
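  // For example, 'alloca int, uint 10' gives TySize = 4 * 10 = 40 bytes with
  // 4-byte alignment (illustrative numbers; the real values come from
  // TargetData).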

  // Create a new stack object using the frame manager...
  int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
  AllocaMap.insert(I, std::make_pair(AI, FrameIdx));
  return FrameIdx;
}

/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
void X86ISel::copyConstantToRegister(MachineBasicBlock *MBB,
                                     MachineBasicBlock::iterator IP,
                                     Constant *C, unsigned R) {
  if (isa<UndefValue>(C)) {
    switch (getClassB(C->getType())) {
    case cFP:
      // FIXME: SHOULD TEACH STACKIFIER ABOUT UNDEF VALUES!
      BuildMI(*MBB, IP, X86::FLD0, 0, R);
      return;
    case cLong:
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, R+1);
      // FALL THROUGH
    default:
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, R);
      return;
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    unsigned Class = 0;
    switch (CE->getOpcode()) {
    case Instruction::GetElementPtr:
      emitGEPOperation(MBB, IP, CE->getOperand(0),
                       CE->op_begin()+1, CE->op_end(), R);
      return;
    case Instruction::Cast:
      emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R);
      return;

    case Instruction::Xor: ++Class;  // FALL THROUGH
    case Instruction::Or:  ++Class;  // FALL THROUGH
    case Instruction::And: ++Class;  // FALL THROUGH
    case Instruction::Sub: ++Class;  // FALL THROUGH
    case Instruction::Add:
      emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                                Class, R);
      return;

    case Instruction::Mul:
      emitMultiply(MBB, IP, CE->getOperand(0), CE->getOperand(1), R);
      return;

    case Instruction::Div:
    case Instruction::Rem:
      emitDivRemOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                          CE->getOpcode() == Instruction::Div, R);
      return;

    case Instruction::SetNE:
    case Instruction::SetEQ:
    case Instruction::SetLT:
    case Instruction::SetGT:
    case Instruction::SetLE:
    case Instruction::SetGE:
      emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode(), R);
      return;

    case Instruction::Shl:
    case Instruction::Shr:
      emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode() == Instruction::Shl, CE->getType(), R);
      return;

    case Instruction::Select:
      emitSelectOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                          CE->getOperand(2), R);
      return;

    default:
      std::cerr << "Offending expr: " << *C << "\n";
      assert(0 && "Constant expression not yet handled!\n");
    }
  }

  if (C->getType()->isIntegral()) {
    unsigned Class = getClassB(C->getType());

    if (Class == cLong) {
      // Copy the value into the register pair.
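      // For example, 0x0000000100000002 puts 2 (the low word) into R and
      // 1 (the high word) into R+1.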
      uint64_t Val = cast<ConstantInt>(C)->getRawValue();
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(Val & 0xFFFFFFFF);
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R+1).addImm(Val >> 32);
      return;
    }

    assert(Class <= cInt && "Type not handled yet!");

    static const unsigned IntegralOpcodeTab[] = {
      X86::MOV8ri, X86::MOV16ri, X86::MOV32ri
    };

    if (C->getType() == Type::BoolTy) {
      BuildMI(*MBB, IP, X86::MOV8ri, 1, R).addImm(C == ConstantBool::True);
    } else {
      ConstantInt *CI = cast<ConstantInt>(C);
      BuildMI(*MBB, IP, IntegralOpcodeTab[Class],1,R).addImm(CI->getRawValue());
    }
  } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->isExactlyValue(+0.0))
      BuildMI(*MBB, IP, X86::FLD0, 0, R);
    else if (CFP->isExactlyValue(+1.0))
      BuildMI(*MBB, IP, X86::FLD1, 0, R);
    else if (CFP->isExactlyValue(-0.0)) {
      unsigned Tmp = makeAnotherReg(Type::DoubleTy);
      BuildMI(*MBB, IP, X86::FLD0, 0, Tmp);
      BuildMI(*MBB, IP, X86::FCHS, 1, R).addReg(Tmp);
    } else if (CFP->isExactlyValue(-1.0)) {
      unsigned Tmp = makeAnotherReg(Type::DoubleTy);
      BuildMI(*MBB, IP, X86::FLD1, 0, Tmp);
      BuildMI(*MBB, IP, X86::FCHS, 1, R).addReg(Tmp);
    } else {  // FIXME: PI, other native values
      // FIXME: 2*PI -> LDPI + FADD

      // Otherwise we need to spill the constant to memory.
      MachineConstantPool *CP = F->getConstantPool();

      const Type *Ty = CFP->getType();

      // If a FP immediate is precise when represented as a float, we put it
      // into the constant pool as a float, even if it is statically typed as
      // a double.
      if (Ty == Type::DoubleTy)
        if (CFP->isExactlyValue((float)CFP->getValue())) {
          Ty = Type::FloatTy;
          CFP = cast<ConstantFP>(ConstantExpr::getCast(CFP, Ty));
        }

      unsigned CPI = CP->getConstantPoolIndex(CFP);

      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
             "Unknown FP type!");
      unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLD32m : X86::FLD64m;
      addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 4, R), CPI);
    }
  } else if (isa<ConstantPointerNull>(C)) {
    // Copy zero (null pointer) to the register.
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(0);
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addGlobalAddress(GV);
  } else {
    std::cerr << "Offending constant: " << *C << "\n";
    assert(0 && "Type not handled yet!");
  }
}

/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
/// the stack into virtual registers.
///
void X86ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
  // Emit instructions to load the arguments...  On entry to a function on the
  // X86, the stack frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
  //    ...
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  MachineFrameInfo *MFI = F->getFrameInfo();

  for (Function::arg_iterator I = Fn.arg_begin(), E = Fn.arg_end();
       I != E; ++I) {
    bool ArgLive = !I->use_empty();
    unsigned Reg = ArgLive ? getReg(*I) : 0;
    int FI;   // Frame object index

    switch (getClassB(I->getType())) {
    case cByte:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(1, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Reg), FI);
      }
      break;
    case cShort:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(2, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Reg), FI);
      }
      break;
    case cInt:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(4, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
      }
      break;
    case cLong:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(8, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg+1), FI, 4);
      }
      ArgOffset += 4;   // longs require 4 additional bytes
      break;
    case cFP:
      if (ArgLive) {
        unsigned Opcode;
        if (I->getType() == Type::FloatTy) {
          Opcode = X86::FLD32m;
          FI = MFI->CreateFixedObject(4, ArgOffset);
        } else {
          Opcode = X86::FLD64m;
          FI = MFI->CreateFixedObject(8, ArgOffset);
        }
        addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
      }
      if (I->getType() == Type::DoubleTy)
        ArgOffset += 4;   // doubles require 4 additional bytes
      break;
    default:
      assert(0 && "Unhandled argument type!");
    }
    ArgOffset += 4;  // Each argument takes at least 4 bytes on the stack...
  }
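
  // For example, for f(int, long, double) the loop above assigns the int to
  // offset 0, the long to offsets 4 and 8, and the 8-byte double to offset 12.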

  // If the function takes variable number of arguments, add a frame offset for
  // the start of the first vararg value... this is used to expand
  // llvm.va_start.
  if (Fn.getFunctionType()->isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);

  // Finally, inform the compiler what our live-outs will be, aka, what we will
  // be returning in registers.
  if (Fn.getReturnType() != Type::VoidTy)
    switch (getClassB(Fn.getReturnType())) {
    default: assert(0 && "Unknown type!");
    case cByte:
    case cShort:
    case cInt:
      F->addLiveOut(X86::EAX);
      break;
    case cLong:
      F->addLiveOut(X86::EAX);
      F->addLiveOut(X86::EDX);
      break;
    case cFP:
      F->addLiveOut(X86::ST0);
      break;
    }
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86ISel::EmitSpecialCodeForMain() {
  // Switch the FPU to 64-bit precision mode for better compatibility and
  // speed.
  int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
  addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);

  // Set the high part to be 64-bit precision.
  addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
                    CWFrameIdx, 1).addImm(2);

  // Reload the modified control word now.
  addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
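
  // (Storing 2 into the control word's high byte sets the precision-control
  // field, bits 9:8, to 10b, the IEEE double setting with a 53-bit
  // significand, so in-register x87 arithmetic rounds the same way 64-bit
  // doubles in memory do.)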
}

/// SelectPHINodes - Insert machine code to generate phis.  This is tricky
/// because we have to generate our sources into the source basic blocks, not
/// the current one.
///
void X86ISel::SelectPHINodes() {
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const Function &LF = *F->getFunction();  // The LLVM function...
  for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
    const BasicBlock *BB = I;
    MachineBasicBlock &MBB = *MBBMap[I];

    // Loop over all of the PHI nodes in the LLVM basic block...
    MachineBasicBlock::iterator PHIInsertPoint = MBB.begin();
    for (BasicBlock::const_iterator I = BB->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I));

      // Create a new machine instr PHI node, and insert it.
      unsigned PHIReg = getReg(*PN);
      MachineInstr *PhiMI = BuildMI(MBB, PHIInsertPoint,
                                    X86::PHI, PN->getNumOperands(), PHIReg);

      MachineInstr *LongPhiMI = 0;
      if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy)
        LongPhiMI = BuildMI(MBB, PHIInsertPoint,
                            X86::PHI, PN->getNumOperands(), PHIReg+1);

      // PHIValues - Map of blocks to incoming virtual registers.  We use this
      // so that we only initialize one incoming value for a particular block,
      // even if the block has multiple entries in the PHI node.
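      // (A predecessor can appear more than once when, for example, both
      // targets of a conditional branch go to this same block.)
      //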
      std::map<MachineBasicBlock*, unsigned> PHIValues;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)];
        unsigned ValReg;
        std::map<MachineBasicBlock*, unsigned>::iterator EntryIt =
          PHIValues.lower_bound(PredMBB);

        if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) {
          // We already inserted an initialization of the register for this
          // predecessor.  Recycle it.
          ValReg = EntryIt->second;
        } else {
          // Get the incoming value into a virtual register.
          //
          Value *Val = PN->getIncomingValue(i);

          // If this is a constant or GlobalValue, we may have to insert code
          // into the basic block to compute it into a virtual register.
          if (isa<Constant>(Val) && !isa<ConstantExpr>(Val)) {
            // Simple constants get emitted at the end of the basic block,
            // before any terminator instructions.  We "know" that the code to
            // move a constant into a register will never clobber any flags.
            ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator());
          } else {
            // Because we don't want to clobber any values which might be in
            // physical registers with the computation of this constant (which
            // might be arbitrarily complex if it is a constant expression),
            // just insert the computation at the top of the basic block.
            MachineBasicBlock::iterator PI = PredMBB->begin();

            // Skip over any PHI nodes though!
            while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
              ++PI;

            ValReg = getReg(Val, PredMBB, PI);
          }

          // Remember that we inserted a value for this PHI for this
          // predecessor.
          PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg));
        }

        PhiMI->addRegOperand(ValReg);
        PhiMI->addMachineBasicBlockOperand(PredMBB);
        if (LongPhiMI) {
          LongPhiMI->addRegOperand(ValReg+1);
          LongPhiMI->addMachineBasicBlockOperand(PredMBB);
        }
      }

      // Now that we emitted all of the incoming values for the PHI node, make
      // sure to reposition the InsertPoint after the PHI that we just added.
      // This is needed because we might have inserted a constant into this
      // block, right after the PHI's which is before the old insert point!
      PHIInsertPoint = LongPhiMI ? LongPhiMI : PhiMI;
      ++PHIInsertPoint;
    }
  }
}

/// RequiresFPRegKill - The floating point stackifier pass cannot insert
/// compensation code on critical edges.  As such, it requires that we kill all
/// FP registers on the exit from any blocks that either ARE critical edges, or
/// branch to a block that has incoming critical edges.
///
/// Note that this kill instruction will eventually be eliminated when
/// restrictions in the stackifier are relaxed.
///
static bool RequiresFPRegKill(const MachineBasicBlock *MBB) {
#if 0
  const BasicBlock *BB = MBB->getBasicBlock();
  for (succ_const_iterator SI = succ_begin(BB), E = succ_end(BB); SI!=E; ++SI) {
    const BasicBlock *Succ = *SI;
    pred_const_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
    ++PI;  // Blocks have at least one predecessor.
    if (PI != PE) {  // If it has exactly one, this isn't a critical edge.
      // If this block has more than one predecessor, check all of the
      // predecessors to see if they have multiple successors.  If so, then the
      // block we are analyzing needs an FPRegKill.
      for (PI = pred_begin(Succ); PI != PE; ++PI) {
        const BasicBlock *Pred = *PI;
        succ_const_iterator SI2 = succ_begin(Pred);
        ++SI2;  // There must be at least one successor of this block.
        if (SI2 != succ_end(Pred))
          return true;   // Yes, we must insert the kill on this edge.
      }
    }
  }
  // If we got this far, there is no need to insert the kill instruction.
  return false;
#else
  return true;
#endif
}

// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks that
// need them.  This only occurs due to the floating point stackifier not being
// aggressive enough to handle arbitrary global stackification.
//
// Currently we insert an FP_REG_KILL instruction into each block that uses or
// defines a floating point virtual register.
//
// When the global register allocators (like linear scan) finally update live
// variable analysis, we can keep floating point values in registers across
// portions of the CFG that do not involve critical edges.  This will be a big
// win, but we are waiting on the global allocators before we can do this.
//
// With a bit of work, the floating point stackifier pass can be enhanced to
// break critical edges as needed (to make a place to put compensation code),
// but this will require some infrastructure improvements as well.
//
void X86ISel::InsertFPRegKills() {
  SSARegMap &RegMap = *F->getSSARegMap();

  for (MachineFunction::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I!=E; ++I)
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = I->getOperand(i);
        if (MO.isRegister() && MO.getReg()) {
          unsigned Reg = MO.getReg();
          if (MRegisterInfo::isVirtualRegister(Reg)) {
            unsigned RegSize = RegMap.getRegClass(Reg)->getSize();
            if (RegSize == 10 || RegSize == 8)
              goto UsesFPReg;
          }
        }
      }
    // If we haven't found an FP register use or def in this basic block, check
    // to see if any of our successors has an FP PHI node, which will cause a
    // copy to be inserted into this block.
    for (MachineBasicBlock::const_succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      MachineBasicBlock *SBB = *SI;
      for (MachineBasicBlock::iterator I = SBB->begin();
           I != SBB->end() && I->getOpcode() == X86::PHI; ++I) {
        const TargetRegisterClass *RC =
          RegMap.getRegClass(I->getOperand(0).getReg());
        if (RC->getSize() == 10 || RC->getSize() == 8)
          goto UsesFPReg;
      }
    }
    continue;
  UsesFPReg:
    // Okay, this block uses an FP register.  If the block has successors (ie,
    // it's not an unwind/return), insert the FP_REG_KILL instruction.
    if (BB->succ_size() && RequiresFPRegKill(BB)) {
      BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
      ++NumFPKill;
    }
  }
}

void X86ISel::getAddressingMode(Value *Addr, X86AddressMode &AM) {
  AM.BaseType = X86AddressMode::RegBase;
  AM.Base.Reg = 0; AM.Scale = 1; AM.IndexReg = 0; AM.Disp = 0;
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
    if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
                      AM))
      return;
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (CE->getOpcode() == Instruction::GetElementPtr)
      if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
                        AM))
        return;
  } else if (AllocaInst *AI = dyn_castFixedAlloca(Addr)) {
    AM.BaseType = X86AddressMode::FrameIndexBase;
    AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
    return;
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    AM.GV = GV;
    return;
  }

  // If it's not foldable, reset addr mode.
  AM.BaseType = X86AddressMode::RegBase;
  AM.Base.Reg = getReg(Addr);
  AM.Scale = 1; AM.IndexReg = 0; AM.Disp = 0;
}

// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold
// it into the conditional branch or select instruction which is the only user
// of the cc instruction.  This is the case if the conditional branch is the
// only user of the setcc.  We also don't handle long arguments below, so we
// reject them here as well.
//
static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) {
  if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
    if (SCI->hasOneUse()) {
      Instruction *User = cast<Instruction>(SCI->use_back());
      if ((isa<BranchInst>(User) || isa<SelectInst>(User)) &&
          (getClassB(SCI->getOperand(0)->getType()) != cLong ||
           SCI->getOpcode() == Instruction::SetEQ ||
           SCI->getOpcode() == Instruction::SetNE) &&
          (isa<BranchInst>(User) || User->getOperand(0) == V))
        return SCI;
    }
  return 0;
}

// Return a fixed numbering for setcc instructions which does not depend on the
// order of the opcodes.
//
static unsigned getSetCCNumber(unsigned Opcode) {
  switch (Opcode) {
  default: assert(0 && "Unknown setcc instruction!");
  case Instruction::SetEQ: return 0;
  case Instruction::SetNE: return 1;
  case Instruction::SetLT: return 2;
  case Instruction::SetGE: return 3;
  case Instruction::SetGT: return 4;
  case Instruction::SetLE: return 5;
  }
}

// LLVM  -> X86 signed  X86 unsigned
// -----    ----------  ------------
// seteq -> sete        sete
// setne -> setne       setne
// setlt -> setl        setb
// setge -> setge       setae
// setgt -> setg        seta
// setle -> setle       setbe
// ----
//          sets        // Used by comparison with 0 optimization
//          setns
static const unsigned SetCCOpcodeTab[2][8] = {
  { X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAEr, X86::SETAr, X86::SETBEr,
    0, 0 },
  { X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGEr, X86::SETGr, X86::SETLEr,
    X86::SETSr, X86::SETNSr },
};
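
// The table is indexed as SetCCOpcodeTab[isSigned][OpNum].  Entries 6 and 7
// (SETSr/SETNSr, signed row only) are used when EmitComparison rewrites a
// signed compare against zero into a sign test: x < 0 iff SF is set, and
// x >= 0 iff SF is clear.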

/// emitUCOMr - In the future when we support processors before the P6, this
/// wraps the logic for emitting an FUCOMr vs FUCOMIr.
void X86ISel::emitUCOMr(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                        unsigned LHS, unsigned RHS) {
  if (0) {  // for processors prior to the P6
    BuildMI(*MBB, IP, X86::FUCOMr, 2).addReg(LHS).addReg(RHS);
    BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
    BuildMI(*MBB, IP, X86::SAHF, 1);
  } else {
    BuildMI(*MBB, IP, X86::FUCOMIr, 2).addReg(LHS).addReg(RHS);
  }
}

// EmitComparison - This function emits a comparison of the two operands,
// returning the extended setcc code to use.
unsigned X86ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                                 MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator IP) {
  // The arguments are already supposed to be of the same type.
  const Type *CompTy = Op0->getType();
  unsigned Class = getClassB(CompTy);

  // Special case handling of: cmp R, i
  if (isa<ConstantPointerNull>(Op1)) {
    unsigned Op0r = getReg(Op0, MBB, IP);
    if (OpNum < 2)    // seteq/setne -> test
      BuildMI(*MBB, IP, X86::TEST32rr, 2).addReg(Op0r).addReg(Op0r);
    else
      BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r).addImm(0);
    return OpNum;

  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    if (Class == cByte || Class == cShort || Class == cInt) {
      unsigned Op1v = CI->getRawValue();

      // Mask off any upper bits of the constant, if there are any...
      Op1v &= (1ULL << (8 << Class)) - 1;
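      // ((8 << Class) is the operand width in bits: 8 for cByte, 16 for
      // cShort, 32 for cInt.  For cShort, for instance, the mask is
      // (1 << 16) - 1 == 0xFFFF.)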

      // If this is a comparison against zero, emit more efficient code.  We
      // can't handle unsigned comparisons against zero unless they are == or
      // !=.  These should have been strength reduced already anyway.
      if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {

        // If this is a comparison against zero and the LHS is an and of a
        // register with a constant, use the test to do the and.
        if (Instruction *Op0I = dyn_cast<Instruction>(Op0))
          if (Op0I->getOpcode() == Instruction::And && Op0->hasOneUse() &&
              isa<ConstantInt>(Op0I->getOperand(1))) {
            static const unsigned TESTTab[] = {
              X86::TEST8ri, X86::TEST16ri, X86::TEST32ri
            };

            // Emit test X, i
            unsigned LHS = getReg(Op0I->getOperand(0), MBB, IP);
            unsigned Imm =
              cast<ConstantInt>(Op0I->getOperand(1))->getRawValue();
            BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(LHS).addImm(Imm);

            if (OpNum == 2) return 6;   // Map jl -> js
            if (OpNum == 3) return 7;   // Map jge -> jns
            return OpNum;
          }

        unsigned Op0r = getReg(Op0, MBB, IP);
        static const unsigned TESTTab[] = {
          X86::TEST8rr, X86::TEST16rr, X86::TEST32rr
        };
        BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);

        if (OpNum == 2) return 6;   // Map jl -> js
        if (OpNum == 3) return 7;   // Map jge -> jns
        return OpNum;
      }

      static const unsigned CMPTab[] = {
        X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
      };

      unsigned Op0r = getReg(Op0, MBB, IP);
      BuildMI(*MBB, IP, CMPTab[Class], 2).addReg(Op0r).addImm(Op1v);
      return OpNum;
    } else {
      unsigned Op0r = getReg(Op0, MBB, IP);
      assert(Class == cLong && "Unknown integer class!");
      unsigned LowCst = CI->getRawValue();
      unsigned HiCst = CI->getRawValue() >> 32;
      if (OpNum < 2) {    // seteq, setne
        unsigned LoTmp = Op0r;
        if (LowCst != 0) {
          LoTmp = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::XOR32ri, 2, LoTmp).addReg(Op0r).addImm(LowCst);
        }
        unsigned HiTmp = Op0r+1;
        if (HiCst != 0) {
          HiTmp = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::XOR32ri, 2,HiTmp).addReg(Op0r+1).addImm(HiCst);
        }
        unsigned FinalTmp = makeAnotherReg(Type::IntTy);
        BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
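        // ((lo ^ LowCst) | (hi ^ HiCst) is zero exactly when both halves
        // match, so the caller's sete/setne reads the ZF this OR just set.)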
        return OpNum;
      } else {
        // Emit a sequence of code which compares the high and low parts once
        // each, then uses a conditional move to handle the overflow case.  For
        // example, a setlt for long would generate code like this:
        //
        //   AL = lo(op1) < lo(op2)   // Always unsigned comparison
        //   BL = hi(op1) < hi(op2)   // Signedness depends on operands
        //   dest = hi(op1) == hi(op2) ? BL : AL;
        //

        // FIXME: This would be much better if we had hierarchical register
        // classes!  Until then, hardcode registers so that we can deal with
        // their aliases (because we don't have conditional byte moves).
        //
        BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r).addImm(LowCst);
        BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
        BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r+1).addImm(HiCst);
        BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0,X86::BL);
        BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
        BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
        BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
                                                     .addReg(X86::AX);
        // NOTE: visitSetCondInst knows that the value is dumped into the BL
        // register at this point for long values...
        return OpNum;
      }
    }
  }

  unsigned Op0r = getReg(Op0, MBB, IP);

  // Special case handling of comparison against +/- 0.0
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op1))
    if (CFP->isExactlyValue(+0.0) || CFP->isExactlyValue(-0.0)) {
      BuildMI(*MBB, IP, X86::FTST, 1).addReg(Op0r);
      BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
      BuildMI(*MBB, IP, X86::SAHF, 1);
      return OpNum;
    }

  unsigned Op1r = getReg(Op1, MBB, IP);
  switch (Class) {
  default: assert(0 && "Unknown type class!");
    // Emit: cmp <var1>, <var2> (do the comparison).  We can
    // compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
    // 32-bit.
  case cByte:
    BuildMI(*MBB, IP, X86::CMP8rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cShort:
    BuildMI(*MBB, IP, X86::CMP16rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cInt:
    BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cFP:
    emitUCOMr(MBB, IP, Op0r, Op1r);
    break;

  case cLong:
    if (OpNum < 2) {    // seteq, setne
      unsigned LoTmp = makeAnotherReg(Type::IntTy);
      unsigned HiTmp = makeAnotherReg(Type::IntTy);
      unsigned FinalTmp = makeAnotherReg(Type::IntTy);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, LoTmp).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
      break;  // Allow the sete or setne to be generated from flags set by OR
    } else {
      // Emit a sequence of code which compares the high and low parts once
      // each, then uses a conditional move to handle the overflow case.  For
      // example, a setlt for long would generate code like this:
      //
      //   AL = lo(op1) < lo(op2)   // Always unsigned comparison
      //   BL = hi(op1) < hi(op2)   // Signedness depends on operands
      //   dest = hi(op1) == hi(op2) ? BL : AL;
      //

      // FIXME: This would be much better if we had hierarchical register
      // classes!  Until then, hardcode registers so that we can deal with
      // their aliases (because we don't have conditional byte moves).
      //
      BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
      BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
      BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
                                                   .addReg(X86::AX);
      // NOTE: visitSetCondInst knows that the value is dumped into the BL
      // register at this point for long values...
      break;
    }
  }
  return OpNum;
}

/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
/// register, then move it to wherever the result should be.
///
void X86ISel::visitSetCondInst(SetCondInst &I) {
  if (canFoldSetCCIntoBranchOrSelect(&I))
    return;  // Fold this into a branch or select.

  unsigned DestReg = getReg(I);
  MachineBasicBlock::iterator MII = BB->end();
  emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),
                     DestReg);
}

/// emitSetCCOperation - Common code shared between visitSetCondInst and
/// constant expression support.
///
void X86ISel::emitSetCCOperation(MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator IP,
                                 Value *Op0, Value *Op1, unsigned Opcode,
                                 unsigned TargetReg) {
  unsigned OpNum = getSetCCNumber(Opcode);
  OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP);

  const Type *CompTy = Op0->getType();
  unsigned CompClass = getClassB(CompTy);
  bool isSigned = CompTy->isSigned() && CompClass != cFP;

  if (CompClass != cLong || OpNum < 2) {
    // Handle normal comparisons with a setcc instruction...
    BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
  } else {
    // Handle long comparisons by copying the value which is already in BL into
    // the register we want...
    BuildMI(*MBB, IP, X86::MOV8rr, 1, TargetReg).addReg(X86::BL);
  }
}

void X86ISel::visitSelectInst(SelectInst &SI) {
  unsigned DestReg = getReg(SI);
  MachineBasicBlock::iterator MII = BB->end();
  emitSelectOperation(BB, MII, SI.getCondition(), SI.getTrueValue(),
                      SI.getFalseValue(), DestReg);
}

/// emitSelectOperation - Common code shared between visitSelectInst and the
/// constant expression support.
void X86ISel::emitSelectOperation(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator IP,
                                  Value *Cond, Value *TrueVal, Value *FalseVal,
                                  unsigned DestReg) {
  unsigned SelectClass = getClassB(TrueVal->getType());

  // We don't support 8-bit conditional moves.  If we have incoming constants,
  // transform them into 16-bit constants to avoid having a run-time
  // conversion.
  if (SelectClass == cByte) {
    if (Constant *T = dyn_cast<Constant>(TrueVal))
      TrueVal = ConstantExpr::getCast(T, Type::ShortTy);
    if (Constant *F = dyn_cast<Constant>(FalseVal))
      FalseVal = ConstantExpr::getCast(F, Type::ShortTy);
  }

  unsigned TrueReg = getReg(TrueVal, MBB, IP);
  unsigned FalseReg = getReg(FalseVal, MBB, IP);
  if (TrueReg == FalseReg) {
    static const unsigned Opcode[] = {
      X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
    };
    BuildMI(*MBB, IP, Opcode[SelectClass], 1, DestReg).addReg(TrueReg);
    if (SelectClass == cLong)
      BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(TrueReg+1);
    return;
  }

  unsigned Opcode;
  if (SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(Cond)) {
    // We successfully folded the setcc into the select instruction.

    unsigned OpNum = getSetCCNumber(SCI->getOpcode());
    OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), MBB,
                           IP);

    const Type *CompTy = SCI->getOperand(0)->getType();
    bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

    // LLVM  -> X86 signed  X86 unsigned
    // -----    ----------  ------------
    // seteq -> cmovNE      cmovNE
    // setne -> cmovE       cmovE
    // setlt -> cmovGE      cmovAE
    // setge -> cmovL       cmovB
    // setgt -> cmovLE      cmovBE
    // setle -> cmovG       cmovA
    // ----
    //          cmovNS      // Used by comparison with 0 optimization
    //          cmovS

    switch (SelectClass) {
    default: assert(0 && "Unknown value class!");
    case cFP: {
      // Annoyingly, we don't have a full set of floating point conditional
      // moves.  :(
      static const unsigned OpcodeTab[2][8] = {
        { X86::FCMOVNE, X86::FCMOVE, X86::FCMOVAE, X86::FCMOVB,
          X86::FCMOVBE, X86::FCMOVA, 0, 0 },
        { X86::FCMOVNE, X86::FCMOVE, 0, 0, 0, 0, 0, 0 },
      };
      Opcode = OpcodeTab[isSigned][OpNum];

      // If opcode == 0, we hit a case that we don't support.  Output a setcc
      // and compare the result against zero.
      if (Opcode == 0) {
        unsigned CompClass = getClassB(CompTy);
        unsigned CondReg;
        if (CompClass != cLong || OpNum < 2) {
          CondReg = makeAnotherReg(Type::BoolTy);
          // Handle normal comparisons with a setcc instruction...
          BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, CondReg);
        } else {
          // Long comparisons end up in the BL register.
          CondReg = X86::BL;
        }

        BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
        Opcode = X86::FCMOVE;
      }
      break;
    }
    case cByte:
    case cShort: {
      static const unsigned OpcodeTab[2][8] = {
        { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVAE16rr, X86::CMOVB16rr,
          X86::CMOVBE16rr, X86::CMOVA16rr, 0, 0 },
        { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVGE16rr, X86::CMOVL16rr,
          X86::CMOVLE16rr, X86::CMOVG16rr, X86::CMOVNS16rr, X86::CMOVS16rr },
      };
      Opcode = OpcodeTab[isSigned][OpNum];
      break;
    }
    case cInt:
    case cLong: {
      static const unsigned OpcodeTab[2][8] = {
        { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVAE32rr, X86::CMOVB32rr,
          X86::CMOVBE32rr, X86::CMOVA32rr, 0, 0 },
        { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVGE32rr, X86::CMOVL32rr,
          X86::CMOVLE32rr, X86::CMOVG32rr, X86::CMOVNS32rr, X86::CMOVS32rr },
      };
      Opcode = OpcodeTab[isSigned][OpNum];
      break;
    }
    }
  } else {
    // Get the value being branched on, and use it to set the condition codes.
    unsigned CondReg = getReg(Cond, MBB, IP);
    BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
    switch (SelectClass) {
    default: assert(0 && "Unknown value class!");
    case cFP:    Opcode = X86::FCMOVE;     break;
    case cByte:
    case cShort: Opcode = X86::CMOVE16rr;  break;
    case cInt:
    case cLong:  Opcode = X86::CMOVE32rr;  break;
    }
  }

  unsigned RealDestReg = DestReg;

  // Annoyingly enough, X86 doesn't HAVE 8-bit conditional moves.  Because of
  // this, we have to promote the incoming values to 16 bits, perform a 16-bit
  // cmove, then truncate the result.
  if (SelectClass == cByte) {
    DestReg = makeAnotherReg(Type::ShortTy);
    if (getClassB(TrueVal->getType()) == cByte) {
      // Promote the true value, by storing it into AL, and reading from AX.
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::AL).addReg(TrueReg);
      BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::AH).addImm(0);
      TrueReg = makeAnotherReg(Type::ShortTy);
      BuildMI(*MBB, IP, X86::MOV16rr, 1, TrueReg).addReg(X86::AX);
    }
    if (getClassB(FalseVal->getType()) == cByte) {
      // Promote the false value, by storing it into CL, and reading from CX.
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(FalseReg);
      BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::CH).addImm(0);
      FalseReg = makeAnotherReg(Type::ShortTy);
      BuildMI(*MBB, IP, X86::MOV16rr, 1, FalseReg).addReg(X86::CX);
    }
  }

  BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(TrueReg).addReg(FalseReg);

  switch (SelectClass) {
  case cByte:
    // We did the computation with 16-bit registers.  Truncate back to our
    // result by copying into AX then copying out AL.
    BuildMI(*MBB, IP, X86::MOV16rr, 1, X86::AX).addReg(DestReg);
    BuildMI(*MBB, IP, X86::MOV8rr, 1, RealDestReg).addReg(X86::AL);
    break;
  case cLong:
    // Move the upper half of the value as well.
    BuildMI(*MBB, IP, Opcode, 2,DestReg+1).addReg(TrueReg+1).addReg(FalseReg+1);
    break;
  }
}

/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
/// operand, in the specified target register.
///
void X86ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
  bool isUnsigned = VR.Ty->isUnsigned() || VR.Ty == Type::BoolTy;

  Value *Val = VR.Val;
  const Type *Ty = VR.Ty;
  if (Val) {
    if (Constant *C = dyn_cast<Constant>(Val)) {
      Val = ConstantExpr::getCast(C, Type::IntTy);
      Ty = Type::IntTy;
    }

    // If this is a simple constant, just emit a MOVri directly to avoid the
    // copy.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      int TheVal = CI->getRawValue() & 0xFFFFFFFF;
      BuildMI(BB, X86::MOV32ri, 1, targetReg).addImm(TheVal);
      return;
    }
  }

  // Make sure we have the register number for this value...
  unsigned Reg = Val ? getReg(Val) : VR.Reg;

  switch (getClassB(Ty)) {
  case cByte:
    // Extend value into target register (8->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr8, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr8, 1, targetReg).addReg(Reg);
    break;
  case cShort:
    // Extend value into target register (16->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr16, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr16, 1, targetReg).addReg(Reg);
    break;
  case cInt:
    // Move value into target register (32->32)
    BuildMI(BB, X86::MOV32rr, 1, targetReg).addReg(Reg);
    break;
  default:
    assert(0 && "Unpromotable operand class in promote32");
  }
}

/// 'ret' instruction - Here we are interested in meeting the x86 ABI.  As such,
/// we have the following possibilities:
///
///   ret void: No return value, simply emit a 'ret' instruction
///   ret sbyte, ubyte : Extend value into EAX and return
///   ret short, ushort: Extend value into EAX and return
///   ret int, uint    : Move value into EAX and return
///   ret pointer      : Move value into EAX and return
///   ret long, ulong  : Move value into EAX/EDX and return
///   ret float/double : Top of FP stack
///
void X86ISel::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    BuildMI(BB, X86::RET, 0);  // Just emit a 'ret' instruction
    return;
  }

  Value *RetVal = I.getOperand(0);
  switch (getClassB(RetVal->getType())) {
  case cByte:   // integral return values: extend or move into EAX and return
  case cShort:
  case cInt:
    promote32(X86::EAX, ValueRecord(RetVal));
    break;
  case cFP: {                   // Floats & Doubles: Return in ST(0)
    unsigned RetReg = getReg(RetVal);
    BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg);
    break;
  }
  case cLong: {
    unsigned RetReg = getReg(RetVal);
    BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(RetReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RetReg+1);
    break;
  }
  default:
    visitInstruction(I);
  }
  // Emit a 'ret' instruction
  BuildMI(BB, X86::RET, 0);
}

// getBlockAfter - Return the basic block which occurs lexically after the
// specified one.
static inline BasicBlock *getBlockAfter(BasicBlock *BB) {
  Function::iterator I = BB; ++I;  // Get iterator to next block
  return I != BB->getParent()->end() ? &*I : 0;
}

/// visitBranchInst - Handle conditional and unconditional branches here.  Note
/// that since code layout is frozen at this point, if we are trying to jump
/// to a block that is the immediate successor of the current block, we can
/// just make a fall-through (but we don't currently).
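///
/// For example, 'br bool %cond, label %T, label %F' where %F is the lexically
/// next block emits a TEST of %cond plus a single JNE to %T, letting the
/// false edge fall through to %F.
///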
void X86ISel::visitBranchInst(BranchInst &BI) {
  // Update machine-CFG edges
  BB->addSuccessor(MBBMap[BI.getSuccessor(0)]);
  if (BI.isConditional())
    BB->addSuccessor(MBBMap[BI.getSuccessor(1)]);

  BasicBlock *NextBB = getBlockAfter(BI.getParent());  // BB after current one

  if (!BI.isConditional()) {  // Unconditional branch?
    if (BI.getSuccessor(0) != NextBB)
      BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
    return;
  }

  // See if we can fold the setcc into the branch itself...
  SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(BI.getCondition());
  if (SCI == 0) {
    // Nope, cannot fold setcc into this branch.  Emit a branch on a condition
    // computed some other way...
    unsigned condReg = getReg(BI.getCondition());
    BuildMI(BB, X86::TEST8rr, 2).addReg(condReg).addReg(condReg);
    if (BI.getSuccessor(1) == NextBB) {
      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JNE, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
    } else {
      BuildMI(BB, X86::JE, 1).addMBB(MBBMap[BI.getSuccessor(1)]);

      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
    }
    return;
  }

  unsigned OpNum = getSetCCNumber(SCI->getOpcode());
  MachineBasicBlock::iterator MII = BB->end();
  OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB,MII);

  const Type *CompTy = SCI->getOperand(0)->getType();
  bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

  // LLVM  -> X86 signed  X86 unsigned
  // -----    ----------  ------------
  // seteq -> je          je
  // setne -> jne         jne
  // setlt -> jl          jb
  // setge -> jge         jae
  // setgt -> jg          ja
  // setle -> jle         jbe
  // ----
  //          js          // Used by comparison with 0 optimization
  //          jns

  static const unsigned OpcodeTab[2][8] = {
    { X86::JE, X86::JNE, X86::JB, X86::JAE, X86::JA, X86::JBE, 0, 0 },
    { X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE,
      X86::JS, X86::JNS },
  };

  if (BI.getSuccessor(0) != NextBB) {
    BuildMI(BB, OpcodeTab[isSigned][OpNum], 1)
      .addMBB(MBBMap[BI.getSuccessor(0)]);
    if (BI.getSuccessor(1) != NextBB)
      BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(1)]);
  } else {
    // Change to the inverse condition...
    if (BI.getSuccessor(1) != NextBB) {
      OpNum ^= 1;   // e.g. je <-> jne, jl <-> jge
      BuildMI(BB, OpcodeTab[isSigned][OpNum], 1)
        .addMBB(MBBMap[BI.getSuccessor(1)]);
    }
  }
}

/// doCall - This emits an abstract call instruction, setting up the arguments
/// and the return value as appropriate.  For the actual function call itself,
/// it inserts the specified CallMI instruction into the stream.
///
void X86ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                     const std::vector<ValueRecord> &Args) {
  // Count how many bytes are to be pushed on the stack...
  unsigned NumBytes = 0;

  if (!Args.empty()) {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getClassB(Args[i].Ty)) {
      case cByte: case cShort: case cInt:
        NumBytes += 4; break;
      case cLong:
        NumBytes += 8; break;
      case cFP:
        NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8;
        break;
      default: assert(0 && "Unknown class!");
      }

    // Adjust the stack pointer for the new arguments...
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(NumBytes);

    // Arguments go on the stack in reverse order, as specified by the ABI.
    unsigned ArgOffset = 0;
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      unsigned ArgReg;
      switch (getClassB(Args[i].Ty)) {
      case cByte:
        if (Args[i].Val && isa<ConstantBool>(Args[i].Val)) {
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
            .addImm(Args[i].Val == ConstantBool::True);
          break;
        }
        // FALL THROUGH
      case cShort:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          // Zero/Sign extend constant, then stuff into memory.
          ConstantInt *Val = cast<ConstantInt>(Args[i].Val);
          Val = cast<ConstantInt>(ConstantExpr::getCast(Val, Type::IntTy));
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
            .addImm(Val->getRawValue() & 0xFFFFFFFF);
        } else {
          // Promote arg to 32 bits wide into a temporary register...
          ArgReg = makeAnotherReg(Type::UIntTy);
          promote32(ArgReg, Args[i]);
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        }
        break;
      case cInt:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          unsigned Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(Val);
        } else if (Args[i].Val && isa<ConstantPointerNull>(Args[i].Val)) {
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(0);
        } else if (Args[i].Val && isa<GlobalValue>(Args[i].Val)) {
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
            .addGlobalAddress(cast<GlobalValue>(Args[i].Val));
        } else {
          ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        }
        break;
      case cLong:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          uint64_t Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(Val & ~0U);
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset+4).addImm(Val >> 32ULL);
        } else {
          ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset+4).addReg(ArgReg+1);
        }
        ArgOffset += 4;   // 8 byte entry, not 4.
        break;

      case cFP:
        if (ConstantFP *CFP = dyn_cast_or_null<ConstantFP>(Args[i].Val)) {
          // Store constant FP values with integer instructions to avoid having
          // to load the constants from the constant pool then do a store.
          if (CFP->getType() == Type::FloatTy) {
            union {
              unsigned I;
              float    F;
            } V;
            V.F = CFP->getValue();
            addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                         X86::ESP, ArgOffset).addImm(V.I);
          } else {
            union {
              uint64_t I;
              double   F;
            } V;
            V.F = CFP->getValue();
            addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                         X86::ESP, ArgOffset).addImm((unsigned)V.I);
            addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                         X86::ESP, ArgOffset+4).addImm(unsigned(V.I >> 32));
1672 ArgOffset += 4; // 8 byte entry, not 4.
1675 ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
1676 if (Args[i].Ty == Type::FloatTy) {
1677 addRegOffset(BuildMI(BB, X86::FST32m, 5),
1678 X86::ESP, ArgOffset).addReg(ArgReg);
1680 assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
1681 addRegOffset(BuildMI(BB, X86::FST64m, 5),
1682 X86::ESP, ArgOffset).addReg(ArgReg);
1683 ArgOffset += 4; // 8 byte entry, not 4.
1688 default: assert(0 && "Unknown class!");
1693 BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(0);
1694 }
1696 BB->push_back(CallMI);
1698 BuildMI(BB, X86::ADJCALLSTACKUP, 2).addImm(NumBytes).addImm(0);
1700 // If there is a return value, scavenge the result from the location the call
1703 if (Ret.Ty != Type::VoidTy) {
1704 unsigned DestClass = getClassB(Ret.Ty);
1705 switch (DestClass) {
1706 case cByte:
1707 case cShort:
1708 case cInt: {
1709 // Integral results are in %eax, or the appropriate portion
1710 // thereof.
1711 static const unsigned regRegMove[] = {
1712 X86::MOV8rr, X86::MOV16rr, X86::MOV32rr
1713 };
1714 static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
1715 BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
1716 break;
1717 }
1718 case cFP: // Floating-point return values live in %ST(0)
1719 BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
1720 break;
1721 case cLong: // Long values are left in EDX:EAX
1722 BuildMI(BB, X86::MOV32rr, 1, Ret.Reg).addReg(X86::EAX);
1723 BuildMI(BB, X86::MOV32rr, 1, Ret.Reg+1).addReg(X86::EDX);
1724 break;
1725 default: assert(0 && "Unknown class!");
1726 }
1727 }
1728 }
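// Note: in this selector a 64-bit value occupies two consecutive virtual
// registers, with Reg holding the low 32 bits and Reg+1 the high 32 bits.
// That convention is why the cLong case above copies EAX into Ret.Reg and
// EDX into Ret.Reg+1.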
1731 /// visitCallInst - Push args on stack and do a procedure call instruction.
1732 void X86ISel::visitCallInst(CallInst &CI) {
1733 MachineInstr *TheCall;
1734 if (Function *F = CI.getCalledFunction()) {
1735 // Is it an intrinsic function call?
1736 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) {
1737 visitIntrinsicCall(ID, CI); // Special intrinsics are not handled here
1738 return;
1739 } else if (F->getName() == "fabs" || F->getName() == "fabsf") {
1740 if (CI.getNumOperands() == 2 && // Basic sanity checks.
1741 CI.getOperand(1)->getType()->isFloatingPoint() &&
1742 CI.getType() == CI.getOperand(1)->getType()) {
1743 unsigned op1Reg = getReg(CI.getOperand(1));
1744 unsigned DestReg = getReg(CI);
1745 BuildMI(BB, X86::FABS, 1, DestReg).addReg(op1Reg);
1746 return;
1747 }
1748 } else if ((F->getName() == "sin" && UnsafeFPMath) || F->getName() == "sinf") {
1749 if (CI.getNumOperands() == 2 && // Basic sanity checks.
1750 CI.getOperand(1)->getType()->isFloatingPoint() &&
1751 CI.getType() == CI.getOperand(1)->getType()) {
1752 unsigned op1Reg = getReg(CI.getOperand(1));
1753 unsigned DestReg = getReg(CI);
1754 BuildMI(BB, X86::FSIN, 1, DestReg).addReg(op1Reg);
1755 return;
1756 }
1757 }
1758 else if ((F->getName() == "cos" && UnsafeFPMath) || F->getName() == "cosf") {
1759 if (CI.getNumOperands() == 2 && // Basic sanity checks.
1760 CI.getOperand(1)->getType()->isFloatingPoint() &&
1761 CI.getType() == CI.getOperand(1)->getType()) {
1762 unsigned op1Reg = getReg(CI.getOperand(1));
1763 unsigned DestReg = getReg(CI);
1764 BuildMI(BB, X86::FCOS, 1, DestReg).addReg(op1Reg);
1765 return;
1766 }
1767 }
1769 // Emit a CALL instruction with PC-relative displacement.
1770 TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
1771 } else { // Emit an indirect call...
1772 unsigned Reg = getReg(CI.getCalledValue());
1773 TheCall = BuildMI(X86::CALL32r, 1).addReg(Reg);
1774 }
1776 std::vector<ValueRecord> Args;
1777 for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
1778 Args.push_back(ValueRecord(CI.getOperand(i)));
1780 unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
1781 doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
1782 }
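// A note on the folds above: FABS is safe unconditionally, but FSIN/FCOS are
// gated on the UnsafeFPMath target option because the x87 transcendental
// instructions are not bit-exact libm replacements (among other things they
// are only defined over a limited argument range).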
1784 /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
1785 /// function, lowering any calls to unknown intrinsic functions into the
1786 /// equivalent LLVM code.
1788 void X86ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
1789 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
1790 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
1791 if (CallInst *CI = dyn_cast<CallInst>(I++))
1792 if (Function *F = CI->getCalledFunction())
1793 switch (F->getIntrinsicID()) {
1794 case Intrinsic::not_intrinsic:
1795 case Intrinsic::vastart:
1796 case Intrinsic::vacopy:
1797 case Intrinsic::vaend:
1798 case Intrinsic::returnaddress:
1799 case Intrinsic::frameaddress:
1800 case Intrinsic::memcpy:
1801 case Intrinsic::memset:
1802 case Intrinsic::isunordered:
1803 case Intrinsic::sqrt:
1804 case Intrinsic::readport:
1805 case Intrinsic::writeport:
1806 // We directly implement these intrinsics
1807 break;
1808 case Intrinsic::readio: {
1809 // On X86, memory operations are in-order. Lower this intrinsic
1810 // into a volatile load.
1811 LoadInst *LI = new LoadInst(CI->getOperand(1), "", true, CI);
1812 CI->replaceAllUsesWith(LI);
1813 BB->getInstList().erase(CI);
1814 break;
1815 }
1816 case Intrinsic::writeio: {
1817 // On X86, memory operations are in-order. Lower this intrinsic
1818 // into a volatile store.
1819 StoreInst *SI = new StoreInst(CI->getOperand(1),
1820 CI->getOperand(2), true, CI);
1821 CI->replaceAllUsesWith(SI);
1822 BB->getInstList().erase(CI);
1823 break;
1824 }
1825 default:
1826 // All other intrinsic calls we must lower.
1827 Instruction *Before = CI->getPrev();
1828 TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
1829 if (Before) { // Move iterator to instruction after call
1830 I = Before; ++I;
1831 } else {
1832 I = BB->begin();
1833 }
1834 }
1835 }
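// For example, a call to llvm.readio is rewritten above into a plain volatile
// LoadInst, which the selector already knows how to compile; any intrinsic
// without a case of its own is handed to IntrinsicLowering, which expands the
// call into equivalent LLVM code (typically a libcall) before selection.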
1837 void X86ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
1838 unsigned TmpReg1, TmpReg2;
1839 switch (ID) {
1840 case Intrinsic::vastart:
1841 //FIXME: store to first arg, don't return
1842 // Get the address of the first vararg value...
1843 TmpReg1 = getReg(CI);
1844 addFrameReference(BuildMI(BB, X86::LEA32r, 5, TmpReg1), VarArgsFrameIndex);
1845 return;
1847 case Intrinsic::vacopy:
1848 //FIXME: copy val of second into first (which is a ptr)
1849 TmpReg1 = getReg(CI);
1850 TmpReg2 = getReg(CI.getOperand(1));
1851 BuildMI(BB, X86::MOV32rr, 1, TmpReg1).addReg(TmpReg2);
1852 return;
1853 case Intrinsic::vaend: return; // Noop on X86
1855 case Intrinsic::returnaddress:
1856 case Intrinsic::frameaddress:
1857 TmpReg1 = getReg(CI);
1858 if (cast<Constant>(CI.getOperand(1))->isNullValue()) {
1859 if (ReturnAddressIndex == 0) {
1860 // Set up a frame object for the return address.
1861 ReturnAddressIndex = F->getFrameInfo()->CreateFixedObject(4, -4);
1862 }
1864 if (ID == Intrinsic::returnaddress) {
1865 // Just load the return address
1866 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, TmpReg1),
1867 ReturnAddressIndex);
1868 } else {
1869 addFrameReference(BuildMI(BB, X86::LEA32r, 4, TmpReg1),
1870 ReturnAddressIndex, -4);
1871 }
1872 } else {
1873 // Values other than zero are not implemented yet.
1874 BuildMI(BB, X86::MOV32ri, 1, TmpReg1).addImm(0);
1875 }
1876 return;
1878 case Intrinsic::isunordered:
1879 TmpReg1 = getReg(CI.getOperand(1));
1880 TmpReg2 = getReg(CI.getOperand(2));
1881 emitUCOMr(BB, BB->end(), TmpReg2, TmpReg1);
1882 TmpReg2 = getReg(CI);
1883 BuildMI(BB, X86::SETPr, 0, TmpReg2);
1884 return;
1886 case Intrinsic::sqrt:
1887 TmpReg1 = getReg(CI.getOperand(1));
1888 TmpReg2 = getReg(CI);
1889 BuildMI(BB, X86::FSQRT, 1, TmpReg2).addReg(TmpReg1);
1890 return;
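// The memcpy/memset lowerings below pick a rep-string opcode from the low two
// bits of the alignment: a 4-byte aligned copy of N bytes runs REP_MOVSD with
// ECX = N/4, a 2-byte aligned one runs REP_MOVSW with ECX = N/2, and anything
// else falls back to REP_MOVSB with ECX = N. Non-constant byte counts are
// scaled with a SHR by 1 or 2 rather than a divide.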
1892 case Intrinsic::memcpy: {
1893 assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
1894 unsigned Align = 1;
1895 if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
1896 Align = AlignC->getRawValue();
1897 if (Align == 0) Align = 1;
1898 }
1900 // Turn the byte count into # iterations
1901 unsigned CountReg;
1902 unsigned Opcode;
1903 switch (Align & 3) {
1904 case 2: // WORD aligned
1905 if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
1906 CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
1907 } else {
1908 CountReg = makeAnotherReg(Type::IntTy);
1909 unsigned ByteReg = getReg(CI.getOperand(3));
1910 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
1911 }
1912 Opcode = X86::REP_MOVSW;
1913 break;
1914 case 0: // DWORD aligned
1915 if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
1916 CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
1917 } else {
1918 CountReg = makeAnotherReg(Type::IntTy);
1919 unsigned ByteReg = getReg(CI.getOperand(3));
1920 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
1921 }
1922 Opcode = X86::REP_MOVSD;
1923 break;
1924 default: // BYTE aligned
1925 CountReg = getReg(CI.getOperand(3));
1926 Opcode = X86::REP_MOVSB;
1927 break;
1928 }
1930 // No matter what the alignment is, we put the source in ESI, the
1931 // destination in EDI, and the count in ECX.
1932 TmpReg1 = getReg(CI.getOperand(1));
1933 TmpReg2 = getReg(CI.getOperand(2));
1934 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
1935 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
1936 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
1937 BuildMI(BB, Opcode, 0);
1938 return;
1939 }
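// Worked example for the memset lowering below: memset(p, 0xAB, n) with
// 4-byte alignment replicates the byte into EAX as 0xABABABAB (the
// (Val << 8) | Val and (Val << 16) | Val steps) and runs REP_STOSD with
// ECX = n/4; the 2-byte aligned case builds 0xABAB in AX for REP_STOSW.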
1940 case Intrinsic::memset: {
1941 assert(CI.getNumOperands() == 5 && "Illegal llvm.memset call!");
1942 unsigned Align = 1;
1943 if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
1944 Align = AlignC->getRawValue();
1945 if (Align == 0) Align = 1;
1946 }
1948 // Turn the byte count into # iterations
1949 unsigned CountReg;
1950 unsigned Opcode;
1951 if (ConstantInt *ValC = dyn_cast<ConstantInt>(CI.getOperand(2))) {
1952 unsigned Val = ValC->getRawValue() & 255;
1954 // If the value is a constant, then we can potentially use larger stores.
1955 switch (Align & 3) {
1956 case 2: // WORD aligned
1957 if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
1958 CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
1959 } else {
1960 CountReg = makeAnotherReg(Type::IntTy);
1961 unsigned ByteReg = getReg(CI.getOperand(3));
1962 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
1963 }
1964 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
1965 Opcode = X86::REP_STOSW;
1966 break;
1967 case 0: // DWORD aligned
1968 if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
1969 CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
1970 } else {
1971 CountReg = makeAnotherReg(Type::IntTy);
1972 unsigned ByteReg = getReg(CI.getOperand(3));
1973 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
1974 }
1975 Val = (Val << 8) | Val;
1976 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
1977 Opcode = X86::REP_STOSD;
1978 break;
1979 default: // BYTE aligned
1980 CountReg = getReg(CI.getOperand(3));
1981 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
1982 Opcode = X86::REP_STOSB;
1983 break;
1984 }
1985 } else {
1986 // If it's not a constant value we are storing, just fall back. We could
1987 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
1988 unsigned ValReg = getReg(CI.getOperand(2));
1989 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
1990 CountReg = getReg(CI.getOperand(3));
1991 Opcode = X86::REP_STOSB;
1992 }
1994 // No matter what the alignment is, we put the destination in EDI and the
1995 // count in ECX.
1996 TmpReg1 = getReg(CI.getOperand(1));
1997 //TmpReg2 = getReg(CI.getOperand(2));
1998 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
1999 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
2000 BuildMI(BB, Opcode, 0);
2001 return;
2002 }
2004 case Intrinsic::readport: {
2005 // First, determine that the size of the operand falls within the acceptable
2006 // range for this architecture.
2008 if (getClassB(CI.getOperand(1)->getType()) != cShort) {
2009 std::cerr << "llvm.readport: Address size is not 16 bits\n";
2010 exit(1);
2011 }
2013 // Now, move the I/O port address into the DX register and use the IN
2014 // instruction to get the input data.
2016 unsigned Class = getClass(CI.getCalledFunction()->getReturnType());
2017 unsigned DestReg = getReg(CI);
2019 // If the port is a single-byte constant, use the immediate form.
2020 if (ConstantInt *C = dyn_cast<ConstantInt>(CI.getOperand(1)))
2021 if ((C->getRawValue() & 255) == C->getRawValue()) {
2022 switch (Class) {
2023 case cByte:
2024 BuildMI(BB, X86::IN8ri, 1).addImm((unsigned char)C->getRawValue());
2025 BuildMI(BB, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
2026 return;
2027 case cShort:
2028 BuildMI(BB, X86::IN16ri, 1).addImm((unsigned char)C->getRawValue());
2029 BuildMI(BB, X86::MOV16rr, 1, DestReg).addReg(X86::AX);
2030 return;
2031 case cInt:
2032 BuildMI(BB, X86::IN32ri, 1).addImm((unsigned char)C->getRawValue());
2033 BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);
2034 return;
2035 }
2036 }
2038 unsigned Reg = getReg(CI.getOperand(1));
2039 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
2040 switch (Class) {
2041 case cByte:
2042 BuildMI(BB, X86::IN8rr, 0);
2043 BuildMI(BB, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
2044 return;
2045 case cShort:
2046 BuildMI(BB, X86::IN16rr, 0);
2047 BuildMI(BB, X86::MOV16rr, 1, DestReg).addReg(X86::AX);
2048 return;
2049 case cInt:
2050 BuildMI(BB, X86::IN32rr, 0);
2051 BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);
2052 return;
2053 default:
2054 std::cerr << "Cannot do input on this data type\n";
2055 exit(1);
2056 }
2057 }
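// Why two forms: the x86 IN instruction encodes the port either as an 8-bit
// immediate (ports 0-255 only) or in DX. The constant test above selects the
// immediate form when the port number fits in a byte; all other ports go
// through DX, as in the register form just emitted.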
2060 case Intrinsic::writeport: {
2061 // First, determine that the size of the operand falls within the
2062 // acceptable range for this architecture.
2063 if (getClass(CI.getOperand(2)->getType()) != cShort) {
2064 std::cerr << "llvm.writeport: Address size is not 16 bits\n";
2065 exit(1);
2066 }
2068 unsigned Class = getClassB(CI.getOperand(1)->getType());
2069 unsigned ValReg = getReg(CI.getOperand(1));
2070 switch (Class) {
2071 case cByte:
2072 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
2073 break;
2074 case cShort:
2075 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(ValReg);
2076 break;
2077 case cInt:
2078 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(ValReg);
2079 break;
2080 default:
2081 std::cerr << "llvm.writeport: invalid data type for X86 target\n";
2082 exit(1);
2083 }
2086 // If the port is a single-byte constant, use the immediate form.
2087 if (ConstantInt *C = dyn_cast<ConstantInt>(CI.getOperand(2)))
2088 if ((C->getRawValue() & 255) == C->getRawValue()) {
2089 static const unsigned O[] = { X86::OUT8ir, X86::OUT16ir, X86::OUT32ir };
2090 BuildMI(BB, O[Class], 1).addImm((unsigned char)C->getRawValue());
2091 return;
2092 }
2094 // Otherwise, move the I/O port address into the DX register and the value
2095 // to write into the AL/AX/EAX register.
2096 static const unsigned Opc[] = { X86::OUT8rr, X86::OUT16rr, X86::OUT32rr };
2097 unsigned Reg = getReg(CI.getOperand(2));
2098 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
2099 BuildMI(BB, Opc[Class], 0);
2100 return;
2101 }
2103 default: assert(0 && "Error: unknown intrinsics should have been lowered!");
2104 }
2105 }
2107 static bool isSafeToFoldLoadIntoInstruction(LoadInst &LI, Instruction &User) {
2108 if (LI.getParent() != User.getParent())
2109 return false;
2110 BasicBlock::iterator It = &LI;
2111 // Check all of the instructions between the load and the user. We should
2112 // really use alias analysis here, but for now we just do something simple.
2113 for (++It; It != BasicBlock::iterator(&User); ++It) {
2114 switch (It->getOpcode()) {
2115 case Instruction::Free:
2116 case Instruction::Store:
2117 case Instruction::Call:
2118 case Instruction::Invoke:
2119 return false;
2120 case Instruction::Load:
2121 if (cast<LoadInst>(It)->isVolatile() && LI.isVolatile())
2122 return false;
2123 break;
2124 }
2125 }
2126 return true;
2127 }
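// Example of what this check enables: for 'Y = add X, (load P)' where the
// load has no other use and nothing between the load and the add can write
// memory, visitSimpleBinary below folds the load and emits a single ADD32rm
// from P instead of a MOV32rm followed by ADD32rr.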
2129 /// visitSimpleBinary - Implement simple binary operators for integral types...
2130 /// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
2133 void X86ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
2134 unsigned DestReg = getReg(B);
2135 MachineBasicBlock::iterator MI = BB->end();
2136 Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1);
2137 unsigned Class = getClassB(B.getType());
2139 // If this is AND X, C, and it is only used by a setcc instruction, it will
2140 // be folded. There is no need to emit this instruction.
2141 if (B.hasOneUse() && OperatorClass == 2 && isa<ConstantInt>(Op1))
2142 if (Class == cByte || Class == cShort || Class == cInt) {
2143 Instruction *Use = cast<Instruction>(B.use_back());
2144 if (isa<SetCondInst>(Use) &&
2145 Use->getOperand(1) == Constant::getNullValue(B.getType())) {
2146 switch (getSetCCNumber(Use->getOpcode())) {
2147 case 0:
2148 case 1:
2149 return;
2150 default:
2151 if (B.getType()->isSigned()) return;
2152 }
2153 }
2154 }
2156 // Special case: op Reg, load [mem]
2157 if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1) && Class != cLong &&
2158 Op0->hasOneUse() &&
2159 isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op0), B))
2160 if (!B.swapOperands())
2161 std::swap(Op0, Op1); // Make sure any loads are in the RHS.
2163 if (isa<LoadInst>(Op1) && Class != cLong && Op1->hasOneUse() &&
2164 isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op1), B)) {
2165 unsigned Opcode;
2166 if (Class != cFP) {
2168 static const unsigned OpcodeTab[][3] = {
2169 // Arithmetic operators
2170 { X86::ADD8rm, X86::ADD16rm, X86::ADD32rm }, // ADD
2171 { X86::SUB8rm, X86::SUB16rm, X86::SUB32rm }, // SUB
2173 // Bitwise operators
2174 { X86::AND8rm, X86::AND16rm, X86::AND32rm }, // AND
2175 { X86:: OR8rm, X86:: OR16rm, X86:: OR32rm }, // OR
2176 { X86::XOR8rm, X86::XOR16rm, X86::XOR32rm }, // XOR
2177 };
2178 Opcode = OpcodeTab[OperatorClass][Class];
2179 } else {
2180 static const unsigned OpcodeTab[][2] = {
2181 { X86::FADD32m, X86::FADD64m }, // ADD
2182 { X86::FSUB32m, X86::FSUB64m }, // SUB
2183 };
2184 const Type *Ty = Op0->getType();
2185 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2186 Opcode = OpcodeTab[OperatorClass][Ty == Type::DoubleTy];
2187 }
2189 unsigned Op0r = getReg(Op0);
2190 if (AllocaInst *AI =
2191 dyn_castFixedAlloca(cast<LoadInst>(Op1)->getOperand(0))) {
2192 unsigned FI = getFixedSizedAllocaFI(AI);
2193 addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), FI);
2194 } else {
2195 X86AddressMode AM;
2197 getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), AM);
2199 addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), AM);
2200 }
2201 return;
2202 }
2204 // If this is a floating point subtract, check to see if we can fold the first
2205 // operand in.
2206 if (Class == cFP && OperatorClass == 1 &&
2207 isa<LoadInst>(Op0) &&
2208 isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op0), B)) {
2209 const Type *Ty = Op0->getType();
2210 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2211 unsigned Opcode = Ty == Type::FloatTy ? X86::FSUBR32m : X86::FSUBR64m;
2213 unsigned Op1r = getReg(Op1);
2214 if (AllocaInst *AI =
2215 dyn_castFixedAlloca(cast<LoadInst>(Op0)->getOperand(0))) {
2216 unsigned FI = getFixedSizedAllocaFI(AI);
2217 addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), FI);
2218 } else {
2219 X86AddressMode AM;
2220 getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), AM);
2222 addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), AM);
2223 }
2224 return;
2225 }
2227 emitSimpleBinaryOperation(BB, MI, Op0, Op1, OperatorClass, DestReg);
2228 }
2231 /// emitBinaryFPOperation - This method handles emission of floating point
2232 /// Add (0), Sub (1), Mul (2), and Div (3) operations.
2233 void X86ISel::emitBinaryFPOperation(MachineBasicBlock *BB,
2234 MachineBasicBlock::iterator IP,
2235 Value *Op0, Value *Op1,
2236 unsigned OperatorClass, unsigned DestReg) {
2237 // Special case: op Reg, <const fp>
2238 if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1))
2239 if (!Op1C->isExactlyValue(+0.0) && !Op1C->isExactlyValue(+1.0)) {
2240 // Create a constant pool entry for this constant.
2241 MachineConstantPool *CP = F->getConstantPool();
2242 unsigned CPI = CP->getConstantPoolIndex(Op1C);
2243 const Type *Ty = Op1->getType();
2245 static const unsigned OpcodeTab[][4] = {
2246 { X86::FADD32m, X86::FSUB32m, X86::FMUL32m, X86::FDIV32m }, // Float
2247 { X86::FADD64m, X86::FSUB64m, X86::FMUL64m, X86::FDIV64m }, // Double
2248 };
2250 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2251 unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass];
2252 unsigned Op0r = getReg(Op0, BB, IP);
2253 addConstantPoolReference(BuildMI(*BB, IP, Opcode, 5,
2254 DestReg).addReg(Op0r), CPI);
2255 return;
2256 }
2258 // Special case: R1 = op <const fp>, R2
2259 if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0))
2260 if (CFP->isExactlyValue(-0.0) && OperatorClass == 1) {
2261 // -0.0 - X === -X
2262 unsigned op1Reg = getReg(Op1, BB, IP);
2263 BuildMI(*BB, IP, X86::FCHS, 1, DestReg).addReg(op1Reg);
2264 return;
2265 } else if (!CFP->isExactlyValue(+0.0) && !CFP->isExactlyValue(+1.0)) {
2266 // R1 = op CST, R2 --> R1 = opr R2, CST
2268 // Create a constant pool entry for this constant.
2269 MachineConstantPool *CP = F->getConstantPool();
2270 unsigned CPI = CP->getConstantPoolIndex(CFP);
2271 const Type *Ty = CFP->getType();
2273 static const unsigned OpcodeTab[][4] = {
2274 { X86::FADD32m, X86::FSUBR32m, X86::FMUL32m, X86::FDIVR32m }, // Float
2275 { X86::FADD64m, X86::FSUBR64m, X86::FMUL64m, X86::FDIVR64m }, // Double
2276 };
2278 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2279 unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass];
2280 unsigned Op1r = getReg(Op1, BB, IP);
2281 addConstantPoolReference(BuildMI(*BB, IP, Opcode, 5,
2282 DestReg).addReg(Op1r), CPI);
2283 return;
2284 }
2287 static const unsigned OpcodeTab[4] = {
2288 X86::FpADD, X86::FpSUB, X86::FpMUL, X86::FpDIV
2289 };
2291 unsigned Opcode = OpcodeTab[OperatorClass];
2292 unsigned Op0r = getReg(Op0, BB, IP);
2293 unsigned Op1r = getReg(Op1, BB, IP);
2294 BuildMI(*BB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
2295 }
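// Sketch of the constant-pool paths above: 'fmul double %x, 2.5' becomes an
// FMUL64m whose memory operand is the constant-pool entry for 2.5, so the
// constant is never loaded into a register first. +0.0 and +1.0 are excluded,
// presumably because the regular constant path can materialize them directly
// with fldz/fld1.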
2297 /// emitSimpleBinaryOperation - Implement simple binary operators for integral
2298 /// types: OperatorClass is 0 for Add, 1 for Sub, 2 for And, 3 for Or, and 4
2299 /// for Xor. This is the common code shared between visitSimpleBinary and
2300 /// constant expression support.
2304 void X86ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
2305 MachineBasicBlock::iterator IP,
2306 Value *Op0, Value *Op1,
2307 unsigned OperatorClass,
2308 unsigned DestReg) {
2309 unsigned Class = getClassB(Op0->getType());
2311 if (Class == cFP) {
2312 assert(OperatorClass < 2 && "No logical ops for FP!");
2313 emitBinaryFPOperation(MBB, IP, Op0, Op1, OperatorClass, DestReg);
2314 return;
2315 }
2317 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0))
2318 if (OperatorClass == 1) {
2319 static unsigned const NEGTab[] = {
2320 X86::NEG8r, X86::NEG16r, X86::NEG32r, 0, X86::NEG32r
2321 };
2323 // sub 0, X -> neg X
2324 if (CI->isNullValue()) {
2325 unsigned op1Reg = getReg(Op1, MBB, IP);
2326 BuildMI(*MBB, IP, NEGTab[Class], 1, DestReg).addReg(op1Reg);
2328 if (Class == cLong) {
2329 // We just emitted: Dl = neg Sl
2330 // Now emit : T = addc Sh, 0
2331 // : Dh = neg T
2332 unsigned T = makeAnotherReg(Type::IntTy);
2333 BuildMI(*MBB, IP, X86::ADC32ri, 2, T).addReg(op1Reg+1).addImm(0);
2334 BuildMI(*MBB, IP, X86::NEG32r, 1, DestReg+1).addReg(T);
2335 }
2336 return;
2337 } else if (Op1->hasOneUse() && Class != cLong) {
2338 // sub C, X -> tmp = neg X; DestReg = add tmp, C. This is better
2339 // than copying C into a temporary register, because of register
2340 // pressure (tmp and destreg can share a register).
2341 static unsigned const ADDRITab[] = {
2342 X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri
2343 };
2344 unsigned op1Reg = getReg(Op1, MBB, IP);
2345 unsigned Tmp = makeAnotherReg(Op0->getType());
2346 BuildMI(*MBB, IP, NEGTab[Class], 1, Tmp).addReg(op1Reg);
2347 BuildMI(*MBB, IP, ADDRITab[Class], 2,
2348 DestReg).addReg(Tmp).addImm(CI->getRawValue());
2349 return;
2350 }
2351 }
2353 // Special case: op Reg, <const int>
2354 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
2355 unsigned Op0r = getReg(Op0, MBB, IP);
2357 // xor X, -1 -> not X
2358 if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
2359 static unsigned const NOTTab[] = {
2360 X86::NOT8r, X86::NOT16r, X86::NOT32r, 0, X86::NOT32r
2361 };
2362 BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
2363 if (Class == cLong) // Invert the top part too
2364 BuildMI(*MBB, IP, X86::NOT32r, 1, DestReg+1).addReg(Op0r+1);
2365 return;
2366 }
2368 // add X, -1 -> dec X
2369 if (OperatorClass == 0 && Op1C->isAllOnesValue() && Class != cLong) {
2370 // Note that we can't use dec for 64-bit decrements, because it does not
2371 // set the carry flag!
2372 static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
2373 BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
2374 return;
2375 }
2377 // add X, 1 -> inc X
2378 if (OperatorClass == 0 && Op1C->equalsInt(1) && Class != cLong) {
2379 // Note that we can't use inc for 64-bit increments, because it does not
2380 // set the carry flag!
2381 static unsigned const INCTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
2382 BuildMI(*MBB, IP, INCTab[Class], 1, DestReg).addReg(Op0r);
2383 return;
2384 }
2386 static const unsigned OpcodeTab[][5] = {
2387 // Arithmetic operators
2388 { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri }, // ADD
2389 { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, X86::SUB32ri }, // SUB
2391 // Bitwise operators
2392 { X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, X86::AND32ri }, // AND
2393 { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri, 0, X86:: OR32ri }, // OR
2394 { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, X86::XOR32ri }, // XOR
2395 };
2397 unsigned Opcode = OpcodeTab[OperatorClass][Class];
2398 unsigned Op1l = cast<ConstantInt>(Op1C)->getRawValue();
2400 if (Class != cLong) {
2401 BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
2402 return;
2403 }
2405 // If this is a long value and the high or low bits have a special
2406 // property, emit some special cases.
2407 unsigned Op1h = cast<ConstantInt>(Op1C)->getRawValue() >> 32LL;
2409 // If the constant is zero in the low 32-bits, just copy the low part
2410 // across and apply the normal 32-bit operation to the high parts. There
2411 // will be no carry or borrow into the top.
2412 if (Op1l == 0) {
2413 if (OperatorClass != 2) // All but and...
2414 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0r);
2415 else
2416 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
2417 BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg+1)
2418 .addReg(Op0r+1).addImm(Op1h);
2419 return;
2420 }
2422 // If this is a logical operation and the top 32-bits are zero, just
2423 // operate on the lower 32.
2424 if (Op1h == 0 && OperatorClass > 1) {
2425 BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg)
2426 .addReg(Op0r).addImm(Op1l);
2427 if (OperatorClass != 2) // All but and
2428 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(Op0r+1);
2429 else
2430 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
2431 return;
2432 }
2434 // TODO: We could handle lots of other special cases here, such as AND'ing
2435 // with 0xFFFFFFFF00000000 -> noop, etc.
2437 // Otherwise, code generate the full operation with a constant.
2438 static const unsigned TopTab[] = {
2439 X86::ADC32ri, X86::SBB32ri, X86::AND32ri, X86::OR32ri, X86::XOR32ri
2440 };
2442 BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
2443 BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1)
2444 .addReg(Op0r+1).addImm(Op1h);
2445 return;
2446 }
2448 // Finally, handle the general case now.
2449 static const unsigned OpcodeTab[][5] = {
2450 // Arithmetic operators
2451 { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, 0, X86::ADD32rr }, // ADD
2452 { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, 0, X86::SUB32rr }, // SUB
2454 // Bitwise operators
2455 { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, X86::AND32rr }, // AND
2456 { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0, X86:: OR32rr }, // OR
2457 { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, X86::XOR32rr }, // XOR
2458 };
2460 unsigned Opcode = OpcodeTab[OperatorClass][Class];
2461 unsigned Op0r = getReg(Op0, MBB, IP);
2462 unsigned Op1r = getReg(Op1, MBB, IP);
2463 BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
2465 if (Class == cLong) { // Handle the upper 32 bits of long values...
2466 static const unsigned TopTab[] = {
2467 X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
2468 };
2469 BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
2470 DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
2471 }
2472 }
2474 /// doMultiply - Emit appropriate instructions to multiply together the
2475 /// registers op0Reg and op1Reg, and put the result in DestReg. The type of the
2476 /// result should be given as DestTy.
2478 void X86ISel::doMultiply(MachineBasicBlock *MBB,
2479 MachineBasicBlock::iterator MBBI,
2480 unsigned DestReg, const Type *DestTy,
2481 unsigned op0Reg, unsigned op1Reg) {
2482 unsigned Class = getClass(DestTy);
2483 switch (Class) {
2484 case cInt:
2485 case cShort:
2486 BuildMI(*MBB, MBBI, Class == cInt ? X86::IMUL32rr:X86::IMUL16rr, 2, DestReg)
2487 .addReg(op0Reg).addReg(op1Reg);
2488 return;
2489 case cByte:
2490 // Must use the MUL instruction, which forces use of AL...
2491 BuildMI(*MBB, MBBI, X86::MOV8rr, 1, X86::AL).addReg(op0Reg);
2492 BuildMI(*MBB, MBBI, X86::MUL8r, 1).addReg(op1Reg);
2493 BuildMI(*MBB, MBBI, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
2494 return;
2495 default:
2496 case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
2497 }
2498 }
2500 /// doMultiplyConst - This function is specialized to efficiently codegen an 8,
2501 /// 16, or 32-bit integer multiply by a constant.
2502 void X86ISel::doMultiplyConst(MachineBasicBlock *MBB,
2503 MachineBasicBlock::iterator IP,
2504 unsigned DestReg, const Type *DestTy,
2505 unsigned op0Reg, unsigned ConstRHS) {
2506 static const unsigned MOVrrTab[] = {X86::MOV8rr, X86::MOV16rr, X86::MOV32rr};
2507 static const unsigned MOVriTab[] = {X86::MOV8ri, X86::MOV16ri, X86::MOV32ri};
2508 static const unsigned ADDrrTab[] = {X86::ADD8rr, X86::ADD16rr, X86::ADD32rr};
2509 static const unsigned NEGrTab[] = {X86::NEG8r , X86::NEG16r , X86::NEG32r };
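// Sketch of the LEA strength reduction used for ConstRHS in {3,5,9} below:
// with base and index both set to op0Reg and Scale = ConstRHS-1,
//   LEA32r Dest, [op0Reg + op0Reg*(ConstRHS-1)]
// computes op0Reg*ConstRHS in a single instruction; the -3/-5/-9 cases do
// the same LEA into a temporary and then NEG it.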
2511 unsigned Class = getClass(DestTy);
2513 unsigned TmpReg;
2514 // Handle special cases here.
2515 switch (ConstRHS) {
2516 case -2:
2517 TmpReg = makeAnotherReg(DestTy);
2518 BuildMI(*MBB, IP, NEGrTab[Class], 1, TmpReg).addReg(op0Reg);
2519 BuildMI(*MBB, IP, ADDrrTab[Class], 1,DestReg).addReg(TmpReg).addReg(TmpReg);
2520 return;
2521 case -1:
2522 BuildMI(*MBB, IP, NEGrTab[Class], 1, DestReg).addReg(op0Reg);
2523 return;
2524 case 0:
2525 BuildMI(*MBB, IP, MOVriTab[Class], 1, DestReg).addImm(0);
2526 return;
2527 case 1:
2528 BuildMI(*MBB, IP, MOVrrTab[Class], 1, DestReg).addReg(op0Reg);
2529 return;
2530 case 2:
2531 BuildMI(*MBB, IP, ADDrrTab[Class], 1,DestReg).addReg(op0Reg).addReg(op0Reg);
2532 return;
2533 case 3:
2534 case 5:
2535 case 9:
2536 if (Class == cInt) {
2537 X86AddressMode AM;
2538 AM.BaseType = X86AddressMode::RegBase;
2539 AM.Base.Reg = op0Reg;
2540 AM.Scale = ConstRHS-1;
2541 AM.IndexReg = op0Reg;
2542 AM.Disp = 0;
2543 addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, DestReg), AM);
2544 return;
2545 }
2546 break;
2547 case -3: case -5: case -9:
2549 if (Class == cInt) {
2550 TmpReg = makeAnotherReg(DestTy);
2551 X86AddressMode AM;
2552 AM.BaseType = X86AddressMode::RegBase;
2553 AM.Base.Reg = op0Reg;
2554 AM.Scale = -ConstRHS-1;
2555 AM.IndexReg = op0Reg;
2556 AM.Disp = 0;
2557 addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TmpReg), AM);
2558 BuildMI(*MBB, IP, NEGrTab[Class], 1, DestReg).addReg(TmpReg);
2559 return;
2560 }
2561 break;
2562 }
2563 // If the element size is exactly a power of 2, use a shift to get it.
2564 if (isPowerOf2_32(ConstRHS)) {
2565 unsigned Shift = Log2_32(ConstRHS);
2566 switch (Class) {
2567 default: assert(0 && "Unknown class for this function!");
2568 case cByte:
2569 BuildMI(*MBB, IP, X86::SHL8ri,2, DestReg).addReg(op0Reg).addImm(Shift);
2570 return;
2571 case cShort:
2572 BuildMI(*MBB, IP, X86::SHL16ri,2, DestReg).addReg(op0Reg).addImm(Shift);
2573 return;
2574 case cInt:
2575 BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift);
2576 return;
2577 }
2578 }
2580 // If the element size is a negative power of 2, use a shift/neg to get it.
2581 if (isPowerOf2_32(-ConstRHS)) {
2582 unsigned Shift = Log2_32(-ConstRHS);
2583 TmpReg = makeAnotherReg(DestTy);
2584 BuildMI(*MBB, IP, NEGrTab[Class], 1, TmpReg).addReg(op0Reg);
2585 switch (Class) {
2586 default: assert(0 && "Unknown class for this function!");
2587 case cByte:
2588 BuildMI(*MBB, IP, X86::SHL8ri,2, DestReg).addReg(TmpReg).addImm(Shift);
2589 return;
2590 case cShort:
2591 BuildMI(*MBB, IP, X86::SHL16ri,2, DestReg).addReg(TmpReg).addImm(Shift);
2592 return;
2593 case cInt:
2594 BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(TmpReg).addImm(Shift);
2595 return;
2596 }
2597 }
2599 if (Class == cShort) {
2600 BuildMI(*MBB, IP, X86::IMUL16rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
2601 return;
2602 } else if (Class == cInt) {
2603 BuildMI(*MBB, IP, X86::IMUL32rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
2604 return;
2605 }
2607 // Most general case, emit a normal multiply...
2608 TmpReg = makeAnotherReg(DestTy);
2609 BuildMI(*MBB, IP, MOVriTab[Class], 1, TmpReg).addImm(ConstRHS);
2611 // Emit a MUL to multiply the register holding the index by
2612 // elementSize, putting the result in OffsetReg.
2613 doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
2614 }
2616 /// visitMul - Multiplies are not simple binary operators because they must deal
2617 /// with the EAX register explicitly.
2619 void X86ISel::visitMul(BinaryOperator &I) {
2620 unsigned ResultReg = getReg(I);
2622 Value *Op0 = I.getOperand(0);
2623 Value *Op1 = I.getOperand(1);
2625 // Fold loads into floating point multiplies.
2626 if (getClass(Op0->getType()) == cFP) {
2627 if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1))
2628 if (!I.swapOperands())
2629 std::swap(Op0, Op1); // Make sure any loads are in the RHS.
2630 if (LoadInst *LI = dyn_cast<LoadInst>(Op1))
2631 if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
2632 const Type *Ty = Op0->getType();
2633 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2634 unsigned Opcode = Ty == Type::FloatTy ? X86::FMUL32m : X86::FMUL64m;
2636 unsigned Op0r = getReg(Op0);
2637 if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
2638 unsigned FI = getFixedSizedAllocaFI(AI);
2639 addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
2640 } else {
2641 X86AddressMode AM;
2642 getAddressingMode(LI->getOperand(0), AM);
2644 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
2645 }
2646 return;
2647 }
2648 }
2650 MachineBasicBlock::iterator IP = BB->end();
2651 emitMultiply(BB, IP, Op0, Op1, ResultReg);
2652 }
2654 void X86ISel::emitMultiply(MachineBasicBlock *MBB,
2655 MachineBasicBlock::iterator IP,
2656 Value *Op0, Value *Op1, unsigned DestReg) {
2657 MachineBasicBlock &BB = *MBB;
2658 TypeClass Class = getClass(Op0->getType());
2660 // Simple scalar multiply?
2661 unsigned Op0Reg = getReg(Op0, &BB, IP);
2662 switch (Class) {
2663 case cByte:
2664 case cShort:
2665 case cInt:
2666 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2667 unsigned Val = (unsigned)CI->getRawValue(); // Isn't a 64-bit constant
2668 doMultiplyConst(&BB, IP, DestReg, Op0->getType(), Op0Reg, Val);
2669 } else {
2670 unsigned Op1Reg = getReg(Op1, &BB, IP);
2671 doMultiply(&BB, IP, DestReg, Op1->getType(), Op0Reg, Op1Reg);
2672 }
2673 return;
2674 case cFP:
2675 emitBinaryFPOperation(MBB, IP, Op0, Op1, 2, DestReg);
2676 return;
2677 case cLong:
2678 break;
2679 }
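// The long multiply below follows from writing A = AH*2^32 + AL and
// B = BH*2^32 + BL:
//   (A*B) mod 2^64 = AL*BL + ((AH*BL + AL*BH) << 32)
// The low destination register receives the low half of AL*BL, and the high
// register receives (AL*BL >> 32) + AH*BL + AL*BH; the AH*BH term only
// affects bits 64 and up, so it is dropped entirely.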
2681 // Long value. We have to do things the hard way...
2682 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2683 unsigned CLow = CI->getRawValue();
2684 unsigned CHi = CI->getRawValue() >> 32;
2686 if (CLow == 0) {
2687 // If the low part of the constant is all zeros, things are simple.
2688 BuildMI(BB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
2689 doMultiplyConst(&BB, IP, DestReg+1, Type::UIntTy, Op0Reg, CHi);
2690 return;
2691 }
2693 // Multiply the two low parts... capturing carry into EDX
2694 unsigned OverflowReg = 0;
2695 if (CLow == 1) {
2696 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0Reg);
2697 } else {
2698 unsigned Op1RegL = makeAnotherReg(Type::UIntTy);
2699 OverflowReg = makeAnotherReg(Type::UIntTy);
2700 BuildMI(BB, IP, X86::MOV32ri, 1, Op1RegL).addImm(CLow);
2701 BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
2702 BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1RegL); // AL*BL
2704 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
2705 BuildMI(BB, IP, X86::MOV32rr, 1,
2706 OverflowReg).addReg(X86::EDX); // AL*BL >> 32
2707 }
2709 unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
2710 doMultiplyConst(&BB, IP, AHBLReg, Type::UIntTy, Op0Reg+1, CLow);
2712 unsigned AHBLplusOverflowReg;
2713 if (OverflowReg) {
2714 AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
2715 BuildMI(BB, IP, X86::ADD32rr, 2, // AH*BL+(AL*BL >> 32)
2716 AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
2717 } else {
2718 AHBLplusOverflowReg = AHBLReg;
2719 }
2721 if (CHi == 0) {
2722 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(AHBLplusOverflowReg);
2723 } else {
2724 unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
2725 doMultiplyConst(&BB, IP, ALBHReg, Type::UIntTy, Op0Reg, CHi);
2727 BuildMI(BB, IP, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
2728 DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
2729 }
2730 return;
2731 }
2733 // General 64x64 multiply
2735 unsigned Op1Reg = getReg(Op1, &BB, IP);
2736 // Multiply the two low parts... capturing carry into EDX
2737 BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
2738 BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1Reg); // AL*BL
2740 unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
2741 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
2742 BuildMI(BB, IP, X86::MOV32rr, 1,
2743 OverflowReg).addReg(X86::EDX); // AL*BL >> 32
2745 unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
2746 BuildMI(BB, IP, X86::IMUL32rr, 2,
2747 AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
2749 unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
2750 BuildMI(BB, IP, X86::ADD32rr, 2, // AH*BL+(AL*BL >> 32)
2751 AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
2753 unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
2754 BuildMI(BB, IP, X86::IMUL32rr, 2,
2755 ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
2757 BuildMI(BB, IP, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
2758 DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
2759 }
2762 /// visitDivRem - Handle division and remainder instructions... these
2763 /// instructions both require the same code to be generated; they just
2764 /// select the result from a different register. Note that both of these
2765 /// instructions work differently for signed and unsigned operands.
2767 void X86ISel::visitDivRem(BinaryOperator &I) {
2768 unsigned ResultReg = getReg(I);
2769 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2771 // Fold loads into floating point divides.
2772 if (getClass(Op0->getType()) == cFP) {
2773 if (LoadInst *LI = dyn_cast<LoadInst>(Op1))
2774 if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
2775 const Type *Ty = Op0->getType();
2776 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2777 unsigned Opcode = Ty == Type::FloatTy ? X86::FDIV32m : X86::FDIV64m;
2779 unsigned Op0r = getReg(Op0);
2780 if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
2781 unsigned FI = getFixedSizedAllocaFI(AI);
2782 addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
2783 } else {
2784 X86AddressMode AM;
2785 getAddressingMode(LI->getOperand(0), AM);
2787 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
2788 }
2789 return;
2790 }
2792 if (LoadInst *LI = dyn_cast<LoadInst>(Op0))
2793 if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
2794 const Type *Ty = Op0->getType();
2795 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2796 unsigned Opcode = Ty == Type::FloatTy ? X86::FDIVR32m : X86::FDIVR64m;
2798 unsigned Op1r = getReg(Op1);
2799 if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
2800 unsigned FI = getFixedSizedAllocaFI(AI);
2801 addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), FI);
2802 } else {
2803 X86AddressMode AM;
2804 getAddressingMode(LI->getOperand(0), AM);
2805 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), AM);
2806 }
2807 return;
2808 }
2809 }
2812 MachineBasicBlock::iterator IP = BB->end();
2813 emitDivRemOperation(BB, IP, Op0, Op1,
2814 I.getOpcode() == Instruction::Div, ResultReg);
2815 }
2817 void X86ISel::emitDivRemOperation(MachineBasicBlock *BB,
2818 MachineBasicBlock::iterator IP,
2819 Value *Op0, Value *Op1, bool isDiv,
2820 unsigned ResultReg) {
2821 const Type *Ty = Op0->getType();
2822 unsigned Class = getClass(Ty);
2823 switch (Class) {
2824 case cFP: // Floating point divide
2825 if (isDiv) {
2826 emitBinaryFPOperation(BB, IP, Op0, Op1, 3, ResultReg);
2827 return;
2828 } else { // Floating point remainder...
2829 unsigned Op0Reg = getReg(Op0, BB, IP);
2830 unsigned Op1Reg = getReg(Op1, BB, IP);
2831 MachineInstr *TheCall =
2832 BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
2833 std::vector<ValueRecord> Args;
2834 Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy));
2835 Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy));
2836 doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args);
2837 }
2838 return;
2839 case cLong: {
2840 static const char *FnName[] =
2841 { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" };
2842 unsigned Op0Reg = getReg(Op0, BB, IP);
2843 unsigned Op1Reg = getReg(Op1, BB, IP);
2844 unsigned NameIdx = Ty->isUnsigned()*2 + isDiv;
2845 MachineInstr *TheCall =
2846 BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);
2848 std::vector<ValueRecord> Args;
2849 Args.push_back(ValueRecord(Op0Reg, Type::LongTy));
2850 Args.push_back(ValueRecord(Op1Reg, Type::LongTy));
2851 doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args);
2852 return;
2853 }
2854 case cByte: case cShort: case cInt:
2855 break; // Small integrals, handled below...
2856 default: assert(0 && "Unknown class!");
2857 }
2859 static const unsigned MovOpcode[]={ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
2860 static const unsigned NEGOpcode[]={ X86::NEG8r, X86::NEG16r, X86::NEG32r };
2861 static const unsigned SAROpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
2862 static const unsigned SHROpcode[]={ X86::SHR8ri, X86::SHR16ri, X86::SHR32ri };
2863 static const unsigned ADDOpcode[]={ X86::ADD8rr, X86::ADD16rr, X86::ADD32rr };
2865 // Special case signed division by power of 2.
2866 if (ConstantSInt *CI = dyn_cast<ConstantSInt>(Op1))
2867 if (isDiv) {
2868 assert(Class != cLong && "This doesn't handle 64-bit divides!");
2869 int V = CI->getValue();
2871 if (V == 1) { // X /s 1 => X
2872 unsigned Op0Reg = getReg(Op0, BB, IP);
2873 BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(Op0Reg);
2874 return;
2875 }
2877 if (V == -1) { // X /s -1 => -X
2878 unsigned Op0Reg = getReg(Op0, BB, IP);
2879 BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(Op0Reg);
2880 return;
2881 }
2883 if (V == 2 || V == -2) { // X /s 2
2884 static const unsigned CMPOpcode[] = {
2885 X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
2886 };
2887 static const unsigned SBBOpcode[] = {
2888 X86::SBB8ri, X86::SBB16ri, X86::SBB32ri
2889 };
2890 unsigned Op0Reg = getReg(Op0, BB, IP);
2891 unsigned SignBit = 1 << (CI->getType()->getPrimitiveSize()*8-1);
2892 BuildMI(*BB, IP, CMPOpcode[Class], 2).addReg(Op0Reg).addImm(SignBit);
2894 unsigned TmpReg = makeAnotherReg(Op0->getType());
2895 BuildMI(*BB, IP, SBBOpcode[Class], 2, TmpReg).addReg(Op0Reg).addImm(-1);
2897 unsigned TmpReg2 = V == 2 ? ResultReg : makeAnotherReg(Op0->getType());
2898 BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg2).addReg(TmpReg).addImm(1);
2899 if (V == -2)
2900 BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(TmpReg2);
2901 return;
2902 }
2905 bool isNeg = false;
2906 if (V < 0) { // Not a positive power of 2?
2907 V = -V;
2908 isNeg = true; // Maybe it's a negative power of 2.
2909 }
2910 if (isPowerOf2_32(V)) {
2911 unsigned Log = Log2_32(V);
2913 unsigned Op0Reg = getReg(Op0, BB, IP);
2914 unsigned TmpReg = makeAnotherReg(Op0->getType());
2915 BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg)
2916 .addReg(Op0Reg).addImm(Log-1);
2917 unsigned TmpReg2 = makeAnotherReg(Op0->getType());
2918 BuildMI(*BB, IP, SHROpcode[Class], 2, TmpReg2)
2919 .addReg(TmpReg).addImm(CI->getType()->getPrimitiveSizeInBits()-Log);
2920 unsigned TmpReg3 = makeAnotherReg(Op0->getType());
2921 BuildMI(*BB, IP, ADDOpcode[Class], 2, TmpReg3)
2922 .addReg(Op0Reg).addReg(TmpReg2);
2924 unsigned TmpReg4 = isNeg ? makeAnotherReg(Op0->getType()) : ResultReg;
2925 BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg4)
2926 .addReg(TmpReg3).addImm(Log);
2927 if (isNeg)
2928 BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(TmpReg4);
2929 return;
2930 }
2931 } else {
2932 assert(Class != cLong && "This doesn't handle 64-bit remainder!");
2933 int V = CI->getValue();
2935 if (V == 2 || V == -2) { // X % 2, X % -2
2936 static const unsigned SExtOpcode[] = { X86::CBW, X86::CWD, X86::CDQ };
2937 static const unsigned BaseReg[] = { X86::AL , X86::AX , X86::EAX };
2938 static const unsigned SExtReg[] = { X86::AH , X86::DX , X86::EDX };
2939 static const unsigned ANDOpcode[] = {
2940 X86::AND8ri, X86::AND16ri, X86::AND32ri
2941 };
2942 static const unsigned XOROpcode[] = {
2943 X86::XOR8rr, X86::XOR16rr, X86::XOR32rr
2944 };
2945 static const unsigned SUBOpcode[] = {
2946 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr
2947 };
2949 // Sign extend result into reg of -1 or 0.
2950 unsigned Op0Reg = getReg(Op0, BB, IP);
2951 BuildMI(*BB, IP, MovOpcode[Class], 1, BaseReg[Class]).addReg(Op0Reg);
2952 BuildMI(*BB, IP, SExtOpcode[Class], 0);
2953 unsigned TmpReg0 = makeAnotherReg(Op0->getType());
2954 BuildMI(*BB, IP, MovOpcode[Class], 1, TmpReg0).addReg(SExtReg[Class]);
2956 unsigned TmpReg1 = makeAnotherReg(Op0->getType());
2957 BuildMI(*BB, IP, ANDOpcode[Class], 2, TmpReg1).addReg(Op0Reg).addImm(1);
2959 unsigned TmpReg2 = makeAnotherReg(Op0->getType());
2960 BuildMI(*BB, IP, XOROpcode[Class], 2,
2961 TmpReg2).addReg(TmpReg1).addReg(TmpReg0);
2962 BuildMI(*BB, IP, SUBOpcode[Class], 2,
2963 ResultReg).addReg(TmpReg2).addReg(TmpReg0);
2964 return;
2965 }
2966 }
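// How the X % 2 sequence above works: the sign extension (CBW/CWD/CDQ) leaves
// 0 or -1 in TmpReg0, the AND extracts X's low bit, and the XOR/SUB pair
// conditionally negates that bit ((x ^ m) - m == -x when m is all ones). The
// result is 0, 1, or -1 with the sign of X, matching C's signed remainder
// for a divisor of +/-2.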
2968 static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
2969 static const unsigned ClrOpcode[]={ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri };
2970 static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
2971 static const unsigned SExOpcode[]={ X86::CBW , X86::CWD , X86::CDQ };
2973 static const unsigned DivOpcode[][4] = {
2974 { X86::DIV8r , X86::DIV16r , X86::DIV32r , 0 }, // Unsigned division
2975 { X86::IDIV8r, X86::IDIV16r, X86::IDIV32r, 0 }, // Signed division
2976 };
2978 unsigned Reg = Regs[Class];
2979 unsigned ExtReg = ExtRegs[Class];
2981 // Put the first operand into one of the A registers...
2982 unsigned Op0Reg = getReg(Op0, BB, IP);
2983 unsigned Op1Reg = getReg(Op1, BB, IP);
2984 BuildMI(*BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
2986 if (Ty->isSigned()) {
2987 // Emit a sign extension instruction.
2988 BuildMI(*BB, IP, SExOpcode[Class], 0);
2990 // Emit the appropriate divide or remainder instruction...
2991 BuildMI(*BB, IP, DivOpcode[1][Class], 1).addReg(Op1Reg);
2992 } else {
2993 // If unsigned, emit a zeroing instruction... (reg = 0)
2994 BuildMI(*BB, IP, ClrOpcode[Class], 2, ExtReg).addImm(0);
2996 // Emit the appropriate divide or remainder instruction...
2997 BuildMI(*BB, IP, DivOpcode[0][Class], 1).addReg(Op1Reg);
2998 }
3000 // Figure out which register we want to pick the result out of...
3001 unsigned DestReg = isDiv ? Reg : ExtReg;
3003 // Put the result into the destination register...
3004 BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
3005 }
3008 /// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here
3009 /// for constant immediate shift values, and for constant immediate
3010 /// shift values equal to 1. Even the general case is sort of special,
3011 /// because the shift amount has to be in CL, not just any old register.
3013 void X86ISel::visitShiftInst(ShiftInst &I) {
3014 MachineBasicBlock::iterator IP = BB->end ();
3015 emitShiftOperation (BB, IP, I.getOperand (0), I.getOperand (1),
3016 I.getOpcode () == Instruction::Shl, I.getType (),
3017 getReg (I));
3018 }
3020 /// Emit code for a 'SHLD DestReg, Op0, Op1, Amt' operation, where Amt is a
3021 /// constant.
3022 void X86ISel::doSHLDConst(MachineBasicBlock *MBB,
3023 MachineBasicBlock::iterator IP,
3024 unsigned DestReg, unsigned Op0Reg, unsigned Op1Reg,
3025 unsigned Amt) {
3026 // SHLD is a very inefficient operation on every processor; try to do
3027 // something simpler for common values of 'Amt'.
3028 if (Amt == 0) {
3029 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0Reg);
3030 } else if (Amt == 1) {
3031 unsigned Tmp = makeAnotherReg(Type::UIntTy);
3032 BuildMI(*MBB, IP, X86::ADD32rr, 2, Tmp).addReg(Op1Reg).addReg(Op1Reg);
3033 BuildMI(*MBB, IP, X86::ADC32rr, 2, DestReg).addReg(Op0Reg).addReg(Op0Reg);
3034 } else if (Amt == 2 || Amt == 3) {
3035 // On the P4 and Athlon it is cheaper to replace shld ..., 2|3 with a
3036 // shift/lea pair. NOTE: This should not be done on the P6 family!
3037 unsigned Tmp = makeAnotherReg(Type::UIntTy);
3038 BuildMI(*MBB, IP, X86::SHR32ri, 2, Tmp).addReg(Op1Reg).addImm(32-Amt);
3039 X86AddressMode AM;
3040 AM.BaseType = X86AddressMode::RegBase;
3041 AM.Base.Reg = Tmp;
3042 AM.Scale = 1 << Amt;
3043 AM.IndexReg = Op0Reg;
3044 AM.Disp = 0;
3045 addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, DestReg), AM);
3046 } else {
3047 // NOTE: It is always cheaper on the P4 to emit SHLD as two shifts and an OR
3048 // than it is to emit a real SHLD.
3050 BuildMI(*MBB, IP, X86::SHLD32rri8, 3,
3051 DestReg).addReg(Op0Reg).addReg(Op1Reg).addImm(Amt);
3052 }
3053 }
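// For reference: 'SHLD Dest, Op0, Op1, Amt' computes
//   Dest = (Op0 << Amt) | (Op1 >> (32-Amt))
// The Amt == 1 special case above gets the same effect from ADD/ADC: the ADD
// shifts Op1 left by one, and its carry-out (Op1's old top bit) is then
// shifted into Op0's doubled value by the ADC.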
3055 /// emitShiftOperation - Common code shared between visitShiftInst and
3056 /// constant expression support.
3057 void X86ISel::emitShiftOperation(MachineBasicBlock *MBB,
3058 MachineBasicBlock::iterator IP,
3059 Value *Op, Value *ShiftAmount,
3060 bool isLeftShift, const Type *ResultTy,
3061 unsigned DestReg) {
3062 unsigned SrcReg = getReg (Op, MBB, IP);
3063 bool isSigned = ResultTy->isSigned ();
3064 unsigned Class = getClass (ResultTy);
3066 static const unsigned ConstantOperand[][3] = {
3067 { X86::SHR8ri, X86::SHR16ri, X86::SHR32ri }, // SHR
3068 { X86::SAR8ri, X86::SAR16ri, X86::SAR32ri }, // SAR
3069 { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri }, // SHL
3070 { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri }, // SAL = SHL
3071 };
3073 static const unsigned NonConstantOperand[][3] = {
3074 { X86::SHR8rCL, X86::SHR16rCL, X86::SHR32rCL }, // SHR
3075 { X86::SAR8rCL, X86::SAR16rCL, X86::SAR32rCL }, // SAR
3076 { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SHL
3077 { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SAL = SHL
3078 };
3080 // Longs, as usual, are handled specially.
3081 if (Class == cLong) {
3082 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
3083 unsigned Amount = CUI->getValue();
3084 if (Amount == 1 && isLeftShift) { // X << 1 == X+X
3085 BuildMI(*MBB, IP, X86::ADD32rr, 2,
3086 DestReg).addReg(SrcReg).addReg(SrcReg);
3087 BuildMI(*MBB, IP, X86::ADC32rr, 2,
3088 DestReg+1).addReg(SrcReg+1).addReg(SrcReg+1);
3089 } else if (Amount < 32) {
3090 const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
3091 if (isLeftShift) {
3092 doSHLDConst(MBB, IP, DestReg+1, SrcReg+1, SrcReg, Amount);
3093 BuildMI(*MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addImm(Amount);
3094 } else {
3095 BuildMI(*MBB, IP, X86::SHRD32rri8, 3,
3096 DestReg).addReg(SrcReg ).addReg(SrcReg+1).addImm(Amount);
3097 BuildMI(*MBB, IP, Opc[2],2,DestReg+1).addReg(SrcReg+1).addImm(Amount);
3098 }
3099 } else if (Amount == 32) {
3100 if (isLeftShift) {
3101 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg);
3102 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
3103 } else {
3104 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg+1);
3105 if (!isSigned) {
3106 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
3107 } else {
3108 BuildMI(*MBB, IP, X86::SAR32ri, 2,
3109 DestReg+1).addReg(SrcReg+1).addImm(31);
3110 }
3111 }
3112 } else { // Shifting more than 32 bits
3113 Amount -= 32;
3114 if (isLeftShift) {
3115 BuildMI(*MBB, IP, X86::SHL32ri, 2,
3116 DestReg + 1).addReg(SrcReg).addImm(Amount);
3117 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
3118 } else {
3119 BuildMI(*MBB, IP, isSigned ? X86::SAR32ri : X86::SHR32ri, 2,
3120 DestReg).addReg(SrcReg+1).addImm(Amount);
3121 if (isSigned)
3122 BuildMI(*MBB, IP, X86::SAR32ri, 2,
3123 DestReg+1).addReg(SrcReg+1).addImm(31);
3124 else
3125 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
3126 }
3127 }
3128 } else {
3129 unsigned TmpReg = makeAnotherReg(Type::IntTy);
3130 if (!isLeftShift && isSigned) {
3131 // If this is a SHR of a Long, then we need to do funny sign extension
3132 // stuff. TmpReg gets the value to use as the high-part if we are
3133 // shifting more than 32 bits.
3134 BuildMI(*MBB, IP, X86::SAR32ri, 2, TmpReg).addReg(SrcReg).addImm(31);
3135 } else {
3136 // Other shifts use a fixed zero value if the shift is more than 32
3137 // bits.
3138 BuildMI(*MBB, IP, X86::MOV32ri, 1, TmpReg).addImm(0);
3139 }
3141 // Initialize CL with the shift amount...
3142 unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
3143 BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
3145 unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
3146 unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
3147 if (isLeftShift) {
3148 // TmpReg2 = shld inHi, inLo
3149 BuildMI(*MBB, IP, X86::SHLD32rrCL,2,TmpReg2).addReg(SrcReg+1)
3150 .addReg(SrcReg);
3151 // TmpReg3 = shl inLo, CL
3152 BuildMI(*MBB, IP, X86::SHL32rCL, 1, TmpReg3).addReg(SrcReg);
3154 // Set the flags to indicate whether the shift was by more than 32 bits.
3155 BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
3157 // DestHi = (>32) ? TmpReg3 : TmpReg2;
3158 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
3159 DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
3160 // DestLo = (>32) ? TmpReg : TmpReg3;
3161 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
3162 DestReg).addReg(TmpReg3).addReg(TmpReg);
3163 } else {
3164 // TmpReg2 = shrd inLo, inHi
3165 BuildMI(*MBB, IP, X86::SHRD32rrCL,2,TmpReg2).addReg(SrcReg)
3166 .addReg(SrcReg+1);
3167 // TmpReg3 = s[ah]r inHi, CL
3168 BuildMI(*MBB, IP, isSigned ? X86::SAR32rCL : X86::SHR32rCL, 1, TmpReg3)
3169 .addReg(SrcReg+1);
3171 // Set the flags to indicate whether the shift was by more than 32 bits.
3172 BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
3174 // DestLo = (>32) ? TmpReg3 : TmpReg2;
3175 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
3176 DestReg).addReg(TmpReg2).addReg(TmpReg3);
3178 // DestHi = (>32) ? TmpReg : TmpReg3;
3179 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
3180 DestReg+1).addReg(TmpReg3).addReg(TmpReg);
3181 }
3182 }
3183 return;
3184 }
3186 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
3187 // The shift amount is constant, guaranteed to be a ubyte. Get its value.
3188 assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");
3190 if (CUI->getValue() == 1 && isLeftShift) { // X << 1 -> X+X
3191 static const unsigned AddOpC[] = { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr };
3192 BuildMI(*MBB, IP, AddOpC[Class], 2,DestReg).addReg(SrcReg).addReg(SrcReg);
3193 } else {
3194 const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
3195 BuildMI(*MBB, IP, Opc[Class], 2,
3196 DestReg).addReg(SrcReg).addImm(CUI->getValue());
3197 }
3198 } else { // The shift amount is non-constant.
3199 unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP);
3200 BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
3202 const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
3203 BuildMI(*MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
3204 }
3205 }
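// Summary of the non-constant 64-bit shift above: both the "amount < 32"
// result (SHLD/SHRD plus a 32-bit shift) and the "amount >= 32" result (a
// single 32-bit shift plus sign/zero fill) are computed, then TEST CL,32 and
// CMOVNE pick the right pair without a branch. Note that this emits CMOV
// unconditionally, so it assumes a target where conditional moves exist.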
3208 /// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
3209 /// instruction. The load and store instructions are the only place where we
3210 /// need to worry about the memory layout of the target machine.
3212 void X86ISel::visitLoadInst(LoadInst &I) {
3213 // Check to see if this load instruction is going to be folded into a binary
3214 // instruction, like add. If so, we don't want to emit it. Wouldn't a real
3215 // pattern matching instruction selector be nice?
3216 unsigned Class = getClassB(I.getType());
3217 if (I.hasOneUse()) {
3218 Instruction *User = cast<Instruction>(I.use_back());
3219 switch (User->getOpcode()) {
3220 case Instruction::Cast:
3221 // If this is a cast from a signed-integer type to a floating point type,
3222 // fold the cast here.
3223 if (getClassB(User->getType()) == cFP &&
3224 (I.getType() == Type::ShortTy || I.getType() == Type::IntTy ||
3225 I.getType() == Type::LongTy)) {
3226 unsigned DestReg = getReg(User);
3227 static const unsigned Opcode[] = {
3228 0/*BYTE*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m
3229 };
3231 if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(0))) {
3232 unsigned FI = getFixedSizedAllocaFI(AI);
3233 addFrameReference(BuildMI(BB, Opcode[Class], 4, DestReg), FI);
3234 } else {
3235 X86AddressMode AM;
3236 getAddressingMode(I.getOperand(0), AM);
3237 addFullAddress(BuildMI(BB, Opcode[Class], 4, DestReg), AM);
3238 }
3239 return;
3240 } else {
3241 User = 0;
3242 }
3243 break;
3245 case Instruction::Add:
3246 case Instruction::Sub:
3247 case Instruction::And:
3248 case Instruction::Or:
3249 case Instruction::Xor:
3250 if (Class == cLong) User = 0;
3251 break;
3252 case Instruction::Mul:
3253 case Instruction::Div:
3254 if (Class != cFP) User = 0;
3255 break; // Folding only implemented for floating point.
3256 default: User = 0; break;
3257 }
3259 if (User) {
3260 // Okay, we found a user. If the load is the first operand and there is
3261 // no second operand load, reverse the operand ordering. Note that this
3262 // can fail for a subtract (ie, no change will be made).
3263 bool Swapped = false;
3264 if (!isa<LoadInst>(User->getOperand(1)))
3265 Swapped = !cast<BinaryOperator>(User)->swapOperands();
3267 // Okay, now that everything is set up, if this load is used by the second
3268 // operand, and if there are no instructions that invalidate the load
3269 // before the binary operator, eliminate the load.
3270 if (User->getOperand(1) == &I &&
3271 isSafeToFoldLoadIntoInstruction(I, *User))
3272 return; // Eliminate the load!
3274 // If this is a floating point sub or div, we won't be able to swap the
3275 // operands, but we will still be able to eliminate the load.
3276 if (Class == cFP && User->getOperand(0) == &I &&
3277 !isa<LoadInst>(User->getOperand(1)) &&
3278 (User->getOpcode() == Instruction::Sub ||
3279 User->getOpcode() == Instruction::Div) &&
3280 isSafeToFoldLoadIntoInstruction(I, *User))
3281 return; // Eliminate the load!
3283 // If we swapped the operands to the instruction, but couldn't fold the
3284 // load anyway, swap them back. We don't want to break add X, int
3285 // folding.
3286 if (Swapped) cast<BinaryOperator>(User)->swapOperands();
3287 }
3288 }
3290 static const unsigned Opcodes[] = {
3291 X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD32m, X86::MOV32rm
3292 };
3293 unsigned Opcode = Opcodes[Class];
3294 if (I.getType() == Type::DoubleTy) Opcode = X86::FLD64m;
3296 unsigned DestReg = getReg(I);
3298 if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(0))) {
3299 unsigned FI = getFixedSizedAllocaFI(AI);
3300 if (Class == cLong) {
3301 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, DestReg), FI);
3302 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), FI, 4);
3303 } else {
3304 addFrameReference(BuildMI(BB, Opcode, 4, DestReg), FI);
3305 }
3306 } else {
3307 X86AddressMode AM;
3308 getAddressingMode(I.getOperand(0), AM);
3310 if (Class == cLong) {
3311 addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg), AM);
3312 AM.Disp += 4;
3313 addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), AM);
3314 } else {
3315 addFullAddress(BuildMI(BB, Opcode, 4, DestReg), AM);
3316 }
3317 }
3318 }
3320 /// visitStoreInst - Implement LLVM store instructions in terms of the x86
3321 /// 'mov' instruction.
3323 void X86ISel::visitStoreInst(StoreInst &I) {
3324 X86AddressMode AM;
3325 getAddressingMode(I.getOperand(1), AM);
3327 const Type *ValTy = I.getOperand(0)->getType();
3328 unsigned Class = getClassB(ValTy);
3330 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
3331 uint64_t Val = CI->getRawValue();
3332 if (Class == cLong) {
3333 addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(Val & ~0U);
3334 AM.Disp += 4;
3335 addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(Val>>32);
3336 } else {
3337 static const unsigned Opcodes[] = {
3338 X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
3339 };
3340 unsigned Opcode = Opcodes[Class];
3341 addFullAddress(BuildMI(BB, Opcode, 5), AM).addImm(Val);
3342 }
  } else if (isa<ConstantPointerNull>(I.getOperand(0))) {
    addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(0);
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(0))) {
    addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addGlobalAddress(GV);
  } else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
    addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CB->getValue());
  } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) {
    // Store constant FP values with integer instructions to avoid having to
    // load the constants from the constant pool then do a store.
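    // (The unions below just reinterpret the IEEE-754 bit pattern of the
    // constant as an integer immediate; a double becomes two 32-bit stores,
    // low word first, since x86 is little-endian.)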
    if (CFP->getType() == Type::FloatTy) {
      union {
        unsigned I;
        float    F;
      } V;
      V.F = CFP->getValue();
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(V.I);
    } else {
      union {
        uint64_t I;
        double   F;
      } V;
      V.F = CFP->getValue();
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm((unsigned)V.I);
      AM.Disp += 4;
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(
                                                          unsigned(V.I >> 32));
    }
  } else if (Class == cLong) {
    unsigned ValReg = getReg(I.getOperand(0));
    addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg);
    AM.Disp += 4;
    addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg+1);
  } else {
    // FIXME: stop emitting these two instructions:
    //    movl $global,%eax
    //    movl %eax,(%ebx)
    // when one instruction will suffice.  That includes when the global
    // has an offset applied to it.
    unsigned ValReg = getReg(I.getOperand(0));
    static const unsigned Opcodes[] = {
      X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
    };
    unsigned Opcode = Opcodes[Class];
    if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;

    addFullAddress(BuildMI(BB, Opcode, 1+4), AM).addReg(ValReg);
  }
}

/// visitCastInst - Here we have various kinds of copying with or without sign
/// extension going on.
///
void X86ISel::visitCastInst(CastInst &CI) {
  Value *Op = CI.getOperand(0);

  unsigned SrcClass = getClassB(Op->getType());
  unsigned DestClass = getClassB(CI.getType());
  // Noop casts are not emitted: getReg will return the source operand as the
  // register to use for any uses of the noop cast.
  if (DestClass == SrcClass) {
    // The only detail in this plan is that casts from double -> float are
    // truncating operations that we have to codegen through memory (despite
    // the fact that the source/dest registers are the same class).
    if (CI.getType() != Type::FloatTy || Op->getType() != Type::DoubleTy)
      return;
  }

  // If this is a cast from a 32-bit integer to a Long type, and the only uses
  // of the cast are GEP instructions, then the cast does not need to be
  // generated explicitly, it will be folded into the GEP.
  if (DestClass == cLong && SrcClass == cInt) {
    bool AllUsesAreGEPs = true;
    for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I)
      if (!isa<GetElementPtrInst>(*I)) {
        AllUsesAreGEPs = false;
        break;
      }

    // No need to codegen this cast if all users are getelementptr instrs...
    if (AllUsesAreGEPs) return;
  }

  // If this cast converts a load from a short, int, or long integer to a FP
  // value, we will have folded this cast away.
  if (DestClass == cFP && isa<LoadInst>(Op) && Op->hasOneUse() &&
      (Op->getType() == Type::ShortTy || Op->getType() == Type::IntTy ||
       Op->getType() == Type::LongTy))
    return;

  unsigned DestReg = getReg(CI);
  MachineBasicBlock::iterator MI = BB->end();
  emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
}

/// emitCastOperation - Common code shared between visitCastInst and constant
/// expression cast support.
///
void X86ISel::emitCastOperation(MachineBasicBlock *BB,
                                MachineBasicBlock::iterator IP,
                                Value *Src, const Type *DestTy,
                                unsigned DestReg) {
  const Type *SrcTy = Src->getType();
  unsigned SrcClass = getClassB(SrcTy);
  unsigned DestClass = getClassB(DestTy);
  unsigned SrcReg = getReg(Src, BB, IP);

  // Implement casts to bool by using compare on the operand followed by set if
  // not zero on the result.
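  // For x87 (cFP) sources there is no flags-producing compare against an
  // integer register: FTST compares ST(0) with +0.0, FNSTSW8r copies the FPU
  // status word into AX, and SAHF transfers AH into EFLAGS so that the SETNE
  // below can test ZF.  For cLong, OR-ing the two halves together sets ZF
  // iff the whole 64-bit value is zero.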
  if (DestTy == Type::BoolTy) {
    switch (SrcClass) {
    case cByte:
      BuildMI(*BB, IP, X86::TEST8rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cShort:
      BuildMI(*BB, IP, X86::TEST16rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cInt:
      BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cLong: {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::OR32rr, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
      break;
    }
    case cFP:
      BuildMI(*BB, IP, X86::FTST, 1).addReg(SrcReg);
      BuildMI(*BB, IP, X86::FNSTSW8r, 0);
      BuildMI(*BB, IP, X86::SAHF, 1);
      break;
    }

    // If the zero flag is not set, then the value is true, set the byte to
    // true.
    BuildMI(*BB, IP, X86::SETNEr, 1, DestReg);
    return;
  }

  static const unsigned RegRegMove[] = {
    X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
  };

  // Implement casts between values of the same type class (as determined by
  // getClass) by using a register-to-register move.
  if (SrcClass == DestClass) {
    if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
      BuildMI(*BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
    } else if (SrcClass == cFP) {
      if (SrcTy == Type::FloatTy) {  // float -> double
        assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
        BuildMI(*BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
      } else {                       // double -> float
        assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
               "Unknown cFP member!");
        // Truncate from double to float by storing to memory as float, then
        // reading it back.
        unsigned FltAlign = TM.getTargetData().getFloatAlignment();
        int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
        addFrameReference(BuildMI(*BB, IP, X86::FST32m, 5),
                          FrameIdx).addReg(SrcReg);
        addFrameReference(BuildMI(*BB, IP, X86::FLD32m, 5, DestReg), FrameIdx);
      }
    } else if (SrcClass == cLong) {
      BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
      BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg+1);
    } else {
      assert(0 && "Cannot handle this type of cast instruction!");
    }
    return;
  }

  // Handle cast of SMALLER int to LARGER int using a move with sign extension
  // or zero extension, depending on whether the source type was signed.
  if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) &&
      SrcClass < DestClass) {
    bool isLong = DestClass == cLong;
    if (isLong) DestClass = cInt;

    static const unsigned Opc[][4] = {
      { X86::MOVSX16rr8, X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOV32rr }, // s
      { X86::MOVZX16rr8, X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOV32rr }  // u
    };
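    // (The table is indexed by SrcClass + DestClass - 1: 0 = byte->short,
    // 1 = byte->int, 2 = short->int, and 3 = int->int, a plain move used for
    // the low word when widening to a long.)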

    bool isUnsigned = SrcTy->isUnsigned() || SrcTy == Type::BoolTy;
    BuildMI(*BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
            DestReg).addReg(SrcReg);

    if (isLong) {  // Handle upper 32 bits as appropriate...
      if (isUnsigned)     // Zero out top bits...
        BuildMI(*BB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
      else                // Sign extend bottom half...
        BuildMI(*BB, IP, X86::SAR32ri, 2, DestReg+1).addReg(DestReg).addImm(31);
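        // (SAR by 31 leaves 0 or -1 in DestReg+1, i.e. the sign bit of the
        // low word replicated across the entire upper word.)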
    }
    return;
  }

  // Special case long -> int ...
  if (SrcClass == cLong && DestClass == cInt) {
    BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
    return;
  }

  // Handle cast of LARGER int to SMALLER int using a move to EAX followed by a
  // move out of AX or AL.
  if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
      && SrcClass > DestClass) {
    static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
    BuildMI(*BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
    BuildMI(*BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
    return;
  }

  // Handle casts from integer to floating point now...
  if (DestClass == cFP) {
    // Promote the integer to a type supported by FLD.  We do this because there
    // are no unsigned FLD instructions, so we must promote an unsigned value to
    // a larger signed value, then use FLD on the larger value.
    //
    const Type *PromoteType = 0;
    unsigned PromoteOpcode = 0;
    unsigned RealDestReg = DestReg;
    switch (SrcTy->getTypeID()) {
    case Type::BoolTyID:
    case Type::SByteTyID:
      // We don't have the facilities for directly loading byte sized data from
      // memory (even signed).  Promote it to 16 bits.
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVSX16rr8;
      break;
    case Type::UByteTyID:
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVZX16rr8;
      break;
    case Type::UShortTyID:
      PromoteType = Type::IntTy;
      PromoteOpcode = X86::MOVZX32rr16;
      break;
    case Type::ULongTyID:
    case Type::UIntTyID:
      // Don't fild into the real destination: an offset may need to be added
      // to the result below.
      DestReg = makeAnotherReg(Type::DoubleTy);
      break;
    default:  // No promotion needed...
      break;
    }

    if (PromoteType) {
      unsigned TmpReg = makeAnotherReg(PromoteType);
      BuildMI(*BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
      SrcTy = PromoteType;
      SrcClass = getClass(PromoteType);
      SrcReg = TmpReg;
    }

    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());

    if (SrcClass == cLong) {
      addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
                        FrameIdx).addReg(SrcReg);
      addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
                        FrameIdx, 4).addReg(SrcReg+1);
    } else {
      static const unsigned Op1[] = { X86::MOV8mr, X86::MOV16mr, X86::MOV32mr };
      addFrameReference(BuildMI(*BB, IP, Op1[SrcClass], 5),
                        FrameIdx).addReg(SrcReg);
    }

    static const unsigned Op2[] =
      { 0/*byte*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m };
    addFrameReference(BuildMI(*BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);

    if (SrcTy == Type::UIntTy) {
      // If this is a cast from uint -> double, we need to be careful about if
      // the "sign" bit is set.  If so, we don't want to make a negative number,
      // we want to make a positive number.  Emit code to add an offset if the
      // sign bit is set.

      // Compute whether the sign bit is set by shifting the reg right 31 bits.
      unsigned IsNeg = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::SHR32ri, 2, IsNeg).addReg(SrcReg).addImm(31);

      // Create a CP value that has the offset in one word and 0 in the other.
      static ConstantInt *TheOffset = ConstantUInt::get(Type::ULongTy,
                                                        0x4f80000000000000ULL);
      unsigned CPI = F->getConstantPool()->getConstantPoolIndex(TheOffset);
      BuildMI(*BB, IP, X86::FADD32m, 5, RealDestReg).addReg(DestReg)
        .addConstantPoolIndex(CPI).addZImm(4).addReg(IsNeg).addSImm(0);
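      // (Little-endian layout makes word 0 of the constant 0x00000000, which
      // is +0.0f, and word 1 0x4f800000, which is 2^32 as a float.  The FADD
      // address CP + IsNeg*4 therefore adds exactly 2^32 back in when FILD
      // treated the sign bit as negative, and 0.0 otherwise.)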

    } else if (SrcTy == Type::ULongTy) {
      // We need special handling for unsigned 64-bit integer sources.  If the
      // input number has the "sign bit" set, then we loaded it incorrectly as a
      // negative 64-bit number.  In this case, add an offset value.

      // Emit a test instruction to see if the dynamic input value was signed.
      BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg+1).addReg(SrcReg+1);

      // If the sign bit is set, get a pointer to an offset, otherwise get a
      // pointer to a zero.
      MachineConstantPool *CP = F->getConstantPool();
      unsigned Zero = makeAnotherReg(Type::IntTy);
      Constant *Null = Constant::getNullValue(Type::UIntTy);
      addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Zero),
                               CP->getConstantPoolIndex(Null));
      unsigned Offset = makeAnotherReg(Type::IntTy);
      Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
      addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Offset),
                               CP->getConstantPoolIndex(OffsetCst));
      unsigned Addr = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);
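      // (The TEST of the high word above sets SF from the top bit; CMOVS then
      // picks the address of 0x5f800000 -- the bit pattern of 2^64 as a
      // float -- when the value was loaded as negative, and the address of
      // +0.0f otherwise.)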

      // Load the constant for an add.  FIXME: this could make an 'fadd' that
      // reads directly from memory, but we don't support these yet.
      unsigned ConstReg = makeAnotherReg(Type::DoubleTy);
      addDirectMem(BuildMI(*BB, IP, X86::FLD32m, 4, ConstReg), Addr);

      BuildMI(*BB, IP, X86::FpADD, 2, RealDestReg)
        .addReg(ConstReg).addReg(DestReg);
    }

    return;
  }

  // Handle casts from floating point to integer now...
  if (SrcClass == cFP) {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    //
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(*BB, IP, X86::FNSTCW16m, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
    addFrameReference(BuildMI(*BB, IP, X86::MOV8rm, 4, HighPartOfCW),
                      CWFrameIdx, 1);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, IP, X86::MOV8mi, 5),
                      CWFrameIdx, 1).addImm(12);
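    // (The rounding-control field is bits 10-11 of the control word, i.e.
    // bits 2-3 of its high byte; writing 12 == 0b1100 sets RC to 11b, round
    // toward zero, which is what a C-style FP -> int cast requires.)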

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, IP, X86::MOV8mr, 5),
                      CWFrameIdx, 1).addReg(HighPartOfCW);

    // We don't have the facilities for directly storing byte sized data to
    // memory.  Promote it to 16 bits.  We also must promote unsigned values to
    // larger classes because we only have signed FP stores.
    unsigned StoreClass = DestClass;
    const Type *StoreTy = DestTy;
    if (StoreClass == cByte || DestTy->isUnsigned())
      switch (StoreClass) {
      case cByte:  StoreTy = Type::ShortTy; StoreClass = cShort; break;
      case cShort: StoreTy = Type::IntTy;   StoreClass = cInt;   break;
      case cInt:   StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      // The following treatment of cLong may not be perfectly right,
      // but it survives chains of casts of the form
      // double->ulong->double.
      case cLong:  StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      default: assert(0 && "Unknown store class!");
      }

    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());

    static const unsigned Op1[] =
      { 0, X86::FIST16m, X86::FIST32m, 0, X86::FISTP64m };
    addFrameReference(BuildMI(*BB, IP, Op1[StoreClass], 5),
                      FrameIdx).addReg(SrcReg);

    if (DestClass == cLong) {
      addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg), FrameIdx);
      addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg+1),
                        FrameIdx, 4);
    } else {
      static const unsigned Op2[] = { X86::MOV8rm, X86::MOV16rm, X86::MOV32rm };
      addFrameReference(BuildMI(*BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
    }

    // Reload the original control word now...
    addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
    return;
  }

  // Anything we haven't handled already, we can't (yet) handle at all.
  assert(0 && "Unhandled cast instruction!");
  abort();
}

void X86ISel::visitVAArgInst(VAArgInst &I) {
  unsigned VAListPtr = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);
  unsigned VAList = makeAnotherReg(Type::IntTy);
  addDirectMem(BuildMI(BB, X86::MOV32rm, 4, VAList), VAListPtr);

  unsigned Size;
  switch (I.getType()->getTypeID()) {
  default:
    std::cerr << I;
    assert(0 && "Error: bad type for va_next instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    Size = 4;
    addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
    Size = 8;
    addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
    addRegOffset(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), VAList, 4);
    break;
  case Type::DoubleTyID:
    Size = 8;
    addDirectMem(BuildMI(BB, X86::FLD64m, 4, DestReg), VAList);
    break;
  }

  // Increment the VAList pointer, then store the new pointer back through
  // VAListPtr.
  unsigned NP = makeAnotherReg(Type::IntTy);
  BuildMI(BB, X86::ADD32ri, 2, NP).addReg(VAList).addSImm(Size);
  addDirectMem(BuildMI(BB, X86::MOV32mr, 5), VAListPtr).addReg(NP);
}

/// visitGetElementPtrInst - instruction-select GEP instructions
///
void X86ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
  // If this GEP instruction will be folded into all of its users, we don't need
  // to explicitly calculate it!
  X86AddressMode AM;
  if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), AM)) {
    // Check all of the users of the instruction to see if they are loads and
    // stores.
    bool AllWillFold = true;
    for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI)
      if (cast<Instruction>(*UI)->getOpcode() != Instruction::Load)
        if (cast<Instruction>(*UI)->getOpcode() != Instruction::Store ||
            cast<Instruction>(*UI)->getOperand(0) == &I) {
          AllWillFold = false;
          break;
        }

    // If the instruction is foldable, and will be folded into all users, don't
    // emit it!
    if (AllWillFold) return;
  }

  unsigned outputReg = getReg(I);
  emitGEPOperation(BB, BB->end(), I.getOperand(0),
                   I.op_begin()+1, I.op_end(), outputReg);
}

/// getGEPIndex - Inspect the getelementptr operands specified with GEPOps and
/// GEPTypes (the derived types being stepped through at each level).  On return
/// from this function, if some indexes of the instruction are representable as
/// an X86 lea instruction, the machine operands are put into the AM addressing
/// mode and the consumed indexes are popped from the GEPOps/GEPTypes lists;
/// otherwise the lists are left untouched.  If this produces an addressing
/// mode that only partially consumes the input, the BaseReg field of the
/// addressing mode must be left free so the remaining operands can chain
/// into it.
///
/// Note that there is one fewer entry in GEPTypes than there is in GEPOps.
///
void X86ISel::getGEPIndex(MachineBasicBlock *MBB,
                          MachineBasicBlock::iterator IP,
                          std::vector<Value*> &GEPOps,
                          std::vector<const Type*> &GEPTypes,
                          X86AddressMode &AM) {
  const TargetData &TD = TM.getTargetData();

  // Clear out the state we are working with...
  AM.BaseType = X86AddressMode::RegBase;
  AM.Base.Reg = 0;   // No base register
  AM.Scale = 1;      // Unit scale
  AM.IndexReg = 0;   // No index register
  AM.Disp = 0;       // No displacement

  // While there are GEP indexes that can be folded into the current address,
  // keep processing them.
  while (!GEPTypes.empty()) {
    if (const StructType *StTy = dyn_cast<StructType>(GEPTypes.back())) {
      // It's a struct access.  CUI is the index into the structure,
      // which names the field.  This index must have unsigned type.
      const ConstantUInt *CUI = cast<ConstantUInt>(GEPOps.back());

      // Use the TargetData structure to pick out what the layout of the
      // structure is in memory.  Since the structure index must be constant, we
      // can get its value and use it to find the right byte offset from the
      // StructLayout class's list of structure member offsets.
      AM.Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();

      // idx is the index into the array.  Unlike with structure
      // indices, we may not know its actual value at code-generation
      // time.

      // If idx is a constant, fold it into the offset.
      unsigned TypeSize = TD.getTypeSize(SqTy->getElementType());
      if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
        AM.Disp += TypeSize*CSI->getValue();
      } else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) {
        AM.Disp += TypeSize*CUI->getValue();
      } else {
        // If the index reg is already taken, we can't handle this index.
        if (AM.IndexReg) return;

        // If this is a size that we can handle, then add the index as the
        // scaled index register.
        switch (TypeSize) {
        case 1: case 2: case 4: case 8:
          // These are all acceptable scales on X86.
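          // (The x86 SIB byte can only encode scale factors of 1, 2, 4, or 8;
          // any other element size falls back to the explicit multiply in
          // emitGEPOperation.)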
          AM.Scale = TypeSize;
          break;
        default:
          // Otherwise, we can't handle this scale
          return;
        }

        if (CastInst *CI = dyn_cast<CastInst>(idx))
          if (CI->getOperand(0)->getType() == Type::IntTy ||
              CI->getOperand(0)->getType() == Type::UIntTy)
            idx = CI->getOperand(0);

        AM.IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
      }

      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    }
  }

  // GEPTypes is empty, which means we have a single operand left.  Set it as
  // the base register.
  //
  assert(AM.Base.Reg == 0);

  if (AllocaInst *AI = dyn_castFixedAlloca(GEPOps.back())) {
    AM.BaseType = X86AddressMode::FrameIndexBase;
    AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
    GEPOps.pop_back();
    return;
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps.back())) {
    AM.GV = GV;
    GEPOps.pop_back();
    return;
  }

  AM.Base.Reg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
  GEPOps.pop_back();        // Consume the last GEP operand
}

/// isGEPFoldable - Return true if the specified GEP can be completely
/// folded into the addressing mode of a load/store or lea instruction.
bool X86ISel::isGEPFoldable(MachineBasicBlock *MBB,
                            Value *Src, User::op_iterator IdxBegin,
                            User::op_iterator IdxEnd, X86AddressMode &AM) {

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*>
    GEPTypes(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
             gep_type_end(Src->getType(), IdxBegin, IdxEnd));

  MachineBasicBlock::iterator IP;
  if (MBB) IP = MBB->end();
  getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);

  // We can fold it away iff the getGEPIndex call eliminated all operands.
  return GEPOps.empty();
}

void X86ISel::emitGEPOperation(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator IP,
                               Value *Src, User::op_iterator IdxBegin,
                               User::op_iterator IdxEnd, unsigned TargetReg) {
  const TargetData &TD = TM.getTargetData();

  // If this is a getelementptr null, with all constant integer indices, just
  // replace it with TargetReg = 42.
  if (isa<ConstantPointerNull>(Src)) {
    User::op_iterator I = IdxBegin;
    for (; I != IdxEnd; ++I)
      if (!isa<ConstantInt>(*I))
        break;
    if (I == IdxEnd) {   // All constant indices
      unsigned Offset = TD.getIndexedOffset(Src->getType(),
                                       std::vector<Value*>(IdxBegin, IdxEnd));
      BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addImm(Offset);
      return;
    }
  }

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*> GEPTypes;
  GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
                  gep_type_end(Src->getType(), IdxBegin, IdxEnd));

  // Keep emitting instructions until we consume the entire GEP instruction.
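  // Note that the loop below works backwards from the final result: each
  // chunk that getGEPIndex folds is emitted into the current TargetReg first,
  // and the code computing the not-yet-consumed prefix of the GEP is then
  // generated into a fresh register that the already-emitted instructions use
  // as their base.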
  while (!GEPOps.empty()) {
    unsigned OldSize = GEPOps.size();
    X86AddressMode AM;
    getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);

    if (GEPOps.size() != OldSize) {
      // getGEPIndex consumed some of the input.  Build an LEA instruction here.
      unsigned NextTarget = 0;
      if (!GEPOps.empty()) {
        assert(AM.Base.Reg == 0 &&
           "getGEPIndex should have left the base register open for chaining!");
        NextTarget = AM.Base.Reg = makeAnotherReg(Type::UIntTy);
      }

      if (AM.BaseType == X86AddressMode::RegBase &&
          AM.IndexReg == 0 && AM.Disp == 0 && !AM.GV)
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(AM.Base.Reg);
      else if (AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0 &&
               AM.IndexReg == 0 && AM.Disp == 0)
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(AM.GV);
      else
        addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg), AM);

      TargetReg = NextTarget;
    } else if (GEPTypes.empty()) {
      // The getGEPIndex operation didn't want to build an LEA.  Check to see if
      // all operands are consumed but the base pointer.  If so, just load it
      // into the register.
      if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps[0])) {
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(GV);
      } else {
        unsigned BaseReg = getReg(GEPOps[0], MBB, IP);
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
      }
      break;                // we are now done

    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();

      // Many GEP instructions use a [cast (int/uint) to LongTy] as their
      // operand on X86.  Handle this case directly now...
      if (CastInst *CI = dyn_cast<CastInst>(idx))
        if (CI->getOperand(0)->getType() == Type::IntTy ||
            CI->getOperand(0)->getType() == Type::UIntTy)
          idx = CI->getOperand(0);

      // We want to add BaseReg to (idxReg * sizeof ElementType).  First, we
      // must find the size of the pointed-to type (Not coincidentally, the next
      // type is the type of the elements in the array).
      const Type *ElTy = SqTy->getElementType();
      unsigned elementSize = TD.getTypeSize(ElTy);

      // If idxReg is a constant, we don't need to perform the multiply!
      if (ConstantInt *CSI = dyn_cast<ConstantInt>(idx)) {
        if (!CSI->isNullValue()) {
          unsigned Offset = elementSize*CSI->getRawValue();
          unsigned Reg = makeAnotherReg(Type::UIntTy);
          BuildMI(*MBB, IP, X86::ADD32ri, 2, TargetReg)
            .addReg(Reg).addImm(Offset);
          --IP;            // Insert the next instruction before this one.
          TargetReg = Reg; // Codegen the rest of the GEP into this
        }
      } else if (elementSize == 1) {
        // If the element size is 1, we don't have to multiply, just add
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2,TargetReg).addReg(Reg).addReg(idxReg);
        --IP;            // Insert the next instruction before this one.
        TargetReg = Reg; // Codegen the rest of the GEP into this
      } else {
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned OffsetReg = makeAnotherReg(Type::UIntTy);

        // Make sure we can back the iterator up to point to the first
        // instruction emitted.
        MachineBasicBlock::iterator BeforeIt = IP;
        if (IP == MBB->begin())
          BeforeIt = MBB->end();
        else
          --BeforeIt;
        doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);

        // Emit an ADD to add OffsetReg to the basePtr.
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2, TargetReg)
          .addReg(Reg).addReg(OffsetReg);

        // Step to the first instruction of the multiply.
        if (BeforeIt == MBB->end())
          IP = MBB->begin();
        else
          IP = ++BeforeIt;

        TargetReg = Reg; // Codegen the rest of the GEP into this
      }
    }
  }
}

/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
void X86ISel::visitAllocaInst(AllocaInst &I) {
  // If this is a fixed size alloca in the entry block for the function, we
  // statically stack allocate the space, so we don't need to do anything here.
  //
  if (dyn_castFixedAlloca(&I)) return;

  // Find the data size of the alloca inst's getAllocatedType.
  const Type *Ty = I.getAllocatedType();
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);

  // Create a register to hold the temporary result of multiplying the type size
  // constant by the variable amount.
  unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
  unsigned SrcReg1 = getReg(I.getArraySize());

  // TotalSizeReg = mul <numelements>, <TypeSize>
  MachineBasicBlock::iterator MBBI = BB->end();
  doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);

  // AddedSize = add <TotalSizeReg>, 15
  unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ADD32ri, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15);

  // AlignedSize = and <AddedSize>, ~15
  unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::AND32ri, 2, AlignedSize).addReg(AddedSizeReg).addImm(~15);
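  // ((Size + 15) & ~15 rounds the byte count up to the next multiple of 16,
  // keeping ESP 16-byte aligned after the subtraction below.)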

  // Subtract size from stack pointer, thereby allocating some space.
  BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);

  // Put a pointer to the space into the result register, by copying
  // the stack pointer.
  BuildMI(BB, X86::MOV32rr, 1, getReg(I)).addReg(X86::ESP);

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  F->getFrameInfo()->CreateVariableSizedObject();
}

/// visitMallocInst - Malloc instructions are code generated into direct calls
/// to the library malloc.
///
void X86ISel::visitMallocInst(MallocInst &I) {
  unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
  unsigned Arg;

  if (ConstantUInt *C = dyn_cast<ConstantUInt>(I.getOperand(0))) {
    Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
  } else {
    Arg = makeAnotherReg(Type::UIntTy);
    unsigned Op0Reg = getReg(I.getOperand(0));
    MachineBasicBlock::iterator MBBI = BB->end();
    doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
  }

  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(Arg, Type::UIntTy));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("malloc", true);
  doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args);
}

/// visitFreeInst - Free instructions are code gen'd to call the free libc
/// function.
///
void X86ISel::visitFreeInst(FreeInst &I) {
  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(I.getOperand(0)));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("free", true);
  doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}

/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peep-hole fashion.  The
/// generated code sucks but the implementation is nice and simple.
///
FunctionPass *llvm::createX86SimpleInstructionSelector(TargetMachine &TM) {
  return new X86ISel(TM);
}