//===-- X86ISelSimple.cpp - A simple instruction selector for x86 ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple peephole instruction selector for the x86 target
//
//===----------------------------------------------------------------------===//
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/ADT/Statistic.h"

using namespace llvm;

static Statistic<>
NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
/// TypeClass - Used by the X86 backend to group LLVM types by their basic X86
/// register class.
enum TypeClass {
  cByte, cShort, cInt, cFP, cLong
};
/// getClass - Turn a primitive type into a "class" number which is based on the
/// size of the type, and whether or not it is floating point.
///
static inline TypeClass getClass(const Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::SByteTyID:
  case Type::UByteTyID:   return cByte;      // Byte operands are class #0
  case Type::ShortTyID:
  case Type::UShortTyID:  return cShort;     // Short operands are class #1
  case Type::IntTyID:
  case Type::UIntTyID:
  case Type::PointerTyID: return cInt;       // Ints and pointers are class #2
  case Type::FloatTyID:
  case Type::DoubleTyID:  return cFP;        // Floating Point is #3
  case Type::LongTyID:
  case Type::ULongTyID:   return cLong;      // Longs are class #4
  default:
    assert(0 && "Invalid type to getClass!");
    return cByte;   // not reached
  }
}
// getClassB - Just like getClass, but treat boolean values as bytes.
static inline TypeClass getClassB(const Type *Ty) {
  if (Ty == Type::BoolTy) return cByte;
  return getClass(Ty);
}
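// Illustrative only: getClassB(Type::BoolTy) == cByte, while getClass would
// reject bools.  Pointers land in cInt and longs in cLong, so a bool compare
// and a byte compare select the same 8-bit instruction forms.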
struct X86ISel : public FunctionPass, InstVisitor<X86ISel> {
  TargetMachine &TM;
  MachineFunction *F;                 // The function we are compiling into
  MachineBasicBlock *BB;              // The current MBB we are compiling
  int VarArgsFrameIndex;              // FrameIndex for start of varargs area
  int ReturnAddressIndex;             // FrameIndex for the return address

  std::map<Value*, unsigned> RegMap;  // Mapping between Val's and SSA Regs

  // MBBMap - Mapping between LLVM BB -> Machine BB
  std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;

  // AllocaMap - Mapping from fixed sized alloca instructions to the
  // FrameIndex for the alloca.
  std::map<AllocaInst*, unsigned> AllocaMap;

  X86ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}
  /// runOnFunction - Top level implementation of instruction selection for
  /// the entire function.
  ///
  bool runOnFunction(Function &Fn) {
    // First pass over the function, lower any unknown intrinsic functions
    // with the IntrinsicLowering class.
    LowerUnknownIntrinsicFunctionCalls(Fn);

    F = &MachineFunction::construct(&Fn, TM);

    // Create all of the machine basic blocks for the function...
    for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
      F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));

    BB = &F->front();

    // Set up a frame object for the return address.  This is used by the
    // llvm.returnaddress & llvm.frameaddress intrinsics.
    ReturnAddressIndex = F->getFrameInfo()->CreateFixedObject(4, -4);

    // Copy incoming arguments off of the stack...
    LoadArgumentsToVirtualRegs(Fn);

    // Instruction select everything except PHI nodes
    visit(Fn);

    // Select the PHI nodes
    SelectPHINodes();

    // Insert the FP_REG_KILL instructions into blocks that need them.
    InsertFPRegKills();

    RegMap.clear();
    MBBMap.clear();
    AllocaMap.clear();
    F = 0;

    // We always build a machine code representation for the function
    return true;
  }
  virtual const char *getPassName() const {
    return "X86 Simple Instruction Selection";
  }

  /// visitBasicBlock - This method is called when we are visiting a new basic
  /// block.  It simply makes the MachineBasicBlock that was created for this
  /// block in runOnFunction the current block to emit code into.  Subsequent
  /// visit* calls will be invoked for all instructions in the basic block.
  ///
  void visitBasicBlock(BasicBlock &LLVM_BB) {
    BB = MBBMap[&LLVM_BB];
  }
  /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
  /// function, lowering any calls to unknown intrinsic functions into the
  /// equivalent LLVM code.
  ///
  void LowerUnknownIntrinsicFunctionCalls(Function &F);

  /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
  /// from the stack into virtual registers.
  ///
  void LoadArgumentsToVirtualRegs(Function &F);

  /// SelectPHINodes - Insert machine code to generate phis.  This is tricky
  /// because we have to generate our sources into the source basic blocks,
  /// not the current one.
  ///
  void SelectPHINodes();

  /// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks
  /// that need them.  This only occurs due to the floating point stackifier
  /// not being aggressive enough to handle arbitrary global stackification.
  ///
  void InsertFPRegKills();

  // Visitation methods for various instructions.  These methods simply emit
  // fixed X86 code for each instruction.

  // Control flow operators
  void visitReturnInst(ReturnInst &RI);
  void visitBranchInst(BranchInst &BI);
  void visitUnreachableInst(UnreachableInst &UI) {}
  // ValueRecord - Describes a call operand or result either as a virtual
  // register number (Reg) or as an LLVM Value to be materialized later (Val).
  struct ValueRecord {
    Value *Val;
    unsigned Reg;
    const Type *Ty;
    ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
    ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
  };

  void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
              const std::vector<ValueRecord> &Args);
  void visitCallInst(CallInst &I);
  void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I);
  // Arithmetic operators
  void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
  void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
  void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
  void visitMul(BinaryOperator &B);

  void visitDiv(BinaryOperator &B) { visitDivRem(B); }
  void visitRem(BinaryOperator &B) { visitDivRem(B); }
  void visitDivRem(BinaryOperator &B);

  void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); }
  void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
  void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }
  // Comparison operators...
  void visitSetCondInst(SetCondInst &I);
  unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                          MachineBasicBlock *MBB,
                          MachineBasicBlock::iterator MBBI);
  void visitSelectInst(SelectInst &SI);
  // Memory Instructions
  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitGetElementPtrInst(GetElementPtrInst &I);
  void visitAllocaInst(AllocaInst &I);
  void visitMallocInst(MallocInst &I);
  void visitFreeInst(FreeInst &I);

  void visitShiftInst(ShiftInst &I);
  void visitPHINode(PHINode &I) {}      // PHI nodes handled by second pass
  void visitCastInst(CastInst &I);
  void visitVANextInst(VANextInst &I);
  void visitVAArgInst(VAArgInst &I);
  void visitInstruction(Instruction &I) {
    std::cerr << "Cannot instruction select: " << I;
    abort();
  }
  /// promote32 - Make a value 32-bits wide, and put it somewhere.
  ///
  void promote32(unsigned targetReg, const ValueRecord &VR);

  /// getAddressingMode - Get the addressing mode to use to address the
  /// specified value.  The returned value should be used with addFullAddress.
  void getAddressingMode(Value *Addr, X86AddressMode &AM);
  /// getGEPIndex - This is used to fold GEP instructions into X86 addressing
  /// modes.
  void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                   std::vector<Value*> &GEPOps,
                   std::vector<const Type*> &GEPTypes,
                   X86AddressMode &AM);
  /// isGEPFoldable - Return true if the specified GEP can be completely
  /// folded into the addressing mode of a load/store or lea instruction.
  bool isGEPFoldable(MachineBasicBlock *MBB,
                     Value *Src, User::op_iterator IdxBegin,
                     User::op_iterator IdxEnd, X86AddressMode &AM);

  /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
  /// constant expression GEP support.
  ///
  void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                        Value *Src, User::op_iterator IdxBegin,
                        User::op_iterator IdxEnd, unsigned TargetReg);

  /// emitCastOperation - Common code shared between visitCastInst and
  /// constant expression cast support.
  ///
  void emitCastOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                         Value *Src, const Type *DestTy, unsigned TargetReg);
  /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
  /// and constant expression support.
  ///
  void emitSimpleBinaryOperation(MachineBasicBlock *BB,
                                 MachineBasicBlock::iterator IP,
                                 Value *Op0, Value *Op1,
                                 unsigned OperatorClass, unsigned TargetReg);

  /// emitBinaryFPOperation - This method handles emission of floating point
  /// Add (0), Sub (1), Mul (2), and Div (3) operations.
  void emitBinaryFPOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator IP,
                             Value *Op0, Value *Op1,
                             unsigned OperatorClass, unsigned TargetReg);

  void emitMultiply(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                    Value *Op0, Value *Op1, unsigned TargetReg);

  void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                  unsigned DestReg, const Type *DestTy,
                  unsigned Op0Reg, unsigned Op1Reg);
  void doMultiplyConst(MachineBasicBlock *MBB,
                       MachineBasicBlock::iterator MBBI,
                       unsigned DestReg, const Type *DestTy,
                       unsigned Op0Reg, unsigned Op1Val);

  void emitDivRemOperation(MachineBasicBlock *BB,
                           MachineBasicBlock::iterator IP,
                           Value *Op0, Value *Op1, bool isDiv,
                           unsigned TargetReg);
  /// emitSetCCOperation - Common code shared between visitSetCondInst and
  /// constant expression support.
  ///
  void emitSetCCOperation(MachineBasicBlock *BB,
                          MachineBasicBlock::iterator IP,
                          Value *Op0, Value *Op1, unsigned Opcode,
                          unsigned TargetReg);

  /// emitShiftOperation - Common code shared between visitShiftInst and
  /// constant expression support.
  ///
  void emitShiftOperation(MachineBasicBlock *MBB,
                          MachineBasicBlock::iterator IP,
                          Value *Op, Value *ShiftAmount, bool isLeftShift,
                          const Type *ResultTy, unsigned DestReg);

  /// emitSelectOperation - Common code shared between visitSelectInst and the
  /// constant expression support.
  void emitSelectOperation(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator IP,
                           Value *Cond, Value *TrueVal, Value *FalseVal,
                           unsigned DestReg);

  /// copyConstantToRegister - Output the instructions required to put the
  /// specified constant into the specified register.
  ///
  void copyConstantToRegister(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator MBBI,
                              Constant *C, unsigned Reg);

  void emitUCOMr(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                 unsigned LHS, unsigned RHS);
  /// makeAnotherReg - This method returns the next register number we haven't
  /// yet used.
  ///
  /// Long values are handled somewhat specially.  They are always allocated
  /// as pairs of 32 bit integer values.  The register number returned is the
  /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits
  /// of the long value.
  ///
  unsigned makeAnotherReg(const Type *Ty) {
    assert(dynamic_cast<const X86RegisterInfo*>(TM.getRegisterInfo()) &&
           "Current target doesn't have X86 reg info??");
    const X86RegisterInfo *MRI =
      static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
    if (Ty == Type::LongTy || Ty == Type::ULongTy) {
      const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
      // Create the lower part
      F->getSSARegMap()->createVirtualRegister(RC);
      // Create the upper part.
      return F->getSSARegMap()->createVirtualRegister(RC)-1;
    }

    // Add the mapping of regnumber => reg class to MachineFunction
    const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
    return F->getSSARegMap()->createVirtualRegister(RC);
  }
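
  // Illustrative only: because of the pairing scheme above, a 'long' occupies
  // two consecutive virtual registers, so callers use Reg for the low 32 bits
  // and Reg+1 for the high 32 bits:
  //   unsigned Lo = makeAnotherReg(Type::LongTy);
  //   unsigned Hi = Lo + 1;    // second register created above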
  /// getReg - This method turns an LLVM value into a register number.
  ///
  unsigned getReg(Value &V) { return getReg(&V); }  // Allow references
  unsigned getReg(Value *V) {
    // Just append to the end of the current bb.
    MachineBasicBlock::iterator It = BB->end();
    return getReg(V, BB, It);
  }
  unsigned getReg(Value *V, MachineBasicBlock *MBB,
                  MachineBasicBlock::iterator IPt);

  /// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
  /// that is to be statically allocated with the initial stack frame
  /// layout.
  unsigned getFixedSizedAllocaFI(AllocaInst *AI);
};
/// dyn_castFixedAlloca - If the specified value is a fixed size alloca
/// instruction in the entry block, return it.  Otherwise, return a null
/// pointer.
static AllocaInst *dyn_castFixedAlloca(Value *V) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    BasicBlock *BB = AI->getParent();
    if (isa<ConstantUInt>(AI->getArraySize()) &&
        BB == &BB->getParent()->front())
      return AI;
  }
  return 0;
}
/// getReg - This method turns an LLVM value into a register number.
///
unsigned X86ISel::getReg(Value *V, MachineBasicBlock *MBB,
                         MachineBasicBlock::iterator IPt) {
  // If this operand is a constant, emit the code to copy the constant into
  // the register here...
  if (Constant *C = dyn_cast<Constant>(V)) {
    unsigned Reg = makeAnotherReg(V->getType());
    copyConstantToRegister(MBB, IPt, C, Reg);
    return Reg;
  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
    // Do not emit noop casts at all, unless it's a double -> float cast.
    if (getClassB(CI->getType()) == getClassB(CI->getOperand(0)->getType()) &&
        (CI->getType() != Type::FloatTy ||
         CI->getOperand(0)->getType() != Type::DoubleTy))
      return getReg(CI->getOperand(0), MBB, IPt);
  } else if (AllocaInst *AI = dyn_castFixedAlloca(V)) {
    // If the alloca address couldn't be folded into the instruction
    // addressing, emit an explicit LEA as appropriate.
    unsigned Reg = makeAnotherReg(V->getType());
    unsigned FI = getFixedSizedAllocaFI(AI);
    addFrameReference(BuildMI(*MBB, IPt, X86::LEA32r, 4, Reg), FI);
    return Reg;
  }

  unsigned &Reg = RegMap[V];
  if (Reg == 0)
    Reg = makeAnotherReg(V->getType());

  return Reg;
}
/// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca
/// that is to be statically allocated with the initial stack frame
/// layout.
unsigned X86ISel::getFixedSizedAllocaFI(AllocaInst *AI) {
  // Already computed this?
  std::map<AllocaInst*, unsigned>::iterator I = AllocaMap.lower_bound(AI);
  if (I != AllocaMap.end() && I->first == AI) return I->second;

  const Type *Ty = AI->getAllocatedType();
  ConstantUInt *CUI = cast<ConstantUInt>(AI->getArraySize());
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);
  TySize *= CUI->getValue();   // Get total allocated size...
  unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);

  // Create a new stack object using the frame manager...
  int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
  AllocaMap.insert(I, std::make_pair(AI, FrameIdx));
  return FrameIdx;
}
/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
void X86ISel::copyConstantToRegister(MachineBasicBlock *MBB,
                                     MachineBasicBlock::iterator IP,
                                     Constant *C, unsigned R) {
  if (isa<UndefValue>(C)) {
    switch (getClassB(C->getType())) {
    case cFP:
      // FIXME: SHOULD TEACH STACKIFIER ABOUT UNDEF VALUES!
      BuildMI(*MBB, IP, X86::FLD0, 0, R);
      return;
    case cLong:
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, R+1);
      // FALL THROUGH
    default:
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, R);
      return;
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    unsigned Class = 0;
    switch (CE->getOpcode()) {
    case Instruction::GetElementPtr:
      emitGEPOperation(MBB, IP, CE->getOperand(0),
                       CE->op_begin()+1, CE->op_end(), R);
      return;
    case Instruction::Cast:
      emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R);
      return;

    case Instruction::Xor: ++Class;   // FALL THROUGH
    case Instruction::Or:  ++Class;   // FALL THROUGH
    case Instruction::And: ++Class;   // FALL THROUGH
    case Instruction::Sub: ++Class;   // FALL THROUGH
    case Instruction::Add:
      emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                                Class, R);
      return;

    case Instruction::Mul:
      emitMultiply(MBB, IP, CE->getOperand(0), CE->getOperand(1), R);
      return;

    case Instruction::Div:
    case Instruction::Rem:
      emitDivRemOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                          CE->getOpcode() == Instruction::Div, R);
      return;

    case Instruction::SetNE:
    case Instruction::SetEQ:
    case Instruction::SetLT:
    case Instruction::SetGT:
    case Instruction::SetLE:
    case Instruction::SetGE:
      emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode(), R);
      return;

    case Instruction::Shl:
    case Instruction::Shr:
      emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode() == Instruction::Shl, CE->getType(), R);
      return;

    case Instruction::Select:
      emitSelectOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                          CE->getOperand(2), R);
      return;

    default:
      std::cerr << "Offending expr: " << *C << "\n";
      assert(0 && "Constant expression not yet handled!\n");
    }
  }
  if (C->getType()->isIntegral()) {
    unsigned Class = getClassB(C->getType());

    if (Class == cLong) {
      // Copy the value into the register pair.
      uint64_t Val = cast<ConstantInt>(C)->getRawValue();
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(Val & 0xFFFFFFFF);
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R+1).addImm(Val >> 32);
      return;
    }

    assert(Class <= cInt && "Type not handled yet!");

    static const unsigned IntegralOpcodeTab[] = {
      X86::MOV8ri, X86::MOV16ri, X86::MOV32ri
    };

    if (C->getType() == Type::BoolTy) {
      BuildMI(*MBB, IP, X86::MOV8ri, 1, R).addImm(C == ConstantBool::True);
    } else {
      ConstantInt *CI = cast<ConstantInt>(C);
      BuildMI(*MBB, IP, IntegralOpcodeTab[Class], 1, R)
        .addImm(CI->getRawValue());
    }
  } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->isExactlyValue(+0.0))
      BuildMI(*MBB, IP, X86::FLD0, 0, R);
    else if (CFP->isExactlyValue(+1.0))
      BuildMI(*MBB, IP, X86::FLD1, 0, R);
    else {
      // Otherwise we need to spill the constant to memory...
      MachineConstantPool *CP = F->getConstantPool();
      unsigned CPI = CP->getConstantPoolIndex(CFP);
      const Type *Ty = CFP->getType();

      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
             "Unknown FP type!");
      unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLD32m : X86::FLD64m;
      addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 4, R), CPI);
    }
  } else if (isa<ConstantPointerNull>(C)) {
    // Copy zero (null pointer) to the register.
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(0);
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addGlobalAddress(GV);
  } else {
    std::cerr << "Offending constant: " << *C << "\n";
    assert(0 && "Type not handled yet!");
  }
}
/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
/// the stack into virtual registers.
///
void X86ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
  // Emit instructions to load the arguments...  On entry to a function on the
  // X86, the stack frame looks like this:
  //
  //    [ESP]     -- return address
  //    [ESP + 4] -- first argument (leftmost lexically)
  //    [ESP + 8] -- second argument, if first argument is four bytes in size
  //       ...
  //
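  // Illustrative only: for 'void f(int %a, long %b, double %c)' the incoming
  // frame would look like:
  //    [ESP +  0]  return address
  //    [ESP +  4]  %a   (4 bytes)
  //    [ESP +  8]  %b   (8 bytes: low dword at +8, high dword at +12)
  //    [ESP + 16]  %c   (8 bytes)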
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  MachineFrameInfo *MFI = F->getFrameInfo();

  for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
    bool ArgLive = !I->use_empty();
    unsigned Reg = ArgLive ? getReg(*I) : 0;
    int FI;   // Frame object index

    switch (getClassB(I->getType())) {
    case cByte:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(1, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Reg), FI);
      }
      break;
    case cShort:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(2, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Reg), FI);
      }
      break;
    case cInt:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(4, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
      }
      break;
    case cLong:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(8, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg+1), FI, 4);
      }
      ArgOffset += 4;   // longs require 4 additional bytes
      break;
    case cFP:
      if (ArgLive) {
        unsigned Opcode;
        if (I->getType() == Type::FloatTy) {
          Opcode = X86::FLD32m;
          FI = MFI->CreateFixedObject(4, ArgOffset);
        } else {
          Opcode = X86::FLD64m;
          FI = MFI->CreateFixedObject(8, ArgOffset);
        }
        addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
      }
      if (I->getType() == Type::DoubleTy)
        ArgOffset += 4;   // doubles require 4 additional bytes
      break;
    default:
      assert(0 && "Unhandled argument type!");
    }
    ArgOffset += 4;   // Each argument takes at least 4 bytes on the stack...
  }

  // If the function takes variable number of arguments, add a frame offset for
  // the start of the first vararg value... this is used to expand
  // llvm.va_start.
  if (Fn.getFunctionType()->isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
}
/// SelectPHINodes - Insert machine code to generate phis.  This is tricky
/// because we have to generate our sources into the source basic blocks, not
/// the current one.
///
void X86ISel::SelectPHINodes() {
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const Function &LF = *F->getFunction();   // The LLVM function...
  for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
    const BasicBlock *BB = I;
    MachineBasicBlock &MBB = *MBBMap[I];

    // Loop over all of the PHI nodes in the LLVM basic block...
    MachineBasicBlock::iterator PHIInsertPoint = MBB.begin();
    for (BasicBlock::const_iterator I = BB->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I));

      // Create a new machine instr PHI node, and insert it.
      unsigned PHIReg = getReg(*PN);
      MachineInstr *PhiMI = BuildMI(MBB, PHIInsertPoint,
                                    X86::PHI, PN->getNumOperands(), PHIReg);

      MachineInstr *LongPhiMI = 0;
      if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy)
        LongPhiMI = BuildMI(MBB, PHIInsertPoint,
                            X86::PHI, PN->getNumOperands(), PHIReg+1);

      // PHIValues - Map of blocks to incoming virtual registers.  We use this
      // so that we only initialize one incoming value for a particular block,
      // even if the block has multiple entries in the PHI node.
      //
      std::map<MachineBasicBlock*, unsigned> PHIValues;
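      // Illustrative only: a block reached from the same predecessor along
      // two edges (e.g. both targets of a conditional branch) contributes two
      // identical entries to the PHI, but its value must be computed once:
      //   %r = phi int [ %x, %P ], [ %x, %P ], [ %y, %Q ]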
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)];
        unsigned ValReg;
        std::map<MachineBasicBlock*, unsigned>::iterator EntryIt =
          PHIValues.lower_bound(PredMBB);

        if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) {
          // We already inserted an initialization of the register for this
          // predecessor.  Recycle it.
          ValReg = EntryIt->second;
        } else {
          // Get the incoming value into a virtual register.
          //
          Value *Val = PN->getIncomingValue(i);

          // If this is a constant or GlobalValue, we may have to insert code
          // into the basic block to compute it into a virtual register.
          if (isa<Constant>(Val) && !isa<ConstantExpr>(Val)) {
            // Simple constants get emitted at the end of the basic block,
            // before any terminator instructions.  We "know" that the code to
            // move a constant into a register will never clobber any flags.
            ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator());
          } else {
            // Because we don't want to clobber any values which might be in
            // physical registers with the computation of this constant (which
            // might be arbitrarily complex if it is a constant expression),
            // just insert the computation at the top of the basic block.
            MachineBasicBlock::iterator PI = PredMBB->begin();

            // Skip over any PHI nodes though!
            while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
              ++PI;

            ValReg = getReg(Val, PredMBB, PI);
          }

          // Remember that we inserted a value for this PHI for this
          // predecessor.
          PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg));
        }

        PhiMI->addRegOperand(ValReg);
        PhiMI->addMachineBasicBlockOperand(PredMBB);
        if (LongPhiMI) {
          LongPhiMI->addRegOperand(ValReg+1);
          LongPhiMI->addMachineBasicBlockOperand(PredMBB);
        }
      }

      // Now that we emitted all of the incoming values for the PHI node, make
      // sure to reposition the InsertPoint after the PHI that we just added.
      // This is needed because we might have inserted a constant into this
      // block, right after the PHI's which is before the old insert point!
      PHIInsertPoint = LongPhiMI ? LongPhiMI : PhiMI;
      ++PHIInsertPoint;
    }
  }
}
/// RequiresFPRegKill - The floating point stackifier pass cannot insert
/// compensation code on critical edges.  As such, it requires that we kill all
/// FP registers on the exit from any blocks that either ARE critical edges, or
/// branch to a block that has incoming critical edges.
///
/// Note that this kill instruction will eventually be eliminated when
/// restrictions in the stackifier are relaxed.
///
static bool RequiresFPRegKill(const MachineBasicBlock *MBB) {
  const BasicBlock *BB = MBB->getBasicBlock();
  for (succ_const_iterator SI = succ_begin(BB), E = succ_end(BB);
       SI != E; ++SI) {
    const BasicBlock *Succ = *SI;
    pred_const_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
    ++PI;   // Blocks have at least one predecessor
    if (PI != PE) {   // If it has exactly one, this isn't a critical edge
      // If this block has more than one predecessor, check all of the
      // predecessors to see if they have multiple successors.  If so, then
      // the block we are analyzing needs an FPRegKill.
      for (PI = pred_begin(Succ); PI != PE; ++PI) {
        const BasicBlock *Pred = *PI;
        succ_const_iterator SI2 = succ_begin(Pred);
        ++SI2;   // There must be at least one successor of this block.
        if (SI2 != succ_end(Pred))
          return true;   // Yes, we must insert the kill on this edge.
      }
    }
  }
  // If we got this far, there is no need to insert the kill instruction.
  return false;
}
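
// Illustrative only: in the CFG below, the edge B->C is critical (B has two
// successors and C has two predecessors), so a block that branches into C
// must have its FP registers killed on exit:
//
//      A   B
//       \ / \
//        C   D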
// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks that
// need them.  This only occurs due to the floating point stackifier not being
// aggressive enough to handle arbitrary global stackification.
//
// Currently we insert an FP_REG_KILL instruction into each block that uses or
// defines a floating point virtual register.
//
// When the global register allocators (like linear scan) finally update live
// variable analysis, we can keep floating point values in registers across
// portions of the CFG that do not involve critical edges.  This will be a big
// win, but we are waiting on the global allocators before we can do this.
//
// With a bit of work, the floating point stackifier pass can be enhanced to
// break critical edges as needed (to make a place to put compensation code),
// but this will require some infrastructure improvements as well.
//
void X86ISel::InsertFPRegKills() {
  SSARegMap &RegMap = *F->getSSARegMap();

  for (MachineFunction::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I)
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = I->getOperand(i);
        if (MO.isRegister() && MO.getReg()) {
          unsigned Reg = MO.getReg();
          if (MRegisterInfo::isVirtualRegister(Reg))
            if (RegMap.getRegClass(Reg)->getSize() == 10)
              goto UsesFPReg;
        }
      }
    // If we haven't found an FP register use or def in this basic block, check
    // to see if any of our successors has an FP PHI node, which will cause a
    // copy to be inserted into this block.
    for (MachineBasicBlock::const_succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      MachineBasicBlock *SBB = *SI;
      for (MachineBasicBlock::iterator I = SBB->begin();
           I != SBB->end() && I->getOpcode() == X86::PHI; ++I) {
        if (RegMap.getRegClass(I->getOperand(0).getReg())->getSize() == 10)
          goto UsesFPReg;
      }
    }
    continue;
  UsesFPReg:
    // Okay, this block uses an FP register.  If the block has successors
    // (i.e., it's not an unwind/return), insert the FP_REG_KILL instruction.
    if (BB->succ_size() && RequiresFPRegKill(BB)) {
      BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
      ++NumFPKill;
    }
  }
}
void X86ISel::getAddressingMode(Value *Addr, X86AddressMode &AM) {
  AM.BaseType = X86AddressMode::RegBase;
  AM.Base.Reg = 0; AM.Scale = 1; AM.IndexReg = 0; AM.Disp = 0;
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
    if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
                      AM))
      return;
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (CE->getOpcode() == Instruction::GetElementPtr)
      if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
                        AM))
        return;
  } else if (AllocaInst *AI = dyn_castFixedAlloca(Addr)) {
    AM.BaseType = X86AddressMode::FrameIndexBase;
    AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
    return;
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    AM.GV = GV;
    return;
  }

  // If it's not foldable, reset addr mode.
  AM.BaseType = X86AddressMode::RegBase;
  AM.Base.Reg = getReg(Addr);
  AM.Scale = 1; AM.IndexReg = 0; AM.Disp = 0;
}
// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold
// it into the conditional branch or select instruction which is the only user
// of the cc instruction.  This is the case if the conditional branch is the
// only user of the setcc.  We also don't handle long arguments below, so we
// reject them here as well.
//
static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) {
  if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
    if (SCI->hasOneUse()) {
      Instruction *User = cast<Instruction>(SCI->use_back());
      if ((isa<BranchInst>(User) || isa<SelectInst>(User)) &&
          (getClassB(SCI->getOperand(0)->getType()) != cLong ||
           SCI->getOpcode() == Instruction::SetEQ ||
           SCI->getOpcode() == Instruction::SetNE) &&
          (isa<BranchInst>(User) || User->getOperand(0) == V))
        return SCI;
    }
  return 0;
}
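
// Illustrative only: a foldable setcc looks like
//   %c = setlt int %a, %b            ; single use...
//   br bool %c, label %T, label %F   ; ...is this branch
// letting the compare feed the conditional branch directly, with no byte
// register materialized in between.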
// Return a fixed numbering for setcc instructions which does not depend on the
// order of the opcodes.
//
static unsigned getSetCCNumber(unsigned Opcode) {
  switch (Opcode) {
  default: assert(0 && "Unknown setcc instruction!");
  case Instruction::SetEQ: return 0;
  case Instruction::SetNE: return 1;
  case Instruction::SetLT: return 2;
  case Instruction::SetGE: return 3;
  case Instruction::SetGT: return 4;
  case Instruction::SetLE: return 5;
  }
}
// LLVM  -> X86 signed  X86 unsigned
// -----    ----------  ------------
// seteq -> sete        sete
// setne -> setne       setne
// setlt -> setl        setb
// setge -> setge       setae
// setgt -> setg        seta
// setle -> setle       setbe
// ----
// sets                       // Used by comparison with 0 optimization
// setns
static const unsigned SetCCOpcodeTab[2][8] = {
  { X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAEr, X86::SETAr, X86::SETBEr,
    0, 0 },
  { X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGEr, X86::SETGr, X86::SETLEr,
    X86::SETSr, X86::SETNSr },
};
/// emitUCOMr - In the future when we support processors before the P6, this
/// wraps the logic for emitting an FUCOMr vs FUCOMIr.
void X86ISel::emitUCOMr(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                        unsigned LHS, unsigned RHS) {
  if (0) {   // for processors prior to the P6
    BuildMI(*MBB, IP, X86::FUCOMr, 2).addReg(LHS).addReg(RHS);
    BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
    BuildMI(*MBB, IP, X86::SAHF, 1);
  } else {
    BuildMI(*MBB, IP, X86::FUCOMIr, 2).addReg(LHS).addReg(RHS);
  }
}
// EmitComparison - This function emits a comparison of the two operands,
// returning the extended setcc code to use.
unsigned X86ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                                 MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator IP) {
  // The arguments are already supposed to be of the same type.
  const Type *CompTy = Op0->getType();
  unsigned Class = getClassB(CompTy);

  // Special case handling of: cmp R, i
  if (isa<ConstantPointerNull>(Op1)) {
    unsigned Op0r = getReg(Op0, MBB, IP);
    if (OpNum < 2)   // seteq/setne -> test
      BuildMI(*MBB, IP, X86::TEST32rr, 2).addReg(Op0r).addReg(Op0r);
    else
      BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r).addImm(0);
    return OpNum;
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    if (Class == cByte || Class == cShort || Class == cInt) {
      unsigned Op1v = CI->getRawValue();

      // Mask off any upper bits of the constant, if there are any...
      Op1v &= (1ULL << (8 << Class)) - 1;
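      // Illustrative only: (8 << Class) is the operand width in bits, so for
      // cShort (Class == 1) this computes (1ULL << 16) - 1 == 0xFFFF.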
      // If this is a comparison against zero, emit more efficient code.  We
      // can't handle unsigned comparisons against zero unless they are == or
      // !=.  These should have been strength reduced already anyway.
      if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {

        // If this is a comparison against zero and the LHS is an and of a
        // register with a constant, use the test to do the and.
        if (Instruction *Op0I = dyn_cast<Instruction>(Op0))
          if (Op0I->getOpcode() == Instruction::And && Op0->hasOneUse() &&
              isa<ConstantInt>(Op0I->getOperand(1))) {
            static const unsigned TESTTab[] = {
              X86::TEST8ri, X86::TEST16ri, X86::TEST32ri
            };

            unsigned LHS = getReg(Op0I->getOperand(0), MBB, IP);
            unsigned Imm =
              cast<ConstantInt>(Op0I->getOperand(1))->getRawValue();
            BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(LHS).addImm(Imm);

            if (OpNum == 2) return 6;   // Map jl -> js
            if (OpNum == 3) return 7;   // Map jg -> jns
            return OpNum;
          }

        unsigned Op0r = getReg(Op0, MBB, IP);
        static const unsigned TESTTab[] = {
          X86::TEST8rr, X86::TEST16rr, X86::TEST32rr
        };
        BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);

        if (OpNum == 2) return 6;   // Map jl -> js
        if (OpNum == 3) return 7;   // Map jg -> jns
        return OpNum;
      }
      static const unsigned CMPTab[] = {
        X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
      };

      unsigned Op0r = getReg(Op0, MBB, IP);
      BuildMI(*MBB, IP, CMPTab[Class], 2).addReg(Op0r).addImm(Op1v);
      return OpNum;
    } else {
      unsigned Op0r = getReg(Op0, MBB, IP);
      assert(Class == cLong && "Unknown integer class!");
      unsigned LowCst = CI->getRawValue();
      unsigned HiCst = CI->getRawValue() >> 32;
      if (OpNum < 2) {   // seteq, setne
        unsigned LoTmp = Op0r;
        if (LowCst != 0) {
          LoTmp = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::XOR32ri, 2, LoTmp)
            .addReg(Op0r).addImm(LowCst);
        }
        unsigned HiTmp = Op0r+1;
        if (HiCst != 0) {
          HiTmp = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::XOR32ri, 2, HiTmp)
            .addReg(Op0r+1).addImm(HiCst);
        }
        unsigned FinalTmp = makeAnotherReg(Type::IntTy);
        BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
        return OpNum;
      } else {
        // Emit a sequence of code which compares the high and low parts once
        // each, then uses a conditional move to handle the overflow case.  For
        // example, a setlt for long would generate code like this:
        //
        //   AL = lo(op1) < lo(op2)   // Always unsigned comparison
        //   BL = hi(op1) < hi(op2)   // Signedness depends on operands
        //   dest = hi(op1) == hi(op2) ? BL : AL;
        //
        // FIXME: This would be much better if we had hierarchical register
        // classes!  Until then, hardcode registers so that we can deal with
        // their aliases (because we don't have conditional byte moves).
        //
        BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r).addImm(LowCst);
        BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
        BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r+1).addImm(HiCst);
        BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0,
                X86::BL);
        BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
        BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
        BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
          .addReg(X86::AX);
        // NOTE: visitSetCondInst knows that the value is dumped into the BL
        // register at this point for long values...
        return OpNum;
      }
    }
  }
  unsigned Op0r = getReg(Op0, MBB, IP);

  // Special case handling of comparison against +/- 0.0
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op1))
    if (CFP->isExactlyValue(+0.0) || CFP->isExactlyValue(-0.0)) {
      BuildMI(*MBB, IP, X86::FTST, 1).addReg(Op0r);
      BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
      BuildMI(*MBB, IP, X86::SAHF, 1);
      return OpNum;
    }

  unsigned Op1r = getReg(Op1, MBB, IP);
  switch (Class) {
  default: assert(0 && "Unknown type class!");
    // Emit: cmp <var1>, <var2> (do the comparison).  We can
    // compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
    // 32-bit.
  case cByte:
    BuildMI(*MBB, IP, X86::CMP8rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cShort:
    BuildMI(*MBB, IP, X86::CMP16rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cInt:
    BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cFP:
    emitUCOMr(MBB, IP, Op0r, Op1r);
    break;
  case cLong:
    if (OpNum < 2) {   // seteq, setne
      unsigned LoTmp = makeAnotherReg(Type::IntTy);
      unsigned HiTmp = makeAnotherReg(Type::IntTy);
      unsigned FinalTmp = makeAnotherReg(Type::IntTy);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, LoTmp).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
      break;   // Allow the sete or setne to be generated from flags set by OR
    } else {
      // Emit a sequence of code which compares the high and low parts once
      // each, then uses a conditional move to handle the overflow case.  For
      // example, a setlt for long would generate code like this:
      //
      //   AL = lo(op1) < lo(op2)   // Always unsigned comparison
      //   BL = hi(op1) < hi(op2)   // Signedness depends on operands
      //   dest = hi(op1) == hi(op2) ? BL : AL;
      //
      // FIXME: This would be much better if we had hierarchical register
      // classes!  Until then, hardcode registers so that we can deal with their
      // aliases (because we don't have conditional byte moves).
      //
      BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
      BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
      BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
        .addReg(X86::AX);
      // NOTE: visitSetCondInst knows that the value is dumped into the BL
      // register at this point for long values...
    }
    break;
  }
  return OpNum;
}
/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
/// register, then move it to wherever the result should be.
///
void X86ISel::visitSetCondInst(SetCondInst &I) {
  if (canFoldSetCCIntoBranchOrSelect(&I))
    return;   // Fold this into a branch or select.

  unsigned DestReg = getReg(I);
  MachineBasicBlock::iterator MII = BB->end();
  emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),
                     DestReg);
}

/// emitSetCCOperation - Common code shared between visitSetCondInst and
/// constant expression support.
///
void X86ISel::emitSetCCOperation(MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator IP,
                                 Value *Op0, Value *Op1, unsigned Opcode,
                                 unsigned TargetReg) {
  unsigned OpNum = getSetCCNumber(Opcode);
  OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP);

  const Type *CompTy = Op0->getType();
  unsigned CompClass = getClassB(CompTy);
  bool isSigned = CompTy->isSigned() && CompClass != cFP;

  if (CompClass != cLong || OpNum < 2) {
    // Handle normal comparisons with a setcc instruction...
    BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
  } else {
    // Handle long comparisons by copying the value which is already in BL into
    // the register we want...
    BuildMI(*MBB, IP, X86::MOV8rr, 1, TargetReg).addReg(X86::BL);
  }
}
void X86ISel::visitSelectInst(SelectInst &SI) {
  unsigned DestReg = getReg(SI);
  MachineBasicBlock::iterator MII = BB->end();
  emitSelectOperation(BB, MII, SI.getCondition(), SI.getTrueValue(),
                      SI.getFalseValue(), DestReg);
}

/// emitSelectOperation - Common code shared between visitSelectInst and the
/// constant expression support.
void X86ISel::emitSelectOperation(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator IP,
                                  Value *Cond, Value *TrueVal, Value *FalseVal,
                                  unsigned DestReg) {
  unsigned SelectClass = getClassB(TrueVal->getType());

  // We don't support 8-bit conditional moves.  If we have incoming constants,
  // transform them into 16-bit constants to avoid a run-time conversion.
  if (SelectClass == cByte) {
    if (Constant *T = dyn_cast<Constant>(TrueVal))
      TrueVal = ConstantExpr::getCast(T, Type::ShortTy);
    if (Constant *F = dyn_cast<Constant>(FalseVal))
      FalseVal = ConstantExpr::getCast(F, Type::ShortTy);
  }

  unsigned TrueReg  = getReg(TrueVal, MBB, IP);
  unsigned FalseReg = getReg(FalseVal, MBB, IP);
  if (TrueReg == FalseReg) {
    static const unsigned Opcode[] = {
      X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
    };
    BuildMI(*MBB, IP, Opcode[SelectClass], 1, DestReg).addReg(TrueReg);
    if (SelectClass == cLong)
      BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(TrueReg+1);
    return;
  }

  unsigned Opcode;
  if (SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(Cond)) {
    // We successfully folded the setcc into the select instruction.

    unsigned OpNum = getSetCCNumber(SCI->getOpcode());
    OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), MBB,
                           IP);

    const Type *CompTy = SCI->getOperand(0)->getType();
    bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

    // LLVM  -> X86 signed  X86 unsigned
    // -----    ----------  ------------
    // seteq -> cmovNE      cmovNE
    // setne -> cmovE       cmovE
    // setlt -> cmovGE      cmovAE
    // setge -> cmovL       cmovB
    // setgt -> cmovLE      cmovBE
    // setle -> cmovG       cmovA
    // ----
    // cmovNS                     // Used by comparison with 0 optimization
    // cmovS

    switch (SelectClass) {
    default: assert(0 && "Unknown value class!");
    case cFP: {
      // Annoyingly, we don't have a full set of floating point conditional
      // moves.
      static const unsigned OpcodeTab[2][8] = {
        { X86::FCMOVNE, X86::FCMOVE, X86::FCMOVAE, X86::FCMOVB,
          X86::FCMOVBE, X86::FCMOVA, 0, 0 },
        { X86::FCMOVNE, X86::FCMOVE, 0, 0, 0, 0, 0, 0 },
      };
      Opcode = OpcodeTab[isSigned][OpNum];

      // If opcode == 0, we hit a case that we don't support.  Output a setcc
      // and compare the result against zero.
      if (Opcode == 0) {
        unsigned CompClass = getClassB(CompTy);
        unsigned CondReg;
        if (CompClass != cLong || OpNum < 2) {
          CondReg = makeAnotherReg(Type::BoolTy);
          // Handle normal comparisons with a setcc instruction...
          BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, CondReg);
        } else {
          // Long comparisons end up in the BL register.
          CondReg = X86::BL;
        }

        BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
        Opcode = X86::FCMOVE;
      }
      break;
    }
    case cByte:
    case cShort: {
      static const unsigned OpcodeTab[2][8] = {
        { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVAE16rr, X86::CMOVB16rr,
          X86::CMOVBE16rr, X86::CMOVA16rr, 0, 0 },
        { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVGE16rr, X86::CMOVL16rr,
          X86::CMOVLE16rr, X86::CMOVG16rr, X86::CMOVNS16rr, X86::CMOVS16rr },
      };
      Opcode = OpcodeTab[isSigned][OpNum];
      break;
    }
    case cInt:
    case cLong: {
      static const unsigned OpcodeTab[2][8] = {
        { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVAE32rr, X86::CMOVB32rr,
          X86::CMOVBE32rr, X86::CMOVA32rr, 0, 0 },
        { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVGE32rr, X86::CMOVL32rr,
          X86::CMOVLE32rr, X86::CMOVG32rr, X86::CMOVNS32rr, X86::CMOVS32rr },
      };
      Opcode = OpcodeTab[isSigned][OpNum];
      break;
    }
    }
  } else {
    // Get the value being branched on, and use it to set the condition codes.
    unsigned CondReg = getReg(Cond, MBB, IP);
    BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
    switch (SelectClass) {
    default: assert(0 && "Unknown value class!");
    case cFP:    Opcode = X86::FCMOVE;    break;
    case cByte:
    case cShort: Opcode = X86::CMOVE16rr; break;
    case cInt:
    case cLong:  Opcode = X86::CMOVE32rr; break;
    }
  }
  unsigned RealDestReg = DestReg;

  // Annoyingly enough, X86 doesn't HAVE 8-bit conditional moves.  Because of
  // this, we have to promote the incoming values to 16 bits, perform a 16-bit
  // cmove, then truncate the result.
  if (SelectClass == cByte) {
    DestReg = makeAnotherReg(Type::ShortTy);
    if (getClassB(TrueVal->getType()) == cByte) {
      // Promote the true value, by storing it into AL, and reading from AX.
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::AL).addReg(TrueReg);
      BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::AH).addImm(0);
      TrueReg = makeAnotherReg(Type::ShortTy);
      BuildMI(*MBB, IP, X86::MOV16rr, 1, TrueReg).addReg(X86::AX);
    }
    if (getClassB(FalseVal->getType()) == cByte) {
      // Promote the false value, by storing it into CL, and reading from CX.
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(FalseReg);
      BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::CH).addImm(0);
      FalseReg = makeAnotherReg(Type::ShortTy);
      BuildMI(*MBB, IP, X86::MOV16rr, 1, FalseReg).addReg(X86::CX);
    }
  }

  BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(TrueReg).addReg(FalseReg);

  switch (SelectClass) {
  case cByte:
    // We did the computation with 16-bit registers.  Truncate back to our
    // result by copying into AX then copying out AL.
    BuildMI(*MBB, IP, X86::MOV16rr, 1, X86::AX).addReg(DestReg);
    BuildMI(*MBB, IP, X86::MOV8rr, 1, RealDestReg).addReg(X86::AL);
    break;
  case cLong:
    // Move the upper half of the value as well.
    BuildMI(*MBB, IP, Opcode, 2, DestReg+1)
      .addReg(TrueReg+1).addReg(FalseReg+1);
    break;
  }
}
/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
/// operand, in the specified target register.
///
void X86ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
  bool isUnsigned = VR.Ty->isUnsigned() || VR.Ty == Type::BoolTy;

  Value *Val = VR.Val;
  const Type *Ty = VR.Ty;
  if (Val) {
    if (Constant *C = dyn_cast<Constant>(Val)) {
      Val = ConstantExpr::getCast(C, Type::IntTy);
      Ty = Type::IntTy;
    }

    // If this is a simple constant, just emit a MOVri directly to avoid the
    // copy.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      int TheVal = CI->getRawValue() & 0xFFFFFFFF;
      BuildMI(BB, X86::MOV32ri, 1, targetReg).addImm(TheVal);
      return;
    }
  }

  // Make sure we have the register number for this value...
  unsigned Reg = Val ? getReg(Val) : VR.Reg;

  switch (getClassB(Ty)) {
  case cByte:
    // Extend value into target register (8->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr8, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr8, 1, targetReg).addReg(Reg);
    break;
  case cShort:
    // Extend value into target register (16->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr16, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr16, 1, targetReg).addReg(Reg);
    break;
  case cInt:
    // Move value into target register (32->32)
    BuildMI(BB, X86::MOV32rr, 1, targetReg).addReg(Reg);
    break;
  default:
    assert(0 && "Unpromotable operand class in promote32");
  }
}
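
// Illustrative only: visitReturnInst below uses this to widen small integral
// return values, e.g. promote32(X86::EAX, ValueRecord(RetVal)) turns a
// 'sbyte' return into a MOVSX32rr8 of the byte register into EAX.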
/// 'ret' instruction - Here we are interested in meeting the x86 ABI.  As such,
/// we have the following possibilities:
///
///   ret void: No return value, simply emit a 'ret' instruction
///   ret sbyte, ubyte : Extend value into EAX and return
///   ret short, ushort: Extend value into EAX and return
///   ret int, uint    : Move value into EAX and return
///   ret pointer      : Move value into EAX and return
///   ret long, ulong  : Move value into EAX/EDX and return
///   ret float/double : Top of FP stack
///
void X86ISel::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    BuildMI(BB, X86::RET, 0);   // Just emit a 'ret' instruction
    return;
  }

  Value *RetVal = I.getOperand(0);
  switch (getClassB(RetVal->getType())) {
  case cByte:   // integral return values: extend or move into EAX and return
  case cShort:
  case cInt:
    promote32(X86::EAX, ValueRecord(RetVal));
    // Declare that EAX is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
    break;
  case cFP: {   // Floats & Doubles: Return in ST(0)
    unsigned RetReg = getReg(RetVal);
    BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg);
    // Declare that top-of-stack is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
    break;
  }
  case cLong: {
    unsigned RetReg = getReg(RetVal);
    BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(RetReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RetReg+1);
    // Declare that EAX & EDX are live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
      .addReg(X86::ESP);
    break;
  }
  default:
    visitInstruction(I);
  }
  // Emit a 'ret' instruction
  BuildMI(BB, X86::RET, 0);
}
// getBlockAfter - Return the basic block which occurs lexically after the
// specified one.
static inline BasicBlock *getBlockAfter(BasicBlock *BB) {
  Function::iterator I = BB; ++I;   // Get iterator to next block
  return I != BB->getParent()->end() ? &*I : 0;
}
/// visitBranchInst - Handle conditional and unconditional branches here.  Note
/// that since code layout is frozen at this point, that if we are trying to
/// jump to a block that is the immediate successor of the current block, we can
/// just make a fall-through (but we don't currently).
///
void X86ISel::visitBranchInst(BranchInst &BI) {
  // Update machine-CFG edges
  BB->addSuccessor(MBBMap[BI.getSuccessor(0)]);
  if (BI.isConditional())
    BB->addSuccessor(MBBMap[BI.getSuccessor(1)]);

  BasicBlock *NextBB = getBlockAfter(BI.getParent());   // BB after current one

  if (!BI.isConditional()) {   // Unconditional branch?
    if (BI.getSuccessor(0) != NextBB)
      BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
    return;
  }

  // See if we can fold the setcc into the branch itself...
  SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(BI.getCondition());
  if (SCI == 0) {
    // Nope, cannot fold setcc into this branch.  Emit a branch on a condition
    // computed some other way...
    unsigned condReg = getReg(BI.getCondition());
    BuildMI(BB, X86::TEST8rr, 2).addReg(condReg).addReg(condReg);
    if (BI.getSuccessor(1) == NextBB) {
      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JNE, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
    } else {
      BuildMI(BB, X86::JE, 1).addMBB(MBBMap[BI.getSuccessor(1)]);

      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
    }
    return;
  }

  unsigned OpNum = getSetCCNumber(SCI->getOpcode());
  MachineBasicBlock::iterator MII = BB->end();
  OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB,
                         MII);

  const Type *CompTy = SCI->getOperand(0)->getType();
  bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

  // LLVM  -> X86 signed  X86 unsigned
  // -----    ----------  ------------
  // seteq -> je          je
  // setne -> jne         jne
  // setlt -> jl          jb
  // setge -> jge         jae
  // setgt -> jg          ja
  // setle -> jle         jbe
  // ----
  // js                         // Used by comparison with 0 optimization
  // jns

  static const unsigned OpcodeTab[2][8] = {
    { X86::JE, X86::JNE, X86::JB, X86::JAE, X86::JA, X86::JBE, 0, 0 },
    { X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE,
      X86::JS, X86::JNS },
  };

  if (BI.getSuccessor(0) != NextBB) {
    BuildMI(BB, OpcodeTab[isSigned][OpNum], 1)
      .addMBB(MBBMap[BI.getSuccessor(0)]);
    if (BI.getSuccessor(1) != NextBB)
      BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(1)]);
  } else {
    // Change to the inverse condition...
    if (BI.getSuccessor(1) != NextBB) {
      OpNum ^= 1;
      BuildMI(BB, OpcodeTab[isSigned][OpNum], 1)
        .addMBB(MBBMap[BI.getSuccessor(1)]);
    }
  }
}
/// doCall - This emits an abstract call instruction, setting up the arguments
/// and the return value as appropriate.  For the actual function call itself,
/// it inserts the specified CallMI instruction into the stream.
///
void X86ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                     const std::vector<ValueRecord> &Args) {
  // Count how many bytes are to be pushed on the stack...
  unsigned NumBytes = 0;

  if (!Args.empty()) {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getClassB(Args[i].Ty)) {
      case cByte: case cShort: case cInt:
        NumBytes += 4; break;
      case cLong:
        NumBytes += 8; break;
      case cFP:
        NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8;
        break;
      default: assert(0 && "Unknown class!");
      }
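    // Illustrative only: for a call 'f(int, long, double)' the loop above
    // computes NumBytes = 4 + 8 + 8 = 20 bytes of argument stack space.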
    // Adjust the stack pointer for the new arguments...
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(NumBytes);

    // Arguments go on the stack in reverse order, as specified by the ABI.
    unsigned ArgOffset = 0;
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      unsigned ArgReg;
      switch (getClassB(Args[i].Ty)) {
      case cByte:
        if (Args[i].Val && isa<ConstantBool>(Args[i].Val)) {
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
            .addImm(Args[i].Val == ConstantBool::True);
          break;
        }
        // FALL THROUGH
      case cShort:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          // Zero/Sign extend constant, then stuff into memory.
          ConstantInt *Val = cast<ConstantInt>(Args[i].Val);
          Val = cast<ConstantInt>(ConstantExpr::getCast(Val, Type::IntTy));
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
            .addImm(Val->getRawValue() & 0xFFFFFFFF);
        } else {
          // Promote arg to 32 bits wide into a temporary register...
          ArgReg = makeAnotherReg(Type::UIntTy);
          promote32(ArgReg, Args[i]);
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        }
        break;
      case cInt:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          unsigned Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(Val);
        } else if (Args[i].Val && isa<ConstantPointerNull>(Args[i].Val)) {
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(0);
        } else {
          ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        }
        break;
      case cLong:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          uint64_t Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(Val & ~0U);
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset+4).addImm(Val >> 32ULL);
        } else {
          ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset+4).addReg(ArgReg+1);
        }
        ArgOffset += 4;   // 8 byte entry, not 4.
        break;
      case cFP:
        ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
        if (Args[i].Ty == Type::FloatTy) {
          addRegOffset(BuildMI(BB, X86::FST32m, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        } else {
          assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
          addRegOffset(BuildMI(BB, X86::FST64m, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          ArgOffset += 4;   // 8 byte entry, not 4.
        }
        break;
      default: assert(0 && "Unknown class!");
      }
      ArgOffset += 4;   // Each argument takes at least 4 bytes on the stack.
    }
  } else {
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(0);
  }
  BB->push_back(CallMI);

  BuildMI(BB, X86::ADJCALLSTACKUP, 1).addImm(NumBytes);

  // If there is a return value, scavenge the result from the location the call
  // leaves it in...
  //
  if (Ret.Ty != Type::VoidTy) {
    unsigned DestClass = getClassB(Ret.Ty);
    switch (DestClass) {
    case cByte:
    case cShort:
    case cInt: {
      // Integral results are in %eax, or the appropriate portion
      // thereof.
      static const unsigned regRegMove[] = {
        X86::MOV8rr, X86::MOV16rr, X86::MOV32rr
      };
      static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
      BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
      break;
    }
    case cFP:     // Floating-point return values live in %ST(0)
      BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
      break;
    case cLong:   // Long values are left in EDX:EAX
      BuildMI(BB, X86::MOV32rr, 1, Ret.Reg).addReg(X86::EAX);
      BuildMI(BB, X86::MOV32rr, 1, Ret.Reg+1).addReg(X86::EDX);
      break;
    default: assert(0 && "Unknown class!");
    }
  }
}
/// visitCallInst - Push args on stack and do a procedure call instruction.
void X86ISel::visitCallInst(CallInst &CI) {
  MachineInstr *TheCall;
  if (Function *F = CI.getCalledFunction()) {
    // Is it an intrinsic function call?
    if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) {
      visitIntrinsicCall(ID, CI);   // Special intrinsics are not handled here
      return;
    }

    // Emit a CALL instruction with PC-relative displacement.
    TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
  } else {   // Emit an indirect call...
    unsigned Reg = getReg(CI.getCalledValue());
    TheCall = BuildMI(X86::CALL32r, 1).addReg(Reg);
  }

  std::vector<ValueRecord> Args;
  for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
    Args.push_back(ValueRecord(CI.getOperand(i)));

  unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
  doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
}
/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
/// function, lowering any calls to unknown intrinsic functions into the
/// equivalent LLVM code.
///
void X86ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
      if (CallInst *CI = dyn_cast<CallInst>(I++))
        if (Function *F = CI->getCalledFunction())
          switch (F->getIntrinsicID()) {
          case Intrinsic::not_intrinsic:
          case Intrinsic::vastart:
          case Intrinsic::vacopy:
          case Intrinsic::vaend:
          case Intrinsic::returnaddress:
          case Intrinsic::frameaddress:
          case Intrinsic::memcpy:
          case Intrinsic::memset:
          case Intrinsic::isunordered:
          case Intrinsic::readport:
          case Intrinsic::writeport:
            // We directly implement these intrinsics
            break;
          case Intrinsic::readio: {
            // On X86, memory operations are in-order.  Lower this intrinsic
            // into a volatile load.
            Instruction *Before = CI->getPrev();
            LoadInst *LI = new LoadInst(CI->getOperand(1), "", true, CI);
            CI->replaceAllUsesWith(LI);
            BB->getInstList().erase(CI);
            break;
          }
1693 case Intrinsic::writeio: {
1694 // On X86, memory operations are in-order. Lower this intrinsic
1695 // into a volatile store.
1696 Instruction *Before = CI->getPrev();
1697 StoreInst *LI = new StoreInst(CI->getOperand(1),
1698 CI->getOperand(2), true, CI);
1699 CI->replaceAllUsesWith(LI);
1700 BB->getInstList().erase(CI);
1704 // All other intrinsic calls we must lower.
1705 Instruction *Before = CI->getPrev();
1706 TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
1707 if (Before) { // Move iterator to instruction after call
void X86ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
  unsigned TmpReg1, TmpReg2;
  switch (ID) {
  case Intrinsic::vastart:
    // Get the address of the first vararg value...
    TmpReg1 = getReg(CI);
    addFrameReference(BuildMI(BB, X86::LEA32r, 5, TmpReg1), VarArgsFrameIndex);
    return;

  case Intrinsic::vacopy:
    TmpReg1 = getReg(CI);
    TmpReg2 = getReg(CI.getOperand(1));
    BuildMI(BB, X86::MOV32rr, 1, TmpReg1).addReg(TmpReg2);
    return;
  case Intrinsic::vaend: return;   // Noop on X86

  case Intrinsic::returnaddress:
  case Intrinsic::frameaddress:
    TmpReg1 = getReg(CI);
    if (cast<Constant>(CI.getOperand(1))->isNullValue()) {
      if (ID == Intrinsic::returnaddress) {
        // Just load the return address
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, TmpReg1),
                          ReturnAddressIndex);
      } else {
        addFrameReference(BuildMI(BB, X86::LEA32r, 4, TmpReg1),
                          ReturnAddressIndex, -4);
      }
    } else {
      // Values other than zero are not implemented yet.
      BuildMI(BB, X86::MOV32ri, 1, TmpReg1).addImm(0);
    }
    return;
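  // Note on the isunordered lowering below: the x87 compare (done through
  // emitUCOMr) leaves the parity flag set exactly when the operands are
  // unordered (at least one NaN), so a single SETP materializes the result.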
  case Intrinsic::isunordered:
    TmpReg1 = getReg(CI.getOperand(1));
    TmpReg2 = getReg(CI.getOperand(2));
    emitUCOMr(BB, BB->end(), TmpReg2, TmpReg1);
    TmpReg2 = getReg(CI);
    BuildMI(BB, X86::SETPr, 0, TmpReg2);
    return;

  case Intrinsic::memcpy: {
    assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
    unsigned Align = 1;
    if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
      Align = AlignC->getRawValue();
      if (Align == 0) Align = 1;
    }
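    // The switch below keys on the low two bits of the alignment: 4-byte
    // aligned copies use REP MOVSD with count = bytes/4, 2-byte aligned
    // copies use REP MOVSW with count = bytes/2, and anything else falls
    // back to REP MOVSB.  Constant byte counts are divided at compile time;
    // otherwise the division is emitted as a SHR by 1 or 2.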
    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    switch (Align & 3) {
    case 2:   // WORD aligned
      if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
        CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
      } else {
        CountReg = makeAnotherReg(Type::IntTy);
        unsigned ByteReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
      }
      Opcode = X86::REP_MOVSW;
      break;
    case 0:   // DWORD aligned
      if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
        CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
      } else {
        CountReg = makeAnotherReg(Type::IntTy);
        unsigned ByteReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
      }
      Opcode = X86::REP_MOVSD;
      break;
    default:  // BYTE aligned
      CountReg = getReg(CI.getOperand(3));
      Opcode = X86::REP_MOVSB;
      break;
    }

    // No matter what the alignment is, we put the source in ESI, the
    // destination in EDI, and the count in ECX.
    TmpReg1 = getReg(CI.getOperand(1));
    TmpReg2 = getReg(CI.getOperand(2));
    BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
    BuildMI(BB, Opcode, 0);
    return;
  }
  case Intrinsic::memset: {
    assert(CI.getNumOperands() == 5 && "Illegal llvm.memset call!");
    unsigned Align = 1;
    if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
      Align = AlignC->getRawValue();
      if (Align == 0) Align = 1;
    }
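    // When the fill value is a constant, the byte is replicated to fill the
    // wider store: e.g. Val = 0xAB becomes 0xABAB for REP STOSW and
    // 0xABABABAB for REP STOSD.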
    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    if (ConstantInt *ValC = dyn_cast<ConstantInt>(CI.getOperand(2))) {
      unsigned Val = ValC->getRawValue() & 255;

      // If the value is a constant, then we can potentially use larger copies.
      switch (Align & 3) {
      case 2:   // WORD aligned
        if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
          CountReg = getReg(ConstantUInt::get(Type::UIntTy,
                                              I->getRawValue()/2));
        } else {
          CountReg = makeAnotherReg(Type::IntTy);
          unsigned ByteReg = getReg(CI.getOperand(3));
          BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
        }
        BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
        Opcode = X86::REP_STOSW;
        break;
      case 0:   // DWORD aligned
        if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
          CountReg = getReg(ConstantUInt::get(Type::UIntTy,
                                              I->getRawValue()/4));
        } else {
          CountReg = makeAnotherReg(Type::IntTy);
          unsigned ByteReg = getReg(CI.getOperand(3));
          BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
        }
        Val = (Val << 8) | Val;
        BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
        Opcode = X86::REP_STOSD;
        break;
      default:  // BYTE aligned
        CountReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
        Opcode = X86::REP_STOSB;
        break;
      }
    } else {
      // If it's not a constant value we are storing, just fall back.  We could
      // try to be clever to form 16 bit and 32 bit values, but we don't yet.
      unsigned ValReg = getReg(CI.getOperand(2));
      BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
      CountReg = getReg(CI.getOperand(3));
      Opcode = X86::REP_STOSB;
    }

    // No matter what the alignment is, we put the fill value in AL/AX/EAX,
    // the destination in EDI, and the count in ECX.
    TmpReg1 = getReg(CI.getOperand(1));
    BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, Opcode, 0);
    return;
  }

  case Intrinsic::readport: {
    // First, determine that the size of the operand falls within the
    // acceptable range for this architecture.
    if (getClassB(CI.getOperand(1)->getType()) != cShort) {
      std::cerr << "llvm.readport: Address size is not 16 bits\n";
      exit(1);
    }

    // Now, move the I/O port address into the DX register and use the IN
    // instruction to get the input data.
    unsigned Class = getClass(CI.getCalledFunction()->getReturnType());
    unsigned DestReg = getReg(CI);
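    // The x86 IN instruction has two addressing forms: a one-byte immediate
    // port number (0-255), and the full 16-bit port number taken from DX.
    // Using the immediate form below avoids loading DX for small ports.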
    // If the port is a single-byte constant, use the immediate form.
    if (ConstantInt *C = dyn_cast<ConstantInt>(CI.getOperand(1)))
      if ((C->getRawValue() & 255) == C->getRawValue()) {
        switch (Class) {
        case cByte:
          BuildMI(BB, X86::IN8ri, 1).addImm((unsigned char)C->getRawValue());
          BuildMI(BB, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
          return;
        case cShort:
          BuildMI(BB, X86::IN16ri, 1).addImm((unsigned char)C->getRawValue());
          BuildMI(BB, X86::MOV16rr, 1, DestReg).addReg(X86::AX);
          return;
        case cInt:
          BuildMI(BB, X86::IN32ri, 1).addImm((unsigned char)C->getRawValue());
          BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);
          return;
        }
      }

    // Otherwise, load the port number into DX and use the register form.
    unsigned Reg = getReg(CI.getOperand(1));
    BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
    switch (Class) {
    case cByte:
      BuildMI(BB, X86::IN8rr, 0);
      BuildMI(BB, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
      break;
    case cShort:
      BuildMI(BB, X86::IN16rr, 0);
      BuildMI(BB, X86::MOV16rr, 1, DestReg).addReg(X86::AX);
      break;
    case cInt:
      BuildMI(BB, X86::IN32rr, 0);
      BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);
      break;
    default:
      std::cerr << "Cannot do input on this data type";
      exit(1);
    }
    return;
  }

  case Intrinsic::writeport: {
    // First, determine that the size of the operand falls within the
    // acceptable range for this architecture.
    if (getClass(CI.getOperand(2)->getType()) != cShort) {
      std::cerr << "llvm.writeport: Address size is not 16 bits\n";
      exit(1);
    }

    // Copy the value to write into AL/AX/EAX, as the OUT instruction requires.
    unsigned Class = getClassB(CI.getOperand(1)->getType());
    unsigned ValReg = getReg(CI.getOperand(1));
    switch (Class) {
    case cByte:
      BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
      break;
    case cShort:
      BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(ValReg);
      break;
    case cInt:
      BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(ValReg);
      break;
    default:
      std::cerr << "llvm.writeport: invalid data type for X86 target";
      exit(1);
    }

    // If the port is a single-byte constant, use the immediate form.
    if (ConstantInt *C = dyn_cast<ConstantInt>(CI.getOperand(2)))
      if ((C->getRawValue() & 255) == C->getRawValue()) {
        static const unsigned O[] = { X86::OUT8ir, X86::OUT16ir, X86::OUT32ir };
        BuildMI(BB, O[Class], 1).addImm((unsigned char)C->getRawValue());
        return;
      }

    // Otherwise, move the I/O port address into the DX register and use the
    // register form of OUT.
    static const unsigned Opc[] = { X86::OUT8rr, X86::OUT16rr, X86::OUT32rr };
    unsigned Reg = getReg(CI.getOperand(2));
    BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
    BuildMI(BB, Opc[Class], 0);
    return;
  }

  default: assert(0 && "Error: unknown intrinsics should have been lowered!");
  }
}
static bool isSafeToFoldLoadIntoInstruction(LoadInst &LI, Instruction &User) {
  if (LI.getParent() != User.getParent())
    return false;
  BasicBlock::iterator It = &LI;
  // Check all of the instructions between the load and the user.  We should
  // really use alias analysis here, but for now we just do something simple.
  for (++It; It != BasicBlock::iterator(&User); ++It) {
    switch (It->getOpcode()) {
    case Instruction::Free:
    case Instruction::Store:
    case Instruction::Call:
    case Instruction::Invoke:
      return false;   // These instructions may write to memory.
    case Instruction::Load:
      if (cast<LoadInst>(It)->isVolatile() && LI.isVolatile())
        return false; // Volatile loads must not be reordered past each other.
      break;
    }
  }
  return true;
}
/// visitSimpleBinary - Implement simple binary operators for integral types...
/// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
/// Xor.
///
void X86ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
  unsigned DestReg = getReg(B);
  MachineBasicBlock::iterator MI = BB->end();
  Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1);
  unsigned Class = getClassB(B.getType());

  // If this is AND X, C, and it is only used by a setcc instruction, it will
  // be folded.  There is no need to emit this instruction.
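  // (The setcc lowering can emit (X & C) == 0 as a single TEST instruction;
  // equality comparisons are always foldable this way, and the other
  // predicates only in the signed cases the check below permits.)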
  if (B.hasOneUse() && OperatorClass == 2 && isa<ConstantInt>(Op1))
    if (Class == cByte || Class == cShort || Class == cInt) {
      Instruction *Use = cast<Instruction>(B.use_back());
      if (isa<SetCondInst>(Use) &&
          Use->getOperand(1) == Constant::getNullValue(B.getType())) {
        switch (getSetCCNumber(Use->getOpcode())) {
        case 0:
        case 1:
          return;
        default:
          if (B.getType()->isSigned()) return;
        }
      }
    }

  // Special case: op Reg, load [mem]
  if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1) && Class != cLong &&
      Op0->hasOneUse() &&
      isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op0), B))
    if (!B.swapOperands())
      std::swap(Op0, Op1);  // Make sure any loads are in the RHS.

  if (isa<LoadInst>(Op1) && Class != cLong && Op1->hasOneUse() &&
      isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op1), B)) {

    unsigned Opcode;
    if (Class != cFP) {
      static const unsigned OpcodeTab[][3] = {
        // Arithmetic operators
        { X86::ADD8rm, X86::ADD16rm, X86::ADD32rm },  // ADD
        { X86::SUB8rm, X86::SUB16rm, X86::SUB32rm },  // SUB

        // Bitwise operators
        { X86::AND8rm, X86::AND16rm, X86::AND32rm },  // AND
        { X86:: OR8rm, X86:: OR16rm, X86:: OR32rm },  // OR
        { X86::XOR8rm, X86::XOR16rm, X86::XOR32rm },  // XOR
      };
      Opcode = OpcodeTab[OperatorClass][Class];
    } else {
      static const unsigned OpcodeTab[][2] = {
        { X86::FADD32m, X86::FADD64m },  // ADD
        { X86::FSUB32m, X86::FSUB64m },  // SUB
      };
      const Type *Ty = Op0->getType();
      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
             "Unknown FP type!");
      Opcode = OpcodeTab[OperatorClass][Ty == Type::DoubleTy];
    }

    unsigned Op0r = getReg(Op0);
    if (AllocaInst *AI =
        dyn_castFixedAlloca(cast<LoadInst>(Op1)->getOperand(0))) {
      unsigned FI = getFixedSizedAllocaFI(AI);
      addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), FI);
    } else {
      X86AddressMode AM;
      getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), AM);
      addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), AM);
    }
    return;
  }

  // If this is a floating point subtract, check to see if we can fold the
  // first operand in.
  if (Class == cFP && OperatorClass == 1 &&
      isa<LoadInst>(Op0) &&
      isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op0), B)) {
    const Type *Ty = Op0->getType();
    assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
           "Unknown FP type!");
    unsigned Opcode = Ty == Type::FloatTy ? X86::FSUBR32m : X86::FSUBR64m;

    unsigned Op1r = getReg(Op1);
    if (AllocaInst *AI =
        dyn_castFixedAlloca(cast<LoadInst>(Op0)->getOperand(0))) {
      unsigned FI = getFixedSizedAllocaFI(AI);
      addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), FI);
    } else {
      X86AddressMode AM;
      getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), AM);
      addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), AM);
    }
    return;
  }

  emitSimpleBinaryOperation(BB, MI, Op0, Op1, OperatorClass, DestReg);
}

/// emitBinaryFPOperation - This method handles emission of floating point
/// Add (0), Sub (1), Mul (2), and Div (3) operations.
void X86ISel::emitBinaryFPOperation(MachineBasicBlock *BB,
                                    MachineBasicBlock::iterator IP,
                                    Value *Op0, Value *Op1,
                                    unsigned OperatorClass, unsigned DestReg) {
  // Special case: op Reg, <const fp>
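  // x86 has no FP immediates, so a nontrivial constant operand is placed in
  // the constant pool and folded in as the memory operand of the arithmetic
  // instruction.  +0.0 and +1.0 are excluded here, presumably because getReg
  // can materialize those directly (fldz/fld1).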
  if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1))
    if (!Op1C->isExactlyValue(+0.0) && !Op1C->isExactlyValue(+1.0)) {
      // Create a constant pool entry for this constant.
      MachineConstantPool *CP = F->getConstantPool();
      unsigned CPI = CP->getConstantPoolIndex(Op1C);
      const Type *Ty = Op1->getType();

      static const unsigned OpcodeTab[][4] = {
        { X86::FADD32m, X86::FSUB32m, X86::FMUL32m, X86::FDIV32m },   // Float
        { X86::FADD64m, X86::FSUB64m, X86::FMUL64m, X86::FDIV64m },   // Double
      };

      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
             "Unknown FP type!");
      unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass];
      unsigned Op0r = getReg(Op0, BB, IP);
      addConstantPoolReference(BuildMI(*BB, IP, Opcode, 5,
                                       DestReg).addReg(Op0r), CPI);
      return;
    }

  // Special case: R1 = op <const fp>, R2
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0))
    if (CFP->isExactlyValue(-0.0) && OperatorClass == 1) {
      // -0.0 - X  ===  -X
      unsigned op1Reg = getReg(Op1, BB, IP);
      BuildMI(*BB, IP, X86::FCHS, 1, DestReg).addReg(op1Reg);
      return;
    } else if (!CFP->isExactlyValue(+0.0) && !CFP->isExactlyValue(+1.0)) {
      // R1 = op CST, R2  -->  R1 = opr R2, CST

      // Create a constant pool entry for this constant.
      MachineConstantPool *CP = F->getConstantPool();
      unsigned CPI = CP->getConstantPoolIndex(CFP);
      const Type *Ty = CFP->getType();

      static const unsigned OpcodeTab[][4] = {
        { X86::FADD32m, X86::FSUBR32m, X86::FMUL32m, X86::FDIVR32m },  // Float
        { X86::FADD64m, X86::FSUBR64m, X86::FMUL64m, X86::FDIVR64m },  // Double
      };

      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
             "Unknown FP type!");
      unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass];
      unsigned Op1r = getReg(Op1, BB, IP);
      addConstantPoolReference(BuildMI(*BB, IP, Opcode, 5,
                                       DestReg).addReg(Op1r), CPI);
      return;
    }

  // General case.
  static const unsigned OpcodeTab[4] = {
    X86::FpADD, X86::FpSUB, X86::FpMUL, X86::FpDIV
  };

  unsigned Opcode = OpcodeTab[OperatorClass];
  unsigned Op0r = getReg(Op0, BB, IP);
  unsigned Op1r = getReg(Op1, BB, IP);
  BuildMI(*BB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
}
/// emitSimpleBinaryOperation - Implement simple binary operators for integral
/// types.  OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for
/// Or, 4 for Xor.  This is the common code shared between visitSimpleBinary
/// and constant expression support.
///
void X86ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
                                        MachineBasicBlock::iterator IP,
                                        Value *Op0, Value *Op1,
                                        unsigned OperatorClass,
                                        unsigned DestReg) {
  unsigned Class = getClassB(Op0->getType());

  if (Class == cFP) {
    assert(OperatorClass < 2 && "No logical ops for FP!");
    emitBinaryFPOperation(MBB, IP, Op0, Op1, OperatorClass, DestReg);
    return;
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0))
    if (OperatorClass == 1) {
      static unsigned const NEGTab[] = {
        X86::NEG8r, X86::NEG16r, X86::NEG32r, 0, X86::NEG32r
      };

      // sub 0, X -> neg X
      if (CI->isNullValue()) {
        unsigned op1Reg = getReg(Op1, MBB, IP);
        BuildMI(*MBB, IP, NEGTab[Class], 1, DestReg).addReg(op1Reg);

        if (Class == cLong) {
          // We just emitted: Dl = neg Sl
          // Now emit       : T  = addc Sh, 0
          //                  Dh = neg T
          unsigned T = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::ADC32ri, 2, T).addReg(op1Reg+1).addImm(0);
          BuildMI(*MBB, IP, X86::NEG32r, 1, DestReg+1).addReg(T);
        }
        return;
      } else if (Op1->hasOneUse() && Class != cLong) {
        // sub C, X -> tmp = neg X; DestReg = add tmp, C.  This is better
        // than copying C into a temporary register, because of register
        // pressure (Tmp and DestReg can share a register).
        static unsigned const ADDRITab[] = {
          X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri
        };
        unsigned op1Reg = getReg(Op1, MBB, IP);
        unsigned Tmp = makeAnotherReg(Op0->getType());
        BuildMI(*MBB, IP, NEGTab[Class], 1, Tmp).addReg(op1Reg);
        BuildMI(*MBB, IP, ADDRITab[Class], 2,
                DestReg).addReg(Tmp).addImm(CI->getRawValue());
        return;
      }
    }

  // Special case: op Reg, <const int>
  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    unsigned Op0r = getReg(Op0, MBB, IP);

    // xor X, -1 -> not X
    if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
      static unsigned const NOTTab[] = {
        X86::NOT8r, X86::NOT16r, X86::NOT32r, 0, X86::NOT32r
      };
      BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
      if (Class == cLong)  // Invert the top part too
        BuildMI(*MBB, IP, X86::NOT32r, 1, DestReg+1).addReg(Op0r+1);
      return;
    }

    // add X, -1 -> dec X
    if (OperatorClass == 0 && Op1C->isAllOnesValue() && Class != cLong) {
      // Note that we can't use dec for 64-bit decrements, because it does not
      // set the carry flag!
      static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
      BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
      return;
    }

    // add X, 1 -> inc X
    if (OperatorClass == 0 && Op1C->equalsInt(1) && Class != cLong) {
      // Note that we can't use inc for 64-bit increments, because it does not
      // set the carry flag!
      static unsigned const INCTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
      BuildMI(*MBB, IP, INCTab[Class], 1, DestReg).addReg(Op0r);
      return;
    }
    static const unsigned OpcodeTab[][5] = {
      // Arithmetic operators
      { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri },  // ADD
      { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, X86::SUB32ri },  // SUB

      // Bitwise operators
      { X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, X86::AND32ri },  // AND
      { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri, 0, X86:: OR32ri },  // OR
      { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, X86::XOR32ri },  // XOR
    };

    unsigned Opcode = OpcodeTab[OperatorClass][Class];
    unsigned Op1l = cast<ConstantInt>(Op1C)->getRawValue();

    if (Class != cLong) {
      BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
      return;
    }

    // If this is a long value and the high or low bits have a special
    // property, emit some special cases.
    unsigned Op1h = cast<ConstantInt>(Op1C)->getRawValue() >> 32LL;

    // If the constant is zero in the low 32-bits, just copy the low part
    // across and apply the normal 32-bit operation to the high parts.  There
    // will be no carry or borrow into the top.
    if (Op1l == 0) {
      if (OperatorClass != 2) // All but and...
        BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0r);
      else
        BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
      BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg+1)
        .addReg(Op0r+1).addImm(Op1h);
      return;
    }

    // If this is a logical operation and the top 32-bits are zero, just
    // operate on the lower 32.
    if (Op1h == 0 && OperatorClass > 1) {
      BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg)
        .addReg(Op0r).addImm(Op1l);
      if (OperatorClass != 2)  // All but and
        BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(Op0r+1);
      else
        BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
      return;
    }

    // TODO: We could handle lots of other special cases here, such as AND'ing
    // with 0xFFFFFFFF00000000 -> noop, etc.

    // Otherwise, code generate the full operation with a constant.
    static const unsigned TopTab[] = {
      X86::ADC32ri, X86::SBB32ri, X86::AND32ri, X86::OR32ri, X86::XOR32ri
    };

    BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
    BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1)
      .addReg(Op0r+1).addImm(Op1h);
    return;
  }
  // Finally, handle the general case now.
  static const unsigned OpcodeTab[][5] = {
    // Arithmetic operators
    { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, 0, X86::ADD32rr },  // ADD
    { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, 0, X86::SUB32rr },  // SUB

    // Bitwise operators
    { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, X86::AND32rr },  // AND
    { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0, X86:: OR32rr },  // OR
    { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, X86::XOR32rr },  // XOR
  };

  unsigned Opcode = OpcodeTab[OperatorClass][Class];
  unsigned Op0r = getReg(Op0, MBB, IP);
  unsigned Op1r = getReg(Op1, MBB, IP);
  BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);

  if (Class == cLong) {  // Handle the upper 32 bits of long values...
    static const unsigned TopTab[] = {
      X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
    };
    BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
            DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
  }
}
/// doMultiply - Emit appropriate instructions to multiply together the
/// registers op0Reg and op1Reg, and put the result in DestReg.  The type of
/// the result should be given as DestTy.
///
void X86ISel::doMultiply(MachineBasicBlock *MBB,
                         MachineBasicBlock::iterator MBBI,
                         unsigned DestReg, const Type *DestTy,
                         unsigned op0Reg, unsigned op1Reg) {
  unsigned Class = getClass(DestTy);
  switch (Class) {
  case cInt:
  case cShort:
    BuildMI(*MBB, MBBI, Class == cInt ? X86::IMUL32rr:X86::IMUL16rr, 2, DestReg)
      .addReg(op0Reg).addReg(op1Reg);
    return;
  case cByte:
    // Must use the MUL instruction, which forces use of AL...
    BuildMI(*MBB, MBBI, X86::MOV8rr, 1, X86::AL).addReg(op0Reg);
    BuildMI(*MBB, MBBI, X86::MUL8r, 1).addReg(op1Reg);
    BuildMI(*MBB, MBBI, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
    return;
  default:
  case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
  }
}
// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N.  It
// returns zero when the input is not exactly a power of two.
static unsigned ExactLog2(unsigned Val) {
  if (Val == 0 || (Val & (Val-1))) return 0;
  unsigned Count = 0;
  while (Val != 1) {
    Val >>= 1;
    ++Count;
  }
  return Count+1;
}
/// doMultiplyConst - This function is specialized to efficiently codegen an 8,
/// 16, or 32-bit integer multiply by a constant.
void X86ISel::doMultiplyConst(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP,
                              unsigned DestReg, const Type *DestTy,
                              unsigned op0Reg, unsigned ConstRHS) {
  static const unsigned MOVrrTab[] = {X86::MOV8rr, X86::MOV16rr, X86::MOV32rr};
  static const unsigned MOVriTab[] = {X86::MOV8ri, X86::MOV16ri, X86::MOV32ri};
  static const unsigned ADDrrTab[] = {X86::ADD8rr, X86::ADD16rr, X86::ADD32rr};
  static const unsigned NEGrTab[]  = {X86::NEG8r , X86::NEG16r , X86::NEG32r };

  unsigned Class = getClass(DestTy);
  unsigned TmpReg;

  // Handle special cases here.
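  // Multiplies by 3, 5, and 9 map onto a single LEA: with op0Reg as both
  // base and index and a scale of 2, 4, or 8, LEA computes op0 * (scale + 1).
  // The negative cases do the same and then negate the result.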
  switch ((int)ConstRHS) {
  case -2:
    TmpReg = makeAnotherReg(DestTy);
    BuildMI(*MBB, IP, NEGrTab[Class], 1, TmpReg).addReg(op0Reg);
    BuildMI(*MBB, IP, ADDrrTab[Class], 1,DestReg).addReg(TmpReg).addReg(TmpReg);
    return;
  case -1:
    BuildMI(*MBB, IP, NEGrTab[Class], 1, DestReg).addReg(op0Reg);
    return;
  case 0:
    BuildMI(*MBB, IP, MOVriTab[Class], 1, DestReg).addImm(0);
    return;
  case 1:
    BuildMI(*MBB, IP, MOVrrTab[Class], 1, DestReg).addReg(op0Reg);
    return;
  case 2:
    BuildMI(*MBB, IP, ADDrrTab[Class], 1,DestReg).addReg(op0Reg).addReg(op0Reg);
    return;
  case 3:
  case 5:
  case 9:
    if (Class == cInt) {
      X86AddressMode AM;
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = op0Reg;
      AM.Scale = ConstRHS-1;
      AM.IndexReg = op0Reg;
      AM.Disp = 0;
      addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, DestReg), AM);
      return;
    }
    break;
  case -3:
  case -5:
  case -9:
    if (Class == cInt) {
      TmpReg = makeAnotherReg(DestTy);
      X86AddressMode AM;
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = op0Reg;
      AM.Scale = -ConstRHS-1;
      AM.IndexReg = op0Reg;
      AM.Disp = 0;
      addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TmpReg), AM);
      BuildMI(*MBB, IP, NEGrTab[Class], 1, DestReg).addReg(TmpReg);
      return;
    }
    break;
  }

  // If the element size is exactly a power of 2, use a shift to get it.
  if (unsigned Shift = ExactLog2(ConstRHS)) {
    switch (Class) {
    default: assert(0 && "Unknown class for this function!");
    case cByte:
      BuildMI(*MBB, IP, X86::SHL8ri, 2, DestReg).addReg(op0Reg).addImm(Shift-1);
      return;
    case cShort:
      BuildMI(*MBB, IP, X86::SHL16ri, 2, DestReg).addReg(op0Reg)
        .addImm(Shift-1);
      return;
    case cInt:
      BuildMI(*MBB, IP, X86::SHL32ri, 2, DestReg).addReg(op0Reg)
        .addImm(Shift-1);
      return;
    }
  }

  // If the element size is a negative power of 2, use a shift/neg to get it.
  if (unsigned Shift = ExactLog2(-ConstRHS)) {
    TmpReg = makeAnotherReg(DestTy);
    BuildMI(*MBB, IP, NEGrTab[Class], 1, TmpReg).addReg(op0Reg);
    switch (Class) {
    default: assert(0 && "Unknown class for this function!");
    case cByte:
      BuildMI(*MBB, IP, X86::SHL8ri, 2, DestReg).addReg(TmpReg).addImm(Shift-1);
      return;
    case cShort:
      BuildMI(*MBB, IP, X86::SHL16ri, 2, DestReg).addReg(TmpReg)
        .addImm(Shift-1);
      return;
    case cInt:
      BuildMI(*MBB, IP, X86::SHL32ri, 2, DestReg).addReg(TmpReg)
        .addImm(Shift-1);
      return;
    }
  }

  if (Class == cShort) {
    BuildMI(*MBB, IP, X86::IMUL16rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
    return;
  } else if (Class == cInt) {
    BuildMI(*MBB, IP, X86::IMUL32rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
    return;
  }

  // Most general case, emit a normal multiply...
  TmpReg = makeAnotherReg(DestTy);
  BuildMI(*MBB, IP, MOVriTab[Class], 1, TmpReg).addImm(ConstRHS);

  // Emit a MUL to multiply the register holding the index by
  // elementSize, putting the result in OffsetReg.
  doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
}
/// visitMul - Multiplies are not simple binary operators because they must
/// deal with the EAX register explicitly.
///
void X86ISel::visitMul(BinaryOperator &I) {
  unsigned ResultReg = getReg(I);

  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);

  // Fold loads into floating point multiplies.
  if (getClass(Op0->getType()) == cFP) {
    if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1))
      if (!I.swapOperands())
        std::swap(Op0, Op1);  // Make sure any loads are in the RHS.
    if (LoadInst *LI = dyn_cast<LoadInst>(Op1))
      if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
        const Type *Ty = Op0->getType();
        assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
               "Unknown FP type!");
        unsigned Opcode = Ty == Type::FloatTy ? X86::FMUL32m : X86::FMUL64m;

        unsigned Op0r = getReg(Op0);
        if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
          unsigned FI = getFixedSizedAllocaFI(AI);
          addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
        } else {
          X86AddressMode AM;
          getAddressingMode(LI->getOperand(0), AM);
          addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
        }
        return;
      }
  }
  MachineBasicBlock::iterator IP = BB->end();
  emitMultiply(BB, IP, Op0, Op1, ResultReg);
}

void X86ISel::emitMultiply(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator IP,
                           Value *Op0, Value *Op1, unsigned DestReg) {
  MachineBasicBlock &BB = *MBB;
  TypeClass Class = getClass(Op0->getType());

  // Simple scalar multiply?
  unsigned Op0Reg = getReg(Op0, &BB, IP);
  switch (Class) {
  case cByte:
  case cShort:
  case cInt:
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
      unsigned Val = (unsigned)CI->getRawValue(); // Isn't a 64-bit constant
      doMultiplyConst(&BB, IP, DestReg, Op0->getType(), Op0Reg, Val);
    } else {
      unsigned Op1Reg = getReg(Op1, &BB, IP);
      doMultiply(&BB, IP, DestReg, Op1->getType(), Op0Reg, Op1Reg);
    }
    return;
  case cFP:
    emitBinaryFPOperation(MBB, IP, Op0, Op1, 2, DestReg);
    return;
  case cLong:
    break;
  }

  // Long value.  We have to do things the hard way...
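  // The 64x64->64 product is assembled from 32-bit pieces:
  //   lo(A*B) = lo(Al*Bl)
  //   hi(A*B) = hi(Al*Bl) + lo(Ah*Bl) + lo(Al*Bh)
  // (the Ah*Bh term only affects bits above 64 and is dropped).  MUL32r
  // produces the full Al*Bl product in EDX:EAX.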
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    unsigned CLow = CI->getRawValue();
    unsigned CHi  = CI->getRawValue() >> 32;

    if (CLow == 0) {
      // If the low part of the constant is all zeros, things are simple.
      BuildMI(BB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
      doMultiplyConst(&BB, IP, DestReg+1, Type::UIntTy, Op0Reg, CHi);
      return;
    }

    // Multiply the two low parts... capturing carry into EDX
    unsigned OverflowReg = 0;
    if (CLow == 1) {
      BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0Reg);
    } else {
      unsigned Op1RegL = makeAnotherReg(Type::UIntTy);
      OverflowReg = makeAnotherReg(Type::UIntTy);
      BuildMI(BB, IP, X86::MOV32ri, 1, Op1RegL).addImm(CLow);
      BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
      BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1RegL);  // AL*BL

      BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);     // AL*BL
      BuildMI(BB, IP, X86::MOV32rr, 1,
              OverflowReg).addReg(X86::EDX);                    // AL*BL >> 32
    }

    unsigned AHBLReg = makeAnotherReg(Type::UIntTy);   // AH*BL
    doMultiplyConst(&BB, IP, AHBLReg, Type::UIntTy, Op0Reg+1, CLow);

    unsigned AHBLplusOverflowReg;
    if (OverflowReg) {
      AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
      BuildMI(BB, IP, X86::ADD32rr, 2,                // AH*BL+(AL*BL >> 32)
              AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
    } else {
      AHBLplusOverflowReg = AHBLReg;
    }

    if (CHi == 0) {
      BuildMI(BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(AHBLplusOverflowReg);
    } else {
      unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
      doMultiplyConst(&BB, IP, ALBHReg, Type::UIntTy, Op0Reg, CHi);

      BuildMI(BB, IP, X86::ADD32rr, 2,      // AL*BH + AH*BL + (AL*BL >> 32)
              DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
    }
    return;
  }

  // General 64x64 multiply

  unsigned Op1Reg = getReg(Op1, &BB, IP);
  // Multiply the two low parts... capturing carry into EDX
  BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
  BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1Reg);  // AL*BL

  unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);     // AL*BL
  BuildMI(BB, IP, X86::MOV32rr, 1,
          OverflowReg).addReg(X86::EDX);                    // AL*BL >> 32

  unsigned AHBLReg = makeAnotherReg(Type::UIntTy);        // AH*BL
  BuildMI(BB, IP, X86::IMUL32rr, 2,
          AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);

  unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, IP, X86::ADD32rr, 2,                // AH*BL+(AL*BL >> 32)
          AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);

  unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
  BuildMI(BB, IP, X86::IMUL32rr, 2,
          ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);

  BuildMI(BB, IP, X86::ADD32rr, 2,        // AL*BH + AH*BL + (AL*BL >> 32)
          DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
}
/// visitDivRem - Handle division and remainder instructions... these
/// instructions both require the same instructions to be generated, they just
/// select the result from a different register.  Note that both of these
/// instructions work differently for signed and unsigned operands.
///
void X86ISel::visitDivRem(BinaryOperator &I) {
  unsigned ResultReg = getReg(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Fold loads into floating point divides.
  if (getClass(Op0->getType()) == cFP) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Op1))
      if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
        const Type *Ty = Op0->getType();
        assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
               "Unknown FP type!");
        unsigned Opcode = Ty == Type::FloatTy ? X86::FDIV32m : X86::FDIV64m;

        unsigned Op0r = getReg(Op0);
        if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
          unsigned FI = getFixedSizedAllocaFI(AI);
          addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
        } else {
          X86AddressMode AM;
          getAddressingMode(LI->getOperand(0), AM);
          addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
        }
        return;
      }

    if (LoadInst *LI = dyn_cast<LoadInst>(Op0))
      if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
        const Type *Ty = Op0->getType();
        assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
               "Unknown FP type!");
        unsigned Opcode = Ty == Type::FloatTy ? X86::FDIVR32m : X86::FDIVR64m;

        unsigned Op1r = getReg(Op1);
        if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
          unsigned FI = getFixedSizedAllocaFI(AI);
          addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), FI);
        } else {
          X86AddressMode AM;
          getAddressingMode(LI->getOperand(0), AM);
          addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), AM);
        }
        return;
      }
  }

  MachineBasicBlock::iterator IP = BB->end();
  emitDivRemOperation(BB, IP, Op0, Op1,
                      I.getOpcode() == Instruction::Div, ResultReg);
}
void X86ISel::emitDivRemOperation(MachineBasicBlock *BB,
                                  MachineBasicBlock::iterator IP,
                                  Value *Op0, Value *Op1, bool isDiv,
                                  unsigned ResultReg) {
  const Type *Ty = Op0->getType();
  unsigned Class = getClass(Ty);
  switch (Class) {
  case cFP:              // Floating point divide
    if (isDiv) {
      emitBinaryFPOperation(BB, IP, Op0, Op1, 3, ResultReg);
      return;
    } else {             // Floating point remainder...
      unsigned Op0Reg = getReg(Op0, BB, IP);
      unsigned Op1Reg = getReg(Op1, BB, IP);
      MachineInstr *TheCall =
        BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
      std::vector<ValueRecord> Args;
      Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy));
      Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy));
      doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args);
    }
    return;
  case cLong: {
    static const char *FnName[] =
      { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" };
    unsigned Op0Reg = getReg(Op0, BB, IP);
    unsigned Op1Reg = getReg(Op1, BB, IP);
    unsigned NameIdx = Ty->isUnsigned()*2 + isDiv;
    MachineInstr *TheCall =
      BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);

    std::vector<ValueRecord> Args;
    Args.push_back(ValueRecord(Op0Reg, Type::LongTy));
    Args.push_back(ValueRecord(Op1Reg, Type::LongTy));
    doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args);
    return;
  }
  case cByte: case cShort: case cInt:
    break;          // Small integrals, handled below...
  default: assert(0 && "Unknown class!");
  }

  static const unsigned MovOpcode[]={ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
  static const unsigned NEGOpcode[]={ X86::NEG8r,  X86::NEG16r,  X86::NEG32r };
  static const unsigned SAROpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
  static const unsigned SHROpcode[]={ X86::SHR8ri, X86::SHR16ri, X86::SHR32ri };
  static const unsigned ADDOpcode[]={ X86::ADD8rr, X86::ADD16rr, X86::ADD32rr };

  // Special case signed division by power of 2.
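  // A plain SAR rounds toward negative infinity, while C signed division
  // truncates toward zero.  The sequences below therefore add 2^k-1 to
  // negative dividends first (computed branchlessly from the sign bit) and
  // then shift right by k.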
  if (ConstantSInt *CI = dyn_cast<ConstantSInt>(Op1))
    if (isDiv) {
      assert(Class != cLong && "This doesn't handle 64-bit divides!");
      int V = CI->getValue();

      if (V == 1) {       // X /s 1 => X
        unsigned Op0Reg = getReg(Op0, BB, IP);
        BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(Op0Reg);
        return;
      }

      if (V == -1) {      // X /s -1 => -X
        unsigned Op0Reg = getReg(Op0, BB, IP);
        BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(Op0Reg);
        return;
      }

      if (V == 2 || V == -2) {      // X /s 2
        static const unsigned CMPOpcode[] = {
          X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
        };
        static const unsigned SBBOpcode[] = {
          X86::SBB8ri, X86::SBB16ri, X86::SBB32ri
        };
        unsigned Op0Reg = getReg(Op0, BB, IP);
        unsigned SignBit = 1 << (CI->getType()->getPrimitiveSize()*8-1);
        BuildMI(*BB, IP, CMPOpcode[Class], 2).addReg(Op0Reg).addImm(SignBit);

        unsigned TmpReg = makeAnotherReg(Op0->getType());
        BuildMI(*BB, IP, SBBOpcode[Class], 2, TmpReg).addReg(Op0Reg).addImm(-1);

        unsigned TmpReg2 = V == 2 ? ResultReg : makeAnotherReg(Op0->getType());
        BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg2).addReg(TmpReg).addImm(1);

        if (V == -2)
          BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(TmpReg2);
        return;
      }

      bool isNeg = false;
      if (V < 0) {         // Not a positive power of 2?
        V = -V;
        isNeg = true;      // Maybe it's a negative power of 2.
      }
      if (unsigned Log = ExactLog2(V)) {
        --Log;
        unsigned Op0Reg = getReg(Op0, BB, IP);
        unsigned TmpReg = makeAnotherReg(Op0->getType());
        BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg)
          .addReg(Op0Reg).addImm(Log-1);
        unsigned TmpReg2 = makeAnotherReg(Op0->getType());
        BuildMI(*BB, IP, SHROpcode[Class], 2, TmpReg2)
          .addReg(TmpReg).addImm(32-Log);
        unsigned TmpReg3 = makeAnotherReg(Op0->getType());
        BuildMI(*BB, IP, ADDOpcode[Class], 2, TmpReg3)
          .addReg(Op0Reg).addReg(TmpReg2);

        unsigned TmpReg4 = isNeg ? makeAnotherReg(Op0->getType()) : ResultReg;
        BuildMI(*BB, IP, SAROpcode[Class], 2, TmpReg4)
          .addReg(TmpReg3).addImm(Log);
        if (isNeg)
          BuildMI(*BB, IP, NEGOpcode[Class], 1, ResultReg).addReg(TmpReg4);
        return;
      }
    } else {    // X % C
      assert(Class != cLong && "This doesn't handle 64-bit remainder!");
      int V = CI->getValue();
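      // X % 2 is computed branchlessly below: sign-extend X to get S = 0 or
      // -1, isolate the low bit B of X, then (B ^ S) - S conditionally
      // negates it, yielding a remainder with the sign of the dividend.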
      if (V == 2 || V == -2) {      // X % 2, X % -2
        static const unsigned SExtOpcode[] = { X86::CBW, X86::CWD, X86::CDQ };
        static const unsigned BaseReg[]    = { X86::AL , X86::AX , X86::EAX };
        static const unsigned SExtReg[]    = { X86::AH , X86::DX , X86::EDX };
        static const unsigned ANDOpcode[]  = {
          X86::AND8ri, X86::AND16ri, X86::AND32ri
        };
        static const unsigned XOROpcode[]  = {
          X86::XOR8rr, X86::XOR16rr, X86::XOR32rr
        };
        static const unsigned SUBOpcode[]  = {
          X86::SUB8rr, X86::SUB16rr, X86::SUB32rr
        };

        // Sign extend result into reg of -1 or 0.
        unsigned Op0Reg = getReg(Op0, BB, IP);
        BuildMI(*BB, IP, MovOpcode[Class], 1, BaseReg[Class]).addReg(Op0Reg);
        BuildMI(*BB, IP, SExtOpcode[Class], 0);
        unsigned TmpReg0 = makeAnotherReg(Op0->getType());
        BuildMI(*BB, IP, MovOpcode[Class], 1, TmpReg0).addReg(SExtReg[Class]);

        unsigned TmpReg1 = makeAnotherReg(Op0->getType());
        BuildMI(*BB, IP, ANDOpcode[Class], 2, TmpReg1).addReg(Op0Reg).addImm(1);

        unsigned TmpReg2 = makeAnotherReg(Op0->getType());
        BuildMI(*BB, IP, XOROpcode[Class], 2,
                TmpReg2).addReg(TmpReg1).addReg(TmpReg0);
        BuildMI(*BB, IP, SUBOpcode[Class], 2,
                ResultReg).addReg(TmpReg2).addReg(TmpReg0);
        return;
      }
    }

  static const unsigned Regs[]     ={ X86::AL    , X86::AX     , X86::EAX     };
  static const unsigned ClrOpcode[]={ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri };
  static const unsigned ExtRegs[]  ={ X86::AH    , X86::DX     , X86::EDX     };

  static const unsigned DivOpcode[][4] = {
    { X86::DIV8r , X86::DIV16r , X86::DIV32r , 0 },  // Unsigned division
    { X86::IDIV8r, X86::IDIV16r, X86::IDIV32r, 0 },  // Signed division
  };

  unsigned Reg    = Regs[Class];
  unsigned ExtReg = ExtRegs[Class];
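  // x86 divide semantics: the dividend occupies the ExtReg:Reg pair (e.g.
  // EDX:EAX); DIV/IDIV leave the quotient in Reg and the remainder in
  // ExtReg, which is why the result is copied from one or the other below.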
  // Put the first operand into one of the A registers...
  unsigned Op0Reg = getReg(Op0, BB, IP);
  unsigned Op1Reg = getReg(Op1, BB, IP);
  BuildMI(*BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);

  if (Ty->isSigned()) {
    // Emit a sign extension instruction...
    unsigned ShiftResult = makeAnotherReg(Op0->getType());
    BuildMI(*BB, IP, SAROpcode[Class], 2,ShiftResult).addReg(Op0Reg).addImm(31);
    BuildMI(*BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);

    // Emit the appropriate divide or remainder instruction...
    BuildMI(*BB, IP, DivOpcode[1][Class], 1).addReg(Op1Reg);
  } else {
    // If unsigned, emit a zeroing instruction... (reg = 0)
    BuildMI(*BB, IP, ClrOpcode[Class], 2, ExtReg).addImm(0);

    // Emit the appropriate divide or remainder instruction...
    BuildMI(*BB, IP, DivOpcode[0][Class], 1).addReg(Op1Reg);
  }

  // Figure out which register we want to pick the result out of...
  unsigned DestReg = isDiv ? Reg : ExtReg;

  // Put the result into the destination register...
  BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
}
/// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here
/// for constant immediate shift values, and for constant immediate
/// shift values equal to 1.  Even the general case is sort of special,
/// because the shift amount has to be in CL, not just any old register.
///
void X86ISel::visitShiftInst(ShiftInst &I) {
  MachineBasicBlock::iterator IP = BB->end();
  emitShiftOperation(BB, IP, I.getOperand(0), I.getOperand(1),
                     I.getOpcode() == Instruction::Shl, I.getType(),
                     getReg(I));
}

/// emitShiftOperation - Common code shared between visitShiftInst and
/// constant expression support.
void X86ISel::emitShiftOperation(MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator IP,
                                 Value *Op, Value *ShiftAmount,
                                 bool isLeftShift, const Type *ResultTy,
                                 unsigned DestReg) {
  unsigned SrcReg = getReg(Op, MBB, IP);
  bool isSigned = ResultTy->isSigned();
  unsigned Class = getClass(ResultTy);

  static const unsigned ConstantOperand[][4] = {
    { X86::SHR8ri, X86::SHR16ri, X86::SHR32ri, X86::SHRD32rri8 }, // SHR
    { X86::SAR8ri, X86::SAR16ri, X86::SAR32ri, X86::SHRD32rri8 }, // SAR
    { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SHL
    { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SAL = SHL
  };

  static const unsigned NonConstantOperand[][4] = {
    { X86::SHR8rCL, X86::SHR16rCL, X86::SHR32rCL },  // SHR
    { X86::SAR8rCL, X86::SAR16rCL, X86::SAR32rCL },  // SAR
    { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL },  // SHL
    { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL },  // SAL = SHL
  };

  // Longs, as usual, are handled specially...
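  // For constant long shifts under 32 bits, SHLD/SHRD move bits across the
  // register pair in a single instruction; shifts of 32 or more reduce to a
  // 32-bit shift of one half plus a constant fill of the other.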
  if (Class == cLong) {
    // If we have a constant shift, we can generate much more efficient code
    // than for a variable shift...
    if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
      unsigned Amount = CUI->getValue();
      if (Amount == 1 && isLeftShift) {   // X << 1 == X+X
        BuildMI(*MBB, IP, X86::ADD32rr, 2,
                DestReg).addReg(SrcReg).addReg(SrcReg);
        BuildMI(*MBB, IP, X86::ADC32rr, 2,
                DestReg+1).addReg(SrcReg+1).addReg(SrcReg+1);
      } else if (Amount < 32) {
        const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
        if (isLeftShift) {
          BuildMI(*MBB, IP, Opc[3], 3,
                  DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addImm(Amount);
          BuildMI(*MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addImm(Amount);
        } else {
          BuildMI(*MBB, IP, Opc[3], 3,
                  DestReg).addReg(SrcReg).addReg(SrcReg+1).addImm(Amount);
          BuildMI(*MBB, IP, Opc[2],2,DestReg+1).addReg(SrcReg+1).addImm(Amount);
        }
      } else {                 // Shifting more than 32 bits
        Amount -= 32;
        if (isLeftShift) {
          if (Amount != 0) {
            BuildMI(*MBB, IP, X86::SHL32ri, 2,
                    DestReg + 1).addReg(SrcReg).addImm(Amount);
          } else {
            BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg);
          }
          BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
        } else {
          if (Amount != 0) {
            BuildMI(*MBB, IP, isSigned ? X86::SAR32ri : X86::SHR32ri, 2,
                    DestReg).addReg(SrcReg+1).addImm(Amount);
          } else {
            BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg+1);
          }
          // Fill the high word: sign bits for an arithmetic shift, zero
          // otherwise.
          if (isSigned)
            BuildMI(*MBB, IP, X86::SAR32ri, 2,
                    DestReg+1).addReg(SrcReg+1).addImm(31);
          else
            BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
        }
      }
    } else {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);
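      // Variable-count long shifts compute both candidate results: the
      // "< 32" value via SHLD/SHRD plus a 32-bit shift, and the ">= 32"
      // value (the hardware masks CL to 5 bits, so it cannot produce it
      // directly).  A TEST of bit 5 of CL then selects the correct pair
      // with CMOVNE.  TmpReg holds the value that becomes the far half for
      // counts of 32 or more.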
      if (!isLeftShift && isSigned) {
        // If this is a SHR of a Long, then we need to do funny sign extension
        // stuff.  TmpReg gets the value to use as the high-part if we are
        // shifting more than 32 bits.
        BuildMI(*MBB, IP, X86::SAR32ri, 2, TmpReg).addReg(SrcReg+1).addImm(31);
      } else {
        // Other shifts use a fixed zero value if the shift is more than 32
        // bits.
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TmpReg).addImm(0);
      }

      // Initialize CL with the shift amount...
      unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);

      unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
      unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
      if (isLeftShift) {
        // TmpReg2 = shld inHi, inLo
        BuildMI(*MBB, IP, X86::SHLD32rrCL,2,TmpReg2).addReg(SrcReg+1)
                                                    .addReg(SrcReg);
        // TmpReg3 = shl  inLo, CL
        BuildMI(*MBB, IP, X86::SHL32rCL, 1, TmpReg3).addReg(SrcReg);

        // Set the flags to indicate whether the shift was by more than 32
        // bits.
        BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);

        // DestHi = (>32) ? TmpReg3 : TmpReg2;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
        // DestLo = (>32) ? TmpReg : TmpReg3;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg).addReg(TmpReg3).addReg(TmpReg);
      } else {
        // TmpReg2 = shrd inLo, inHi
        BuildMI(*MBB, IP, X86::SHRD32rrCL,2,TmpReg2).addReg(SrcReg)
                                                    .addReg(SrcReg+1);
        // TmpReg3 = s[ah]r  inHi, CL
        BuildMI(*MBB, IP, isSigned ? X86::SAR32rCL : X86::SHR32rCL, 1, TmpReg3)
          .addReg(SrcReg+1);

        // Set the flags to indicate whether the shift was by more than 32
        // bits.
        BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);

        // DestLo = (>32) ? TmpReg3 : TmpReg2;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg).addReg(TmpReg2).addReg(TmpReg3);

        // DestHi = (>32) ? TmpReg : TmpReg3;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg+1).addReg(TmpReg3).addReg(TmpReg);
      }
    }
    return;
  }

  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
    // The shift amount is constant, guaranteed to be a ubyte. Get its value.
    assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");

    if (CUI->getValue() == 1 && isLeftShift) {    // X << 1 -> X+X
      static const unsigned AddOpC[] = {
        X86::ADD8rr, X86::ADD16rr, X86::ADD32rr
      };
      BuildMI(*MBB, IP, AddOpC[Class], 2,DestReg).addReg(SrcReg).addReg(SrcReg);
    } else {
      const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
      BuildMI(*MBB, IP, Opc[Class], 2,
              DestReg).addReg(SrcReg).addImm(CUI->getValue());
    }
  } else {                  // The shift amount is non-constant.
    unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
    BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);

    const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
    BuildMI(*MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
  }
}
/// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
/// instruction.  The load and store instructions are the only place where we
/// need to worry about the memory layout of the target machine.
///
void X86ISel::visitLoadInst(LoadInst &I) {
  // Check to see if this load instruction is going to be folded into a binary
  // instruction, like add.  If so, we don't want to emit it.  Wouldn't a real
  // pattern matching instruction selector be nice?
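  // A load whose only use is a foldable same-block instruction can be
  // emitted as that instruction's memory operand instead of into its own
  // register; isSafeToFoldLoadIntoInstruction guards against intervening
  // stores, calls, and volatile accesses.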
  unsigned Class = getClassB(I.getType());
  if (I.hasOneUse()) {
    Instruction *User = cast<Instruction>(I.use_back());
    switch (User->getOpcode()) {
    case Instruction::Cast:
      // If this is a cast from a signed-integer type to a floating point type,
      // fold the cast here.
      if (getClassB(User->getType()) == cFP &&
          (I.getType() == Type::ShortTy || I.getType() == Type::IntTy ||
           I.getType() == Type::LongTy)) {
        unsigned DestReg = getReg(User);
        static const unsigned Opcode[] = {
          0/*BYTE*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m
        };

        if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(0))) {
          unsigned FI = getFixedSizedAllocaFI(AI);
          addFrameReference(BuildMI(BB, Opcode[Class], 4, DestReg), FI);
        } else {
          X86AddressMode AM;
          getAddressingMode(I.getOperand(0), AM);
          addFullAddress(BuildMI(BB, Opcode[Class], 4, DestReg), AM);
        }
        return;
      } else {
        User = 0;
      }
      break;

    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      if (Class == cLong) User = 0;
      break;
    case Instruction::Mul:
    case Instruction::Div:
      if (Class != cFP) User = 0;
      break;  // Folding only implemented for floating point.
    default: User = 0; break;
    }
    if (User) {
      // Okay, we found a user.  If the load is the first operand and there is
      // no second operand load, reverse the operand ordering.  Note that this
      // can fail for a subtract (ie, no change will be made).
      bool Swapped = false;
      if (!isa<LoadInst>(User->getOperand(1)))
        Swapped = !cast<BinaryOperator>(User)->swapOperands();

      // Okay, now that everything is set up, if this load is used by the
      // second operand, and if there are no instructions that invalidate the
      // load before the binary operator, eliminate the load.
      if (User->getOperand(1) == &I &&
          isSafeToFoldLoadIntoInstruction(I, *User))
        return;   // Eliminate the load!

      // If this is a floating point sub or div, we won't be able to swap the
      // operands, but we will still be able to eliminate the load.
      if (Class == cFP && User->getOperand(0) == &I &&
          !isa<LoadInst>(User->getOperand(1)) &&
          (User->getOpcode() == Instruction::Sub ||
           User->getOpcode() == Instruction::Div) &&
          isSafeToFoldLoadIntoInstruction(I, *User))
        return;  // Eliminate the load!

      // If we swapped the operands to the instruction, but couldn't fold the
      // load anyway, swap them back.  We don't want to break add X, int
      // folding.
      if (Swapped) cast<BinaryOperator>(User)->swapOperands();
    }
  }

  static const unsigned Opcodes[] = {
    X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD32m, X86::MOV32rm
  };
  unsigned Opcode = Opcodes[Class];
  if (I.getType() == Type::DoubleTy) Opcode = X86::FLD64m;

  unsigned DestReg = getReg(I);

  if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(0))) {
    unsigned FI = getFixedSizedAllocaFI(AI);
    if (Class == cLong) {
      addFrameReference(BuildMI(BB, X86::MOV32rm, 4, DestReg), FI);
      addFrameReference(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), FI, 4);
    } else {
      addFrameReference(BuildMI(BB, Opcode, 4, DestReg), FI);
    }
  } else {
    X86AddressMode AM;
    getAddressingMode(I.getOperand(0), AM);

    if (Class == cLong) {
      addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg), AM);
      AM.Disp += 4;
      addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), AM);
    } else {
      addFullAddress(BuildMI(BB, Opcode, 4, DestReg), AM);
    }
  }
}
/// visitStoreInst - Implement LLVM store instructions in terms of the x86
/// 'mov' instruction.
///
void X86ISel::visitStoreInst(StoreInst &I) {
  X86AddressMode AM;
  getAddressingMode(I.getOperand(1), AM);

  const Type *ValTy = I.getOperand(0)->getType();
  unsigned Class = getClassB(ValTy);

  if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
    uint64_t Val = CI->getRawValue();
    if (Class == cLong) {
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(Val & ~0U);
      AM.Disp += 4;
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(Val>>32);
    } else {
      static const unsigned Opcodes[] = {
        X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
      };
      unsigned Opcode = Opcodes[Class];
      addFullAddress(BuildMI(BB, Opcode, 5), AM).addImm(Val);
    }
  } else if (isa<ConstantPointerNull>(I.getOperand(0))) {
    addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(0);
  } else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
    addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CB->getValue());
  } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) {
    // Store constant FP values with integer instructions to avoid having to
    // load the constants from the constant pool then do a store.
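    // The unions below reinterpret the IEEE-754 bit pattern of the constant
    // so it can be written with one (float) or two (double) immediate moves.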
    if (CFP->getType() == Type::FloatTy) {
      union {
        unsigned I;
        float    F;
      } V;
      V.F = CFP->getValue();
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(V.I);
    } else {
      union {
        uint64_t I;
        double   F;
      } V;
      V.F = CFP->getValue();
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm((unsigned)V.I);
      AM.Disp += 4;
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(
                                                        unsigned(V.I >> 32));
    }
  } else if (Class == cLong) {
    unsigned ValReg = getReg(I.getOperand(0));
    addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg);
    AM.Disp += 4;
    addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg+1);
  } else {
    // FIXME: stop emitting these two instructions:
    //    movl $global,%eax
    //    movl %eax,(%ebx)
    // when one instruction will suffice.  That includes when the global
    // has an offset applied to it.
    unsigned ValReg = getReg(I.getOperand(0));
    static const unsigned Opcodes[] = {
      X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
    };
    unsigned Opcode = Opcodes[Class];
    if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;

    addFullAddress(BuildMI(BB, Opcode, 1+4), AM).addReg(ValReg);
  }
}
/// visitCastInst - Here we have various kinds of copying with or without sign
/// extension going on.
///
void X86ISel::visitCastInst(CastInst &CI) {
  Value *Op = CI.getOperand(0);

  unsigned SrcClass = getClassB(Op->getType());
  unsigned DestClass = getClassB(CI.getType());
  // Noop casts are not emitted: getReg will return the source operand as the
  // register to use for any uses of the noop cast.
  if (DestClass == SrcClass) {
    // The only detail in this plan is that casts from double -> float are
    // truncating operations that we have to codegen through memory (despite
    // the fact that the source/dest registers are the same class).
    if (CI.getType() != Type::FloatTy || Op->getType() != Type::DoubleTy)
      return;
  }

  // If this is a cast from a 32-bit integer to a Long type, and the only uses
  // of the cast are GEP instructions, then the cast does not need to be
  // generated explicitly, it will be folded into the GEP.
  if (DestClass == cLong && SrcClass == cInt) {
    bool AllUsesAreGEPs = true;
    for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I)
      if (!isa<GetElementPtrInst>(*I)) {
        AllUsesAreGEPs = false;
        break;
      }

    // No need to codegen this cast if all users are getelementptr instrs...
    if (AllUsesAreGEPs) return;
  }

  // If this cast converts a load from a short, int, or long integer to a FP
  // value, we will have folded this cast away.
  if (DestClass == cFP && isa<LoadInst>(Op) && Op->hasOneUse() &&
      (Op->getType() == Type::ShortTy || Op->getType() == Type::IntTy ||
       Op->getType() == Type::LongTy))
    return;

  unsigned DestReg = getReg(CI);
  MachineBasicBlock::iterator MI = BB->end();
  emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
}
/// emitCastOperation - Common code shared between visitCastInst and constant
/// expression cast support.
///
void X86ISel::emitCastOperation(MachineBasicBlock *BB,
                                MachineBasicBlock::iterator IP,
                                Value *Src, const Type *DestTy,
                                unsigned DestReg) {
  const Type *SrcTy = Src->getType();
  unsigned SrcClass = getClassB(SrcTy);
  unsigned DestClass = getClassB(DestTy);
  unsigned SrcReg = getReg(Src, BB, IP);

  // Implement casts to bool by using compare on the operand followed by set if
  // not zero on the result.
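  // Integers use TEST reg,reg (an OR of the two halves for longs); FP values
  // use FTST against zero, with the status word moved into EFLAGS via
  // FNSTSW/SAHF before the SETNE.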
  if (DestTy == Type::BoolTy) {
    switch (SrcClass) {
    case cByte:
      BuildMI(*BB, IP, X86::TEST8rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cShort:
      BuildMI(*BB, IP, X86::TEST16rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cInt:
      BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cLong: {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::OR32rr, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
      break;
    }
    case cFP:
      BuildMI(*BB, IP, X86::FTST, 1).addReg(SrcReg);
      BuildMI(*BB, IP, X86::FNSTSW8r, 0);
      BuildMI(*BB, IP, X86::SAHF, 1);
      break;
    }

    // If the zero flag is not set, then the value is true, set the byte to
    // true.
    BuildMI(*BB, IP, X86::SETNEr, 1, DestReg);
    return;
  }

  static const unsigned RegRegMove[] = {
    X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
  };

  // Implement casts between values of the same type class (as determined by
  // getClass) by using a register-to-register move.
  if (SrcClass == DestClass) {
    if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
      BuildMI(*BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
    } else if (SrcClass == cFP) {
      if (SrcTy == Type::FloatTy) {  // float -> double
        assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
        BuildMI(*BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
      } else {                       // double -> float
        assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
               "Unknown cFP member!");
        // Truncate from double to float by storing to memory as a float,
        // then reading it back.
        unsigned FltAlign = TM.getTargetData().getFloatAlignment();
        int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
        addFrameReference(BuildMI(*BB, IP, X86::FST32m, 5),
                          FrameIdx).addReg(SrcReg);
        addFrameReference(BuildMI(*BB, IP, X86::FLD32m, 5, DestReg), FrameIdx);
      }
    } else if (SrcClass == cLong) {
      BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
      BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg+1);
    } else {
      assert(0 && "Cannot handle this type of cast instruction!");
    }
    return;
  }
  // Handle cast of SMALLER int to LARGER int using a move with sign extension
  // or zero extension, depending on whether the source type was signed.
  if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) &&
      SrcClass < DestClass) {
    bool isLong = DestClass == cLong;
    if (isLong) DestClass = cInt;

    static const unsigned Opc[][4] = {
      { X86::MOVSX16rr8, X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOV32rr }, // s
      { X86::MOVZX16rr8, X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOV32rr }  // u
    };
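    // Opc is indexed by SrcClass + DestClass - 1: byte->short = 0,
    // byte->int = 1, short->int = 2, and int->int = 3, the plain 32-bit move
    // used for the low half of an int -> long cast (DestClass was demoted
    // from cLong to cInt above).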
    bool isUnsigned = SrcTy->isUnsigned() || SrcTy == Type::BoolTy;
    BuildMI(*BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
            DestReg).addReg(SrcReg);

    if (isLong) {  // Handle upper 32 bits as appropriate...
      if (isUnsigned)     // Zero out top bits...
        BuildMI(*BB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
      else                // Sign extend bottom half...
        BuildMI(*BB, IP, X86::SAR32ri, 2, DestReg+1).addReg(DestReg).addImm(31);
    }
    return;
  }
  // Special case long -> int ...
  if (SrcClass == cLong && DestClass == cInt) {
    BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
    return;
  }
  // Handle cast of LARGER int to SMALLER int using a move to EAX followed by a
  // move out of AX or AL.
  if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt &&
      SrcClass > DestClass) {
    static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
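    // The truncation bounces through the A register family because AL, AX and
    // EAX alias the same physical register: any 32-bit value can be copied
    // into EAX and the low byte/word read back out of AL/AX, without needing
    // 8-bit subregister access to arbitrary registers.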
    BuildMI(*BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
    BuildMI(*BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
    return;
  }
  // Handle casts from integer to floating point now...
  if (DestClass == cFP) {
    // Promote the integer to a type supported by FILD.  We do this because
    // there are no unsigned FILD instructions, so we must promote an unsigned
    // value to a larger signed value, then use FILD on the larger value.
    //
    const Type *PromoteType = 0;
    unsigned PromoteOpcode = 0;
    unsigned RealDestReg = DestReg;
    switch (SrcTy->getTypeID()) {
    case Type::BoolTyID:
    case Type::SByteTyID:
      // We don't have the facilities for directly loading byte-sized data from
      // memory (even signed).  Promote it to 16 bits.
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVSX16rr8;
      break;
    case Type::UByteTyID:
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVZX16rr8;
      break;
    case Type::UShortTyID:
      PromoteType = Type::IntTy;
      PromoteOpcode = X86::MOVZX32rr16;
      break;
    case Type::ULongTyID:
    case Type::UIntTyID:
      // Don't fild into the real destination; the unsigned-fixup code below
      // computes the final result into RealDestReg.
      DestReg = makeAnotherReg(Type::DoubleTy);
      break;
    default:  // No promotion needed...
      break;
    }

    if (PromoteType) {
      unsigned TmpReg = makeAnotherReg(PromoteType);
      BuildMI(*BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
      SrcTy = PromoteType;
      SrcClass = getClass(PromoteType);
      SrcReg = TmpReg;
    }
    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());

    if (SrcClass == cLong) {
      addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
                        FrameIdx).addReg(SrcReg);
      addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
                        FrameIdx, 4).addReg(SrcReg+1);
    } else {
      static const unsigned Op1[] = { X86::MOV8mr, X86::MOV16mr, X86::MOV32mr };
      addFrameReference(BuildMI(*BB, IP, Op1[SrcClass], 5),
                        FrameIdx).addReg(SrcReg);
    }

    static const unsigned Op2[] =
      { 0/*byte*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m };
    addFrameReference(BuildMI(*BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
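    // FILD always interprets the in-memory value as a signed integer, so an
    // unsigned 32- or 64-bit source with its top bit set comes back negative;
    // the code below compensates by conditionally adding 2^32 or 2^64.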
    if (SrcTy == Type::UIntTy) {
      // If this is a cast from uint -> double, we need to be careful about if
      // the "sign" bit is set.  If so, we don't want to make a negative number,
      // we want to make a positive number.  Emit code to add an offset if the
      // sign bit is set.

      // Compute whether the sign bit is set by shifting the reg right 31 bits.
      unsigned IsNeg = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::SHR32ri, 2, IsNeg).addReg(SrcReg).addImm(31);

      // Create a CP value that has the offset in one word and 0 in the other.
      static ConstantInt *TheOffset = ConstantUInt::get(Type::ULongTy,
                                                        0x4f80000000000000ULL);
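      // Stored little-endian, the pool entry's word at offset 0 is 0.0f and
      // its word at offset 4 is 0x4f800000, the IEEE single for 2^32; the
      // FADD below indexes by 4*IsNeg, adding 0.0 or 2^32 as appropriate.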
      unsigned CPI = F->getConstantPool()->getConstantPoolIndex(TheOffset);
      BuildMI(*BB, IP, X86::FADD32m, 5, RealDestReg).addReg(DestReg)
        .addConstantPoolIndex(CPI).addZImm(4).addReg(IsNeg).addSImm(0);
    } else if (SrcTy == Type::ULongTy) {
      // We need special handling for unsigned 64-bit integer sources.  If the
      // input number has the "sign bit" set, then we loaded it incorrectly as a
      // negative 64-bit number.  In this case, add an offset value.

      // Emit a test instruction to see if the dynamic input value was signed.
      BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg+1).addReg(SrcReg+1);

      // If the sign bit is set, get a pointer to an offset, otherwise get a
      // pointer to a zero.
      MachineConstantPool *CP = F->getConstantPool();
      unsigned Zero = makeAnotherReg(Type::IntTy);
      Constant *Null = Constant::getNullValue(Type::UIntTy);
      addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Zero),
                               CP->getConstantPoolIndex(Null));
      unsigned Offset = makeAnotherReg(Type::IntTy);
      Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
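      // 0x5f800000 is 2^64 as an IEEE single; adding it corrects a 64-bit
      // value that FILD read back as negative.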
      addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Offset),
                               CP->getConstantPoolIndex(OffsetCst));
      unsigned Addr = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);

      // Load the constant for an add.  FIXME: this could make an 'fadd' that
      // reads directly from memory, but we don't support these yet.
      unsigned ConstReg = makeAnotherReg(Type::DoubleTy);
      addDirectMem(BuildMI(*BB, IP, X86::FLD32m, 4, ConstReg), Addr);

      BuildMI(*BB, IP, X86::FpADD, 2, RealDestReg)
        .addReg(ConstReg).addReg(DestReg);
    }

    return;
  }
  // Handle casts from floating point to integer now...
  if (SrcClass == cFP) {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    //
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(*BB, IP, X86::FNSTCW16m, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
    addFrameReference(BuildMI(*BB, IP, X86::MOV8rm, 4, HighPartOfCW),
                      CWFrameIdx, 1);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, IP, X86::MOV8mi, 5),
                      CWFrameIdx, 1).addImm(12);
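    // Writing 12 (0x0C) into byte 1 of the control word sets bits 10-11, the
    // rounding-control field, to 11b = round toward zero, which is what C's
    // float-to-integer conversion semantics require.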
    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, IP, X86::MOV8mr, 5),
                      CWFrameIdx, 1).addReg(HighPartOfCW);
    // We don't have the facilities for directly storing byte-sized data to
    // memory.  Promote it to 16 bits.  We also must promote unsigned values to
    // larger classes because we only have signed FP stores.
    unsigned StoreClass = DestClass;
    const Type *StoreTy = DestTy;
    if (StoreClass == cByte || DestTy->isUnsigned())
      switch (StoreClass) {
      case cByte:  StoreTy = Type::ShortTy; StoreClass = cShort; break;
      case cShort: StoreTy = Type::IntTy;   StoreClass = cInt;   break;
      case cInt:   StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      // The following treatment of cLong may not be perfectly right,
      // but it survives chains of casts of the form
      // double->ulong->double.
      case cLong:  StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      default: assert(0 && "Unknown store class!");
      }
    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());

    static const unsigned Op1[] =
      { 0, X86::FIST16m, X86::FIST32m, 0, X86::FISTP64m };
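    // Note the 64-bit entry is FISTP64m: there is no non-popping 64-bit FIST,
    // so the store below also pops the value off the FP stack.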
    addFrameReference(BuildMI(*BB, IP, Op1[StoreClass], 5),
                      FrameIdx).addReg(SrcReg);

    if (DestClass == cLong) {
      addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg), FrameIdx);
      addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg+1),
                        FrameIdx, 4);
    } else {
      static const unsigned Op2[] = { X86::MOV8rm, X86::MOV16rm, X86::MOV32rm };
      addFrameReference(BuildMI(*BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
    }

    // Reload the original control word now...
    addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
    return;
  }

  // Anything we haven't handled already, we can't (yet) handle at all.
  assert(0 && "Unhandled cast instruction!");
}
/// visitVANextInst - Implement the va_next instruction...
///
void X86ISel::visitVANextInst(VANextInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  unsigned Size;
  switch (I.getArgType()->getTypeID()) {
  default:
    assert(0 && "Error: bad type for va_next instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    Size = 4;
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
  case Type::DoubleTyID:
    Size = 8;
    break;
  }

  // Increment the VAList pointer...
  BuildMI(BB, X86::ADD32ri, 2, DestReg).addReg(VAList).addImm(Size);
}
void X86ISel::visitVAArgInst(VAArgInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  switch (I.getType()->getTypeID()) {
  default:
    assert(0 && "Error: bad type for va_arg instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
    addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
    addRegOffset(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), VAList, 4);
    break;
  case Type::DoubleTyID:
    addDirectMem(BuildMI(BB, X86::FLD64m, 4, DestReg), VAList);
    break;
  }
}
/// visitGetElementPtrInst - instruction-select GEP instructions
///
void X86ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
  // If this GEP instruction will be folded into all of its users, we don't
  // need to explicitly calculate it!
  X86AddressMode AM;
  if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), AM)) {
    // Check all of the users of the instruction to see if they are loads and
    // stores.
    bool AllWillFold = true;
    for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI)
      if (cast<Instruction>(*UI)->getOpcode() != Instruction::Load)
        if (cast<Instruction>(*UI)->getOpcode() != Instruction::Store ||
            cast<Instruction>(*UI)->getOperand(0) == &I) {
          // A store can only fold the GEP used as its pointer operand; a GEP
          // being stored as the value (operand 0) still needs a register.
          AllWillFold = false;
          break;
        }

    // If the instruction is foldable, and will be folded into all users, don't
    // codegen it!
    if (AllWillFold) return;
  }

  unsigned outputReg = getReg(I);
  emitGEPOperation(BB, BB->end(), I.getOperand(0),
                   I.op_begin()+1, I.op_end(), outputReg);
}
/// getGEPIndex - Inspect the getelementptr operands specified with GEPOps and
/// GEPTypes (the derived types being stepped through at each level).  On
/// return from this function, if some indexes of the instruction are
/// representable as an X86 lea instruction, the machine operands are put into
/// AM and the consumed indexes are popped from the GEPOps/GEPTypes lists.  If
/// the returned addressing mode only partially consumes the input, the BaseReg
/// field of the addressing mode must be left free so the caller can chain
/// additional address computations through it.
///
/// Note that there is one fewer entry in GEPTypes than there is in GEPOps.
///
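/// For example, "getelementptr int* %P, int %i" fits a single address mode:
/// base register = %P, index register = %i, scale = 4 (sizeof int, one of
/// the legal X86 SIB scales).
///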
void X86ISel::getGEPIndex(MachineBasicBlock *MBB,
                          MachineBasicBlock::iterator IP,
                          std::vector<Value*> &GEPOps,
                          std::vector<const Type*> &GEPTypes,
                          X86AddressMode &AM) {
  const TargetData &TD = TM.getTargetData();

  // Clear out the state we are working with...
  AM.BaseType = X86AddressMode::RegBase;
  AM.Base.Reg = 0;   // No base register
  AM.Scale = 1;      // Unit scale
  AM.IndexReg = 0;   // No index register
  AM.Disp = 0;       // No displacement
  // While there are GEP indexes that can be folded into the current address,
  // keep processing them.
  while (!GEPTypes.empty()) {
    if (const StructType *StTy = dyn_cast<StructType>(GEPTypes.back())) {
      // It's a struct access.  CUI is the index into the structure,
      // which names the field.  This index must have unsigned type.
      const ConstantUInt *CUI = cast<ConstantUInt>(GEPOps.back());

      // Use the TargetData structure to pick out what the layout of the
      // structure is in memory.  Since the structure index must be constant, we
      // can get its value and use it to find the right byte offset from the
      // StructLayout class's list of structure member offsets.
      AM.Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();

      // idx is the index into the array.  Unlike with structure
      // indices, we may not know its actual value at code-generation
      // time.

      // If idx is a constant, fold it into the offset.
      unsigned TypeSize = TD.getTypeSize(SqTy->getElementType());
      if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
        AM.Disp += TypeSize*CSI->getValue();
      } else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) {
        AM.Disp += TypeSize*CUI->getValue();
      } else {
        // If the index reg is already taken, we can't handle this index.
        if (AM.IndexReg) return;

        // If this is a size that we can handle, then add the index as a
        // scaled index register.
        switch (TypeSize) {
        case 1: case 2: case 4: case 8:
          // These are all acceptable scales on X86.
          AM.Scale = TypeSize;
          break;
        default:
          // Otherwise, we can't handle this scale.
          return;
        }

        // Look through int/uint -> long casts: the address arithmetic is done
        // in 32 bits anyway, so use the narrower source value directly.
        if (CastInst *CI = dyn_cast<CastInst>(idx))
          if (CI->getOperand(0)->getType() == Type::IntTy ||
              CI->getOperand(0)->getType() == Type::UIntTy)
            idx = CI->getOperand(0);
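        // When MBB is null (the foldability probe from isGEPFoldable) we must
        // not emit any code, so a dummy register number stands in for the
        // real index register.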
        AM.IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
      }

      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    }
  }
  // GEPTypes is empty, which means we have a single operand left.  Set it as
  // the base register.
  //
  assert(AM.Base.Reg == 0);

  if (AllocaInst *AI = dyn_castFixedAlloca(GEPOps.back())) {
    AM.BaseType = X86AddressMode::FrameIndexBase;
    AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
    GEPOps.pop_back();
    return;
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps.back())) {
    AM.GV = GV;
    GEPOps.pop_back();
    return;
  }

  AM.Base.Reg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
  GEPOps.pop_back();        // Consume the last GEP operand
}
/// isGEPFoldable - Return true if the specified GEP can be completely
/// folded into the addressing mode of a load/store or lea instruction.
bool X86ISel::isGEPFoldable(MachineBasicBlock *MBB,
                            Value *Src, User::op_iterator IdxBegin,
                            User::op_iterator IdxEnd, X86AddressMode &AM) {

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*>
    GEPTypes(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
             gep_type_end(Src->getType(), IdxBegin, IdxEnd));

  MachineBasicBlock::iterator IP;
  if (MBB) IP = MBB->end();
  getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);

  // We can fold it away iff the getGEPIndex call eliminated all operands.
  return GEPOps.empty();
}
void X86ISel::emitGEPOperation(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator IP,
                               Value *Src, User::op_iterator IdxBegin,
                               User::op_iterator IdxEnd, unsigned TargetReg) {
  const TargetData &TD = TM.getTargetData();

  // If this is a getelementptr null, with all constant integer indices, just
  // load the constant offset directly into TargetReg.
  if (isa<ConstantPointerNull>(Src)) {
    User::op_iterator I = IdxBegin;
    for (; I != IdxEnd; ++I)
      if (!isa<ConstantInt>(*I))
        break;
    if (I == IdxEnd) {   // All constant indices
      unsigned Offset = TD.getIndexedOffset(Src->getType(),
                                      std::vector<Value*>(IdxBegin, IdxEnd));
      BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addImm(Offset);
      return;
    }
  }
  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*> GEPTypes;
  GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
                  gep_type_end(Src->getType(), IdxBegin, IdxEnd));
  // Keep emitting instructions until we consume the entire GEP instruction.
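  // Note that the operands are consumed back to front: after emitting an ADD
  // the iterator is backed up (--IP below) so that the code computing the
  // ADD's operand is inserted in front of it.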
  while (!GEPOps.empty()) {
    unsigned OldSize = GEPOps.size();
    X86AddressMode AM;
    getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);

    if (GEPOps.size() != OldSize) {
      // getGEPIndex consumed some of the input.  Build an LEA instruction here.
      unsigned NextTarget = 0;
      if (!GEPOps.empty()) {
        assert(AM.Base.Reg == 0 &&
           "getGEPIndex should have left the base register open for chaining!");
        NextTarget = AM.Base.Reg = makeAnotherReg(Type::UIntTy);
      }

      if (AM.BaseType == X86AddressMode::RegBase &&
          AM.IndexReg == 0 && AM.Disp == 0 && !AM.GV)
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(AM.Base.Reg);
      else if (AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0 &&
               AM.IndexReg == 0 && AM.Disp == 0)
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(AM.GV);
      else
        addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg), AM);

      TargetReg = NextTarget;
    } else if (GEPTypes.empty()) {
      // The getGEPIndex operation didn't want to build an LEA.  Check to see if
      // all operands are consumed but the base pointer.  If so, just load it
      // into the register.
      if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps[0])) {
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(GV);
      } else {
        unsigned BaseReg = getReg(GEPOps[0], MBB, IP);
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
      }
      break;                // we are now done
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();

      // Many GEP instructions use a [cast (int/uint) to LongTy] as their
      // operand on X86.  Handle this case directly now...
      if (CastInst *CI = dyn_cast<CastInst>(idx))
        if (CI->getOperand(0)->getType() == Type::IntTy ||
            CI->getOperand(0)->getType() == Type::UIntTy)
          idx = CI->getOperand(0);

      // We want to add BaseReg to (idxReg * sizeof ElementType).  First, we
      // must find the size of the pointed-to type (Not coincidentally, the
      // next type is the type of the elements in the array).
      const Type *ElTy = SqTy->getElementType();
      unsigned elementSize = TD.getTypeSize(ElTy);

      // If idxReg is a constant, we don't need to perform the multiply!
      if (ConstantInt *CSI = dyn_cast<ConstantInt>(idx)) {
        if (!CSI->isNullValue()) {
          unsigned Offset = elementSize*CSI->getRawValue();
          unsigned Reg = makeAnotherReg(Type::UIntTy);
          BuildMI(*MBB, IP, X86::ADD32ri, 2, TargetReg)
            .addReg(Reg).addImm(Offset);
          --IP;            // Insert the next instruction before this one.
          TargetReg = Reg; // Codegen the rest of the GEP into this
        }
      } else if (elementSize == 1) {
        // If the element size is 1, we don't have to multiply, just add.
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2, TargetReg).addReg(Reg).addReg(idxReg);
        --IP;            // Insert the next instruction before this one.
        TargetReg = Reg; // Codegen the rest of the GEP into this
      } else {
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned OffsetReg = makeAnotherReg(Type::UIntTy);

        // Make sure we can back the iterator up to point to the first
        // instruction emitted.
        MachineBasicBlock::iterator BeforeIt = IP;
        if (IP == MBB->begin())
          BeforeIt = MBB->end();
        else
          --BeforeIt;
        doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);

        // Emit an ADD to add OffsetReg to the basePtr.
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2, TargetReg)
          .addReg(Reg).addReg(OffsetReg);

        // Step to the first instruction of the multiply.
        if (BeforeIt == MBB->end())
          IP = MBB->begin();
        else
          IP = ++BeforeIt;

        TargetReg = Reg; // Codegen the rest of the GEP into this
      }
    }
  }
}
/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
void X86ISel::visitAllocaInst(AllocaInst &I) {
  // If this is a fixed size alloca in the entry block for the function, we
  // statically stack allocate the space, so we don't need to do anything here.
  //
  if (dyn_castFixedAlloca(&I)) return;

  // Find the data size of the alloca inst's getAllocatedType.
  const Type *Ty = I.getAllocatedType();
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);

  // Create a register to hold the temporary result of multiplying the type
  // size constant by the variable amount.
  unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
  unsigned SrcReg1 = getReg(I.getArraySize());

  // TotalSizeReg = mul <numelements>, <TypeSize>
  MachineBasicBlock::iterator MBBI = BB->end();
  doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);

  // AddedSize = add <TotalSizeReg>, 15
  unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ADD32ri, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15);

  // AlignedSize = and <AddedSize>, ~15
  unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::AND32ri, 2, AlignedSize).addReg(AddedSizeReg).addImm(~15);
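  // Adding 15 and then masking with ~15 rounds the byte count up to the next
  // multiple of 16, so ESP stays 16-byte aligned after the subtraction below.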
  // Subtract size from stack pointer, thereby allocating some space.
  BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);

  // Put a pointer to the space into the result register, by copying
  // the stack pointer.
  BuildMI(BB, X86::MOV32rr, 1, getReg(I)).addReg(X86::ESP);

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  F->getFrameInfo()->CreateVariableSizedObject();
}
/// visitMallocInst - Malloc instructions are code generated into direct calls
/// to the library malloc.
///
void X86ISel::visitMallocInst(MallocInst &I) {
  unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
  unsigned Arg;

  if (ConstantUInt *C = dyn_cast<ConstantUInt>(I.getOperand(0))) {
    Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
  } else {
    Arg = makeAnotherReg(Type::UIntTy);
    unsigned Op0Reg = getReg(I.getOperand(0));
    MachineBasicBlock::iterator MBBI = BB->end();
    doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
  }

  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(Arg, Type::UIntTy));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("malloc", true);
  doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args);
}
/// visitFreeInst - Free instructions are code gen'd to call the free libc
/// function.
///
void X86ISel::visitFreeInst(FreeInst &I) {
  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(I.getOperand(0)));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("free", true);
  doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}
/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peep-hole fashion.  The
/// generated code sucks but the implementation is nice and simple.
///
FunctionPass *llvm::createX86SimpleInstructionSelector(TargetMachine &TM) {
  return new X86ISel(TM);
}