1 //===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for X86.
12 //===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86RegisterInfo.h"
17 #include "llvm/Constants.h" // FIXME: REMOVE
18 #include "llvm/Function.h"
19 #include "llvm/CodeGen/MachineConstantPool.h" // FIXME: REMOVE
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/SelectionDAGISel.h"
24 #include "llvm/CodeGen/SSARegMap.h"
25 #include "llvm/Target/TargetData.h"
26 #include "llvm/Target/TargetLowering.h"
27 #include "llvm/Support/MathExtras.h"
28 #include "llvm/ADT/Statistic.h"
32 //===----------------------------------------------------------------------===//
33 // X86TargetLowering - X86 Implementation of the TargetLowering interface
// X86TargetLowering - X86 implementation of the TargetLowering interface.
// Declares which MVT value types live in which X86 register classes, which
// operations the selector cannot handle (so legalize must expand/promote
// them), and the hooks below that lower arguments, calls, varargs and
// frame/return-address queries into X86-specific SelectionDAG nodes.
// NOTE(review): the original line numbering in this listing jumps, so some
// physical lines (e.g. closing braces) are not visible in this view.
35 class X86TargetLowering : public TargetLowering {
36 int VarArgsFrameIndex; // FrameIndex for start of varargs area.
37 int ReturnAddrIndex; // FrameIndex for return slot.
// Constructor: wires the type -> register-class mapping and legality tables.
39 X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
40 // Set up the TargetLowering object.
41 addRegisterClass(MVT::i8, X86::R8RegisterClass);
42 addRegisterClass(MVT::i16, X86::R16RegisterClass);
43 addRegisterClass(MVT::i32, X86::R32RegisterClass);
// All FP values live in the x87 stack register class (f32 and f64 alike).
44 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
46 // FIXME: Eliminate these two classes when legalize can handle promotions
48 addRegisterClass(MVT::i1, X86::R8RegisterClass);
49 addRegisterClass(MVT::f32, X86::RFPRegisterClass);
51 computeRegisterProperties();
// Operations this selector cannot match directly; the legalizer must
// expand or promote each of these before selection.
53 setOperationUnsupported(ISD::MEMMOVE, MVT::Other);
55 setOperationUnsupported(ISD::MUL, MVT::i8);
56 setOperationUnsupported(ISD::SELECT, MVT::i1);
57 setOperationUnsupported(ISD::SELECT, MVT::i8);
// FP immediates the x87 unit can materialize without a constant-pool load
// (FLD0/FLD1, with FCHS for the negated forms).
59 addLegalFPImmediate(+0.0); // FLD0
60 addLegalFPImmediate(+1.0); // FLD1
61 addLegalFPImmediate(-0.0); // FLD0/FCHS
62 addLegalFPImmediate(-1.0); // FLD1/FCHS
65 /// LowerArguments - This hook must be implemented to indicate how we should
66 /// lower the arguments for the specified function, into the specified DAG.
67 virtual std::vector<SDOperand>
68 LowerArguments(Function &F, SelectionDAG &DAG);
70 /// LowerCallTo - This hook lowers an abstract call to a function into an
72 virtual std::pair<SDOperand, SDOperand>
73 LowerCallTo(SDOperand Chain, const Type *RetTy, SDOperand Callee,
74 ArgListTy &Args, SelectionDAG &DAG);
// Lowers llvm.va_start; returns the vararg-area pointer plus output chain.
76 virtual std::pair<SDOperand, SDOperand>
77 LowerVAStart(SDOperand Chain, SelectionDAG &DAG);
// Lowers va_arg/va_next; isVANext selects between the two forms.
79 virtual std::pair<SDOperand,SDOperand>
80 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
81 const Type *ArgTy, SelectionDAG &DAG);
// Lowers llvm.frameaddress/llvm.returnaddress queries.
83 virtual std::pair<SDOperand, SDOperand>
84 LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
// LowerArguments - Create a fixed stack object for each incoming argument
// (laid out above the return address per the x86 cdecl convention), emit a
// load from that slot for every live argument, and remember where varargs
// begin.  Returns one SDOperand per formal argument.
90 std::vector<SDOperand>
91 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
92 std::vector<SDOperand> ArgValues;
94 // Add DAG nodes to load the arguments... On entry to a function on the X86,
95 // the stack frame looks like this:
97 // [ESP] -- return address
98 // [ESP + 4] -- first argument (leftmost lexically)
99 // [ESP + 8] -- second argument, if first argument is four bytes in size
102 MachineFunction &MF = DAG.getMachineFunction();
103 MachineFrameInfo *MFI = MF.getFrameInfo();
105 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
106 for (Function::aiterator I = F.abegin(), E = F.aend(); I != E; ++I) {
107 MVT::ValueType ObjectVT = getValueType(I->getType());
108 unsigned ArgIncrement = 4;
// Size each argument slot; 64-bit values also widen the stack increment.
// NOTE(review): the enclosing switch header / ObjSize declaration are on
// lines missing from this listing.
111 default: assert(0 && "Unhandled argument type!");
113 case MVT::i8: ObjSize = 1; break;
114 case MVT::i16: ObjSize = 2; break;
115 case MVT::i32: ObjSize = 4; break;
116 case MVT::i64: ObjSize = ArgIncrement = 8; break;
117 case MVT::f32: ObjSize = 4; break;
118 case MVT::f64: ObjSize = ArgIncrement = 8; break;
120 // Create the frame index object for this incoming parameter...
121 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
123 // Create the SelectionDAG nodes corresponding to a load from this parameter
124 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
126 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
130 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN);
// Dead arguments are replaced with a cheap zero constant of matching type.
132 if (MVT::isInteger(ObjectVT))
133 ArgValue = DAG.getConstant(0, ObjectVT);
135 ArgValue = DAG.getConstantFP(0, ObjectVT);
137 ArgValues.push_back(ArgValue);
139 ArgOffset += ArgIncrement; // Move on to the next argument...
142 // If the function takes variable number of arguments, make a frame index for
143 // the start of the first vararg value... for expansion of llvm.va_start.
145 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
146 ReturnAddrIndex = 0; // No return address slot generated yet.
// LowerCallTo - Lower an abstract call: bracket it with ADJCALLSTACKDOWN/UP,
// store each (promoted) argument to its outgoing stack slot relative to ESP,
// and emit the call node.  Returns {call result, output chain}.
150 std::pair<SDOperand, SDOperand>
151 X86TargetLowering::LowerCallTo(SDOperand Chain,
152 const Type *RetTy, SDOperand Callee,
153 ArgListTy &Args, SelectionDAG &DAG) {
154 // Count how many bytes are to be pushed on the stack.
155 unsigned NumBytes = 0;
// Zero-byte adjustment — presumably the no-argument fast path; the guarding
// condition is on a line missing from this listing (TODO confirm).
159 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
160 DAG.getConstant(0, getPointerTy()));
// Sum the outgoing stack bytes per argument type (case bodies not visible).
162 for (unsigned i = 0, e = Args.size(); i != e; ++i)
163 switch (getValueType(Args[i].second)) {
164 default: assert(0 && "Unknown value type!");
178 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
179 DAG.getConstant(NumBytes, getPointerTy()));
181 // Arguments go on the stack in reverse order, as specified by the ABI.
182 unsigned ArgOffset = 0;
183 SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32);
184 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
// Compute the address of this argument's slot: ESP + ArgOffset.
186 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
187 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
189 switch (getValueType(Args[i].second)) {
190 default: assert(0 && "Unexpected ValueType for argument!");
194 // Promote the integer to 32 bits. If the input type is signed use a
195 // sign extend, otherwise use a zero extend.
196 if (Args[i].second->isSigned())
197 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
199 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
204 // FIXME: Note that all of these stores are independent of each other.
205 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
206 Args[i].first, PtrOff);
211 // FIXME: Note that all of these stores are independent of each other.
212 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
213 Args[i].first, PtrOff);
// Result value types: the return value (if any) followed by the chain.
220 std::vector<MVT::ValueType> RetVals;
221 MVT::ValueType RetTyVT = getValueType(RetTy);
222 if (RetTyVT != MVT::isVoid)
223 RetVals.push_back(RetTyVT);
224 RetVals.push_back(MVT::Other);
226 SDOperand TheCall = SDOperand(DAG.getCall(RetVals, Chain, Callee), 0);
// Chain is result #1 for value-returning calls, result #0 for void calls.
227 Chain = TheCall.getValue(RetTyVT != MVT::isVoid);
228 Chain = DAG.getNode(ISD::ADJCALLSTACKUP, MVT::Other, Chain,
229 DAG.getConstant(NumBytes, getPointerTy()));
230 return std::make_pair(TheCall, Chain);
// LowerVAStart - va_start needs no code: the vararg area was laid out by
// LowerArguments, so just return its frame-index address and the input chain.
233 std::pair<SDOperand, SDOperand>
234 X86TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
235 // vastart just returns the address of the VarArgsFrameIndex slot.
236 return std::make_pair(DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32), Chain);
// LowerVAArgNext - Lower va_arg/va_next.  Loads the value at the current
// VAList pointer and/or advances the pointer past the argument's promoted
// size (varargs smaller than i32 were already promoted).  Returns
// {loaded value or advanced pointer, chain}.
239 std::pair<SDOperand,SDOperand> X86TargetLowering::
240 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
241 const Type *ArgTy, SelectionDAG &DAG) {
242 MVT::ValueType ArgVT = getValueType(ArgTy);
245 Result = DAG.getLoad(ArgVT, DAG.getEntryNode(), VAList);
// Advance amount: 4 bytes for i32, 8 for i64/f64.  (The assignments to Amt
// are on lines missing from this listing — TODO confirm.)
248 if (ArgVT == MVT::i32)
251 assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
252 "Other types should have been promoted for varargs!");
255 Result = DAG.getNode(ISD::ADD, VAList.getValueType(), VAList,
256 DAG.getConstant(Amt, VAList.getValueType()));
258 return std::make_pair(Result, Chain);
// LowerFrameReturnAddress - Lower llvm.returnaddress / llvm.frameaddress.
// Only Depth == 0 is supported; deeper queries yield constant 0.  A fixed
// stack object at offset -4 models the caller-pushed return address slot.
262 std::pair<SDOperand, SDOperand> X86TargetLowering::
263 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
266 if (Depth) // Depths > 0 not supported yet!
267 Result = DAG.getConstant(0, getPointerTy());
269 if (ReturnAddrIndex == 0) {
270 // Set up a frame object for the return address.
271 MachineFunction &MF = DAG.getMachineFunction();
// 4 bytes at offset -4: directly below the incoming argument area.
272 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
275 SDOperand RetAddrFI = DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
278 // Just load the return address
279 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI);
// Frame address case: the slot address minus 4 (presumably the frame base;
// the selecting branch is on a missing line — TODO confirm).
281 Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
282 DAG.getConstant(4, MVT::i32));
284 return std::make_pair(Result, Chain);
293 NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
295 //===--------------------------------------------------------------------===//
296 /// ISel - X86 specific code to select X86 machine instructions for
297 /// SelectionDAG operations.
// ISel - The X86 pattern-matching instruction selector.  Walks each basic
// block's SelectionDAG, emitting MachineInstrs via BuildMI, and tracks
// shared-subexpression registers and approximate register pressure to pick
// evaluation order.
299 class ISel : public SelectionDAGISel {
300 /// ContainsFPCode - Every instruction we select that uses or defines a FP
301 /// register should set this to true.
304 /// X86Lowering - This object fully describes how to lower LLVM code to an
305 /// X86-specific SelectionDAG.
306 X86TargetLowering X86Lowering;
308 /// RegPressureMap - This keeps an approximate count of the number of
309 /// registers required to evaluate each node in the graph.
310 std::map<SDNode*, unsigned> RegPressureMap;
312 /// ExprMap - As shared expressions are codegen'd, we keep track of which
313 /// vreg the value is produced in, so we only emit one copy of each compiled
315 std::map<SDOperand, unsigned> ExprMap;
// LoweredTokens - Chain/token values already selected, so side-effecting
// nodes are emitted exactly once.
316 std::set<SDOperand> LoweredTokens;
319 ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
// getRegPressure - Cached Sethi-Ullman-style estimate for a node (computed
// by ComputeRegPressure below).
322 unsigned getRegPressure(SDOperand O) {
323 return RegPressureMap[O.Val];
325 unsigned ComputeRegPressure(SDOperand O);
327 /// InstructionSelectBasicBlock - This callback is invoked by
328 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
329 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG) {
330 // While we're doing this, keep track of whether we see any FP code for
331 // FP_REG_KILL insertion.
332 ContainsFPCode = false;
334 // Compute the RegPressureMap, which is an approximation for the number of
335 // registers required to compute each node.
336 ComputeRegPressure(DAG.getRoot());
340 // Codegen the basic block.
341 Select(DAG.getRoot());
343 // Insert FP_REG_KILL instructions into basic blocks that need them. This
344 // only occurs due to the floating point stackifier not being aggressive
345 // enough to handle arbitrary global stackification.
347 // Currently we insert an FP_REG_KILL instruction into each block that
348 // uses or defines a floating point virtual register.
350 // When the global register allocators (like linear scan) finally update
351 // live variable analysis, we can keep floating point values in registers
352 // across basic blocks. This will be a huge win, but we are waiting on
353 // the global allocators before we can do this.
355 if (ContainsFPCode && BB->succ_size()) {
356 BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
360 // Clear state used for selection.
362 LoweredTokens.clear();
363 RegPressureMap.clear();
// Selection helpers — definitions follow later in this file.
366 void EmitCMP(SDOperand LHS, SDOperand RHS);
367 bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond);
368 void EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
369 unsigned RTrue, unsigned RFalse, unsigned RDest);
370 unsigned SelectExpr(SDOperand N);
371 bool SelectAddress(SDOperand N, X86AddressMode &AM);
372 void Select(SDOperand N);
376 // ComputeRegPressure - Compute the RegPressureMap, which is an approximation
377 // for the number of registers required to compute each node. This is basically
378 // computing a generalized form of the Sethi-Ullman number for each node.
// Memoized recursion: pressure(N) = max over operands, plus one per extra
// operand that ties the maximum (those values must be held live together).
379 unsigned ISel::ComputeRegPressure(SDOperand O) {
381 unsigned &Result = RegPressureMap[N];
382 if (Result) return Result; // Already computed (0 means "not yet").
384 // FIXME: Should operations like CALL (which clobber lots o regs) have a
385 // higher fixed cost??
// Leaf nodes: base case (the returned constant is on a missing line).
387 if (N->getNumOperands() == 0)
390 unsigned MaxRegUse = 0;
391 unsigned NumExtraMaxRegUsers = 0;
392 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
393 unsigned Regs = ComputeRegPressure(N->getOperand(i));
394 if (Regs > MaxRegUse) {
396 NumExtraMaxRegUsers = 0;
397 } else if (Regs == MaxRegUse) {
398 ++NumExtraMaxRegUsers;
402 return Result = MaxRegUse+NumExtraMaxRegUsers;
405 /// SelectAddress - Add the specified node to the specified addressing mode,
406 /// returning true if it cannot be done.
// Greedily folds FrameIndex / GlobalAddress / constant displacements, shifts
// (scale 2/4/8), multiplies by 3/5/9 (base+index*2/4/8), and ADDs into a
// single X86 [base + index*scale + disp] addressing mode.
407 bool ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
408 switch (N.getOpcode()) {
// Frame indices can serve as the base when no base register is taken yet.
410 case ISD::FrameIndex:
411 if (AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0) {
412 AM.BaseType = X86AddressMode::FrameIndexBase;
413 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
417 case ISD::GlobalAddress:
419 AM.GV = cast<GlobalAddressSDNode>(N)->getGlobal();
// Plain constants fold straight into the displacement.
424 AM.Disp += cast<ConstantSDNode>(N)->getValue();
// Shift-left by 1/2/3 becomes an index with scale 2/4/8 (case label for the
// SHL opcode is on a missing line — TODO confirm).
427 if (AM.IndexReg == 0 || AM.Scale == 1)
428 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
429 unsigned Val = CN->getValue();
430 if (Val == 1 || Val == 2 || Val == 3) {
432 SDOperand ShVal = N.Val->getOperand(0);
434 // Okay, we know that we have a scale by now. However, if the scaled
435 // value is an add of something and a constant, we can fold the
436 // constant into the disp field here.
437 if (ShVal.Val->getOpcode() == ISD::ADD &&
438 isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
439 AM.IndexReg = SelectExpr(ShVal.Val->getOperand(0));
440 ConstantSDNode *AddVal =
441 cast<ConstantSDNode>(ShVal.Val->getOperand(1));
// (X + C) << Val adds C << Val to the displacement.
442 AM.Disp += AddVal->getValue() << Val;
444 AM.IndexReg = SelectExpr(ShVal);
451 // X*[3,5,9] -> X+X*[2,4,8]
452 if (AM.IndexReg == 0 && AM.BaseType == X86AddressMode::RegBase &&
454 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
455 if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
456 AM.Scale = unsigned(CN->getValue())-1;
458 SDOperand MulVal = N.Val->getOperand(0);
461 // Okay, we know that we have a scale by now. However, if the scaled
462 // value is an add of something and a constant, we can fold the
463 // constant into the disp field here.
464 if (MulVal.Val->getOpcode() == ISD::ADD &&
465 isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
466 Reg = SelectExpr(MulVal.Val->getOperand(0));
467 ConstantSDNode *AddVal =
468 cast<ConstantSDNode>(MulVal.Val->getOperand(1));
469 AM.Disp += AddVal->getValue() * CN->getValue();
471 Reg = SelectExpr(N.Val->getOperand(0));
// Same register as base and index gives Reg + Reg*(Scale), i.e. Reg*[3,5,9].
474 AM.IndexReg = AM.Base.Reg = Reg;
// ADD: try folding both operands; restore the snapshot on failure
// (the restore appears to be on missing lines — TODO confirm).
480 X86AddressMode Backup = AM;
481 if (!SelectAddress(N.Val->getOperand(0), AM) &&
482 !SelectAddress(N.Val->getOperand(1), AM))
489 // Is the base register already occupied?
490 if (AM.BaseType != X86AddressMode::RegBase || AM.Base.Reg) {
491 // If so, check to see if the scale index register is set.
492 if (AM.IndexReg == 0) {
493 AM.IndexReg = SelectExpr(N);
498 // Otherwise, we cannot select it.
502 // Default, generate it as a register.
503 AM.BaseType = X86AddressMode::RegBase;
504 AM.Base.Reg = SelectExpr(N);
508 /// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
509 /// assuming that the temporary registers are in the 8-bit register class.
513 /// DestReg = logicalop Tmp1, Tmp2
// Used for FP conditions that need two SETcc results combined (e.g.
// ordered-equal = SETNP AND SETE).  SetCC1/SetCC2/LogicalOp are X86 opcodes.
515 static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
516 unsigned SetCC2, unsigned LogicalOp,
518 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
519 unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
520 unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
521 BuildMI(BB, SetCC1, 0, Tmp1);
522 BuildMI(BB, SetCC2, 0, Tmp2);
523 BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
526 /// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
527 /// condition codes match the specified SetCCOpcode. Note that some conditions
528 /// require multiple instructions to generate the correct value.
// Integer compares map 1:1 onto SETcc; FP compares (after FUCOMI-style flag
// setting) sometimes need two SETccs combined via Emit2SetCCsAndLogical
// because the parity flag encodes "unordered".
529 static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
530 ISD::CondCode SetCCOpcode, bool isFP) {
// Integer case: single SETcc opcode chosen from the condition.
533 switch (SetCCOpcode) {
534 default: assert(0 && "Illegal integer SetCC!");
535 case ISD::SETEQ: Opc = X86::SETEr; break;
536 case ISD::SETGT: Opc = X86::SETGr; break;
537 case ISD::SETGE: Opc = X86::SETGEr; break;
538 case ISD::SETLT: Opc = X86::SETLr; break;
539 case ISD::SETLE: Opc = X86::SETLEr; break;
540 case ISD::SETNE: Opc = X86::SETNEr; break;
541 case ISD::SETULT: Opc = X86::SETBr; break;
542 case ISD::SETUGT: Opc = X86::SETAr; break;
543 case ISD::SETULE: Opc = X86::SETBEr; break;
544 case ISD::SETUGE: Opc = X86::SETAEr; break;
547 // On a floating point condition, the flags are set as follows:
551 // 1 | 0 | 0 | X == Y
552 // 1 | 1 | 1 | unordered
554 switch (SetCCOpcode) {
555 default: assert(0 && "Invalid FP setcc!");
558 Opc = X86::SETEr; // True if ZF = 1
562 Opc = X86::SETAr; // True if CF = 0 and ZF = 0
566 Opc = X86::SETAEr; // True if CF = 0
570 Opc = X86::SETBr; // True if CF = 1
574 Opc = X86::SETBEr; // True if CF = 1 or ZF = 1
578 Opc = X86::SETNEr; // True if ZF = 0
581 Opc = X86::SETPr; // True if PF = 1
584 Opc = X86::SETNPr; // True if PF = 0
// Conditions below need two flag tests combined; each emits its own
// instructions and (per the original source) returns early.
586 case ISD::SETOEQ: // !PF & ZF
587 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
589 case ISD::SETOLT: // !PF & CF
590 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
592 case ISD::SETOLE: // !PF & (CF || ZF)
593 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
595 case ISD::SETUGT: // PF | (!ZF & !CF)
596 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
598 case ISD::SETUGE: // PF | !CF
599 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
601 case ISD::SETUNE: // PF | !ZF
602 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);
// Single-instruction path: emit the chosen SETcc into DestReg.
606 BuildMI(BB, Opc, 0, DestReg);
610 /// EmitBranchCC - Emit code into BB that arranges for control to transfer to
611 /// the Dest block if the Cond condition is true. If we cannot fold this
612 /// condition into the branch, return true.
614 bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
616 // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
617 // B) using two conditional branches instead of one condbr, two setcc's, and
619 if ((Cond.getOpcode() == ISD::OR ||
620 Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
621 // And and or set the flags for us, so there is no need to emit a TST of the
622 // result. It is only safe to do this if there is only a single use of the
623 // AND/OR though, otherwise we don't know it will be emitted here.
626 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
630 // Codegen br not C -> JE.
// XOR with all-ones is a boolean NOT; branch on the un-negated operand with
// the inverted jump.  The order of selecting Chain vs Cond is chosen by
// register pressure (both arms select the same operand; the Select(Chain)
// calls are on missing lines — TODO confirm).
631 if (Cond.getOpcode() == ISD::XOR)
632 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
633 if (NC->isAllOnesValue()) {
635 if (getRegPressure(Chain) > getRegPressure(Cond)) {
637 CondR = SelectExpr(Cond.Val->getOperand(0));
639 CondR = SelectExpr(Cond.Val->getOperand(0));
642 BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
643 BuildMI(BB, X86::JE, 1).addMBB(Dest);
// Everything below requires the condition to be a SetCC node.
647 SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond);
649 return true; // Can only handle simple setcc's so far.
653 // Handle integer conditions first.
654 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
655 switch (SetCC->getCondition()) {
656 default: assert(0 && "Illegal integer SetCC!");
657 case ISD::SETEQ: Opc = X86::JE; break;
658 case ISD::SETGT: Opc = X86::JG; break;
659 case ISD::SETGE: Opc = X86::JGE; break;
660 case ISD::SETLT: Opc = X86::JL; break;
661 case ISD::SETLE: Opc = X86::JLE; break;
662 case ISD::SETNE: Opc = X86::JNE; break;
663 case ISD::SETULT: Opc = X86::JB; break;
664 case ISD::SETUGT: Opc = X86::JA; break;
665 case ISD::SETULE: Opc = X86::JBE; break;
666 case ISD::SETUGE: Opc = X86::JAE; break;
669 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
670 BuildMI(BB, Opc, 1).addMBB(Dest);
// FP conditions: some need a second jump (Opc2) to also branch on parity,
// since FUCOMI-style compares report "unordered" via PF.
674 unsigned Opc2 = 0; // Second branch if needed.
676 // On a floating point condition, the flags are set as follows:
680 // 1 | 0 | 0 | X == Y
681 // 1 | 1 | 1 | unordered
683 switch (SetCC->getCondition()) {
684 default: assert(0 && "Invalid FP setcc!");
686 case ISD::SETEQ: Opc = X86::JE; break; // True if ZF = 1
688 case ISD::SETGT: Opc = X86::JA; break; // True if CF = 0 and ZF = 0
690 case ISD::SETGE: Opc = X86::JAE; break; // True if CF = 0
692 case ISD::SETLT: Opc = X86::JB; break; // True if CF = 1
694 case ISD::SETLE: Opc = X86::JBE; break; // True if CF = 1 or ZF = 1
696 case ISD::SETNE: Opc = X86::JNE; break; // True if ZF = 0
697 case ISD::SETUO: Opc = X86::JP; break; // True if PF = 1
698 case ISD::SETO: Opc = X86::JNP; break; // True if PF = 0
699 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
700 Opc = X86::JA; // ZF = 0 & CF = 0
701 Opc2 = X86::JP; // PF = 1
703 case ISD::SETUGE: // PF = 1 | CF = 0
704 Opc = X86::JAE; // CF = 0
705 Opc2 = X86::JP; // PF = 1
707 case ISD::SETUNE: // PF = 1 | ZF = 0
708 Opc = X86::JNE; // ZF = 0
709 Opc2 = X86::JP; // PF = 1
// "Ordered" combinations of two conditions can't be expressed as jumps
// that both target Dest, so give up and let the caller materialize the
// setcc value instead.
711 case ISD::SETOEQ: // PF = 0 & ZF = 1
714 return true; // FIXME: Emit more efficient code for this branch.
715 case ISD::SETOLT: // PF = 0 & CF = 1
718 return true; // FIXME: Emit more efficient code for this branch.
719 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
722 return true; // FIXME: Emit more efficient code for this branch.
726 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
727 BuildMI(BB, Opc, 1).addMBB(Dest);
729 BuildMI(BB, Opc2, 1).addMBB(Dest);
733 /// EmitSelectCC - Emit code into BB that performs a select operation between
734 /// the two registers RTrue and RFalse, generating a result into RDest. Return
735 /// true if the fold cannot be performed.
// Strategy: if Cond is a SetCC expressible as one X86 condition code, emit
// CMP + a single CMOVcc from the tables below; otherwise materialize Cond
// into a register, TEST it, and emit CMOVE with swapped operands.
737 void ISel::EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
738 unsigned RTrue, unsigned RFalse, unsigned RDest) {
// Enumerators index the CMOVTAB* tables below, so the orders must match.
740 EQ, NE, LT, LE, GT, GE, B, BE, A, AE, P, NP,
742 } CondCode = NOT_SET;
744 static const unsigned CMOVTAB16[] = {
745 X86::CMOVE16rr, X86::CMOVNE16rr, X86::CMOVL16rr, X86::CMOVLE16rr,
746 X86::CMOVG16rr, X86::CMOVGE16rr, X86::CMOVB16rr, X86::CMOVBE16rr,
747 X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
749 static const unsigned CMOVTAB32[] = {
750 X86::CMOVE32rr, X86::CMOVNE32rr, X86::CMOVL32rr, X86::CMOVLE32rr,
751 X86::CMOVG32rr, X86::CMOVGE32rr, X86::CMOVB32rr, X86::CMOVBE32rr,
752 X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
// FP table: x87 has no FCMOVL/LE/G/GE, so those entries are 0 and force the
// fallback path below (the "Opc == 0" check).
754 static const unsigned CMOVTABFP[] = {
755 X86::FCMOVE , X86::FCMOVNE, /*missing*/0, /*missing*/0,
756 /*missing*/0, /*missing*/0, X86::FCMOVB , X86::FCMOVBE,
757 X86::FCMOVA , X86::FCMOVAE, X86::FCMOVP , X86::FCMOVNP
760 if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond)) {
761 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
762 switch (SetCC->getCondition()) {
763 default: assert(0 && "Unknown integer comparison!");
764 case ISD::SETEQ: CondCode = EQ; break;
765 case ISD::SETGT: CondCode = GT; break;
766 case ISD::SETGE: CondCode = GE; break;
767 case ISD::SETLT: CondCode = LT; break;
768 case ISD::SETLE: CondCode = LE; break;
769 case ISD::SETNE: CondCode = NE; break;
770 case ISD::SETULT: CondCode = B; break;
771 case ISD::SETUGT: CondCode = A; break;
772 case ISD::SETULE: CondCode = BE; break;
773 case ISD::SETUGE: CondCode = AE; break;
776 // On a floating point condition, the flags are set as follows:
780 // 1 | 0 | 0 | X == Y
781 // 1 | 1 | 1 | unordered
783 switch (SetCC->getCondition()) {
784 default: assert(0 && "Unknown FP comparison!");
786 case ISD::SETEQ: CondCode = EQ; break; // True if ZF = 1
788 case ISD::SETGT: CondCode = A; break; // True if CF = 0 and ZF = 0
790 case ISD::SETGE: CondCode = AE; break; // True if CF = 0
792 case ISD::SETLT: CondCode = B; break; // True if CF = 1
794 case ISD::SETLE: CondCode = BE; break; // True if CF = 1 or ZF = 1
796 case ISD::SETNE: CondCode = NE; break; // True if ZF = 0
797 case ISD::SETUO: CondCode = P; break; // True if PF = 1
798 case ISD::SETO: CondCode = NP; break; // True if PF = 0
// These need two flag tests, so CondCode stays NOT_SET and the generic
// fallback below handles them.
799 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
800 case ISD::SETUGE: // PF = 1 | CF = 0
801 case ISD::SETUNE: // PF = 1 | ZF = 0
802 case ISD::SETOEQ: // PF = 0 & ZF = 1
803 case ISD::SETOLT: // PF = 0 & CF = 1
804 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
805 // We cannot emit this comparison as a single cmov.
// Foldable condition: pick the CMOVcc opcode for the select's value type.
812 if (CondCode != NOT_SET) {
814 default: assert(0 && "Cannot select this type!");
815 case MVT::i16: Opc = CMOVTAB16[CondCode]; break;
816 case MVT::i32: Opc = CMOVTAB32[CondCode]; break;
818 case MVT::f64: Opc = CMOVTABFP[CondCode]; break;
822 // Finally, if we weren't able to fold this, just emit the condition and test
824 if (CondCode == NOT_SET || Opc == 0) {
825 // Get the condition into the zero flag.
826 unsigned CondReg = SelectExpr(Cond);
827 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
// TEST sets ZF when the condition is false, so use CMOVE with the operands
// swapped (see std::swap below).
830 default: assert(0 && "Cannot select this type!");
831 case MVT::i16: Opc = X86::CMOVE16rr; break;
832 case MVT::i32: Opc = X86::CMOVE32rr; break;
834 case MVT::f64: Opc = X86::FCMOVE; break;
837 // FIXME: CMP R, 0 -> TEST R, R
838 EmitCMP(Cond.getOperand(0), Cond.getOperand(1));
839 std::swap(RTrue, RFalse);
841 BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
// EmitCMP - Emit a flag-setting compare of LHS against RHS: CMPri when RHS
// is an integer constant, otherwise CMPrr/FUCOMI on two selected registers.
// Operand evaluation order follows register pressure (heavier side first).
844 void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) {
// Constant RHS: fold the immediate directly into the compare.
846 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
848 switch (RHS.getValueType()) {
851 case MVT::i8: Opc = X86::CMP8ri; break;
852 case MVT::i16: Opc = X86::CMP16ri; break;
853 case MVT::i32: Opc = X86::CMP32ri; break;
856 unsigned Tmp1 = SelectExpr(LHS);
857 BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
// Register/register compare; f64 uses FUCOMI to set EFLAGS directly.
862 switch (LHS.getValueType()) {
863 default: assert(0 && "Cannot compare this value!");
865 case MVT::i8: Opc = X86::CMP8rr; break;
866 case MVT::i16: Opc = X86::CMP16rr; break;
867 case MVT::i32: Opc = X86::CMP32rr; break;
869 case MVT::f64: Opc = X86::FUCOMIr; break;
// Evaluate the higher-pressure operand first to minimize live registers.
872 if (getRegPressure(LHS) > getRegPressure(RHS)) {
873 Tmp1 = SelectExpr(LHS);
874 Tmp2 = SelectExpr(RHS);
876 Tmp2 = SelectExpr(RHS);
877 Tmp1 = SelectExpr(LHS);
879 BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
882 unsigned ISel::SelectExpr(SDOperand N) {
884 unsigned Tmp1, Tmp2, Tmp3;
887 SDNode *Node = N.Val;
889 if (Node->getOpcode() == ISD::CopyFromReg)
890 // Just use the specified register as our input.
891 return dyn_cast<CopyRegSDNode>(Node)->getReg();
893 // If there are multiple uses of this expression, memorize the
894 // register it is code generated in, instead of emitting it multiple
896 // FIXME: Disabled for our current selection model.
897 if (1 || !Node->hasOneUse()) {
898 unsigned &Reg = ExprMap[N];
901 if (N.getOpcode() != ISD::CALL)
902 Reg = Result = (N.getValueType() != MVT::Other) ?
903 MakeReg(N.getValueType()) : 1;
905 // If this is a call instruction, make sure to prepare ALL of the result
906 // values as well as the chain.
907 if (Node->getNumValues() == 1)
908 Reg = Result = 1; // Void call, just a chain.
910 Result = MakeReg(Node->getValueType(0));
911 ExprMap[N.getValue(0)] = Result;
912 for (unsigned i = 1, e = N.Val->getNumValues()-1; i != e; ++i)
913 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
914 ExprMap[SDOperand(Node, Node->getNumValues()-1)] = 1;
918 Result = MakeReg(N.getValueType());
921 switch (N.getOpcode()) {
924 assert(0 && "Node not handled!\n");
925 case ISD::FrameIndex:
926 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
927 addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
929 case ISD::ConstantPool:
930 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
931 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
933 case ISD::ConstantFP:
934 ContainsFPCode = true;
935 Tmp1 = Result; // Intermediate Register
936 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
937 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
938 Tmp1 = MakeReg(MVT::f64);
940 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
941 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
942 BuildMI(BB, X86::FLD0, 0, Tmp1);
943 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
944 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
945 BuildMI(BB, X86::FLD1, 0, Tmp1);
947 assert(0 && "Unexpected constant!");
949 BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
952 switch (N.getValueType()) {
953 default: assert(0 && "Cannot use constants of this type!");
955 case MVT::i8: Opc = X86::MOV8ri; break;
956 case MVT::i16: Opc = X86::MOV16ri; break;
957 case MVT::i32: Opc = X86::MOV32ri; break;
959 BuildMI(BB, Opc, 1,Result).addImm(cast<ConstantSDNode>(N)->getValue());
961 case ISD::GlobalAddress: {
962 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
963 BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
966 case ISD::ExternalSymbol: {
967 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
968 BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
972 Tmp1 = SelectExpr(N.getOperand(0));
973 BuildMI(BB, X86::FpMOV, 1, Result).addReg(Tmp1);
975 case ISD::ZERO_EXTEND: {
976 int DestIs16 = N.getValueType() == MVT::i16;
977 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
978 Tmp1 = SelectExpr(N.getOperand(0));
980 // FIXME: This hack is here for zero extension casts from bool to i8. This
981 // would not be needed if bools were promoted by Legalize.
982 if (N.getValueType() == MVT::i8) {
983 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
987 static const unsigned Opc[3] = {
988 X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
990 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
993 case ISD::SIGN_EXTEND: {
994 int DestIs16 = N.getValueType() == MVT::i16;
995 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
997 // FIXME: Legalize should promote bools to i8!
998 assert(N.getOperand(0).getValueType() != MVT::i1 &&
999 "Sign extend from bool not implemented!");
1001 static const unsigned Opc[3] = {
1002 X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
1004 Tmp1 = SelectExpr(N.getOperand(0));
1005 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
1009 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
1010 // a move out of AX or AL.
1011 switch (N.getOperand(0).getValueType()) {
1012 default: assert(0 && "Unknown truncate!");
1013 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
1014 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
1015 case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
1017 Tmp1 = SelectExpr(N.getOperand(0));
1018 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
1020 switch (N.getValueType()) {
1021 default: assert(0 && "Unknown truncate!");
1023 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
1024 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
1026 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
1030 // Truncate from double to float by storing to memory as float,
1031 // then reading it back into a register.
1033 // Create as stack slot to use.
1034 // FIXME: This should automatically be made by the Legalizer!
1035 Tmp1 = TLI.getTargetData().getFloatAlignment();
1036 Tmp2 = BB->getParent()->getFrameInfo()->CreateStackObject(4, Tmp1);
1038 // Codegen the input.
1039 Tmp1 = SelectExpr(N.getOperand(0));
1041 // Emit the store, then the reload.
1042 addFrameReference(BuildMI(BB, X86::FST32m, 5), Tmp2).addReg(Tmp1);
1043 addFrameReference(BuildMI(BB, X86::FLD32m, 5, Result), Tmp2);
1046 case ISD::SINT_TO_FP:
1047 case ISD::UINT_TO_FP: {
1048 // FIXME: Most of this grunt work should be done by legalize!
1049 ContainsFPCode = true;
1051 // Promote the integer to a type supported by FLD. We do this because there
1052 // are no unsigned FLD instructions, so we must promote an unsigned value to
1053 // a larger signed value, then use FLD on the larger value.
1055 MVT::ValueType PromoteType = MVT::Other;
1056 MVT::ValueType SrcTy = N.getOperand(0).getValueType();
1057 unsigned PromoteOpcode = 0;
1058 unsigned RealDestReg = Result;
1062 // We don't have the facilities for directly loading byte sized data from
1063 // memory (even signed). Promote it to 16 bits.
1064 PromoteType = MVT::i16;
1065 PromoteOpcode = Node->getOpcode() == ISD::SINT_TO_FP ?
1066 X86::MOVSX16rr8 : X86::MOVZX16rr8;
1069 if (Node->getOpcode() == ISD::UINT_TO_FP) {
1070 PromoteType = MVT::i32;
1071 PromoteOpcode = X86::MOVZX32rr16;
1075 // Don't fild into the real destination.
1076 if (Node->getOpcode() == ISD::UINT_TO_FP)
1077 Result = MakeReg(Node->getValueType(0));
1081 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
1083 if (PromoteType != MVT::Other) {
1084 Tmp2 = MakeReg(PromoteType);
1085 BuildMI(BB, PromoteOpcode, 1, Tmp2).addReg(Tmp1);
1086 SrcTy = PromoteType;
1090 // Spill the integer to memory and reload it from there.
1091 unsigned Size = MVT::getSizeInBits(SrcTy)/8;
1092 MachineFunction *F = BB->getParent();
1093 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
1097 // FIXME: this won't work for cast [u]long to FP
1098 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1099 FrameIdx).addReg(Tmp1);
1100 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1101 FrameIdx, 4).addReg(Tmp1+1);
1102 addFrameReference(BuildMI(BB, X86::FILD64m, 5, Result), FrameIdx);
1105 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1106 FrameIdx).addReg(Tmp1);
1107 addFrameReference(BuildMI(BB, X86::FILD32m, 5, Result), FrameIdx);
1110 addFrameReference(BuildMI(BB, X86::MOV16mr, 5),
1111 FrameIdx).addReg(Tmp1);
1112 addFrameReference(BuildMI(BB, X86::FILD16m, 5, Result), FrameIdx);
1114 default: break; // No promotion required.
1117 if (Node->getOpcode() == ISD::UINT_TO_FP && SrcTy == MVT::i32) {
1118 // If this is a cast from uint -> double, we need to be careful when if
1119 // the "sign" bit is set. If so, we don't want to make a negative number,
1120 // we want to make a positive number. Emit code to add an offset if the
1123 // Compute whether the sign bit is set by shifting the reg right 31 bits.
1124 unsigned IsNeg = MakeReg(MVT::i32);
1125 BuildMI(BB, X86::SHR32ri, 2, IsNeg).addReg(Tmp1).addImm(31);
1127 // Create a CP value that has the offset in one word and 0 in the other.
1128 static ConstantInt *TheOffset = ConstantUInt::get(Type::ULongTy,
1129 0x4f80000000000000ULL);
1130 unsigned CPI = F->getConstantPool()->getConstantPoolIndex(TheOffset);
1131 BuildMI(BB, X86::FADD32m, 5, RealDestReg).addReg(Result)
1132 .addConstantPoolIndex(CPI).addZImm(4).addReg(IsNeg).addSImm(0);
1134 } else if (Node->getOpcode() == ISD::UINT_TO_FP && SrcTy == MVT::i64) {
1135 // We need special handling for unsigned 64-bit integer sources. If the
1136 // input number has the "sign bit" set, then we loaded it incorrectly as a
1137 // negative 64-bit number. In this case, add an offset value.
1139 // Emit a test instruction to see if the dynamic input value was signed.
1140 BuildMI(BB, X86::TEST32rr, 2).addReg(Tmp1+1).addReg(Tmp1+1);
1142 // If the sign bit is set, get a pointer to an offset, otherwise get a
1143 // pointer to a zero.
1144 MachineConstantPool *CP = F->getConstantPool();
1145 unsigned Zero = MakeReg(MVT::i32);
1146 Constant *Null = Constant::getNullValue(Type::UIntTy);
1147 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Zero),
1148 CP->getConstantPoolIndex(Null));
1149 unsigned Offset = MakeReg(MVT::i32);
1150 Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
1152 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Offset),
1153 CP->getConstantPoolIndex(OffsetCst));
1154 unsigned Addr = MakeReg(MVT::i32);
1155 BuildMI(BB, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);
1157 // Load the constant for an add. FIXME: this could make an 'fadd' that
1158 // reads directly from memory, but we don't support these yet.
1159 unsigned ConstReg = MakeReg(MVT::f64);
1160 addDirectMem(BuildMI(BB, X86::FLD32m, 4, ConstReg), Addr);
1162 BuildMI(BB, X86::FpADD, 2, RealDestReg).addReg(ConstReg).addReg(Result);
1166 case ISD::FP_TO_SINT:
1167 case ISD::FP_TO_UINT: {
1168 // FIXME: Most of this grunt work should be done by legalize!
1169 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
1171 // Change the floating point control register to use "round towards zero"
1172 // mode when truncating to an integer value.
1174 MachineFunction *F = BB->getParent();
1175 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
1176 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
1178 // Load the old value of the high byte of the control word...
1179 unsigned HighPartOfCW = MakeReg(MVT::i8);
1180 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, HighPartOfCW),
1183 // Set the high part to be round to zero...
1184 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
1185 CWFrameIdx, 1).addImm(12);
1187 // Reload the modified control word now...
1188 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1190 // Restore the memory image of control word to original value
1191 addFrameReference(BuildMI(BB, X86::MOV8mr, 5),
1192 CWFrameIdx, 1).addReg(HighPartOfCW);
1194 // We don't have the facilities for directly storing byte sized data to
1195 // memory. Promote it to 16 bits. We also must promote unsigned values to
1196 // larger classes because we only have signed FP stores.
1197 MVT::ValueType StoreClass = Node->getValueType(0);
1198 if (StoreClass == MVT::i8 || Node->getOpcode() == ISD::FP_TO_UINT)
1199 switch (StoreClass) {
1200 case MVT::i8: StoreClass = MVT::i16; break;
1201 case MVT::i16: StoreClass = MVT::i32; break;
1202 case MVT::i32: StoreClass = MVT::i64; break;
1203 // The following treatment of cLong may not be perfectly right,
1204 // but it survives chains of casts of the form
1205 // double->ulong->double.
1206 case MVT::i64: StoreClass = MVT::i64; break;
1207 default: assert(0 && "Unknown store class!");
1210 // Spill the integer to memory and reload it from there.
1211 unsigned Size = MVT::getSizeInBits(StoreClass)/8;
1212 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
1214 switch (StoreClass) {
1215 default: assert(0 && "Unknown store class!");
1217 addFrameReference(BuildMI(BB, X86::FIST16m, 5), FrameIdx).addReg(Tmp1);
1220 addFrameReference(BuildMI(BB, X86::FIST32m, 5), FrameIdx).addReg(Tmp1);
1223 addFrameReference(BuildMI(BB, X86::FISTP64m, 5), FrameIdx).addReg(Tmp1);
1227 switch (Node->getValueType(0)) {
1229 assert(0 && "Unknown integer type!");
1231 // FIXME: this isn't gunna work.
1232 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
1233 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result+1), FrameIdx, 4);
1235 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
1238 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Result), FrameIdx);
1241 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Result), FrameIdx);
1245 // Reload the original control word now.
1246 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1250 // See if we can codegen this as an LEA to fold operations together.
1251 if (N.getValueType() == MVT::i32) {
1253 if (!SelectAddress(N.getOperand(0), AM) &&
1254 !SelectAddress(N.getOperand(1), AM)) {
1255 // If this is not just an add, emit the LEA. For a simple add (like
1256 // reg+reg or reg+imm), we just emit an add. It might be a good idea to
1257 // leave this as LEA, then peephole it to 'ADD' after two address elim
1259 if (AM.Scale != 1 || AM.BaseType == X86AddressMode::FrameIndexBase ||
1260 AM.GV || (AM.Base.Reg && AM.IndexReg && AM.Disp)) {
1261 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
1267 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1269 if (CN->getValue() == 1) { // add X, 1 -> inc X
1270 switch (N.getValueType()) {
1271 default: assert(0 && "Cannot integer add this type!");
1272 case MVT::i8: Opc = X86::INC8r; break;
1273 case MVT::i16: Opc = X86::INC16r; break;
1274 case MVT::i32: Opc = X86::INC32r; break;
1276 } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
1277 switch (N.getValueType()) {
1278 default: assert(0 && "Cannot integer add this type!");
1279 case MVT::i8: Opc = X86::DEC8r; break;
1280 case MVT::i16: Opc = X86::DEC16r; break;
1281 case MVT::i32: Opc = X86::DEC32r; break;
1286 Tmp1 = SelectExpr(N.getOperand(0));
1287 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1291 switch (N.getValueType()) {
1292 default: assert(0 && "Cannot add this type!");
1293 case MVT::i8: Opc = X86::ADD8ri; break;
1294 case MVT::i16: Opc = X86::ADD16ri; break;
1295 case MVT::i32: Opc = X86::ADD32ri; break;
1298 Tmp1 = SelectExpr(N.getOperand(0));
1299 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1304 switch (N.getValueType()) {
1305 default: assert(0 && "Cannot add this type!");
1306 case MVT::i8: Opc = X86::ADD8rr; break;
1307 case MVT::i16: Opc = X86::ADD16rr; break;
1308 case MVT::i32: Opc = X86::ADD32rr; break;
1310 case MVT::f64: Opc = X86::FpADD; break;
1313 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1314 Tmp1 = SelectExpr(N.getOperand(0));
1315 Tmp2 = SelectExpr(N.getOperand(1));
1317 Tmp2 = SelectExpr(N.getOperand(1));
1318 Tmp1 = SelectExpr(N.getOperand(0));
1321 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1324 if (MVT::isInteger(N.getValueType()))
1325 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
1326 if (CN->isNullValue()) { // 0 - N -> neg N
1327 switch (N.getValueType()) {
1328 default: assert(0 && "Cannot sub this type!");
1330 case MVT::i8: Opc = X86::NEG8r; break;
1331 case MVT::i16: Opc = X86::NEG16r; break;
1332 case MVT::i32: Opc = X86::NEG32r; break;
1334 Tmp1 = SelectExpr(N.getOperand(1));
1335 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1339 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1340 switch (N.getValueType()) {
1341 default: assert(0 && "Cannot sub this type!");
1343 case MVT::i8: Opc = X86::SUB8ri; break;
1344 case MVT::i16: Opc = X86::SUB16ri; break;
1345 case MVT::i32: Opc = X86::SUB32ri; break;
1347 Tmp1 = SelectExpr(N.getOperand(0));
1348 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1352 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1353 Tmp1 = SelectExpr(N.getOperand(0));
1354 Tmp2 = SelectExpr(N.getOperand(1));
1356 Tmp2 = SelectExpr(N.getOperand(1));
1357 Tmp1 = SelectExpr(N.getOperand(0));
1360 switch (N.getValueType()) {
1361 default: assert(0 && "Cannot add this type!");
1363 case MVT::i8: Opc = X86::SUB8rr; break;
1364 case MVT::i16: Opc = X86::SUB16rr; break;
1365 case MVT::i32: Opc = X86::SUB32rr; break;
1367 case MVT::f64: Opc = X86::FpSUB; break;
1369 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1373 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1374 switch (N.getValueType()) {
1375 default: assert(0 && "Cannot add this type!");
1377 case MVT::i8: Opc = X86::AND8ri; break;
1378 case MVT::i16: Opc = X86::AND16ri; break;
1379 case MVT::i32: Opc = X86::AND32ri; break;
1381 Tmp1 = SelectExpr(N.getOperand(0));
1382 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1386 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1387 Tmp1 = SelectExpr(N.getOperand(0));
1388 Tmp2 = SelectExpr(N.getOperand(1));
1390 Tmp2 = SelectExpr(N.getOperand(1));
1391 Tmp1 = SelectExpr(N.getOperand(0));
1394 switch (N.getValueType()) {
1395 default: assert(0 && "Cannot add this type!");
1397 case MVT::i8: Opc = X86::AND8rr; break;
1398 case MVT::i16: Opc = X86::AND16rr; break;
1399 case MVT::i32: Opc = X86::AND32rr; break;
1401 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1404 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1405 Tmp1 = SelectExpr(N.getOperand(0));
1406 switch (N.getValueType()) {
1407 default: assert(0 && "Cannot add this type!");
1409 case MVT::i8: Opc = X86::OR8ri; break;
1410 case MVT::i16: Opc = X86::OR16ri; break;
1411 case MVT::i32: Opc = X86::OR32ri; break;
1413 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1417 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1418 Tmp1 = SelectExpr(N.getOperand(0));
1419 Tmp2 = SelectExpr(N.getOperand(1));
1421 Tmp2 = SelectExpr(N.getOperand(1));
1422 Tmp1 = SelectExpr(N.getOperand(0));
1425 switch (N.getValueType()) {
1426 default: assert(0 && "Cannot add this type!");
1428 case MVT::i8: Opc = X86::OR8rr; break;
1429 case MVT::i16: Opc = X86::OR16rr; break;
1430 case MVT::i32: Opc = X86::OR32rr; break;
1432 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1435 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1436 Tmp1 = SelectExpr(N.getOperand(0));
1438 if (CN->isAllOnesValue()) {
1439 switch (N.getValueType()) {
1440 default: assert(0 && "Cannot add this type!");
1442 case MVT::i8: Opc = X86::NOT8r; break;
1443 case MVT::i16: Opc = X86::NOT16r; break;
1444 case MVT::i32: Opc = X86::NOT32r; break;
1446 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1450 switch (N.getValueType()) {
1451 default: assert(0 && "Cannot xor this type!");
1453 case MVT::i8: Opc = X86::XOR8ri; break;
1454 case MVT::i16: Opc = X86::XOR16ri; break;
1455 case MVT::i32: Opc = X86::XOR32ri; break;
1457 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1461 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1462 Tmp1 = SelectExpr(N.getOperand(0));
1463 Tmp2 = SelectExpr(N.getOperand(1));
1465 Tmp2 = SelectExpr(N.getOperand(1));
1466 Tmp1 = SelectExpr(N.getOperand(0));
1469 switch (N.getValueType()) {
1470 default: assert(0 && "Cannot add this type!");
1472 case MVT::i8: Opc = X86::XOR8rr; break;
1473 case MVT::i16: Opc = X86::XOR16rr; break;
1474 case MVT::i32: Opc = X86::XOR32rr; break;
1476 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1480 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1482 switch (N.getValueType()) {
1483 default: assert(0 && "Cannot multiply this type!");
1484 case MVT::i8: break;
1485 case MVT::i16: Opc = X86::IMUL16rri; break;
1486 case MVT::i32: Opc = X86::IMUL32rri; break;
1489 Tmp1 = SelectExpr(N.getOperand(0));
1490 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1495 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1496 Tmp1 = SelectExpr(N.getOperand(0));
1497 Tmp2 = SelectExpr(N.getOperand(1));
1499 Tmp2 = SelectExpr(N.getOperand(1));
1500 Tmp1 = SelectExpr(N.getOperand(0));
1502 switch (N.getValueType()) {
1503 default: assert(0 && "Cannot add this type!");
1505 // Must use the MUL instruction, which forces use of AL.
1506 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
1507 BuildMI(BB, X86::MUL8r, 1).addReg(Tmp2);
1508 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1510 case MVT::i16: Opc = X86::IMUL16rr; break;
1511 case MVT::i32: Opc = X86::IMUL32rr; break;
1513 case MVT::f64: Opc = X86::FpMUL; break;
1515 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1519 if (N.getValueType() != MVT::i1 && N.getValueType() != MVT::i8) {
1520 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
1521 Tmp2 = SelectExpr(N.getOperand(1));
1522 Tmp3 = SelectExpr(N.getOperand(2));
1524 Tmp3 = SelectExpr(N.getOperand(2));
1525 Tmp2 = SelectExpr(N.getOperand(1));
1527 EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result);
1530 // FIXME: This should not be implemented here, it should be in the generic
1532 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
1533 Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1535 Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1538 Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1540 Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1543 unsigned TmpReg = MakeReg(MVT::i16);
1544 EmitSelectCC(N.getOperand(0), MVT::i16, Tmp2, Tmp3, TmpReg);
1545 // FIXME: need subregs to do better than this!
1546 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(TmpReg);
1547 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1555 if (N.getOpcode() == ISD::SDIV)
1556 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1557 // FIXME: These special cases should be handled by the lowering impl!
1558 unsigned RHS = CN->getValue();
1564 if (RHS && (RHS & (RHS-1)) == 0) { // Signed division by power of 2?
1565 unsigned Log = log2(RHS);
1566 unsigned TmpReg = MakeReg(N.getValueType());
1567 unsigned SAROpc, SHROpc, ADDOpc, NEGOpc;
1568 switch (N.getValueType()) {
1569 default: assert("Unknown type to signed divide!");
1571 SAROpc = X86::SAR8ri;
1572 SHROpc = X86::SHR8ri;
1573 ADDOpc = X86::ADD8rr;
1574 NEGOpc = X86::NEG8r;
1577 SAROpc = X86::SAR16ri;
1578 SHROpc = X86::SHR16ri;
1579 ADDOpc = X86::ADD16rr;
1580 NEGOpc = X86::NEG16r;
1583 SAROpc = X86::SAR32ri;
1584 SHROpc = X86::SHR32ri;
1585 ADDOpc = X86::ADD32rr;
1586 NEGOpc = X86::NEG32r;
1589 Tmp1 = SelectExpr(N.getOperand(0));
1590 BuildMI(BB, SAROpc, 2, TmpReg).addReg(Tmp1).addImm(Log-1);
1591 unsigned TmpReg2 = MakeReg(N.getValueType());
1592 BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(32-Log);
1593 unsigned TmpReg3 = MakeReg(N.getValueType());
1594 BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
1596 unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
1597 BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
1599 BuildMI(BB, NEGOpc, 1, Result).addReg(TmpReg4);
1604 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1605 Tmp1 = SelectExpr(N.getOperand(0));
1606 Tmp2 = SelectExpr(N.getOperand(1));
1608 Tmp2 = SelectExpr(N.getOperand(1));
1609 Tmp1 = SelectExpr(N.getOperand(0));
1612 bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
1613 bool isDiv = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
1614 unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
1615 switch (N.getValueType()) {
1616 default: assert(0 && "Cannot sdiv this type!");
1618 DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
1621 MovOpcode = X86::MOV8rr;
1622 ClrOpcode = X86::MOV8ri;
1623 SExtOpcode = X86::CBW;
1626 DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
1629 MovOpcode = X86::MOV16rr;
1630 ClrOpcode = X86::MOV16ri;
1631 SExtOpcode = X86::CWD;
1634 DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
1637 MovOpcode = X86::MOV32rr;
1638 ClrOpcode = X86::MOV32ri;
1639 SExtOpcode = X86::CDQ;
1641 case MVT::i64: assert(0 && "FIXME: implement i64 DIV/REM libcalls!");
1644 if (N.getOpcode() == ISD::SDIV)
1645 BuildMI(BB, X86::FpDIV, 2, Result).addReg(Tmp1).addReg(Tmp2);
1647 assert(0 && "FIXME: Emit frem libcall to fmod!");
1651 // Set up the low part.
1652 BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);
1655 // Sign extend the low part into the high part.
1656 BuildMI(BB, SExtOpcode, 0);
1658 // Zero out the high part, effectively zero extending the input.
1659 BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
1662 // Emit the DIV/IDIV instruction.
1663 BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
1665 // Get the result of the divide or rem.
1666 BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
1671 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1672 switch (N.getValueType()) {
1673 default: assert(0 && "Cannot shift this type!");
1674 case MVT::i8: Opc = X86::SHL8ri; break;
1675 case MVT::i16: Opc = X86::SHL16ri; break;
1676 case MVT::i32: Opc = X86::SHL32ri; break;
1678 Tmp1 = SelectExpr(N.getOperand(0));
1679 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1683 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1684 Tmp1 = SelectExpr(N.getOperand(0));
1685 Tmp2 = SelectExpr(N.getOperand(1));
1687 Tmp2 = SelectExpr(N.getOperand(1));
1688 Tmp1 = SelectExpr(N.getOperand(0));
1691 switch (N.getValueType()) {
1692 default: assert(0 && "Cannot shift this type!");
1693 case MVT::i8 : Opc = X86::SHL8rCL; break;
1694 case MVT::i16: Opc = X86::SHL16rCL; break;
1695 case MVT::i32: Opc = X86::SHL32rCL; break;
1697 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1698 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1701 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1702 switch (N.getValueType()) {
1703 default: assert(0 && "Cannot shift this type!");
1704 case MVT::i8: Opc = X86::SHR8ri; break;
1705 case MVT::i16: Opc = X86::SHR16ri; break;
1706 case MVT::i32: Opc = X86::SHR32ri; break;
1708 Tmp1 = SelectExpr(N.getOperand(0));
1709 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1713 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1714 Tmp1 = SelectExpr(N.getOperand(0));
1715 Tmp2 = SelectExpr(N.getOperand(1));
1717 Tmp2 = SelectExpr(N.getOperand(1));
1718 Tmp1 = SelectExpr(N.getOperand(0));
1721 switch (N.getValueType()) {
1722 default: assert(0 && "Cannot shift this type!");
1723 case MVT::i8 : Opc = X86::SHR8rCL; break;
1724 case MVT::i16: Opc = X86::SHR16rCL; break;
1725 case MVT::i32: Opc = X86::SHR32rCL; break;
1727 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1728 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1731 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1732 switch (N.getValueType()) {
1733 default: assert(0 && "Cannot shift this type!");
1734 case MVT::i8: Opc = X86::SAR8ri; break;
1735 case MVT::i16: Opc = X86::SAR16ri; break;
1736 case MVT::i32: Opc = X86::SAR32ri; break;
1738 Tmp1 = SelectExpr(N.getOperand(0));
1739 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1743 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1744 Tmp1 = SelectExpr(N.getOperand(0));
1745 Tmp2 = SelectExpr(N.getOperand(1));
1747 Tmp2 = SelectExpr(N.getOperand(1));
1748 Tmp1 = SelectExpr(N.getOperand(0));
1751 switch (N.getValueType()) {
1752 default: assert(0 && "Cannot shift this type!");
1753 case MVT::i8 : Opc = X86::SAR8rCL; break;
1754 case MVT::i16: Opc = X86::SAR16rCL; break;
1755 case MVT::i32: Opc = X86::SAR32rCL; break;
1757 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1758 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1762 EmitCMP(N.getOperand(0), N.getOperand(1));
1763 EmitSetCC(BB, Result, cast<SetCCSDNode>(N)->getCondition(),
1764 MVT::isFloatingPoint(N.getOperand(1).getValueType()));
1767 // The chain for this load is now lowered.
1768 LoweredTokens.insert(SDOperand(Node, 1));
1770 // Make sure we generate both values.
1772 ExprMap[N.getValue(1)] = 1; // Generate the token
1774 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1776 switch (Node->getValueType(0)) {
1777 default: assert(0 && "Cannot load this type!");
1779 case MVT::i8: Opc = X86::MOV8rm; break;
1780 case MVT::i16: Opc = X86::MOV16rm; break;
1781 case MVT::i32: Opc = X86::MOV32rm; break;
1782 case MVT::f32: Opc = X86::FLD32m; ContainsFPCode = true; break;
1783 case MVT::f64: Opc = X86::FLD64m; ContainsFPCode = true; break;
1786 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
1787 Select(N.getOperand(0));
1788 addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
1791 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1792 Select(N.getOperand(0));
1793 SelectAddress(N.getOperand(1), AM);
1795 SelectAddress(N.getOperand(1), AM);
1796 Select(N.getOperand(0));
1798 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
1802 case ISD::DYNAMIC_STACKALLOC:
1803 // Generate both result values.
1805 ExprMap[N.getValue(1)] = 1; // Generate the token
1807 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1809 // FIXME: We are currently ignoring the requested alignment for handling
1810 // greater than the stack alignment. This will need to be revisited at some
1811 // point. Align = N.getOperand(2);
1813 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
1814 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
1815 std::cerr << "Cannot allocate stack object with greater alignment than"
1816 << " the stack alignment yet!";
1820 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1821 Select(N.getOperand(0));
1822 BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
1823 .addImm(CN->getValue());
1825 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1826 Select(N.getOperand(0));
1827 Tmp1 = SelectExpr(N.getOperand(1));
1829 Tmp1 = SelectExpr(N.getOperand(1));
1830 Select(N.getOperand(0));
1833 // Subtract size from stack pointer, thereby allocating some space.
1834 BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
1837 // Put a pointer to the space into the result register, by copying the stack
1839 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
1843 // The chain for this call is now lowered.
1844 LoweredTokens.insert(N.getValue(Node->getNumValues()-1));
1846 if (GlobalAddressSDNode *GASD =
1847 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
1848 Select(N.getOperand(0));
1849 BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
1850 } else if (ExternalSymbolSDNode *ESSDN =
1851 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
1852 Select(N.getOperand(0));
1853 BuildMI(BB, X86::CALLpcrel32,
1854 1).addExternalSymbol(ESSDN->getSymbol(), true);
1856 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1857 Select(N.getOperand(0));
1858 Tmp1 = SelectExpr(N.getOperand(1));
1860 Tmp1 = SelectExpr(N.getOperand(1));
1861 Select(N.getOperand(0));
1864 BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
1866 switch (Node->getValueType(0)) {
1867 default: assert(0 && "Unknown value type for call result!");
1868 case MVT::Other: return 1;
1871 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1874 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
1877 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
1878 if (Node->getValueType(1) == MVT::i32)
1879 BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
1882 case MVT::f64: // Floating-point return values live in %ST(0)
1883 ContainsFPCode = true;
1884 BuildMI(BB, X86::FpGETRESULT, 1, Result);
1887 return Result+N.ResNo;
1893 void ISel::Select(SDOperand N) {
1894 unsigned Tmp1, Tmp2, Opc;
1896 // FIXME: Disable for our current expansion model!
1897 if (/*!N->hasOneUse() &&*/ !LoweredTokens.insert(N).second)
1898 return; // Already selected.
1900 SDNode *Node = N.Val;
1902 switch (Node->getOpcode()) {
1904 Node->dump(); std::cerr << "\n";
1905 assert(0 && "Node not handled yet!");
1906 case ISD::EntryToken: return; // Noop
1907 case ISD::CopyToReg:
1908 Select(N.getOperand(0));
1909 Tmp1 = SelectExpr(N.getOperand(1));
1910 Tmp2 = cast<CopyRegSDNode>(N)->getReg();
1913 switch (N.getOperand(1).getValueType()) {
1914 default: assert(0 && "Invalid type for operation!");
1916 case MVT::i8: Opc = X86::MOV8rr; break;
1917 case MVT::i16: Opc = X86::MOV16rr; break;
1918 case MVT::i32: Opc = X86::MOV32rr; break;
1920 case MVT::f64: Opc = X86::FpMOV; ContainsFPCode = true; break;
1922 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
1926 switch (N.getNumOperands()) {
1928 assert(0 && "Unknown return instruction!");
1930 assert(N.getOperand(1).getValueType() == MVT::i32 &&
1931 N.getOperand(2).getValueType() == MVT::i32 &&
1932 "Unknown two-register value!");
1933 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
1934 Tmp1 = SelectExpr(N.getOperand(1));
1935 Tmp2 = SelectExpr(N.getOperand(2));
1937 Tmp2 = SelectExpr(N.getOperand(2));
1938 Tmp1 = SelectExpr(N.getOperand(1));
1940 Select(N.getOperand(0));
1942 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
1943 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
1944 // Declare that EAX & EDX are live on exit.
1945 BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
1949 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1950 Select(N.getOperand(0));
1951 Tmp1 = SelectExpr(N.getOperand(1));
1953 Tmp1 = SelectExpr(N.getOperand(1));
1954 Select(N.getOperand(0));
1956 switch (N.getOperand(1).getValueType()) {
1957 default: assert(0 && "All other types should have been promoted!!");
1959 BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
1960 // Declare that top-of-stack is live on exit
1961 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
1964 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
1965 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
1970 Select(N.getOperand(0));
1973 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
1976 Select(N.getOperand(0));
1977 MachineBasicBlock *Dest =
1978 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
1979 BuildMI(BB, X86::JMP, 1).addMBB(Dest);
1984 MachineBasicBlock *Dest =
1985 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
1987 // Try to fold a setcc into the branch. If this fails, emit a test/jne
1989 if (EmitBranchCC(Dest, N.getOperand(0), N.getOperand(1))) {
1990 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1991 Select(N.getOperand(0));
1992 Tmp1 = SelectExpr(N.getOperand(1));
1994 Tmp1 = SelectExpr(N.getOperand(1));
1995 Select(N.getOperand(0));
1997 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
1998 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
2005 case ISD::DYNAMIC_STACKALLOC:
2009 // Select the address.
2012 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2014 switch (CN->getValueType(0)) {
2015 default: assert(0 && "Invalid type for operation!");
2017 case MVT::i8: Opc = X86::MOV8mi; break;
2018 case MVT::i16: Opc = X86::MOV16mi; break;
2019 case MVT::i32: Opc = X86::MOV32mi; break;
2021 case MVT::f64: break;
2024 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
2025 Select(N.getOperand(0));
2026 SelectAddress(N.getOperand(2), AM);
2028 SelectAddress(N.getOperand(2), AM);
2029 Select(N.getOperand(0));
2031 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
2035 switch (N.getOperand(1).getValueType()) {
2036 default: assert(0 && "Cannot store this type!");
2038 case MVT::i8: Opc = X86::MOV8mr; break;
2039 case MVT::i16: Opc = X86::MOV16mr; break;
2040 case MVT::i32: Opc = X86::MOV32mr; break;
2041 case MVT::f32: Opc = X86::FST32m; break;
2042 case MVT::f64: Opc = X86::FST64m; break;
2045 std::vector<std::pair<unsigned, unsigned> > RP;
2046 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
2047 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
2048 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
2049 std::sort(RP.begin(), RP.end());
2051 for (unsigned i = 0; i != 3; ++i)
2052 switch (RP[2-i].second) {
2053 default: assert(0 && "Unknown operand number!");
2054 case 0: Select(N.getOperand(0)); break;
2055 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
2056 case 2: SelectAddress(N.getOperand(2), AM); break;
2059 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
2062 case ISD::ADJCALLSTACKDOWN:
2063 case ISD::ADJCALLSTACKUP:
2064 Select(N.getOperand(0));
2065 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
2067 Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? X86::ADJCALLSTACKDOWN :
2068 X86::ADJCALLSTACKUP;
2069 BuildMI(BB, Opc, 1).addImm(Tmp1);
// Presumably the ISD::MEMSET case -- the case label itself is outside this
// view.  Operand layout used below: 0 = chain, 1 = destination pointer,
// 2 = byte value to store, 3 = byte count, 4 = alignment.
// NOTE(review): several original lines (else-arms, breaks, closing braces)
// are not visible in this chunk.
2072 Select(N.getOperand(0)); // Select the chain.
2074 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
2075 if (Align == 0) Align = 1;
2077 // Turn the byte code into # iterations
// If the fill value is a compile-time constant we can replicate it and use
// a wider REP STOS element chosen from the alignment.
2080 if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
2081 unsigned Val = ValC->getValue() & 255;
2083 // If the value is a constant, then we can potentially use larger sets.
2084 switch (Align & 3) {
2085 case 2: // WORD aligned
2086 CountReg = MakeReg(MVT::i32);
// Count in 16-bit words: constant count / 2, or a runtime SHR by 1.
// NOTE(review): a count that is not a multiple of 2 would lose its odd
// byte here unless a guard exists among the non-visible lines -- verify.
2087 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2088 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
2090 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2091 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
// AX = the byte replicated into both halves of the 16-bit value.
2093 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
2094 Opcode = X86::REP_STOSW;
2096 case 0: // DWORD aligned
2097 CountReg = MakeReg(MVT::i32);
// Count in 32-bit dwords: constant count / 4, or a runtime SHR by 2.
2098 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2099 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
2101 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2102 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
// EAX = the byte replicated into all four bytes of the 32-bit value.
2104 Val = (Val << 8) | Val;
2105 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
2106 Opcode = X86::REP_STOSD;
2108 default: // BYTE aligned
// Unknown/byte alignment: store one byte per iteration.
2109 CountReg = SelectExpr(Node->getOperand(3));
2110 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
2111 Opcode = X86::REP_STOSB;
2115 // If it's not a constant value we are storing, just fall back. We could
2116 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
2117 unsigned ValReg = SelectExpr(Node->getOperand(2));
2118 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
2119 CountReg = SelectExpr(Node->getOperand(3));
2120 Opcode = X86::REP_STOSB;
2123 // No matter what the alignment is, we put the source in ESI, the
2124 // destination in EDI, and the count in ECX.
// REP STOS implicit operands: ECX = element count, EDI = destination,
// AL/AX/EAX = fill value (set above).
2125 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
2126 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
2127 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
2128 BuildMI(BB, Opcode, 0);
// Presumably the ISD::MEMCPY case -- the case label itself is outside this
// view.  Operand layout used below: 0 = chain, 1 = destination pointer,
// 2 = source pointer, 3 = byte count, 4 = alignment.
// NOTE(review): several original lines (else-arms, breaks, closing braces)
// are not visible in this chunk.
2132 Select(N.getOperand(0)); // Select the chain.
2134 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
2135 if (Align == 0) Align = 1;
2137 // Turn the byte code into # iterations
// Pick the REP MOVS element width from the alignment, converting the byte
// count into an element count (constant divide or runtime shift).
2140 switch (Align & 3) {
2141 case 2: // WORD aligned
2142 CountReg = MakeReg(MVT::i32);
// NOTE(review): as with the memset case, an odd byte count appears to be
// truncated by the /2 unless a guard exists among non-visible lines.
2143 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2144 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
2146 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2147 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
2149 Opcode = X86::REP_MOVSW;
2151 case 0: // DWORD aligned
2152 CountReg = MakeReg(MVT::i32);
2153 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2154 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
2156 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2157 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
2159 Opcode = X86::REP_MOVSD;
2161 default: // BYTE aligned
2162 CountReg = SelectExpr(Node->getOperand(3));
2163 Opcode = X86::REP_MOVSB;
2167 // No matter what the alignment is, we put the source in ESI, the
2168 // destination in EDI, and the count in ECX.
// REP MOVS implicit operands: ECX = element count, EDI = destination,
// ESI = source.
2169 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
2170 unsigned TmpReg2 = SelectExpr(Node->getOperand(2));
2171 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
2172 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
2173 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
2174 BuildMI(BB, Opcode, 0);
// Every selectable node kind must have been handled by a case above.
2177 assert(0 && "Should not be reached!");
2181 /// createX86PatternInstructionSelector - This pass converts an LLVM function
2182 /// into a machine code representation using pattern matching and a machine
2183 /// description file.
2185 FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
// The ISel pass is heap-allocated; presumably the receiving PassManager
// takes ownership of the returned FunctionPass* -- verify against callers.
2186 return new ISel(TM);