1 //===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for X86.
12 //===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86RegisterInfo.h"
17 #include "llvm/Constants.h" // FIXME: REMOVE
18 #include "llvm/Function.h"
19 #include "llvm/CodeGen/MachineConstantPool.h" // FIXME: REMOVE
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/SelectionDAGISel.h"
24 #include "llvm/CodeGen/SSARegMap.h"
25 #include "llvm/Target/TargetData.h"
26 #include "llvm/Target/TargetLowering.h"
27 #include "llvm/Support/MathExtras.h"
28 #include "llvm/ADT/Statistic.h"
33 //===----------------------------------------------------------------------===//
34 // X86TargetLowering - X86 Implementation of the TargetLowering interface
// X86TargetLowering - X86 implementation of the TargetLowering interface.
// Registers the legal register class for each value type, marks operations
// the selector cannot match (so the legalizer expands them), and records
// which FP immediates the x87 can materialize directly.
// NOTE(review): the extraction elides some lines here (original numbering
// skips); code is reproduced exactly as visible.
36 class X86TargetLowering : public TargetLowering {
37 int VarArgsFrameIndex; // FrameIndex for start of varargs area.
38 int ReturnAddrIndex; // FrameIndex for return slot.
40 X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
41 // Set up the TargetLowering object.
// Map each legal MVT onto the x86 register class that holds it.
42 addRegisterClass(MVT::i8, X86::R8RegisterClass);
43 addRegisterClass(MVT::i16, X86::R16RegisterClass);
44 addRegisterClass(MVT::i32, X86::R32RegisterClass);
45 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
47 // FIXME: Eliminate these two classes when legalize can handle promotions
49 addRegisterClass(MVT::i1, X86::R8RegisterClass);
50 addRegisterClass(MVT::f32, X86::RFPRegisterClass);
52 computeRegisterProperties();
// Operations with no direct x86 pattern: the legalizer must expand them
// (e.g. i8 multiply and narrow selects) into supported forms.
54 setOperationUnsupported(ISD::MEMMOVE, MVT::Other);
56 setOperationUnsupported(ISD::MUL, MVT::i8);
57 setOperationUnsupported(ISD::SELECT, MVT::i1);
58 setOperationUnsupported(ISD::SELECT, MVT::i8);
// +/-0.0 and +/-1.0 are free on x87: FLD0/FLD1, with FCHS for the sign.
60 addLegalFPImmediate(+0.0); // FLD0
61 addLegalFPImmediate(+1.0); // FLD1
62 addLegalFPImmediate(-0.0); // FLD0/FCHS
63 addLegalFPImmediate(-1.0); // FLD1/FCHS
66 /// LowerArguments - This hook must be implemented to indicate how we should
67 /// lower the arguments for the specified function, into the specified DAG.
68 virtual std::vector<SDOperand>
69 LowerArguments(Function &F, SelectionDAG &DAG);
71 /// LowerCallTo - This hook lowers an abstract call to a function into an
// actual call DAG sequence (stack adjustment, argument stores, call node).
73 virtual std::pair<SDOperand, SDOperand>
74 LowerCallTo(SDOperand Chain, const Type *RetTy, SDOperand Callee,
75 ArgListTy &Args, SelectionDAG &DAG);
// Varargs lowering hooks: va_start returns the frame slot address;
// va_arg/va_next load the value and/or advance the list pointer.
77 virtual std::pair<SDOperand, SDOperand>
78 LowerVAStart(SDOperand Chain, SelectionDAG &DAG);
80 virtual std::pair<SDOperand,SDOperand>
81 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
82 const Type *ArgTy, SelectionDAG &DAG);
84 virtual std::pair<SDOperand, SDOperand>
85 LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
// LowerArguments - Create DAG nodes that load each incoming argument from
// its fixed stack slot ([ESP+4], [ESP+8], ...), per the x86 C calling
// convention.  Returns one SDOperand per formal argument, and records the
// frame index where varargs would begin.
91 std::vector<SDOperand>
92 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
93 std::vector<SDOperand> ArgValues;
95 // Add DAG nodes to load the arguments... On entry to a function on the X86,
96 // the stack frame looks like this:
98 // [ESP] -- return address
99 // [ESP + 4] -- first argument (leftmost lexically)
100 // [ESP + 8] -- second argument, if first argument is four bytes in size
103 MachineFunction &MF = DAG.getMachineFunction();
104 MachineFrameInfo *MFI = MF.getFrameInfo();
106 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
107 for (Function::aiterator I = F.abegin(), E = F.aend(); I != E; ++I) {
108 MVT::ValueType ObjectVT = getValueType(I->getType());
109 unsigned ArgIncrement = 4;
// Size of the argument object on the stack; 8-byte types also bump the
// per-argument stride.  (NOTE(review): the switch header line is elided
// in this extraction.)
112 default: assert(0 && "Unhandled argument type!");
114 case MVT::i8: ObjSize = 1; break;
115 case MVT::i16: ObjSize = 2; break;
116 case MVT::i32: ObjSize = 4; break;
117 case MVT::i64: ObjSize = ArgIncrement = 8; break;
118 case MVT::f32: ObjSize = 4; break;
119 case MVT::f64: ObjSize = ArgIncrement = 8; break;
121 // Create the frame index object for this incoming parameter...
122 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
124 // Create the SelectionDAG nodes corresponding to a load from this parameter
125 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
127 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
// them; dead arguments get a cheap constant-zero placeholder instead of a
// load from the stack slot.
131 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN);
133 if (MVT::isInteger(ObjectVT))
134 ArgValue = DAG.getConstant(0, ObjectVT);
136 ArgValue = DAG.getConstantFP(0, ObjectVT);
138 ArgValues.push_back(ArgValue);
140 ArgOffset += ArgIncrement; // Move on to the next argument...
143 // If the function takes variable number of arguments, make a frame index for
144 // the start of the first vararg value... for expansion of llvm.va_start.
146 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
147 ReturnAddrIndex = 0; // No return address slot generated yet.
// LowerCallTo - Lower an abstract call into DAG nodes: bracket the call with
// ADJCALLSTACKDOWN/ADJCALLSTACKUP, store each (possibly promoted) argument to
// its stack slot relative to ESP, then emit the call node.  Returns the pair
// (call result, output chain).
151 std::pair<SDOperand, SDOperand>
152 X86TargetLowering::LowerCallTo(SDOperand Chain,
153 const Type *RetTy, SDOperand Callee,
154 ArgListTy &Args, SelectionDAG &DAG) {
155 // Count how many bytes are to be pushed on the stack.
156 unsigned NumBytes = 0;
// NOTE(review): lines are elided here — this ADJCALLSTACKDOWN with a 0 byte
// count presumably sits on a no-argument path, with the byte-counting loop
// and the NumBytes variant below on the general path; confirm against the
// full source.
160 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
161 DAG.getConstant(0, getPointerTy()));
163 for (unsigned i = 0, e = Args.size(); i != e; ++i)
164 switch (getValueType(Args[i].second)) {
165 default: assert(0 && "Unknown value type!");
179 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
180 DAG.getConstant(NumBytes, getPointerTy()));
182 // Arguments go on the stack in reverse order, as specified by the ABI.
183 unsigned ArgOffset = 0;
184 SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32);
185 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
// Address of this argument's slot: ESP + ArgOffset.
187 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
188 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
190 switch (getValueType(Args[i].second)) {
191 default: assert(0 && "Unexpected ValueType for argument!");
195 // Promote the integer to 32 bits. If the input type is signed use a
196 // sign extend, otherwise use a zero extend.
197 if (Args[i].second->isSigned())
198 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
200 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
205 // FIXME: Note that all of these stores are independent of each other.
206 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
207 Args[i].first, PtrOff);
212 // FIXME: Note that all of these stores are independent of each other.
213 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
214 Args[i].first, PtrOff);
// Result types of the call node: the return value (if non-void) plus the
// output chain token.
221 std::vector<MVT::ValueType> RetVals;
222 MVT::ValueType RetTyVT = getValueType(RetTy);
223 if (RetTyVT != MVT::isVoid)
224 RetVals.push_back(RetTyVT);
225 RetVals.push_back(MVT::Other);
// The chain is result 1 for value-returning calls, result 0 for void calls.
227 SDOperand TheCall = SDOperand(DAG.getCall(RetVals, Chain, Callee), 0);
228 Chain = TheCall.getValue(RetTyVT != MVT::isVoid);
229 Chain = DAG.getNode(ISD::ADJCALLSTACKUP, MVT::Other, Chain,
230 DAG.getConstant(NumBytes, getPointerTy()));
231 return std::make_pair(TheCall, Chain);
// LowerVAStart - llvm.va_start on x86 needs no setup code: the varargs area
// begins at a fixed frame slot recorded by LowerArguments.
234 std::pair<SDOperand, SDOperand>
235 X86TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
236 // vastart just returns the address of the VarArgsFrameIndex slot.
237 return std::make_pair(DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32), Chain);
// LowerVAArgNext - Lower llvm.va_arg / va_next: load the argument value from
// the current VAList pointer and/or advance VAList by the argument's size
// (4 bytes for i32, 8 for i64/f64 — smaller types were already promoted).
240 std::pair<SDOperand,SDOperand> X86TargetLowering::
241 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
242 const Type *ArgTy, SelectionDAG &DAG) {
243 MVT::ValueType ArgVT = getValueType(ArgTy);
246 Result = DAG.getLoad(ArgVT, DAG.getEntryNode(), VAList);
// Amt is the slot size used to step the vararg pointer (elided lines
// presumably set Amt = 4 here and 8 below — confirm against full source).
249 if (ArgVT == MVT::i32)
252 assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
253 "Other types should have been promoted for varargs!");
256 Result = DAG.getNode(ISD::ADD, VAList.getValueType(), VAList,
257 DAG.getConstant(Amt, VAList.getValueType()));
259 return std::make_pair(Result, Chain);
// LowerFrameReturnAddress - Lower llvm.returnaddress / llvm.frameaddress.
// Only Depth == 0 is supported; deeper frames yield a constant 0.  The return
// address lives in a fixed slot at offset -4, created lazily; the frame
// address is that slot's address minus 4.
263 std::pair<SDOperand, SDOperand> X86TargetLowering::
264 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
267 if (Depth) // Depths > 0 not supported yet!
268 Result = DAG.getConstant(0, getPointerTy());
270 if (ReturnAddrIndex == 0) {
271 // Set up a frame object for the return address.
272 MachineFunction &MF = DAG.getMachineFunction();
273 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
276 SDOperand RetAddrFI = DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
279 // Just load the return address
280 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI);
// Frame-address case: the caller's frame pointer sits 4 bytes below the
// return-address slot.
282 Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
283 DAG.getConstant(4, MVT::i32));
285 return std::make_pair(Result, Chain);
294 NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
296 //===--------------------------------------------------------------------===//
297 /// ISel - X86 specific code to select X86 machine instructions for
298 /// SelectionDAG operations.
// ISel - The X86 pattern-matching instruction selector.  Walks each basic
// block's SelectionDAG, emitting MachineInstrs via SelectExpr/Select, with
// CSE of already-emitted values (ExprMap) and a Sethi-Ullman-style register
// pressure estimate to pick evaluation order.
300 class ISel : public SelectionDAGISel {
301 /// ContainsFPCode - Every instruction we select that uses or defines a FP
302 /// register should set this to true.
305 /// X86Lowering - This object fully describes how to lower LLVM code to an
306 /// X86-specific SelectionDAG.
307 X86TargetLowering X86Lowering;
309 /// RegPressureMap - This keeps an approximate count of the number of
310 /// registers required to evaluate each node in the graph.
311 std::map<SDNode*, unsigned> RegPressureMap;
313 /// ExprMap - As shared expressions are codegen'd, we keep track of which
314 /// vreg the value is produced in, so we only emit one copy of each compiled
// tree (CSE of DAG values).
316 std::map<SDOperand, unsigned> ExprMap;
// LoweredTokens - Chain/token results that have already been emitted.
317 std::set<SDOperand> LoweredTokens;
320 ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
// getRegPressure - Cached Sethi-Ullman estimate for a node (0 if unset).
323 unsigned getRegPressure(SDOperand O) {
324 return RegPressureMap[O.Val];
326 unsigned ComputeRegPressure(SDOperand O);
328 /// InstructionSelectBasicBlock - This callback is invoked by
329 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
330 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
// Load-folding helpers: test whether a LOAD can be folded into its single
// user, and emit the address computation for a folded load.
332 bool isFoldableLoad(SDOperand Op);
333 void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM);
// Comparison / branch / select emission helpers.
336 void EmitCMP(SDOperand LHS, SDOperand RHS);
337 bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond);
338 void EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
339 unsigned RTrue, unsigned RFalse, unsigned RDest);
340 unsigned SelectExpr(SDOperand N);
341 bool SelectAddress(SDOperand N, X86AddressMode &AM);
342 void Select(SDOperand N);
346 /// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
347 /// when it has created a SelectionDAG for us to codegen.
// Selects the block, then decides whether an FP_REG_KILL is needed because a
// FP virtual register is live into or out of this block (the FP stackifier
// cannot yet handle FP values across block boundaries).
348 void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
349 // While we're doing this, keep track of whether we see any FP code for
350 // FP_REG_KILL insertion.
351 ContainsFPCode = false;
353 // Scan the PHI nodes that already are inserted into this basic block. If any
354 // of them is a PHI of a floating point value, we need to insert an
// FP_REG_KILL.
356 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
357 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
359 assert(I->getOpcode() == X86::PHI &&
360 "Isn't just PHI nodes?");
361 if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
362 X86::RFPRegisterClass) {
363 ContainsFPCode = true;
368 // Compute the RegPressureMap, which is an approximation for the number of
369 // registers required to compute each node.
370 ComputeRegPressure(DAG.getRoot());
372 // Codegen the basic block.
373 Select(DAG.getRoot());
375 // Finally, look at all of the successors of this block. If any contain a PHI
376 // node of FP type, we need to insert an FP_REG_KILL in this block.
377 for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
378 E = BB->succ_end(); SI != E && !ContainsFPCode; ++SI)
379 for (MachineBasicBlock::iterator I = (*SI)->begin(), E = (*SI)->end();
380 I != E && I->getOpcode() == X86::PHI; ++I) {
381 if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
382 X86::RFPRegisterClass) {
383 ContainsFPCode = true;
388 // Insert FP_REG_KILL instructions into basic blocks that need them. This
389 // only occurs due to the floating point stackifier not being aggressive
390 // enough to handle arbitrary global stackification.
392 // Currently we insert an FP_REG_KILL instruction into each block that uses or
393 // defines a floating point virtual register.
395 // When the global register allocators (like linear scan) finally update live
396 // variable analysis, we can keep floating point values in registers across
397 // basic blocks. This will be a huge win, but we are waiting on the global
398 // allocators before we can do this.
// Only blocks with successors need the kill; it goes before the terminator.
400 if (ContainsFPCode && BB->succ_size()) {
401 BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
405 // Clear state used for selection.
407 LoweredTokens.clear();
408 RegPressureMap.clear();
412 // ComputeRegPressure - Compute the RegPressureMap, which is an approximation
413 // for the number of registers required to compute each node. This is basically
414 // computing a generalized form of the Sethi-Ullman number for each node.
// Result is memoized in RegPressureMap; a node's cost is the max of its
// operands' costs plus one per additional operand that ties that max
// (constants are skipped since they need no register).
415 unsigned ISel::ComputeRegPressure(SDOperand O) {
417 unsigned &Result = RegPressureMap[N];
418 if (Result) return Result;
420 // FIXME: Should operations like CALL (which clobber lots o regs) have a
421 // higher fixed cost??
// Leaf nodes need just their own result register.
423 if (N->getNumOperands() == 0) {
426 unsigned MaxRegUse = 0;
427 unsigned NumExtraMaxRegUsers = 0;
428 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
// Constants are folded into instructions and consume no register.
430 if (N->getOperand(i).getOpcode() == ISD::Constant)
433 Regs = ComputeRegPressure(N->getOperand(i));
434 if (Regs > MaxRegUse) {
436 NumExtraMaxRegUsers = 0;
437 } else if (Regs == MaxRegUse &&
438 N->getOperand(i).getValueType() != MVT::Other) {
439 ++NumExtraMaxRegUsers;
443 Result = MaxRegUse+NumExtraMaxRegUsers;
446 //std::cerr << " WEIGHT: " << Result << " "; N->dump(); std::cerr << "\n";
450 /// SelectAddress - Add the specified node to the specified addressing mode,
451 /// returning true if it cannot be done.
// Greedily folds FrameIndex, GlobalAddress, constant displacements, scaled
// shifts (scale 2/4/8), x*[3,5,9] multiplies, and ADDs into an x86
// [base + index*scale + disp] addressing mode; anything else becomes a plain
// register base.
452 bool ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
453 switch (N.getOpcode()) {
455 case ISD::FrameIndex:
// Frame indices can serve as the base, but only if no base is taken yet.
456 if (AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0) {
457 AM.BaseType = X86AddressMode::FrameIndexBase;
458 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
462 case ISD::GlobalAddress:
464 AM.GV = cast<GlobalAddressSDNode>(N)->getGlobal();
// Integer constants fold directly into the displacement.
469 AM.Disp += cast<ConstantSDNode>(N)->getValue();
472 // We might have folded the load into this shift, so don't regen the value
// if it was already emitted into a register.
474 if (ExprMap.count(N)) break;
// SHL by 1/2/3 becomes an index with scale 2/4/8.
476 if (AM.IndexReg == 0 && AM.Scale == 1)
477 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
478 unsigned Val = CN->getValue();
479 if (Val == 1 || Val == 2 || Val == 3) {
481 SDOperand ShVal = N.Val->getOperand(0);
483 // Okay, we know that we have a scale by now. However, if the scaled
484 // value is an add of something and a constant, we can fold the
485 // constant into the disp field here.
486 if (ShVal.Val->getOpcode() == ISD::ADD && !ExprMap.count(ShVal) &&
487 isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
488 AM.IndexReg = SelectExpr(ShVal.Val->getOperand(0));
489 ConstantSDNode *AddVal =
490 cast<ConstantSDNode>(ShVal.Val->getOperand(1));
// The folded constant is pre-scaled by the shift amount.
491 AM.Disp += AddVal->getValue() << Val;
493 AM.IndexReg = SelectExpr(ShVal);
500 // We might have folded the load into this mul, so don't regen the value if
// it was already emitted.
502 if (ExprMap.count(N)) break;
504 // X*[3,5,9] -> X+X*[2,4,8]
505 if (AM.IndexReg == 0 && AM.BaseType == X86AddressMode::RegBase &&
507 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
508 if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
509 AM.Scale = unsigned(CN->getValue())-1;
511 SDOperand MulVal = N.Val->getOperand(0);
514 // Okay, we know that we have a scale by now. However, if the scaled
515 // value is an add of something and a constant, we can fold the
516 // constant into the disp field here.
517 if (MulVal.Val->getOpcode() == ISD::ADD && !ExprMap.count(MulVal) &&
518 isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
519 Reg = SelectExpr(MulVal.Val->getOperand(0));
520 ConstantSDNode *AddVal =
521 cast<ConstantSDNode>(MulVal.Val->getOperand(1));
522 AM.Disp += AddVal->getValue() * CN->getValue();
524 Reg = SelectExpr(N.Val->getOperand(0));
// Same register serves as both base and index: base + index*(scale)
// realizes X + X*(C-1) == X*C.
527 AM.IndexReg = AM.Base.Reg = Reg;
533 // We might have folded the load into this mul, so don't regen the value if
// it was already emitted.
535 if (ExprMap.count(N)) break;
// For ADD, try folding both operands in either order, restoring the saved
// mode between attempts.
537 X86AddressMode Backup = AM;
538 if (!SelectAddress(N.Val->getOperand(0), AM) &&
539 !SelectAddress(N.Val->getOperand(1), AM))
542 if (!SelectAddress(N.Val->getOperand(1), AM) &&
543 !SelectAddress(N.Val->getOperand(0), AM))
550 // Is the base register already occupied?
551 if (AM.BaseType != X86AddressMode::RegBase || AM.Base.Reg) {
552 // If so, check to see if the scale index register is set.
553 if (AM.IndexReg == 0) {
554 AM.IndexReg = SelectExpr(N);
559 // Otherwise, we cannot select it.
563 // Default, generate it as a register.
564 AM.BaseType = X86AddressMode::RegBase;
565 AM.Base.Reg = SelectExpr(N);
569 /// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
570 /// assuming that the temporary registers are in the 8-bit register class.
// Tmp1 = SetCC1; Tmp2 = SetCC2;
574 /// DestReg = logicalop Tmp1, Tmp2
// Used for FP conditions that need two flag tests combined (AND/OR).
576 static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
577 unsigned SetCC2, unsigned LogicalOp,
579 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
580 unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
581 unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
582 BuildMI(BB, SetCC1, 0, Tmp1);
583 BuildMI(BB, SetCC2, 0, Tmp2);
584 BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
587 /// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
588 /// condition codes match the specified SetCCOpcode. Note that some conditions
589 /// require multiple instructions to generate the correct value.
// Integer conditions map 1:1 onto SETcc; FP conditions (flags produced by
// FUCOMI-style compares) sometimes need two SETcc's combined via
// Emit2SetCCsAndLogical to account for the unordered (PF=1) case.
590 static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
591 ISD::CondCode SetCCOpcode, bool isFP) {
594 switch (SetCCOpcode) {
595 default: assert(0 && "Illegal integer SetCC!");
596 case ISD::SETEQ: Opc = X86::SETEr; break;
597 case ISD::SETGT: Opc = X86::SETGr; break;
598 case ISD::SETGE: Opc = X86::SETGEr; break;
599 case ISD::SETLT: Opc = X86::SETLr; break;
600 case ISD::SETLE: Opc = X86::SETLEr; break;
601 case ISD::SETNE: Opc = X86::SETNEr; break;
602 case ISD::SETULT: Opc = X86::SETBr; break;
603 case ISD::SETUGT: Opc = X86::SETAr; break;
604 case ISD::SETULE: Opc = X86::SETBEr; break;
605 case ISD::SETUGE: Opc = X86::SETAEr; break;
608 // On a floating point condition, the flags are set as follows:
// ZF PF CF
612 // 1 | 0 | 0 | X == Y
613 // 1 | 1 | 1 | unordered
615 switch (SetCCOpcode) {
616 default: assert(0 && "Invalid FP setcc!");
619 Opc = X86::SETEr; // True if ZF = 1
623 Opc = X86::SETAr; // True if CF = 0 and ZF = 0
627 Opc = X86::SETAEr; // True if CF = 0
631 Opc = X86::SETBr; // True if CF = 1
635 Opc = X86::SETBEr; // True if CF = 1 or ZF = 1
639 Opc = X86::SETNEr; // True if ZF = 0
642 Opc = X86::SETPr; // True if PF = 1
645 Opc = X86::SETNPr; // True if PF = 0
// Ordered/unordered variants need two flag tests combined:
647 case ISD::SETOEQ: // !PF & ZF
648 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
650 case ISD::SETOLT: // !PF & CF
651 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
653 case ISD::SETOLE: // !PF & (CF || ZF)
654 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
656 case ISD::SETUGT: // PF | (!ZF & !CF)
657 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
659 case ISD::SETUGE: // PF | !CF
660 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
662 case ISD::SETUNE: // PF | !ZF
663 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);
// Single-SETcc path for all conditions that fell through with Opc set.
667 BuildMI(BB, Opc, 0, DestReg);
671 /// EmitBranchCC - Emit code into BB that arranges for control to transfer to
672 /// the Dest block if the Cond condition is true. If we cannot fold this
673 /// condition into the branch, return true.
// Handles: AND/OR conditions (flags already set), XOR-with-allones (branch
// on inverted condition), and simple SetCC nodes folded into Jcc (with a
// second Jcc for unordered FP cases).  Returns true when the caller must
// materialize the condition itself.
675 bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
677 // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
678 // B) using two conditional branches instead of one condbr, two setcc's, and
// an or.
680 if ((Cond.getOpcode() == ISD::OR ||
681 Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
682 // And and or set the flags for us, so there is no need to emit a TST of the
683 // result. It is only safe to do this if there is only a single use of the
684 // AND/OR though, otherwise we don't know it will be emitted here.
687 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
691 // Codegen br not C -> JE.
692 if (Cond.getOpcode() == ISD::XOR)
693 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
694 if (NC->isAllOnesValue()) {
// Evaluate chain and condition in register-pressure order.
696 if (getRegPressure(Chain) > getRegPressure(Cond)) {
698 CondR = SelectExpr(Cond.Val->getOperand(0));
700 CondR = SelectExpr(Cond.Val->getOperand(0));
703 BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
704 BuildMI(BB, X86::JE, 1).addMBB(Dest);
708 SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond);
710 return true; // Can only handle simple setcc's so far.
714 // Handle integer conditions first.
715 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
716 switch (SetCC->getCondition()) {
717 default: assert(0 && "Illegal integer SetCC!");
718 case ISD::SETEQ: Opc = X86::JE; break;
719 case ISD::SETGT: Opc = X86::JG; break;
720 case ISD::SETGE: Opc = X86::JGE; break;
721 case ISD::SETLT: Opc = X86::JL; break;
722 case ISD::SETLE: Opc = X86::JLE; break;
723 case ISD::SETNE: Opc = X86::JNE; break;
724 case ISD::SETULT: Opc = X86::JB; break;
725 case ISD::SETUGT: Opc = X86::JA; break;
726 case ISD::SETULE: Opc = X86::JBE; break;
727 case ISD::SETUGE: Opc = X86::JAE; break;
730 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
731 BuildMI(BB, Opc, 1).addMBB(Dest);
// FP comparisons: a second branch (Opc2) covers the unordered (PF=1) case.
735 unsigned Opc2 = 0; // Second branch if needed.
737 // On a floating point condition, the flags are set as follows:
// ZF PF CF
741 // 1 | 0 | 0 | X == Y
742 // 1 | 1 | 1 | unordered
744 switch (SetCC->getCondition()) {
745 default: assert(0 && "Invalid FP setcc!");
747 case ISD::SETEQ: Opc = X86::JE; break; // True if ZF = 1
749 case ISD::SETGT: Opc = X86::JA; break; // True if CF = 0 and ZF = 0
751 case ISD::SETGE: Opc = X86::JAE; break; // True if CF = 0
753 case ISD::SETLT: Opc = X86::JB; break; // True if CF = 1
755 case ISD::SETLE: Opc = X86::JBE; break; // True if CF = 1 or ZF = 1
757 case ISD::SETNE: Opc = X86::JNE; break; // True if ZF = 0
758 case ISD::SETUO: Opc = X86::JP; break; // True if PF = 1
759 case ISD::SETO: Opc = X86::JNP; break; // True if PF = 0
760 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
761 Opc = X86::JA; // ZF = 0 & CF = 0
762 Opc2 = X86::JP; // PF = 1
764 case ISD::SETUGE: // PF = 1 | CF = 0
765 Opc = X86::JAE; // CF = 0
766 Opc2 = X86::JP; // PF = 1
768 case ISD::SETUNE: // PF = 1 | ZF = 0
769 Opc = X86::JNE; // ZF = 0
770 Opc2 = X86::JP; // PF = 1
// Ordered conjunctions would need "branch unless PF" logic; punt to caller.
772 case ISD::SETOEQ: // PF = 0 & ZF = 1
775 return true; // FIXME: Emit more efficient code for this branch.
776 case ISD::SETOLT: // PF = 0 & CF = 1
779 return true; // FIXME: Emit more efficient code for this branch.
780 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
783 return true; // FIXME: Emit more efficient code for this branch.
787 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
788 BuildMI(BB, Opc, 1).addMBB(Dest);
790 BuildMI(BB, Opc2, 1).addMBB(Dest);
794 /// EmitSelectCC - Emit code into BB that performs a select operation between
795 /// the two registers RTrue and RFalse, generating a result into RDest. Return
796 /// true if the fold cannot be performed.
// Maps the condition onto a CMOVcc (16/32-bit) or FCMOVcc; conditions that
// would need two cmovs fall back to TEST + CMOVE with swapped operands.
798 void ISel::EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
799 unsigned RTrue, unsigned RFalse, unsigned RDest) {
// Condition-code enum indexing the CMOV tables below; NOT_SET means the
// condition could not be folded into a single cmov.
801 EQ, NE, LT, LE, GT, GE, B, BE, A, AE, P, NP,
803 } CondCode = NOT_SET;
// Opcode tables indexed by CondCode; FP table has 0 for the signed
// comparisons FCMOV cannot express directly.
805 static const unsigned CMOVTAB16[] = {
806 X86::CMOVE16rr, X86::CMOVNE16rr, X86::CMOVL16rr, X86::CMOVLE16rr,
807 X86::CMOVG16rr, X86::CMOVGE16rr, X86::CMOVB16rr, X86::CMOVBE16rr,
808 X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
810 static const unsigned CMOVTAB32[] = {
811 X86::CMOVE32rr, X86::CMOVNE32rr, X86::CMOVL32rr, X86::CMOVLE32rr,
812 X86::CMOVG32rr, X86::CMOVGE32rr, X86::CMOVB32rr, X86::CMOVBE32rr,
813 X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
815 static const unsigned CMOVTABFP[] = {
816 X86::FCMOVE , X86::FCMOVNE, /*missing*/0, /*missing*/0,
817 /*missing*/0, /*missing*/0, X86::FCMOVB , X86::FCMOVBE,
818 X86::FCMOVA , X86::FCMOVAE, X86::FCMOVP , X86::FCMOVNP
821 if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond)) {
822 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
823 switch (SetCC->getCondition()) {
824 default: assert(0 && "Unknown integer comparison!");
825 case ISD::SETEQ: CondCode = EQ; break;
826 case ISD::SETGT: CondCode = GT; break;
827 case ISD::SETGE: CondCode = GE; break;
828 case ISD::SETLT: CondCode = LT; break;
829 case ISD::SETLE: CondCode = LE; break;
830 case ISD::SETNE: CondCode = NE; break;
831 case ISD::SETULT: CondCode = B; break;
832 case ISD::SETUGT: CondCode = A; break;
833 case ISD::SETULE: CondCode = BE; break;
834 case ISD::SETUGE: CondCode = AE; break;
837 // On a floating point condition, the flags are set as follows:
// ZF PF CF
841 // 1 | 0 | 0 | X == Y
842 // 1 | 1 | 1 | unordered
844 switch (SetCC->getCondition()) {
845 default: assert(0 && "Unknown FP comparison!");
847 case ISD::SETEQ: CondCode = EQ; break; // True if ZF = 1
849 case ISD::SETGT: CondCode = A; break; // True if CF = 0 and ZF = 0
851 case ISD::SETGE: CondCode = AE; break; // True if CF = 0
853 case ISD::SETLT: CondCode = B; break; // True if CF = 1
855 case ISD::SETLE: CondCode = BE; break; // True if CF = 1 or ZF = 1
857 case ISD::SETNE: CondCode = NE; break; // True if ZF = 0
858 case ISD::SETUO: CondCode = P; break; // True if PF = 1
859 case ISD::SETO: CondCode = NP; break; // True if PF = 0
860 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
861 case ISD::SETUGE: // PF = 1 | CF = 0
862 case ISD::SETUNE: // PF = 1 | ZF = 0
863 case ISD::SETOEQ: // PF = 0 & ZF = 1
864 case ISD::SETOLT: // PF = 0 & CF = 1
865 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
866 // We cannot emit this comparison as a single cmov.
// (CondCode stays NOT_SET; handled by the fallback below.)
873 if (CondCode != NOT_SET) {
875 default: assert(0 && "Cannot select this type!");
876 case MVT::i16: Opc = CMOVTAB16[CondCode]; break;
877 case MVT::i32: Opc = CMOVTAB32[CondCode]; break;
879 case MVT::f64: Opc = CMOVTABFP[CondCode]; break;
883 // Finally, if we weren't able to fold this, just emit the condition and test
// the resulting boolean with TEST, then CMOVE.
885 if (CondCode == NOT_SET || Opc == 0) {
886 // Get the condition into the zero flag.
887 unsigned CondReg = SelectExpr(Cond);
888 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
891 default: assert(0 && "Cannot select this type!");
892 case MVT::i16: Opc = X86::CMOVE16rr; break;
893 case MVT::i32: Opc = X86::CMOVE32rr; break;
895 case MVT::f64: Opc = X86::FCMOVE; break;
898 // FIXME: CMP R, 0 -> TEST R, R
899 EmitCMP(Cond.getOperand(0), Cond.getOperand(1));
// Operands swapped because CMOVcc moves the second operand when the
// condition holds (NOTE(review): elided lines around here — confirm which
// path this swap belongs to in the full source).
900 std::swap(RTrue, RFalse);
902 BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
// EmitCMP - Emit a compare of LHS against RHS, setting EFLAGS.  Prefers
// memory-immediate (CMPmi) and memory-register (CMPmr) forms when the LHS is
// a foldable load, register-immediate when RHS is constant, and falls back
// to register-register (FUCOMIr for f64).
905 void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) {
907 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
// Constant RHS: try to fold the LHS load into a CMP mem, imm.
909 if (isFoldableLoad(LHS)) {
910 switch (RHS.getValueType()) {
913 case MVT::i8: Opc = X86::CMP8mi; break;
914 case MVT::i16: Opc = X86::CMP16mi; break;
915 case MVT::i32: Opc = X86::CMP32mi; break;
919 EmitFoldedLoad(LHS, AM);
920 addFullAddress(BuildMI(BB, Opc, 5), AM).addImm(CN->getValue());
// Otherwise CMP reg, imm.
925 switch (RHS.getValueType()) {
928 case MVT::i8: Opc = X86::CMP8ri; break;
929 case MVT::i16: Opc = X86::CMP16ri; break;
930 case MVT::i32: Opc = X86::CMP32ri; break;
933 unsigned Tmp1 = SelectExpr(LHS);
934 BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
// Non-constant RHS with foldable LHS load: CMP mem, reg.
940 if (isFoldableLoad(LHS)) {
941 switch (RHS.getValueType()) {
944 case MVT::i8: Opc = X86::CMP8mr; break;
945 case MVT::i16: Opc = X86::CMP16mr; break;
946 case MVT::i32: Opc = X86::CMP32mr; break;
950 EmitFoldedLoad(LHS, AM);
951 unsigned Reg = SelectExpr(RHS);
952 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(Reg);
// General case: CMP reg, reg (FUCOMIr for doubles).
957 switch (LHS.getValueType()) {
958 default: assert(0 && "Cannot compare this value!");
960 case MVT::i8: Opc = X86::CMP8rr; break;
961 case MVT::i16: Opc = X86::CMP16rr; break;
962 case MVT::i32: Opc = X86::CMP32rr; break;
964 case MVT::f64: Opc = X86::FUCOMIr; break;
// Evaluate the higher-pressure operand first to minimize live registers.
967 if (getRegPressure(LHS) > getRegPressure(RHS)) {
968 Tmp1 = SelectExpr(LHS);
969 Tmp2 = SelectExpr(RHS);
971 Tmp2 = SelectExpr(RHS);
972 Tmp1 = SelectExpr(LHS);
974 BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
977 /// isFoldableLoad - Return true if this is a load instruction that can safely
978 /// be folded into an operation that uses it.
// Requirements: it is an ISD::LOAD (not from a constant pool), it has not
// already been emitted (not in ExprMap/LoweredTokens), and its loaded value
// has exactly one use — otherwise it must live in a register.
979 bool ISel::isFoldableLoad(SDOperand Op) {
980 if (Op.getOpcode() != ISD::LOAD ||
981 // FIXME: currently can't fold constant pool indexes.
982 isa<ConstantPoolSDNode>(Op.getOperand(1)))
985 // If this load has already been emitted, we clearly can't fold it.
986 assert(Op.ResNo == 0 && "Not a use of the value of the load?");
987 if (ExprMap.count(Op.getValue(1))) return false;
988 assert(!ExprMap.count(Op.getValue(0)) && "Value in map but not token chain?");
989 assert(!LoweredTokens.count(Op.getValue(1)) &&
990 "Token lowered but value not in map?");
992 // Finally, there can only be one use of its value.
993 return Op.Val->hasNUsesOfValue(1, 0);
996 /// EmitFoldedLoad - Ensure that the arguments of the load are code generated,
997 /// and compute the address being loaded into AM.
// Emits the chain and address in register-pressure order, then marks the
// load's token-chain result as lowered so it is never emitted again.
998 void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) {
999 SDOperand Chain = Op.getOperand(0);
1000 SDOperand Address = Op.getOperand(1);
1001 if (getRegPressure(Chain) > getRegPressure(Address)) {
1003 SelectAddress(Address, AM);
1005 SelectAddress(Address, AM);
1009 // The chain for this load is now lowered.
1010 assert(ExprMap.count(SDOperand(Op.Val, 1)) == 0 &&
1011 "Load emitted more than once?");
// Dummy vreg number 1 marks the chain result as "already produced".
1012 ExprMap[SDOperand(Op.Val, 1)] = 1;
1013 if (!LoweredTokens.insert(Op.getValue(1)).second)
1014 assert(0 && "Load emitted more than once!");
// SelectExpr - Emit machine instructions for the expression rooted at N and
// return the virtual register number holding its value.
// NOTE(review): this excerpt is missing many source lines (the embedded
// original line numbers jump; several 'else' branches, 'break;'s, case
// labels and closing braces are absent).  Comments below describe only what
// the visible code shows; restore the full function before compiling.
1017 unsigned ISel::SelectExpr(SDOperand N) {
1019 unsigned Tmp1, Tmp2, Tmp3;
1021 SDNode *Node = N.Val;
// CopyFromReg needs no code: the value already lives in the named register.
1024 if (Node->getOpcode() == ISD::CopyFromReg)
1025 // Just use the specified register as our input.
1026 return dyn_cast<RegSDNode>(Node)->getReg();
// If this expression was already selected, reuse the register recorded in
// ExprMap.
1028 unsigned &Reg = ExprMap[N];
1029 if (Reg) return Reg;
1031 if (N.getOpcode() != ISD::CALL)
1032 Reg = Result = (N.getValueType() != MVT::Other) ?
1033 MakeReg(N.getValueType()) : 1;
1035 // If this is a call instruction, make sure to prepare ALL of the result
1036 // values as well as the chain.
1037 if (Node->getNumValues() == 1)
1038 Reg = Result = 1; // Void call, just a chain.
1040 Result = MakeReg(Node->getValueType(0));
1041 ExprMap[N.getValue(0)] = Result;
1042 for (unsigned i = 1, e = N.Val->getNumValues()-1; i != e; ++i)
1043 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
1044 ExprMap[SDOperand(Node, Node->getNumValues()-1)] = 1;
1048 switch (N.getOpcode()) {
1051 assert(0 && "Node not handled!\n");
// Frame indices and constant-pool addresses are materialized with LEA.
1052 case ISD::FrameIndex:
1053 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
1054 addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
1056 case ISD::ConstantPool:
1057 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
1058 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
// FP constants: +/-0.0 and +/-1.0 load via FLD0/FLD1; negative values are
// built by loading the positive constant into a temp and flipping the sign
// with FCHS.
1060 case ISD::ConstantFP:
1061 ContainsFPCode = true;
1062 Tmp1 = Result; // Intermediate Register
1063 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
1064 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
1065 Tmp1 = MakeReg(MVT::f64);
1067 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
1068 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
1069 BuildMI(BB, X86::FLD0, 0, Tmp1);
1070 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
1071 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
1072 BuildMI(BB, X86::FLD1, 0, Tmp1);
1074 assert(0 && "Unexpected constant!");
1076 BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
// Integer constants: MOVri of the matching width.
1079 switch (N.getValueType()) {
1080 default: assert(0 && "Cannot use constants of this type!");
1082 case MVT::i8: Opc = X86::MOV8ri; break;
1083 case MVT::i16: Opc = X86::MOV16ri; break;
1084 case MVT::i32: Opc = X86::MOV32ri; break;
1086 BuildMI(BB, Opc, 1,Result).addImm(cast<ConstantSDNode>(N)->getValue());
1088 case ISD::GlobalAddress: {
1089 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
1090 BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
1093 case ISD::ExternalSymbol: {
1094 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
1095 BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
// f32->f64 extension is just an FP register move.
1098 case ISD::FP_EXTEND:
1099 Tmp1 = SelectExpr(N.getOperand(0));
1100 BuildMI(BB, X86::FpMOV, 1, Result).addReg(Tmp1);
// Zero extension: the opcode tables are indexed by SrcIs16 + DestIs16*2.
1102 case ISD::ZERO_EXTEND: {
1103 int DestIs16 = N.getValueType() == MVT::i16;
1104 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
1106 // FIXME: This hack is here for zero extension casts from bool to i8. This
1107 // would not be needed if bools were promoted by Legalize.
1108 if (N.getValueType() == MVT::i8) {
1109 Tmp1 = SelectExpr(N.getOperand(0));
1110 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
// A foldable load source becomes a MOVZX-from-memory.
1114 if (isFoldableLoad(N.getOperand(0))) {
1115 static const unsigned Opc[3] = {
1116 X86::MOVZX32rm8, X86::MOVZX32rm16, X86::MOVZX16rm8
1120 EmitFoldedLoad(N.getOperand(0), AM);
1121 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
1126 static const unsigned Opc[3] = {
1127 X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
1129 Tmp1 = SelectExpr(N.getOperand(0));
1130 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
// Sign extension mirrors ZERO_EXTEND using MOVSX opcode tables.
1133 case ISD::SIGN_EXTEND: {
1134 int DestIs16 = N.getValueType() == MVT::i16;
1135 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
1137 // FIXME: Legalize should promote bools to i8!
1138 assert(N.getOperand(0).getValueType() != MVT::i1 &&
1139 "Sign extend from bool not implemented!");
1141 if (isFoldableLoad(N.getOperand(0))) {
1142 static const unsigned Opc[3] = {
1143 X86::MOVSX32rm8, X86::MOVSX32rm16, X86::MOVSX16rm8
1147 EmitFoldedLoad(N.getOperand(0), AM);
1148 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
1152 static const unsigned Opc[3] = {
1153 X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
1155 Tmp1 = SelectExpr(N.getOperand(0));
1156 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
// NOTE(review): the 'case ISD::TRUNCATE:' label appears to be elided here.
1160 // Fold TRUNCATE (LOAD P) into a smaller load from P.
1161 if (isFoldableLoad(N.getOperand(0))) {
1162 switch (N.getValueType()) {
1163 default: assert(0 && "Unknown truncate!");
1165 case MVT::i8: Opc = X86::MOV8rm; break;
1166 case MVT::i16: Opc = X86::MOV16rm; break;
1169 EmitFoldedLoad(N.getOperand(0), AM);
1170 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
1174 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
1175 // a move out of AX or AL.
1176 switch (N.getOperand(0).getValueType()) {
1177 default: assert(0 && "Unknown truncate!");
1178 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
1179 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
1180 case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
1182 Tmp1 = SelectExpr(N.getOperand(0));
1183 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
1185 switch (N.getValueType()) {
1186 default: assert(0 && "Unknown truncate!");
1188 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
1189 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
1191 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
// NOTE(review): the 'case ISD::FP_ROUND:' label appears to be elided here.
1195 // Truncate from double to float by storing to memory as float,
1196 // then reading it back into a register.
1198 // Create as stack slot to use.
1199 // FIXME: This should automatically be made by the Legalizer!
1200 Tmp1 = TLI.getTargetData().getFloatAlignment();
1201 Tmp2 = BB->getParent()->getFrameInfo()->CreateStackObject(4, Tmp1);
1203 // Codegen the input.
1204 Tmp1 = SelectExpr(N.getOperand(0));
1206 // Emit the store, then the reload.
1207 addFrameReference(BuildMI(BB, X86::FST32m, 5), Tmp2).addReg(Tmp1);
1208 addFrameReference(BuildMI(BB, X86::FLD32m, 5, Result), Tmp2);
1211 case ISD::SINT_TO_FP:
1212 case ISD::UINT_TO_FP: {
1213 // FIXME: Most of this grunt work should be done by legalize!
1214 ContainsFPCode = true;
1216 // Promote the integer to a type supported by FLD. We do this because there
1217 // are no unsigned FLD instructions, so we must promote an unsigned value to
1218 // a larger signed value, then use FLD on the larger value.
1220 MVT::ValueType PromoteType = MVT::Other;
1221 MVT::ValueType SrcTy = N.getOperand(0).getValueType();
1222 unsigned PromoteOpcode = 0;
1223 unsigned RealDestReg = Result;
1227 // We don't have the facilities for directly loading byte sized data from
1228 // memory (even signed). Promote it to 16 bits.
1229 PromoteType = MVT::i16;
1230 PromoteOpcode = Node->getOpcode() == ISD::SINT_TO_FP ?
1231 X86::MOVSX16rr8 : X86::MOVZX16rr8;
1234 if (Node->getOpcode() == ISD::UINT_TO_FP) {
1235 PromoteType = MVT::i32;
1236 PromoteOpcode = X86::MOVZX32rr16;
1240 // Don't fild into the real destination.
1241 if (Node->getOpcode() == ISD::UINT_TO_FP)
1242 Result = MakeReg(Node->getValueType(0));
1246 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
1248 if (PromoteType != MVT::Other) {
1249 Tmp2 = MakeReg(PromoteType);
1250 BuildMI(BB, PromoteOpcode, 1, Tmp2).addReg(Tmp1);
1251 SrcTy = PromoteType;
1255 // Spill the integer to memory and reload it from there.
1256 unsigned Size = MVT::getSizeInBits(SrcTy)/8;
1257 MachineFunction *F = BB->getParent();
1258 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
1262 assert(0 && "Cast ulong to FP not implemented yet!");
1263 // FIXME: this won't work for cast [u]long to FP
1264 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1265 FrameIdx).addReg(Tmp1);
1266 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1267 FrameIdx, 4).addReg(Tmp1+1);
1268 addFrameReference(BuildMI(BB, X86::FILD64m, 5, Result), FrameIdx);
1271 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1272 FrameIdx).addReg(Tmp1);
1273 addFrameReference(BuildMI(BB, X86::FILD32m, 5, Result), FrameIdx);
1276 addFrameReference(BuildMI(BB, X86::MOV16mr, 5),
1277 FrameIdx).addReg(Tmp1);
1278 addFrameReference(BuildMI(BB, X86::FILD16m, 5, Result), FrameIdx);
1280 default: break; // No promotion required.
// uint->FP fixups: add a compensating offset when the source's top bit was
// set, selected dynamically via the sign bit.
1283 if (Node->getOpcode() == ISD::UINT_TO_FP && Result != RealDestReg) {
1284 // If this is a cast from uint -> double, we need to be careful when if
1285 // the "sign" bit is set. If so, we don't want to make a negative number,
1286 // we want to make a positive number. Emit code to add an offset if the
1289 // Compute whether the sign bit is set by shifting the reg right 31 bits.
1290 unsigned IsNeg = MakeReg(MVT::i32);
1291 BuildMI(BB, X86::SHR32ri, 2, IsNeg).addReg(Tmp1).addImm(31);
1293 // Create a CP value that has the offset in one word and 0 in the other.
1294 static ConstantInt *TheOffset = ConstantUInt::get(Type::ULongTy,
1295 0x4f80000000000000ULL);
1296 unsigned CPI = F->getConstantPool()->getConstantPoolIndex(TheOffset);
1297 BuildMI(BB, X86::FADD32m, 5, RealDestReg).addReg(Result)
1298 .addConstantPoolIndex(CPI).addZImm(4).addReg(IsNeg).addSImm(0);
1300 } else if (Node->getOpcode() == ISD::UINT_TO_FP && SrcTy == MVT::i64) {
1301 // We need special handling for unsigned 64-bit integer sources. If the
1302 // input number has the "sign bit" set, then we loaded it incorrectly as a
1303 // negative 64-bit number. In this case, add an offset value.
1305 // Emit a test instruction to see if the dynamic input value was signed.
1306 BuildMI(BB, X86::TEST32rr, 2).addReg(Tmp1+1).addReg(Tmp1+1);
1308 // If the sign bit is set, get a pointer to an offset, otherwise get a
1309 // pointer to a zero.
1310 MachineConstantPool *CP = F->getConstantPool();
1311 unsigned Zero = MakeReg(MVT::i32);
1312 Constant *Null = Constant::getNullValue(Type::UIntTy);
1313 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Zero),
1314 CP->getConstantPoolIndex(Null));
1315 unsigned Offset = MakeReg(MVT::i32);
1316 Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
1318 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Offset),
1319 CP->getConstantPoolIndex(OffsetCst));
1320 unsigned Addr = MakeReg(MVT::i32);
1321 BuildMI(BB, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);
1323 // Load the constant for an add. FIXME: this could make an 'fadd' that
1324 // reads directly from memory, but we don't support these yet.
1325 unsigned ConstReg = MakeReg(MVT::f64);
1326 addDirectMem(BuildMI(BB, X86::FLD32m, 4, ConstReg), Addr);
1328 BuildMI(BB, X86::FpADD, 2, RealDestReg).addReg(ConstReg).addReg(Result);
1332 case ISD::FP_TO_SINT:
1333 case ISD::FP_TO_UINT: {
1334 // FIXME: Most of this grunt work should be done by legalize!
1335 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
1337 // Change the floating point control register to use "round towards zero"
1338 // mode when truncating to an integer value.
1340 MachineFunction *F = BB->getParent();
1341 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
1342 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
1344 // Load the old value of the high byte of the control word...
1345 unsigned HighPartOfCW = MakeReg(MVT::i8);
1346 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, HighPartOfCW),
1349 // Set the high part to be round to zero...
1350 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
1351 CWFrameIdx, 1).addImm(12);
1353 // Reload the modified control word now...
1354 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1356 // Restore the memory image of control word to original value
1357 addFrameReference(BuildMI(BB, X86::MOV8mr, 5),
1358 CWFrameIdx, 1).addReg(HighPartOfCW);
1360 // We don't have the facilities for directly storing byte sized data to
1361 // memory. Promote it to 16 bits. We also must promote unsigned values to
1362 // larger classes because we only have signed FP stores.
1363 MVT::ValueType StoreClass = Node->getValueType(0);
1364 if (StoreClass == MVT::i8 || Node->getOpcode() == ISD::FP_TO_UINT)
1365 switch (StoreClass) {
1366 case MVT::i8: StoreClass = MVT::i16; break;
1367 case MVT::i16: StoreClass = MVT::i32; break;
1368 case MVT::i32: StoreClass = MVT::i64; break;
1369 // The following treatment of cLong may not be perfectly right,
1370 // but it survives chains of casts of the form
1371 // double->ulong->double.
1372 case MVT::i64: StoreClass = MVT::i64; break;
1373 default: assert(0 && "Unknown store class!");
1376 // Spill the integer to memory and reload it from there.
1377 unsigned Size = MVT::getSizeInBits(StoreClass)/8;
1378 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
1380 switch (StoreClass) {
1381 default: assert(0 && "Unknown store class!");
1383 addFrameReference(BuildMI(BB, X86::FIST16m, 5), FrameIdx).addReg(Tmp1);
1386 addFrameReference(BuildMI(BB, X86::FIST32m, 5), FrameIdx).addReg(Tmp1);
1389 addFrameReference(BuildMI(BB, X86::FISTP64m, 5), FrameIdx).addReg(Tmp1);
1393 switch (Node->getValueType(0)) {
1395 assert(0 && "Unknown integer type!");
1397 // FIXME: this isn't gunna work.
1398 assert(0 && "Cast FP to long not implemented yet!");
1399 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
1400 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result+1), FrameIdx, 4);
1402 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
1405 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Result), FrameIdx);
1408 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Result), FrameIdx);
1412 // Reload the original control word now.
1413 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
// NOTE(review): the 'case ISD::ADD:' label appears to be elided here.
1417 Op0 = N.getOperand(0);
1418 Op1 = N.getOperand(1);
// ADD is commutative: canonicalize a foldable load into Op1.
1420 if (isFoldableLoad(Op0))
1421 std::swap(Op0, Op1);
1423 if (isFoldableLoad(Op1)) {
1424 switch (N.getValueType()) {
1425 default: assert(0 && "Cannot add this type!");
1427 case MVT::i8: Opc = X86::ADD8rm; break;
1428 case MVT::i16: Opc = X86::ADD16rm; break;
1429 case MVT::i32: Opc = X86::ADD32rm; break;
1430 case MVT::f32: Opc = X86::FADD32m; break;
1431 case MVT::f64: Opc = X86::FADD64m; break;
1434 EmitFoldedLoad(Op1, AM);
1435 Tmp1 = SelectExpr(Op0);
1436 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
1440 // See if we can codegen this as an LEA to fold operations together.
1441 if (N.getValueType() == MVT::i32) {
1443 if (!SelectAddress(Op0, AM) && !SelectAddress(Op1, AM)) {
1444 // If this is not just an add, emit the LEA. For a simple add (like
1445 // reg+reg or reg+imm), we just emit an add. It might be a good idea to
1446 // leave this as LEA, then peephole it to 'ADD' after two address elim
1448 if (AM.Scale != 1 || AM.BaseType == X86AddressMode::FrameIndexBase ||
1449 AM.GV || (AM.Base.Reg && AM.IndexReg && AM.Disp)) {
1450 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
// add X,1 / add X,-1 become INC/DEC; other immediates use ADDri forms.
1456 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
1458 if (CN->getValue() == 1) { // add X, 1 -> inc X
1459 switch (N.getValueType()) {
1460 default: assert(0 && "Cannot integer add this type!");
1461 case MVT::i8: Opc = X86::INC8r; break;
1462 case MVT::i16: Opc = X86::INC16r; break;
1463 case MVT::i32: Opc = X86::INC32r; break;
1465 } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
1466 switch (N.getValueType()) {
1467 default: assert(0 && "Cannot integer add this type!");
1468 case MVT::i8: Opc = X86::DEC8r; break;
1469 case MVT::i16: Opc = X86::DEC16r; break;
1470 case MVT::i32: Opc = X86::DEC32r; break;
1475 Tmp1 = SelectExpr(Op0);
1476 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1480 switch (N.getValueType()) {
1481 default: assert(0 && "Cannot add this type!");
1482 case MVT::i8: Opc = X86::ADD8ri; break;
1483 case MVT::i16: Opc = X86::ADD16ri; break;
1484 case MVT::i32: Opc = X86::ADD32ri; break;
1487 Tmp1 = SelectExpr(Op0);
1488 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
// General reg/reg add; select the higher-register-pressure operand first.
1493 switch (N.getValueType()) {
1494 default: assert(0 && "Cannot add this type!");
1495 case MVT::i8: Opc = X86::ADD8rr; break;
1496 case MVT::i16: Opc = X86::ADD16rr; break;
1497 case MVT::i32: Opc = X86::ADD32rr; break;
1499 case MVT::f64: Opc = X86::FpADD; break;
1502 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1503 Tmp1 = SelectExpr(Op0);
1504 Tmp2 = SelectExpr(Op1);
1506 Tmp2 = SelectExpr(Op1);
1507 Tmp1 = SelectExpr(Op0);
1510 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
// Opcode tables for SUB/MUL/AND/OR/XOR: three rows of five entries
// (i8,i16,i32,f32,f64) — reg/imm at [0..4], reg/mem at [5..9], reg/reg at
// [10..14].  A zero entry means that form does not exist for the type.
1517 static const unsigned SUBTab[] = {
1518 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
1519 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FSUB32m, X86::FSUB64m,
1520 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB , X86::FpSUB,
1522 static const unsigned MULTab[] = {
1523 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
1524 0, X86::IMUL16rm , X86::IMUL32rm, X86::FMUL32m, X86::FMUL64m,
1525 0, X86::IMUL16rr , X86::IMUL32rr, X86::FpMUL , X86::FpMUL,
1527 static const unsigned ANDTab[] = {
1528 X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, 0,
1529 X86::AND8rm, X86::AND16rm, X86::AND32rm, 0, 0,
1530 X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
1532 static const unsigned ORTab[] = {
1533 X86::OR8ri, X86::OR16ri, X86::OR32ri, 0, 0,
1534 X86::OR8rm, X86::OR16rm, X86::OR32rm, 0, 0,
1535 X86::OR8rr, X86::OR16rr, X86::OR32rr, 0, 0,
1537 static const unsigned XORTab[] = {
1538 X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, 0,
1539 X86::XOR8rm, X86::XOR16rm, X86::XOR32rm, 0, 0,
1540 X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, 0,
1543 Op0 = Node->getOperand(0);
1544 Op1 = Node->getOperand(1);
// 0 - X is emitted as NEG X.
1546 if (Node->getOpcode() == ISD::SUB && MVT::isInteger(N.getValueType()))
1547 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
1548 if (CN->isNullValue()) { // 0 - N -> neg N
1549 switch (N.getValueType()) {
1550 default: assert(0 && "Cannot sub this type!");
1552 case MVT::i8: Opc = X86::NEG8r; break;
1553 case MVT::i16: Opc = X86::NEG16r; break;
1554 case MVT::i32: Opc = X86::NEG32r; break;
1556 Tmp1 = SelectExpr(N.getOperand(1));
1557 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
// X ^ -1 is emitted as NOT X.
1561 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
1562 if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) {
1563 switch (N.getValueType()) {
1564 default: assert(0 && "Cannot add this type!");
1566 case MVT::i8: Opc = X86::NOT8r; break;
1567 case MVT::i16: Opc = X86::NOT16r; break;
1568 case MVT::i32: Opc = X86::NOT32r; break;
1570 Tmp1 = SelectExpr(Op0);
1571 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
// Reg/imm forms: index 0..2 selects the i8/i16/i32 column of the tables.
1575 switch (N.getValueType()) {
1576 default: assert(0 && "Cannot xor this type!");
1578 case MVT::i8: Opc = 0; break;
1579 case MVT::i16: Opc = 1; break;
1580 case MVT::i32: Opc = 2; break;
1582 switch (Node->getOpcode()) {
1583 default: assert(0 && "Unreachable!");
1584 case ISD::SUB: Opc = SUBTab[Opc]; break;
1585 case ISD::MUL: Opc = MULTab[Opc]; break;
1586 case ISD::AND: Opc = ANDTab[Opc]; break;
1587 case ISD::OR: Opc = ORTab[Opc]; break;
1588 case ISD::XOR: Opc = XORTab[Opc]; break;
1590 if (Opc) { // Can't fold MUL:i8 R, imm
1591 Tmp1 = SelectExpr(Op0);
1592 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
// A load on the left folds by swapping for commutative ops, or via the FP
// 'reverse subtract' memory forms for SUB.
1597 if (isFoldableLoad(Op0))
1598 if (Node->getOpcode() != ISD::SUB) {
1599 std::swap(Op0, Op1);
1601 // Emit 'reverse' subract, with a memory operand.
1602 switch (N.getValueType()) {
1603 default: Opc = 0; break;
1604 case MVT::f32: Opc = X86::FSUBR32m; break;
1605 case MVT::f64: Opc = X86::FSUBR64m; break;
1609 EmitFoldedLoad(Op0, AM);
1610 Tmp1 = SelectExpr(Op1);
1611 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
// Reg/mem forms: index 5..9 selects the reg/mem row of the tables.
1616 if (isFoldableLoad(Op1)) {
1617 switch (N.getValueType()) {
1618 default: assert(0 && "Cannot operate on this type!");
1620 case MVT::i8: Opc = 5; break;
1621 case MVT::i16: Opc = 6; break;
1622 case MVT::i32: Opc = 7; break;
1623 case MVT::f32: Opc = 8; break;
1624 case MVT::f64: Opc = 9; break;
1626 switch (Node->getOpcode()) {
1627 default: assert(0 && "Unreachable!");
1628 case ISD::SUB: Opc = SUBTab[Opc]; break;
1629 case ISD::MUL: Opc = MULTab[Opc]; break;
1630 case ISD::AND: Opc = ANDTab[Opc]; break;
1631 case ISD::OR: Opc = ORTab[Opc]; break;
1632 case ISD::XOR: Opc = XORTab[Opc]; break;
1636 EmitFoldedLoad(Op1, AM);
1637 Tmp1 = SelectExpr(Op0);
1639 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
// i8 multiply has no table entry: the MUL instruction implicitly uses AL.
1641 assert(Node->getOpcode() == ISD::MUL &&
1642 N.getValueType() == MVT::i8 && "Unexpected situation!");
1643 // Must use the MUL instruction, which forces use of AL.
1644 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
1645 addFullAddress(BuildMI(BB, X86::MUL8m, 1), AM);
1646 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
// Reg/reg forms: index 10..14 selects the reg/reg row of the tables.
1651 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1652 Tmp1 = SelectExpr(Op0);
1653 Tmp2 = SelectExpr(Op1);
1655 Tmp2 = SelectExpr(Op1);
1656 Tmp1 = SelectExpr(Op0);
1659 switch (N.getValueType()) {
1660 default: assert(0 && "Cannot add this type!");
1662 case MVT::i8: Opc = 10; break;
1663 case MVT::i16: Opc = 11; break;
1664 case MVT::i32: Opc = 12; break;
1665 case MVT::f32: Opc = 13; break;
1666 case MVT::f64: Opc = 14; break;
1668 switch (Node->getOpcode()) {
1669 default: assert(0 && "Unreachable!");
1670 case ISD::SUB: Opc = SUBTab[Opc]; break;
1671 case ISD::MUL: Opc = MULTab[Opc]; break;
1672 case ISD::AND: Opc = ANDTab[Opc]; break;
1673 case ISD::OR: Opc = ORTab[Opc]; break;
1674 case ISD::XOR: Opc = XORTab[Opc]; break;
1677 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1679 assert(Node->getOpcode() == ISD::MUL &&
1680 N.getValueType() == MVT::i8 && "Unexpected situation!");
1681 // Must use the MUL instruction, which forces use of AL.
1682 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
1683 BuildMI(BB, X86::MUL8r, 1).addReg(Tmp2);
1684 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
// NOTE(review): the 'case ISD::SELECT:' label appears to be elided here.
1689 if (N.getValueType() != MVT::i1 && N.getValueType() != MVT::i8) {
1690 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
1691 Tmp2 = SelectExpr(N.getOperand(1));
1692 Tmp3 = SelectExpr(N.getOperand(2));
1694 Tmp3 = SelectExpr(N.getOperand(2));
1695 Tmp2 = SelectExpr(N.getOperand(1));
1697 EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result);
1700 // FIXME: This should not be implemented here, it should be in the generic
// i1/i8 selects: widen both arms to i16, select, then copy the low byte out
// through AX/AL.
1702 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
1703 Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1705 Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1708 Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1710 Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1713 unsigned TmpReg = MakeReg(MVT::i16);
1714 EmitSelectCC(N.getOperand(0), MVT::i16, Tmp2, Tmp3, TmpReg);
1715 // FIXME: need subregs to do better than this!
1716 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(TmpReg);
1717 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
// NOTE(review): the DIV/REM case labels appear to be elided here.
// Signed divide by a constant power of two is strength-reduced to a
// shift-based sequence instead of IDIV.
1725 if (N.getOpcode() == ISD::SDIV)
1726 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1727 // FIXME: These special cases should be handled by the lowering impl!
1728 unsigned RHS = CN->getValue();
1734 if (RHS && (RHS & (RHS-1)) == 0) { // Signed division by power of 2?
1735 unsigned Log = log2(RHS);
1736 unsigned TmpReg = MakeReg(N.getValueType());
1737 unsigned SAROpc, SHROpc, ADDOpc, NEGOpc;
1738 switch (N.getValueType()) {
1739 default: assert("Unknown type to signed divide!");
1741 SAROpc = X86::SAR8ri;
1742 SHROpc = X86::SHR8ri;
1743 ADDOpc = X86::ADD8rr;
1744 NEGOpc = X86::NEG8r;
1747 SAROpc = X86::SAR16ri;
1748 SHROpc = X86::SHR16ri;
1749 ADDOpc = X86::ADD16rr;
1750 NEGOpc = X86::NEG16r;
1753 SAROpc = X86::SAR32ri;
1754 SHROpc = X86::SHR32ri;
1755 ADDOpc = X86::ADD32rr;
1756 NEGOpc = X86::NEG32r;
// SAR/SHR/ADD bias the dividend before the final SAR so the division rounds
// toward zero; NEG flips the sign for a negative divisor.
1759 Tmp1 = SelectExpr(N.getOperand(0));
1760 BuildMI(BB, SAROpc, 2, TmpReg).addReg(Tmp1).addImm(Log-1);
1761 unsigned TmpReg2 = MakeReg(N.getValueType());
1762 BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(32-Log);
1763 unsigned TmpReg3 = MakeReg(N.getValueType());
1764 BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
1766 unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
1767 BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
1769 BuildMI(BB, NEGOpc, 1, Result).addReg(TmpReg4);
// General divide/remainder path: set up Lo/Hi registers and use DIV/IDIV.
1774 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1775 Tmp1 = SelectExpr(N.getOperand(0));
1776 Tmp2 = SelectExpr(N.getOperand(1));
1778 Tmp2 = SelectExpr(N.getOperand(1));
1779 Tmp1 = SelectExpr(N.getOperand(0));
1782 bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
1783 bool isDiv = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
1784 unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
1785 switch (N.getValueType()) {
1786 default: assert(0 && "Cannot sdiv this type!");
1788 DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
1791 MovOpcode = X86::MOV8rr;
1792 ClrOpcode = X86::MOV8ri;
1793 SExtOpcode = X86::CBW;
1796 DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
1799 MovOpcode = X86::MOV16rr;
1800 ClrOpcode = X86::MOV16ri;
1801 SExtOpcode = X86::CWD;
1804 DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
1807 MovOpcode = X86::MOV32rr;
1808 ClrOpcode = X86::MOV32ri;
1809 SExtOpcode = X86::CDQ;
1811 case MVT::i64: assert(0 && "FIXME: implement i64 DIV/REM libcalls!");
1814 if (N.getOpcode() == ISD::SDIV)
1815 BuildMI(BB, X86::FpDIV, 2, Result).addReg(Tmp1).addReg(Tmp2);
1817 assert(0 && "FIXME: Emit frem libcall to fmod!");
1821 // Set up the low part.
1822 BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);
1825 // Sign extend the low part into the high part.
1826 BuildMI(BB, SExtOpcode, 0);
1828 // Zero out the high part, effectively zero extending the input.
1829 BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
1832 // Emit the DIV/IDIV instruction.
1833 BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
1835 // Get the result of the divide or rem.
1836 BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
// NOTE(review): the 'case ISD::SHL:' label appears to be elided here.
1841 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1842 if (CN->getValue() == 1) { // X = SHL Y, 1 -> X = ADD Y, Y
1843 switch (N.getValueType()) {
1844 default: assert(0 && "Cannot shift this type!");
1845 case MVT::i8: Opc = X86::ADD8rr; break;
1846 case MVT::i16: Opc = X86::ADD16rr; break;
1847 case MVT::i32: Opc = X86::ADD32rr; break;
1849 Tmp1 = SelectExpr(N.getOperand(0));
1850 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp1);
1854 switch (N.getValueType()) {
1855 default: assert(0 && "Cannot shift this type!");
1856 case MVT::i8: Opc = X86::SHL8ri; break;
1857 case MVT::i16: Opc = X86::SHL16ri; break;
1858 case MVT::i32: Opc = X86::SHL32ri; break;
1860 Tmp1 = SelectExpr(N.getOperand(0));
1861 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
// Variable shift amounts are routed through CL.
1865 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1866 Tmp1 = SelectExpr(N.getOperand(0));
1867 Tmp2 = SelectExpr(N.getOperand(1));
1869 Tmp2 = SelectExpr(N.getOperand(1));
1870 Tmp1 = SelectExpr(N.getOperand(0));
1873 switch (N.getValueType()) {
1874 default: assert(0 && "Cannot shift this type!");
1875 case MVT::i8 : Opc = X86::SHL8rCL; break;
1876 case MVT::i16: Opc = X86::SHL16rCL; break;
1877 case MVT::i32: Opc = X86::SHL32rCL; break;
1879 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1880 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
// NOTE(review): the 'case ISD::SRL:' label appears to be elided here.
1883 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1884 switch (N.getValueType()) {
1885 default: assert(0 && "Cannot shift this type!");
1886 case MVT::i8: Opc = X86::SHR8ri; break;
1887 case MVT::i16: Opc = X86::SHR16ri; break;
1888 case MVT::i32: Opc = X86::SHR32ri; break;
1890 Tmp1 = SelectExpr(N.getOperand(0));
1891 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1895 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1896 Tmp1 = SelectExpr(N.getOperand(0));
1897 Tmp2 = SelectExpr(N.getOperand(1));
1899 Tmp2 = SelectExpr(N.getOperand(1));
1900 Tmp1 = SelectExpr(N.getOperand(0));
1903 switch (N.getValueType()) {
1904 default: assert(0 && "Cannot shift this type!");
1905 case MVT::i8 : Opc = X86::SHR8rCL; break;
1906 case MVT::i16: Opc = X86::SHR16rCL; break;
1907 case MVT::i32: Opc = X86::SHR32rCL; break;
1909 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1910 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
// NOTE(review): the 'case ISD::SRA:' label appears to be elided here.
1913 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1914 switch (N.getValueType()) {
1915 default: assert(0 && "Cannot shift this type!");
1916 case MVT::i8: Opc = X86::SAR8ri; break;
1917 case MVT::i16: Opc = X86::SAR16ri; break;
1918 case MVT::i32: Opc = X86::SAR32ri; break;
1920 Tmp1 = SelectExpr(N.getOperand(0));
1921 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1925 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1926 Tmp1 = SelectExpr(N.getOperand(0));
1927 Tmp2 = SelectExpr(N.getOperand(1));
1929 Tmp2 = SelectExpr(N.getOperand(1));
1930 Tmp1 = SelectExpr(N.getOperand(0));
1933 switch (N.getValueType()) {
1934 default: assert(0 && "Cannot shift this type!");
1935 case MVT::i8 : Opc = X86::SAR8rCL; break;
1936 case MVT::i16: Opc = X86::SAR16rCL; break;
1937 case MVT::i32: Opc = X86::SAR32rCL; break;
1939 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1940 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
// NOTE(review): the 'case ISD::SETCC:' label appears to be elided here.
1944 EmitCMP(N.getOperand(0), N.getOperand(1));
1945 EmitSetCC(BB, Result, cast<SetCCSDNode>(N)->getCondition(),
1946 MVT::isFloatingPoint(N.getOperand(1).getValueType()));
// NOTE(review): the 'case ISD::LOAD:' label appears to be elided here.
1949 // Make sure we generate both values.
1951 ExprMap[N.getValue(1)] = 1; // Generate the token
1953 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1955 switch (Node->getValueType(0)) {
1956 default: assert(0 && "Cannot load this type!");
1958 case MVT::i8: Opc = X86::MOV8rm; break;
1959 case MVT::i16: Opc = X86::MOV16rm; break;
1960 case MVT::i32: Opc = X86::MOV32rm; break;
1961 case MVT::f32: Opc = X86::FLD32m; ContainsFPCode = true; break;
1962 case MVT::f64: Opc = X86::FLD64m; ContainsFPCode = true; break;
// Loads whose address is a constant-pool entry reference the pool directly.
1965 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
1966 Select(N.getOperand(0));
1967 addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
1971 SDOperand Chain = N.getOperand(0);
1972 SDOperand Address = N.getOperand(1);
1973 if (getRegPressure(Chain) > getRegPressure(Address)) {
1975 SelectAddress(Address, AM);
1977 SelectAddress(Address, AM);
1981 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
1985 case ISD::DYNAMIC_STACKALLOC:
1986 // Generate both result values.
1988 ExprMap[N.getValue(1)] = 1; // Generate the token
1990 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1992 // FIXME: We are currently ignoring the requested alignment for handling
1993 // greater than the stack alignment. This will need to be revisited at some
1994 // point. Align = N.getOperand(2);
1996 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
1997 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
1998 std::cerr << "Cannot allocate stack object with greater alignment than"
1999 << " the stack alignment yet!";
2003 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2004 Select(N.getOperand(0));
2005 BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
2006 .addImm(CN->getValue());
2008 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2009 Select(N.getOperand(0));
2010 Tmp1 = SelectExpr(N.getOperand(1));
2012 Tmp1 = SelectExpr(N.getOperand(1));
2013 Select(N.getOperand(0));
2016 // Subtract size from stack pointer, thereby allocating some space.
2017 BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
2020 // Put a pointer to the space into the result register, by copying the stack
2022 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
// NOTE(review): the 'case ISD::CALL:' label appears to be elided here.
2026 // The chain for this call is now lowered.
2027 LoweredTokens.insert(N.getValue(Node->getNumValues()-1));
// Direct calls to globals/external symbols use CALLpcrel32; anything else
// is an indirect CALL32r through a register.
2029 if (GlobalAddressSDNode *GASD =
2030 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
2031 Select(N.getOperand(0));
2032 BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
2033 } else if (ExternalSymbolSDNode *ESSDN =
2034 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
2035 Select(N.getOperand(0));
2036 BuildMI(BB, X86::CALLpcrel32,
2037 1).addExternalSymbol(ESSDN->getSymbol(), true);
2039 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2040 Select(N.getOperand(0));
2041 Tmp1 = SelectExpr(N.getOperand(1));
2043 Tmp1 = SelectExpr(N.getOperand(1));
2044 Select(N.getOperand(0));
2047 BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
// Copy the return value out of the fixed return registers.
2049 switch (Node->getValueType(0)) {
2050 default: assert(0 && "Unknown value type for call result!");
2051 case MVT::Other: return 1;
2054 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
2057 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
2060 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
2061 if (Node->getValueType(1) == MVT::i32)
2062 BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
2065 case MVT::f64: // Floating-point return values live in %ST(0)
2066 ContainsFPCode = true;
2067 BuildMI(BB, X86::FpGETRESULT, 1, Result);
2070 return Result+N.ResNo;
// Select - Emit x86 machine code for the chain/side-effecting node N.
// Dispatches on the SelectionDAG opcode and builds MachineInstrs into BB via
// BuildMI.  Register-valued operands are materialized with SelectExpr,
// addressing modes with SelectAddress, and chain predecessors are selected
// recursively with Select.  Where two independent subtrees must both be
// evaluated, the one with the higher estimated register pressure is done
// first to reduce live ranges.
// NOTE(review): this listing is a sampled excerpt; some else-branches,
// case labels and closing braces fall between the visible lines.
2076 void ISel::Select(SDOperand N) {
2077 unsigned Tmp1, Tmp2, Opc;
2079 // FIXME: Disable for our current expansion model!
// LoweredTokens de-duplicates chain nodes: each one is emitted at most once.
2080 if (/*!N->hasOneUse() &&*/ !LoweredTokens.insert(N).second)
2081 return; // Already selected.
2083 SDNode *Node = N.Val;
2085 switch (Node->getOpcode()) {
2087 Node->dump(); std::cerr << "\n";
2088 assert(0 && "Node not handled yet!");
2089 case ISD::EntryToken: return; // Noop
// TokenFactor: select every chain operand.  For two operands, pick the
// higher-pressure one first; for more, sort all operands by pressure and
// select in decreasing order.
2090 case ISD::TokenFactor:
2091 if (Node->getNumOperands() == 2) {
2093 getRegPressure(Node->getOperand(1))>getRegPressure(Node->getOperand(0));
2094 Select(Node->getOperand(OneFirst));
2095 Select(Node->getOperand(!OneFirst));
2097 std::vector<std::pair<unsigned, unsigned> > OpsP;
2098 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
2099 OpsP.push_back(std::make_pair(getRegPressure(Node->getOperand(i)), i));
2100 std::sort(OpsP.begin(), OpsP.end());
2101 std::reverse(OpsP.begin(), OpsP.end());
2102 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
2103 Select(Node->getOperand(OpsP[i].second));
// CopyToReg: evaluate chain (op 0) and value (op 1) in pressure order, then
// copy the value into the virtual/physical register named by the RegSDNode.
2106 case ISD::CopyToReg:
2107 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2108 Select(N.getOperand(0));
2109 Tmp1 = SelectExpr(N.getOperand(1));
// (elided else) lower-pressure chain: value first, then chain.
2111 Tmp1 = SelectExpr(N.getOperand(1));
2112 Select(N.getOperand(0));
2114 Tmp2 = cast<RegSDNode>(N)->getReg();
// Pick the register-to-register move opcode for the value's type; FP moves
// go through the pseudo FpMOV and mark the function as containing FP code.
2117 switch (N.getOperand(1).getValueType()) {
2118 default: assert(0 && "Invalid type for operation!");
2120 case MVT::i8: Opc = X86::MOV8rr; break;
2121 case MVT::i16: Opc = X86::MOV16rr; break;
2122 case MVT::i32: Opc = X86::MOV32rr; break;
2124 case MVT::f64: Opc = X86::FpMOV; ContainsFPCode = true; break;
2126 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
// Return: arity distinguishes void return, single value, and a two-register
// (i32,i32 pair, i.e. 64-bit) return.
2130 switch (N.getNumOperands()) {
2132 assert(0 && "Unknown return instruction!");
// Two-register return: low half in EAX, high half in EDX (x86 convention).
2134 assert(N.getOperand(1).getValueType() == MVT::i32 &&
2135 N.getOperand(2).getValueType() == MVT::i32 &&
2136 "Unknown two-register value!");
2137 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
2138 Tmp1 = SelectExpr(N.getOperand(1));
2139 Tmp2 = SelectExpr(N.getOperand(2));
2141 Tmp2 = SelectExpr(N.getOperand(2));
2142 Tmp1 = SelectExpr(N.getOperand(1));
2144 Select(N.getOperand(0));
2146 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
2147 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
2148 // Declare that EAX & EDX are live on exit.
2149 BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
// Single-value return: evaluate chain and value in pressure order.
2153 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2154 Select(N.getOperand(0));
2155 Tmp1 = SelectExpr(N.getOperand(1));
2157 Tmp1 = SelectExpr(N.getOperand(1));
2158 Select(N.getOperand(0));
2160 switch (N.getOperand(1).getValueType()) {
2161 default: assert(0 && "All other types should have been promoted!!");
// FP return value is placed on the x87 stack via the FpSETRESULT pseudo.
2163 BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
2164 // Declare that top-of-stack is live on exit
2165 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
// Integer return value goes in EAX.
2168 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
2169 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
// Void return: just select the chain.
2174 Select(N.getOperand(0));
2177 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
// Unconditional branch to the target MachineBasicBlock.
2180 Select(N.getOperand(0));
2181 MachineBasicBlock *Dest =
2182 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
2183 BuildMI(BB, X86::JMP, 1).addMBB(Dest);
// Conditional branch: operand 1 is the condition, operand 2 the target block.
2188 MachineBasicBlock *Dest =
2189 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
2191 // Try to fold a setcc into the branch. If this fails, emit a test/jne
// EmitBranchCC's nonzero return signals it could NOT fold the setcc, so we
// fall back to materializing the boolean and doing TEST/JNE.
2193 if (EmitBranchCC(Dest, N.getOperand(0), N.getOperand(1))) {
2194 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2195 Select(N.getOperand(0));
2196 Tmp1 = SelectExpr(N.getOperand(1));
2198 Tmp1 = SelectExpr(N.getOperand(1));
2199 Select(N.getOperand(0));
2201 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
2202 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
2209 case ISD::DYNAMIC_STACKALLOC:
// Store of a constant: use a mem,imm move when the type has one.
2215 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2217 switch (CN->getValueType(0)) {
2218 default: assert(0 && "Invalid type for operation!");
2220 case MVT::i8: Opc = X86::MOV8mi; break;
2221 case MVT::i16: Opc = X86::MOV16mi; break;
2222 case MVT::i32: Opc = X86::MOV32mi; break;
// NOTE(review): f64 breaks with Opc not set here — presumably handled by
// elided code between these lines; verify against the full source.
2224 case MVT::f64: break;
2227 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
2228 Select(N.getOperand(0));
2229 SelectAddress(N.getOperand(2), AM);
2231 SelectAddress(N.getOperand(2), AM);
2232 Select(N.getOperand(0));
// 4+1 operands: the 4-part x86 address plus the immediate.
2234 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
2239 // Check to see if this is a load/op/store combination.
2240 if (N.getOperand(1).Val->hasOneUse() &&
2241 isFoldableLoad(N.getOperand(0).getValue(0)) &&
2242 !MVT::isFloatingPoint(N.getOperand(0).getValue(0).getValueType())) {
2243 SDOperand TheLoad = N.getOperand(0).getValue(0);
2244 // Check to see if we are loading the same pointer that we're storing to.
2245 if (TheLoad.getOperand(1) == N.getOperand(2)) {
2246 // See if the stored value is a simple binary operator that uses the
2247 // load as one of its operands.
2248 SDOperand Op = N.getOperand(1);
2249 if (Op.Val->getNumOperands() == 2 &&
2250 (Op.getOperand(0) == TheLoad || Op.getOperand(1) == TheLoad)) {
2251 // Finally, check to see if this is one of the ops we can handle!
// Opcode tables, indexed below as [0..2] = 8/16/32-bit mem,imm forms and
// [3..5] = 8/16/32-bit mem,reg forms.  Shifts have no mem,reg form (the
// count would have to live in CL), hence the zero entries.
2252 static const unsigned ADDTAB[] = {
2253 X86::ADD8mi, X86::ADD16mi, X86::ADD32mi,
2254 X86::ADD8mr, X86::ADD16mr, X86::ADD32mr,
2256 static const unsigned SUBTAB[] = {
2257 X86::SUB8mi, X86::SUB16mi, X86::SUB32mi,
2258 X86::SUB8mr, X86::SUB16mr, X86::SUB32mr,
2260 static const unsigned ANDTAB[] = {
2261 X86::AND8mi, X86::AND16mi, X86::AND32mi,
2262 X86::AND8mr, X86::AND16mr, X86::AND32mr,
2264 static const unsigned ORTAB[] = {
2265 X86::OR8mi, X86::OR16mi, X86::OR32mi,
2266 X86::OR8mr, X86::OR16mr, X86::OR32mr,
2268 static const unsigned XORTAB[] = {
2269 X86::XOR8mi, X86::XOR16mi, X86::XOR32mi,
2270 X86::XOR8mr, X86::XOR16mr, X86::XOR32mr,
2272 static const unsigned SHLTAB[] = {
2273 X86::SHL8mi, X86::SHL16mi, X86::SHL32mi,
2274 /*Have to put the reg in CL*/0, 0, 0,
2276 static const unsigned SARTAB[] = {
2277 X86::SAR8mi, X86::SAR16mi, X86::SAR32mi,
2278 /*Have to put the reg in CL*/0, 0, 0,
2280 static const unsigned SHRTAB[] = {
2281 X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
2282 /*Have to put the reg in CL*/0, 0, 0,
// TabPtr stays null for unsupported opcodes (after printing a diagnostic);
// the guard that bails out in that case is in the elided lines — verify.
2285 const unsigned *TabPtr = 0;
2286 switch (Op.getOpcode()) {
2287 default: std::cerr << "CANNOT [mem] op= val: "; Op.Val->dump(); std::cerr << "\n"; break;
2288 case ISD::ADD: TabPtr = ADDTAB; break;
2289 case ISD::SUB: TabPtr = SUBTAB; break;
2290 case ISD::AND: TabPtr = ANDTAB; break;
2291 case ISD:: OR: TabPtr = ORTAB; break;
2292 case ISD::XOR: TabPtr = XORTAB; break;
2293 case ISD::SHL: TabPtr = SHLTAB; break;
2294 case ISD::SRA: TabPtr = SARTAB; break;
2295 case ISD::SRL: TabPtr = SHRTAB; break;
2299 // Handle: [mem] op= CST
2300 SDOperand Op0 = Op.getOperand(0);
2301 SDOperand Op1 = Op.getOperand(1);
2302 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2303 switch (Op0.getValueType()) { // Use Op0's type because of shifts.
2306 case MVT::i8: Opc = TabPtr[0]; break;
2307 case MVT::i16: Opc = TabPtr[1]; break;
2308 case MVT::i32: Opc = TabPtr[2]; break;
2312 if (getRegPressure(TheLoad.getOperand(0)) >
2313 getRegPressure(TheLoad.getOperand(1))) {
2314 Select(TheLoad.getOperand(0));
2315 SelectAddress(TheLoad.getOperand(1), AM);
2317 SelectAddress(TheLoad.getOperand(1), AM);
2318 Select(TheLoad.getOperand(0));
// Emit a single read-modify-write op-with-immediate on the memory operand.
2321 addFullAddress(BuildMI(BB, Opc, 4+1),AM).addImm(CN->getValue());
2326 // If we have [mem] = V op [mem], try to turn it into:
2327 // [mem] = [mem] op V.
// Only commutative ops may be swapped; SUB and the shifts are excluded.
2328 if (Op1 == TheLoad && Op.getOpcode() != ISD::SUB &&
2329 Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRA &&
2330 Op.getOpcode() != ISD::SRL)
2331 std::swap(Op0, Op1);
2333 if (Op0 == TheLoad) {
2334 switch (Op0.getValueType()) {
2337 case MVT::i8: Opc = TabPtr[3]; break;
2338 case MVT::i16: Opc = TabPtr[4]; break;
2339 case MVT::i32: Opc = TabPtr[5]; break;
2343 Select(TheLoad.getOperand(0));
2344 SelectAddress(TheLoad.getOperand(1), AM);
2345 unsigned Reg = SelectExpr(Op1);
// Emit the read-modify-write op-with-register form.
2346 addFullAddress(BuildMI(BB, Opc, 4+1),AM).addReg(Reg);
// General store: pick the mem,reg move / FP store opcode by value type.
2356 switch (N.getOperand(1).getValueType()) {
2357 default: assert(0 && "Cannot store this type!");
2359 case MVT::i8: Opc = X86::MOV8mr; break;
2360 case MVT::i16: Opc = X86::MOV16mr; break;
2361 case MVT::i32: Opc = X86::MOV32mr; break;
2362 case MVT::f32: Opc = X86::FST32m; break;
2363 case MVT::f64: Opc = X86::FST64m; break;
// Select chain, value, and address in decreasing register-pressure order.
2366 std::vector<std::pair<unsigned, unsigned> > RP;
2367 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
2368 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
2369 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
2370 std::sort(RP.begin(), RP.end());
2372 for (unsigned i = 0; i != 3; ++i)
2373 switch (RP[2-i].second) {
2374 default: assert(0 && "Unknown operand number!");
2375 case 0: Select(N.getOperand(0)); break;
2376 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
2377 case 2: SelectAddress(N.getOperand(2), AM); break;
2380 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
// Call-frame setup/teardown pseudos carry the byte adjustment as immediate.
2383 case ISD::ADJCALLSTACKDOWN:
2384 case ISD::ADJCALLSTACKUP:
2385 Select(N.getOperand(0));
2386 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
2388 Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? X86::ADJCALLSTACKDOWN :
2389 X86::ADJCALLSTACKUP;
2390 BuildMI(BB, Opc, 1).addImm(Tmp1);
// MEMSET: operands are (chain, dest, value, length, align).  Alignment picks
// the element width for REP STOS{B,W,D}; the count is the byte length
// divided by the element size.
2393 Select(N.getOperand(0)); // Select the chain.
2395 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
2396 if (Align == 0) Align = 1;
2398 // Turn the byte code into # iterations
2401 if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
2402 unsigned Val = ValC->getValue() & 255;
2404 // If the value is a constant, then we can potentially use larger sets.
2405 switch (Align & 3) {
2406 case 2: // WORD aligned
2407 CountReg = MakeReg(MVT::i32);
2408 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2409 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
// Non-constant length: shift right by 1 to divide the byte count by 2.
2411 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2412 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
// Replicate the byte into both halves of AX for the word store.
2414 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
2415 Opcode = X86::REP_STOSW;
2417 case 0: // DWORD aligned
2418 CountReg = MakeReg(MVT::i32);
2419 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2420 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
// Non-constant length: shift right by 2 to divide the byte count by 4.
2422 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2423 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
// Replicate the byte into all four bytes of EAX for the dword store.
2425 Val = (Val << 8) | Val;
2426 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
2427 Opcode = X86::REP_STOSD;
2429 default: // BYTE aligned
2430 CountReg = SelectExpr(Node->getOperand(3));
2431 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
2432 Opcode = X86::REP_STOSB;
2436 // If it's not a constant value we are storing, just fall back. We could
2437 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
2438 unsigned ValReg = SelectExpr(Node->getOperand(2));
2439 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
2440 CountReg = SelectExpr(Node->getOperand(3));
2441 Opcode = X86::REP_STOSB;
2444 // No matter what the alignment is, we put the source in ESI, the
2445 // destination in EDI, and the count in ECX.
2446 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
2447 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
2448 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
2449 BuildMI(BB, Opcode, 0);
// MEMCPY: operands are (chain, dest, src, length, align).  Same alignment
// strategy as MEMSET, but using REP MOVS{B,W,D} with src in ESI.
2453 Select(N.getOperand(0)); // Select the chain.
2455 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
2456 if (Align == 0) Align = 1;
2458 // Turn the byte code into # iterations
2461 switch (Align & 3) {
2462 case 2: // WORD aligned
2463 CountReg = MakeReg(MVT::i32);
2464 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2465 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
2467 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2468 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
2470 Opcode = X86::REP_MOVSW;
2472 case 0: // DWORD aligned
2473 CountReg = MakeReg(MVT::i32);
2474 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2475 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
2477 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2478 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
2480 Opcode = X86::REP_MOVSD;
2482 default: // BYTE aligned
2483 CountReg = SelectExpr(Node->getOperand(3));
2484 Opcode = X86::REP_MOVSB;
2488 // No matter what the alignment is, we put the source in ESI, the
2489 // destination in EDI, and the count in ECX.
2490 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
2491 unsigned TmpReg2 = SelectExpr(Node->getOperand(2));
2492 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
2493 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
2494 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
2495 BuildMI(BB, Opcode, 0);
// Every handled case returns/breaks before this point.
2498 assert(0 && "Should not be reached!");
2502 /// createX86PatternInstructionSelector - This pass converts an LLVM function
2503 /// into a machine code representation using pattern matching and a machine
2504 /// description file.
2506 FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
2507 return new ISel(TM);