//===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for X86.
//
//===----------------------------------------------------------------------===//
#include "X86InstrBuilder.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"

static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
                                  cl::desc("Enable fastcc on X86"));
// X86 Specific DAG Nodes
namespace X86ISD {
  enum NodeType {
    // Start the numbering where the builtin ops leave off.
    FIRST_NUMBER = ISD::BUILTIN_OP_END,

    /// FILD64m - This instruction implements SINT_TO_FP with a
    /// 64-bit source in memory and a FP reg result.  This corresponds to
    /// the X86::FILD64m instruction.  It has two inputs (token chain and
    /// address) and two outputs (FP value and token chain).
    FILD64m,

    /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
    /// integer destination in memory and a FP reg source.  This corresponds
    /// to the X86::FIST*m instructions and the rounding mode change stuff.
    /// It has three inputs (token chain, FP value, and address) and one
    /// output (a token chain).
    FP_TO_INT16_IN_MEM,
    FP_TO_INT32_IN_MEM,
    FP_TO_INT64_IN_MEM,
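
    // Note (illustrative, see LowerOperation below): the "rounding mode change
    // stuff" exists because FP_TO_SINT must truncate toward zero while the x87
    // FPU defaults to round-to-nearest, so the expansion temporarily switches
    // the FP control word to round-toward-zero around the FIST instruction.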

    /// CALL/TAILCALL - These operations represent an abstract X86 call
    /// instruction, which includes a bunch of information.  In particular the
    /// operands of these nodes are:
    ///
    ///     #0 - The incoming token chain
    ///     #1 - The callee
    ///     #2 - The number of arg bytes the caller pushes on the stack.
    ///     #3 - The number of arg bytes the callee pops off the stack.
    ///     #4 - The value to pass in AL/AX/EAX (optional)
    ///     #5 - The value to pass in DL/DX/EDX (optional)
    ///
    /// The result values of these nodes are:
    ///
    ///     #0 - The outgoing token chain
    ///     #1 - The first register result value (optional)
    ///     #2 - The second register result value (optional)
    ///
    /// The CALL vs TAILCALL distinction boils down to whether the callee is
    /// known not to modify the caller's stack frame, as is standard with the
    /// fast calling convention.
    CALL,
    TAILCALL,
  };
}
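
// Example (roughly): for a fastcc call like "fastcc int %f(int 1, int 2)" the
// two integer arguments travel in EAX/EDX (operands #4/#5 above), operand #3
// records the byte count the callee pops off the stack, and an integer result
// comes back as result value #1 in EAX.  See LowerFastCCCallTo below.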
//===----------------------------------------------------------------------===//
//  X86TargetLowering - X86 Implementation of the TargetLowering interface
//===----------------------------------------------------------------------===//

class X86TargetLowering : public TargetLowering {
  int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
  int ReturnAddrIndex;              // FrameIndex for return slot.
  int BytesToPopOnReturn;           // Number of arg bytes ret should pop.
  int BytesCallerReserves;          // Number of arg bytes caller makes.
public:
  X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
    // Set up the TargetLowering object.

    // X86 is weird, it always uses i8 for shift amounts and setcc results.
    setShiftAmountType(MVT::i8);
    setSetCCResultType(MVT::i8);
    setSetCCResultContents(ZeroOrOneSetCCResult);
    setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0

    // Set up the register classes.
    // FIXME: Eliminate these two classes when legalize can handle promotions
    // properly.
    addRegisterClass(MVT::i1, X86::R8RegisterClass);
    addRegisterClass(MVT::i8, X86::R8RegisterClass);
    addRegisterClass(MVT::i16, X86::R16RegisterClass);
    addRegisterClass(MVT::i32, X86::R32RegisterClass);

    // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
    // operation.
    setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);

    // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

    // We can handle SINT_TO_FP and FP_TO_SINT from/to i64 even though i64
    // isn't legal.
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);

    // Handle FP_TO_UINT by promoting the destination to a larger signed
    // conversion.
    setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);

    // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);

    setOperationAction(ISD::BRCONDTWOWAY, MVT::Other, Expand);
    setOperationAction(ISD::BRTWOWAY_CC, MVT::Other, Expand);
    setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
    setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
    setOperationAction(ISD::SREM, MVT::f64, Expand);
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::i8, Expand);
    setOperationAction(ISD::CTLZ, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

    setOperationAction(ISD::READIO, MVT::i1, Expand);
    setOperationAction(ISD::READIO, MVT::i8, Expand);
    setOperationAction(ISD::READIO, MVT::i16, Expand);
    setOperationAction(ISD::READIO, MVT::i32, Expand);
    setOperationAction(ISD::WRITEIO, MVT::i1, Expand);
    setOperationAction(ISD::WRITEIO, MVT::i8, Expand);
    setOperationAction(ISD::WRITEIO, MVT::i16, Expand);
    setOperationAction(ISD::WRITEIO, MVT::i32, Expand);

    // These should be promoted to a larger select which is supported.
    setOperationAction(ISD::SELECT, MVT::i1, Promote);
    setOperationAction(ISD::SELECT, MVT::i8, Promote);

    if (X86ScalarSSE) {
      // Set up the FP register classes.
      addRegisterClass(MVT::f32, X86::RXMMRegisterClass);
      addRegisterClass(MVT::f64, X86::RXMMRegisterClass);

      // SSE has no load+extend ops
      setOperationAction(ISD::EXTLOAD, MVT::f32, Expand);
      setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);

      // SSE has no i16 to fp conversion, only i32
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);

      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);

      // We don't support sin/cos/sqrt/fmod
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FABS, MVT::f64, Expand);
      setOperationAction(ISD::FNEG, MVT::f64, Expand);
      setOperationAction(ISD::SREM, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FABS, MVT::f32, Expand);
      setOperationAction(ISD::FNEG, MVT::f32, Expand);
      setOperationAction(ISD::SREM, MVT::f32, Expand);

      addLegalFPImmediate(+0.0); // xorps / xorpd
    } else {
      // Set up the FP register classes.
      addRegisterClass(MVT::f64, X86::RFPRegisterClass);

      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);

      addLegalFPImmediate(+0.0); // FLD0
      addLegalFPImmediate(+1.0); // FLD1
      addLegalFPImmediate(-0.0); // FLD0/FCHS
      addLegalFPImmediate(-1.0); // FLD1/FCHS
    }
    computeRegisterProperties();

    maxStoresPerMemSet = 8;   // For %llvm.memset  -> sequence of stores
    maxStoresPerMemCpy = 8;   // For %llvm.memcpy  -> sequence of stores
    maxStoresPerMemMove = 8;  // For %llvm.memmove -> sequence of stores
    allowUnalignedStores = true; // x86 supports it!
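    // For example, with these limits a 32-byte llvm.memset is lowered into a
    // short sequence of (up to eight) 32-bit stores instead of a call to
    // memset, and similarly for llvm.memcpy / llvm.memmove.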
  }

  // Return the number of bytes that a function should pop when it returns (in
  // addition to the space used by the return address).
  unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }

  // Return the number of bytes that the caller reserves for arguments passed
  // on the stack.
  unsigned getBytesCallerReserves() const { return BytesCallerReserves; }

  /// LowerOperation - Provide custom lowering hooks for some operations.
  virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);

  /// LowerArguments - This hook must be implemented to indicate how we should
  /// lower the arguments for the specified function, into the specified DAG.
  virtual std::vector<SDOperand>
  LowerArguments(Function &F, SelectionDAG &DAG);

  /// LowerCallTo - This hook lowers an abstract call to a function into an
  /// actual call.
  virtual std::pair<SDOperand, SDOperand>
  LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg, unsigned CC,
              bool isTailCall, SDOperand Callee, ArgListTy &Args,
              SelectionDAG &DAG);

  virtual SDOperand LowerVAStart(SDOperand Chain, SDOperand VAListP,
                                 Value *VAListV, SelectionDAG &DAG);
  virtual std::pair<SDOperand,SDOperand>
  LowerVAArg(SDOperand Chain, SDOperand VAListP, Value *VAListV,
             const Type *ArgTy, SelectionDAG &DAG);

  virtual std::pair<SDOperand, SDOperand>
  LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
                          SelectionDAG &DAG);

  SDOperand getReturnAddressFrameIndex(SelectionDAG &DAG);

  // C Calling Convention implementation.
  std::vector<SDOperand> LowerCCCArguments(Function &F, SelectionDAG &DAG);
  std::pair<SDOperand, SDOperand>
  LowerCCCCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
                 bool isTailCall,
                 SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);

  // Fast Calling Convention implementation.
  std::vector<SDOperand> LowerFastCCArguments(Function &F, SelectionDAG &DAG);
  std::pair<SDOperand, SDOperand>
  LowerFastCCCallTo(SDOperand Chain, const Type *RetTy, bool isTailCall,
                    SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
};

std::vector<SDOperand>
X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
    return LowerFastCCArguments(F, DAG);
  return LowerCCCArguments(F, DAG);
}

std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
                               bool isVarArg, unsigned CallingConv,
                               bool isTailCall,
                               SDOperand Callee, ArgListTy &Args,
                               SelectionDAG &DAG) {
  assert((!isVarArg || CallingConv == CallingConv::C) &&
         "Only C takes varargs!");
  if (CallingConv == CallingConv::Fast && EnableFastCC)
    return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
  return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
}

//===----------------------------------------------------------------------===//
//  C Calling Convention implementation
//===----------------------------------------------------------------------===//

std::vector<SDOperand>
X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Add DAG nodes to load the arguments...  On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
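  //
  // For example (illustrative), for "void %f(int %a, double %b, int %c)" the
  // fixed objects created below land at [ESP+4] for %a, [ESP+8..15] for %b,
  // and [ESP+16] for %c, with BytesCallerReserves set to 16.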

  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT::ValueType ObjectVT = getValueType(I->getType());
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:  ObjSize = 1;                break;
    case MVT::i16: ObjSize = 2;                break;
    case MVT::i32: ObjSize = 4;                break;
    case MVT::i64: ObjSize = ArgIncrement = 8; break;
    case MVT::f32: ObjSize = 4;                break;
    case MVT::f64: ObjSize = ArgIncrement = 8; break;
    }
    // Create the frame index object for this incoming parameter...
    int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);

    // Create the SelectionDAG nodes corresponding to a load from this parameter
    SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);

    // Don't codegen dead arguments.  FIXME: remove this check when we can nuke
    SDOperand ArgValue;
    if (!I->use_empty())
      ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
                             DAG.getSrcValue(NULL));
    else {
      if (MVT::isInteger(ObjectVT))
        ArgValue = DAG.getConstant(0, ObjectVT);
      else
        ArgValue = DAG.getConstantFP(0, ObjectVT);
    }
    ArgValues.push_back(ArgValue);

    ArgOffset += ArgIncrement;   // Move on to the next argument...
  }

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (F.isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
  ReturnAddrIndex = 0;     // No return address slot generated yet.
  BytesToPopOnReturn = 0;  // Callee pops nothing.
  BytesCallerReserves = ArgOffset;

  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  }
  return ArgValues;
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
                                  bool isVarArg, bool isTailCall,
                                  SDOperand Callee, ArgListTy &Args,
                                  SelectionDAG &DAG) {
  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
                      DAG.getConstant(0, getPointerTy()));

  for (unsigned i = 0, e = Args.size(); i != e; ++i)
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unknown value type!");

  Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
  std::vector<SDOperand> Stores;

  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
    PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);

    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      // Promote the integer to 32 bits.  If the input type is signed use a
      // sign extend, otherwise use a zero extend.
      if (Args[i].second->isSigned())
        Args[i].first = DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
      else
        Args[i].first = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);

      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));

      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));

  Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);

  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);
  RetVals.push_back(MVT::Other);

  // The result values produced have to be legal.  Promote the result.
  switch (RetTyVT) {
  case MVT::isVoid: break;
    RetVals.push_back(RetTyVT);
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::f32);
    RetVals.push_back(MVT::f64);
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
  }

  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                                  RetVals, Ops);
  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);

  SDOperand ResultVal;
  switch (RetTyVT) {
  case MVT::isVoid: break;
    ResultVal = TheCall.getValue(1);
    ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
    // FIXME: we would really like to remember that this FP_ROUND operation is
    // okay to eliminate if we allow excess FP precision.
    ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
    ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
                            TheCall.getValue(2));
  }

  return std::make_pair(ResultVal, Chain);
}
SDOperand
X86TargetLowering::LowerVAStart(SDOperand Chain, SDOperand VAListP,
                                Value *VAListV, SelectionDAG &DAG) {
  // vastart just stores the address of the VarArgsFrameIndex slot.
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
  return DAG.getNode(ISD::STORE, MVT::Other, Chain, FR, VAListP,
                     DAG.getSrcValue(VAListV));
}

std::pair<SDOperand,SDOperand>
X86TargetLowering::LowerVAArg(SDOperand Chain, SDOperand VAListP,
                              Value *VAListV, const Type *ArgTy,
                              SelectionDAG &DAG) {
  MVT::ValueType ArgVT = getValueType(ArgTy);
  SDOperand Val = DAG.getLoad(MVT::i32, Chain,
                              VAListP, DAG.getSrcValue(VAListV));
  SDOperand Result = DAG.getLoad(ArgVT, Chain, Val,
                                 DAG.getSrcValue(NULL));
  unsigned Amt;
  if (ArgVT == MVT::i32)
    Amt = 4;
  else {
    assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
           "Other types should have been promoted for varargs!");
    Amt = 8;
  }
  Val = DAG.getNode(ISD::ADD, Val.getValueType(), Val,
                    DAG.getConstant(Amt, Val.getValueType()));
  Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
                      Val, VAListP, DAG.getSrcValue(VAListV));
  return std::make_pair(Result, Chain);
}
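
// Illustrative note: va_arg on X86 is just a post-incremented load.  For
// "int x = va_arg(ap, int)" the code above loads the current pointer out of
// the va_list, loads a 4-byte value through it, bumps the pointer by 4 (or by
// 8 for i64/f64), and stores the pointer back.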

//===----------------------------------------------------------------------===//
//  Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fast' calling convention passes up to two integer arguments in
// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
// requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as C calling convs.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
//
// Note that this can be enhanced in the future to pass fp vals in registers
// (when we have a global fp allocator) and do other tricks.
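//
// Worked example (illustrative): a fastcc function taking (int, long, int)
// receives the first int in EAX and the low half of the long in EDX; the high
// half of the long and the trailing int are passed on the stack (8 bytes),
// which the padding below rounds up to 12 = 8n+4 bytes for the callee to pop.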

/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value.  It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}

std::vector<SDOperand>
X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Add DAG nodes to load the arguments...  On entry to a function the stack
  // frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first nonreg argument (leftmost lexically)
  // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot

  // Keep track of the number of integer regs passed so far.  This can be either
  // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
  // used).
  unsigned NumIntRegs = 0;

  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT::ValueType ObjectVT = getValueType(I->getType());
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    SDOperand ArgValue;

    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:
      if (NumIntRegs < 2) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
                                    X86::R8RegisterClass);
          ArgValue = DAG.getCopyFromReg(VReg, MVT::i8, DAG.getRoot());
          DAG.setRoot(ArgValue.getValue(1));
    case MVT::i16:
      if (NumIntRegs < 2) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
                                    X86::R16RegisterClass);
          ArgValue = DAG.getCopyFromReg(VReg, MVT::i16, DAG.getRoot());
          DAG.setRoot(ArgValue.getValue(1));
    case MVT::i32:
      if (NumIntRegs < 2) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
                                    X86::R32RegisterClass);
          ArgValue = DAG.getCopyFromReg(VReg, MVT::i32, DAG.getRoot());
          DAG.setRoot(ArgValue.getValue(1));
    case MVT::i64:
      if (NumIntRegs == 0) {
        if (!I->use_empty()) {
          unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
          unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);

          SDOperand Low = DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
          SDOperand Hi  = DAG.getCopyFromReg(TopReg, MVT::i32, Low.getValue(1));
          DAG.setRoot(Hi.getValue(1));

          ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
      } else if (NumIntRegs == 1) {
        if (!I->use_empty()) {
          unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
          SDOperand Low = DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
          DAG.setRoot(Low.getValue(1));

          // Load the high part from memory.
          // Create the frame index object for this incoming parameter...
          int FI = MFI->CreateFixedObject(4, ArgOffset);
          SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
          SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
                                     DAG.getSrcValue(NULL));
          ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);

      ObjSize = ArgIncrement = 8;
      break;
    case MVT::f32: ObjSize = 4;                break;
    case MVT::f64: ObjSize = ArgIncrement = 8; break;
    }

    // Don't codegen dead arguments.  FIXME: remove this check when we can nuke
    if (ObjSize && !I->use_empty()) {
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);

      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);

      ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
                             DAG.getSrcValue(NULL));
    } else if (ArgValue.Val == 0) {
      if (MVT::isInteger(ObjectVT))
        ArgValue = DAG.getConstant(0, ObjectVT);
      else
        ArgValue = DAG.getConstantFP(0, ObjectVT);
    }
    ArgValues.push_back(ArgValue);

    ArgOffset += ArgIncrement;   // Move on to the next argument.
  }

  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are aligned.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;

  VarArgsFrameIndex = 0xAAAAAAA;    // fastcc functions can't have varargs.
  ReturnAddrIndex = 0;              // No return address slot generated yet.
  BytesToPopOnReturn = ArgOffset;   // Callee pops all stack arguments.
  BytesCallerReserves = 0;

  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  }
  return ArgValues;
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
                                     bool isTailCall, SDOperand Callee,
                                     ArgListTy &Args, SelectionDAG &DAG) {
  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  // Keep track of the number of integer regs passed so far.  This can be either
  // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
  // used).
  unsigned NumIntRegs = 0;

  for (unsigned i = 0, e = Args.size(); i != e; ++i)
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unknown value type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (NumIntRegs < 2) {
    case MVT::i64:
      if (NumIntRegs == 0) {
      } else if (NumIntRegs == 1) {

  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are aligned.
  if ((NumBytes & 7) == 0)
    NumBytes += 4;

  Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,

  std::vector<SDOperand> Stores;
  std::vector<SDOperand> RegValuesToPass;
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (NumIntRegs < 2) {
        RegValuesToPass.push_back(Args[i].first);

        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
        Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                     Args[i].first, PtrOff,
                                     DAG.getSrcValue(NULL)));
    case MVT::i64:
      if (NumIntRegs < 2) {    // Can pass part of it in regs?
        SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                   Args[i].first, DAG.getConstant(1, MVT::i32));
        SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                   Args[i].first, DAG.getConstant(0, MVT::i32));
        RegValuesToPass.push_back(Lo);

        if (NumIntRegs < 2) {  // Pass both parts in regs?
          RegValuesToPass.push_back(Hi);

          // Pass the high part in memory.
          SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
          PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
          Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Hi, PtrOff, DAG.getSrcValue(NULL)));
    case MVT::f32:
    case MVT::f64:
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
        Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                     Args[i].first, PtrOff,
                                     DAG.getSrcValue(NULL)));

  Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);

  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are aligned.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;

  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);

  RetVals.push_back(MVT::Other);

  // The result values produced have to be legal.  Promote the result.
  switch (RetTyVT) {
  case MVT::isVoid: break;
    RetVals.push_back(RetTyVT);
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::f32);
    RetVals.push_back(MVT::f64);
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
  }

  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
  // Callee pops all arg values on the stack.
  Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));

  // Pass register arguments as needed.
  Ops.insert(Ops.end(), RegValuesToPass.begin(), RegValuesToPass.end());

  SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                                  RetVals, Ops);
  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);

  SDOperand ResultVal;
  switch (RetTyVT) {
  case MVT::isVoid: break;
    ResultVal = TheCall.getValue(1);
    ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
    // FIXME: we would really like to remember that this FP_ROUND operation is
    // okay to eliminate if we allow excess FP precision.
    ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
    ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
                            TheCall.getValue(2));
  }

  return std::make_pair(ResultVal, Chain);
}
SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
}

std::pair<SDOperand, SDOperand> X86TargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
                        SelectionDAG &DAG) {
  SDOperand Result;
  if (Depth)        // Depths > 0 not supported yet!
    Result = DAG.getConstant(0, getPointerTy());
  else {
    SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
    if (!isFrameAddress)
      // Just load the return address
      Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
                           DAG.getSrcValue(NULL));
    else
      Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
                           DAG.getConstant(4, MVT::i32));
  }
  return std::make_pair(Result, Chain);
}

//===----------------------------------------------------------------------===//
//  X86 Custom Lowering Hooks
//===----------------------------------------------------------------------===//

/// LowerOperation - Provide custom lowering hooks for some operations.
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::SINT_TO_FP: {
    assert(Op.getValueType() == MVT::f64 &&
           Op.getOperand(0).getValueType() == MVT::i64 &&
           "Unknown SINT_TO_FP to lower!");
    // We lower sint64->FP into a store to a temporary stack slot, followed by a
    // FILD64m node.
    MachineFunction &MF = DAG.getMachineFunction();
    int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
                                  Op.getOperand(0), StackSlot,
                                  DAG.getSrcValue(NULL));
    std::vector<MVT::ValueType> RTs;
    RTs.push_back(MVT::f64);
    RTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(Store);
    Ops.push_back(StackSlot);
    return DAG.getNode(X86ISD::FILD64m, RTs, Ops);
  }
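    // Example of the resulting code (roughly): the i64 operand is stored into
    // an 8-byte stack slot (as two 32-bit stores once legalized), and FILD64m
    // then loads it with "fild QWORD PTR [slot]", leaving the f64 result on
    // the FP stack.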
  case ISD::FP_TO_SINT: {
    assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
           Op.getOperand(0).getValueType() == MVT::f64 &&
           "Unknown FP_TO_SINT to lower!");
    // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
    // stack slot.
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
    int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

    unsigned Opc;
    switch (Op.getValueType()) {
    default: assert(0 && "Invalid FP_TO_SINT to lower!");
    case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
    case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
    case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
    }

    // Build the FP_TO_INT*_IN_MEM
    std::vector<SDOperand> Ops;
    Ops.push_back(DAG.getEntryNode());
    Ops.push_back(Op.getOperand(0));
    Ops.push_back(StackSlot);
    SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops);

    return DAG.getLoad(Op.getValueType(), FIST, StackSlot,
                       DAG.getSrcValue(NULL));
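    // Example of the resulting code (roughly), for f64 -> i32:
    //   set rounding to truncate; fistp DWORD PTR [slot]; restore rounding;
    //   mov eax, DWORD PTR [slot]
    // where the FP_TO_INT32_IN_MEM node covers everything up to the final
    // load, which is the getLoad emitted just above.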
  }
  }
}

//===----------------------------------------------------------------------===//
//  Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

/// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
/// SDOperand's instead of register numbers for the leaves of the matched
/// tree.
struct X86ISelAddressMode {
  enum {
    RegBase,
    FrameIndexBase,
  } BaseType;

  struct {            // This is really a union, discriminated by BaseType!
    SDOperand Reg;
    int FrameIndex;
  } Base;

  unsigned Scale;
  SDOperand IndexReg;
  unsigned Disp;
  GlobalValue *GV;

  X86ISelAddressMode()
    : BaseType(RegBase), Scale(1), IndexReg(), Disp(), GV(0) {
  }
};

Statistic<>
NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");

//===--------------------------------------------------------------------===//
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
class ISel : public SelectionDAGISel {
  /// ContainsFPCode - Every instruction we select that uses or defines a FP
  /// register should set this to true.
  bool ContainsFPCode;

  /// X86Lowering - This object fully describes how to lower LLVM code to an
  /// X86-specific SelectionDAG.
  X86TargetLowering X86Lowering;

  /// RegPressureMap - This keeps an approximate count of the number of
  /// registers required to evaluate each node in the graph.
  std::map<SDNode*, unsigned> RegPressureMap;

  /// ExprMap - As shared expressions are codegen'd, we keep track of which
  /// vreg the value is produced in, so we only emit one copy of each compiled
  /// expression.
  std::map<SDOperand, unsigned> ExprMap;

  /// TheDAG - The DAG being selected during Select* operations.
  SelectionDAG *TheDAG;

  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;
public:
  ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
  }

  virtual const char *getPassName() const {
    return "X86 Pattern Instruction Selection";
  }

  unsigned getRegPressure(SDOperand O) {
    return RegPressureMap[O.Val];
  }
  unsigned ComputeRegPressure(SDOperand O);

  /// InstructionSelectBasicBlock - This callback is invoked by
  /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
  virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);

  virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

  bool isFoldableLoad(SDOperand Op, SDOperand OtherOp,
                      bool FloatPromoteOk = false);
  void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM);
  bool TryToFoldLoadOpStore(SDNode *Node);
  bool EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg);
  void EmitCMP(SDOperand LHS, SDOperand RHS, bool isOnlyUse);
  bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond);
  void EmitSelectCC(SDOperand Cond, SDOperand True, SDOperand False,
                    MVT::ValueType SVT, unsigned RDest);
  unsigned SelectExpr(SDOperand N);

  X86AddressMode SelectAddrExprs(const X86ISelAddressMode &IAM);
  bool MatchAddress(SDOperand N, X86ISelAddressMode &AM);
  void SelectAddress(SDOperand N, X86AddressMode &AM);
  bool EmitPotentialTailCall(SDNode *Node);
  void EmitFastCCToFastCCTailCall(SDNode *TailCallNode);
  void Select(SDOperand N);
};

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
static void EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                   MachineFrameInfo *MFI) {
  // Switch the FPU to 64-bit precision mode for better compatibility and speed.
  int CWFrameIdx = MFI->CreateStackObject(2, 2);
  addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);

  // Set the high part to be 64-bit precision.
  addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
                    CWFrameIdx, 1).addImm(2);

  // Reload the modified control word now.
  addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
}
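
// Note (illustrative): the three instructions above amount to
//   fnstcw [slot]; mov byte ptr [slot+1], 2; fldcw [slot]
// i.e. the precision-control field in the high byte of the x87 control word
// is rewritten so the FPU rounds results to IEEE double precision instead of
// 80-bit extended precision.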
void ISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this function has live-in values, emit the copies from pregs to vregs at
  // the top of the function, before anything else.
  MachineBasicBlock *BB = MF.begin();
  if (MF.livein_begin() != MF.livein_end()) {
    SSARegMap *RegMap = MF.getSSARegMap();
    for (MachineFunction::livein_iterator LI = MF.livein_begin(),
         E = MF.livein_end(); LI != E; ++LI) {
      const TargetRegisterClass *RC = RegMap->getRegClass(LI->second);
      if (RC == X86::R8RegisterClass) {
        BuildMI(BB, X86::MOV8rr, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::R16RegisterClass) {
        BuildMI(BB, X86::MOV16rr, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::R32RegisterClass) {
        BuildMI(BB, X86::MOV32rr, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::RFPRegisterClass) {
        BuildMI(BB, X86::FpMOV, 1, LI->second).addReg(LI->first);
      } else if (RC == X86::RXMMRegisterClass) {
        BuildMI(BB, X86::MOVAPDrr, 1, LI->second).addReg(LI->first);
      } else {
        assert(0 && "Unknown regclass!");
      }
    }
  }

  // If this is main, emit special code for main.
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}

/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
  // While we're doing this, keep track of whether we see any FP code for
  // FP_REG_KILL insertion.
  ContainsFPCode = false;
  MachineFunction *MF = BB->getParent();

  // Scan the PHI nodes that already are inserted into this basic block.  If any
  // of them is a PHI of a floating point value, we need to insert an
  // FP_REG_KILL instruction.
  SSARegMap *RegMap = MF->getSSARegMap();
  if (BB != MF->begin())
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      assert(I->getOpcode() == X86::PHI &&
             "Isn't just PHI nodes?");
      if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
          X86::RFPRegisterClass) {
        ContainsFPCode = true;

  // Compute the RegPressureMap, which is an approximation for the number of
  // registers required to compute each node.
  ComputeRegPressure(DAG.getRoot());

  // Codegen the basic block.
  Select(DAG.getRoot());

  // Finally, look at all of the successors of this block.  If any contain a PHI
  // node of FP type, we need to insert an FP_REG_KILL in this block.
  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
       E = BB->succ_end(); SI != E && !ContainsFPCode; ++SI)
    for (MachineBasicBlock::iterator I = (*SI)->begin(), E = (*SI)->end();
         I != E && I->getOpcode() == X86::PHI; ++I) {
      if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
          X86::RFPRegisterClass) {
        ContainsFPCode = true;

  // Final check, check LLVM BB's that are successors to the LLVM BB
  // corresponding to BB for FP PHI nodes.
  const BasicBlock *LLVMBB = BB->getBasicBlock();
  const PHINode *PN;
  if (!ContainsFPCode)
    for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
         SI != E && !ContainsFPCode; ++SI)
      for (BasicBlock::const_iterator II = SI->begin();
           (PN = dyn_cast<PHINode>(II)); ++II)
        if (PN->getType()->isFloatingPoint()) {
          ContainsFPCode = true;

  // Insert FP_REG_KILL instructions into basic blocks that need them.  This
  // only occurs due to the floating point stackifier not being aggressive
  // enough to handle arbitrary global stackification.
  //
  // Currently we insert an FP_REG_KILL instruction into each block that uses or
  // defines a floating point virtual register.
  //
  // When the global register allocators (like linear scan) finally update live
  // variable analysis, we can keep floating point values in registers across
  // basic blocks.  This will be a huge win, but we are waiting on the global
  // allocators before we can do this.
  if (ContainsFPCode) {
    BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
    ++NumFPKill;
  }

  // Clear state used for selection.
  ExprMap.clear();
  RegPressureMap.clear();
}

// ComputeRegPressure - Compute the RegPressureMap, which is an approximation
// for the number of registers required to compute each node.  This is basically
// computing a generalized form of the Sethi-Ullman number for each node.
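//
// For example (illustrative, assuming each leaf costs one register): in
// (a*b) + (c+d) each binary operator needs max(left, right) registers plus one
// for every additional operand tying that maximum, so both subtrees cost 2 and
// the whole expression costs 3; evaluating the costlier operand first keeps
// the peak register use at that bound.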
unsigned ISel::ComputeRegPressure(SDOperand O) {
  SDNode *N = O.Val;
  unsigned &Result = RegPressureMap[N];
  if (Result) return Result;

  // FIXME: Should operations like CALL (which clobber lots o regs) have a
  // higher fixed cost??

  if (N->getNumOperands() == 0) {

  unsigned MaxRegUse = 0;
  unsigned NumExtraMaxRegUsers = 0;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    unsigned Regs;
    if (N->getOperand(i).getOpcode() == ISD::Constant)
    Regs = ComputeRegPressure(N->getOperand(i));
    if (Regs > MaxRegUse) {
      MaxRegUse = Regs;
      NumExtraMaxRegUsers = 0;
    } else if (Regs == MaxRegUse &&
               N->getOperand(i).getValueType() != MVT::Other) {
      ++NumExtraMaxRegUsers;
    }
  }

  if (O.getOpcode() != ISD::TokenFactor)
    Result = MaxRegUse+NumExtraMaxRegUsers;
  else
    Result = MaxRegUse == 1 ? 0 : MaxRegUse-1;

  //std::cerr << " WEIGHT: " << Result << " ";  N->dump(); std::cerr << "\n";
  return Result;
}

/// NodeTransitivelyUsesValue - Return true if N or any of its uses uses Op.
/// The DAG cannot have cycles in it, by definition, so the visited set is not
/// needed to prevent infinite loops.  The DAG CAN, however, have unbounded
/// reuse, so it prevents exponential cases.
static bool NodeTransitivelyUsesValue(SDOperand N, SDOperand Op,
                                      std::set<SDNode*> &Visited) {
  if (N == Op) return true;                         // Found it.
  SDNode *Node = N.Val;
  if (Node->getNumOperands() == 0 ||                // Leaf?
      Node->getNodeDepth() <= Op.getNodeDepth()) return false; // Can't find it?
  if (!Visited.insert(Node).second) return false;   // Already visited?

  // Recurse for the first N-1 operands.
  for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
    if (NodeTransitivelyUsesValue(Node->getOperand(i), Op, Visited))
      return true;

  // Tail recurse for the last operand.
  return NodeTransitivelyUsesValue(Node->getOperand(0), Op, Visited);
}
X86AddressMode ISel::SelectAddrExprs(const X86ISelAddressMode &IAM) {
  X86AddressMode Result;

  // If we need to emit two register operands, emit the one with the highest
  // register pressure first.
  if (IAM.BaseType == X86ISelAddressMode::RegBase &&
      IAM.Base.Reg.Val && IAM.IndexReg.Val) {
    bool EmitBaseThenIndex;
    if (getRegPressure(IAM.Base.Reg) > getRegPressure(IAM.IndexReg)) {
      std::set<SDNode*> Visited;
      EmitBaseThenIndex = true;
      // If Base ends up pointing to Index, we must emit index first.  This is
      // because of the way we fold loads, we may end up doing bad things with
      if (NodeTransitivelyUsesValue(IAM.Base.Reg, IAM.IndexReg, Visited))
        EmitBaseThenIndex = false;
    } else {
      std::set<SDNode*> Visited;
      EmitBaseThenIndex = false;
      // If Base ends up pointing to Index, we must emit index first.  This is
      // because of the way we fold loads, we may end up doing bad things with
      if (NodeTransitivelyUsesValue(IAM.IndexReg, IAM.Base.Reg, Visited))
        EmitBaseThenIndex = true;
    }

    if (EmitBaseThenIndex) {
      Result.Base.Reg = SelectExpr(IAM.Base.Reg);
      Result.IndexReg = SelectExpr(IAM.IndexReg);
    } else {
      Result.IndexReg = SelectExpr(IAM.IndexReg);
      Result.Base.Reg = SelectExpr(IAM.Base.Reg);
    }
  } else if (IAM.BaseType == X86ISelAddressMode::RegBase && IAM.Base.Reg.Val) {
    Result.Base.Reg = SelectExpr(IAM.Base.Reg);
  } else if (IAM.IndexReg.Val) {
    Result.IndexReg = SelectExpr(IAM.IndexReg);
  }

  switch (IAM.BaseType) {
  case X86ISelAddressMode::RegBase:
    Result.BaseType = X86AddressMode::RegBase;
    break;
  case X86ISelAddressMode::FrameIndexBase:
    Result.BaseType = X86AddressMode::FrameIndexBase;
    Result.Base.FrameIndex = IAM.Base.FrameIndex;
    break;
  default:
    assert(0 && "Unknown base type!");
  }
  Result.Scale = IAM.Scale;
  Result.Disp = IAM.Disp;
  return Result;
}

/// SelectAddress - Pattern match the maximal addressing mode for this node and
/// emit all of the leaf registers.
void ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
  X86ISelAddressMode IAM;
  MatchAddress(N, IAM);
  AM = SelectAddrExprs(IAM);
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode, it does not cause any code to be emitted.  For that, use
/// SelectAddress.
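///
/// Illustrative example: for an address computed as
///     (add (add %Base, (shl %Idx, 2)), 12)
/// MatchAddress fills in Base.Reg = %Base, IndexReg = %Idx, Scale = 4 and
/// Disp = 12, so a single x86 operand of the form [Base + 4*Idx + 12] is used.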
bool ISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM) {
  switch (N.getOpcode()) {
  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;
  case ISD::GlobalAddress:
      GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
      // For Darwin, external and weak symbols are indirect, so we want to load
      // the value at address GV, not the value of GV itself.  This means that
      // the GlobalAddress must be in the base or index register of the address,
      // not the GV offset field.
      if (Subtarget->getIndirectExternAndWeakGlobals() &&
          (GV->hasWeakLinkage() || GV->isExternal())) {
  case ISD::Constant:
    AM.Disp += cast<ConstantSDNode>(N)->getValue();
    return false;
  case ISD::SHL:
    // We might have folded the load into this shift, so don't regen the value
    if (ExprMap.count(N)) break;

    if (AM.IndexReg.Val == 0 && AM.Scale == 1)
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
        unsigned Val = CN->getValue();
        if (Val == 1 || Val == 2 || Val == 3) {
          AM.Scale = 1 << Val;
          SDOperand ShVal = N.Val->getOperand(0);

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
              isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
            AM.IndexReg = ShVal.Val->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(ShVal.Val->getOperand(1));
            AM.Disp += AddVal->getValue() << Val;
          } else {
            AM.IndexReg = ShVal;
  case ISD::MUL:
    // We might have folded the load into this mul, so don't regen the value if
    if (ExprMap.count(N)) break;

    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.IndexReg.Val == 0 && AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.Val == 0)
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
        if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
          AM.Scale = unsigned(CN->getValue())-1;

          SDOperand MulVal = N.Val->getOperand(0);
          SDOperand Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
            Reg = MulVal.Val->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.Val->getOperand(1));
            AM.Disp += AddVal->getValue() * CN->getValue();
          } else {
            Reg = N.Val->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
  case ISD::ADD: {
    // We might have folded the load into this mul, so don't regen the value if
    if (ExprMap.count(N)) break;

    X86ISelAddressMode Backup = AM;
    if (!MatchAddress(N.Val->getOperand(0), AM) &&
        !MatchAddress(N.Val->getOperand(1), AM))
      return false;
    AM = Backup;
    if (!MatchAddress(N.Val->getOperand(1), AM) &&
        !MatchAddress(N.Val->getOperand(0), AM))
      return false;
    AM = Backup;
    break;
  }
  }

  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.Val == 0) {

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}

/// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
/// assuming that the temporary registers are in the 8-bit register class.
///
///     Tmp1 = SetCC1
///     Tmp2 = SetCC2
///     DestReg = logicalop Tmp1, Tmp2
///
static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
                                  unsigned SetCC2, unsigned LogicalOp,
                                  unsigned DestReg) {
  SSARegMap *RegMap = BB->getParent()->getSSARegMap();
  unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
  unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
  BuildMI(BB, SetCC1, 0, Tmp1);
  BuildMI(BB, SetCC2, 0, Tmp2);
  BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
}

/// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
/// condition codes match the specified SetCCOpcode.  Note that some conditions
/// require multiple instructions to generate the correct value.
static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
                      ISD::CondCode SetCCOpcode, bool isFP) {
  unsigned Opc;
  if (!isFP) {
    switch (SetCCOpcode) {
    default: assert(0 && "Illegal integer SetCC!");
    case ISD::SETEQ:  Opc = X86::SETEr;  break;
    case ISD::SETGT:  Opc = X86::SETGr;  break;
    case ISD::SETGE:  Opc = X86::SETGEr; break;
    case ISD::SETLT:  Opc = X86::SETLr;  break;
    case ISD::SETLE:  Opc = X86::SETLEr; break;
    case ISD::SETNE:  Opc = X86::SETNEr; break;
    case ISD::SETULT: Opc = X86::SETBr;  break;
    case ISD::SETUGT: Opc = X86::SETAr;  break;
    case ISD::SETULE: Opc = X86::SETBEr; break;
    case ISD::SETUGE: Opc = X86::SETAEr; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    // ZF  PF  CF   op
    //  0 | 0 | 0 | X > Y
    //  0 | 0 | 1 | X < Y
    //  1 | 0 | 0 | X == Y
    //  1 | 1 | 1 | unordered
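    //
    // For example (illustrative), SETOEQ ("ordered and equal") needs ZF=1 and
    // PF=0, which no single SETcc instruction tests, so it is emitted below as
    //   setnp %tmp1; sete %tmp2; and %tmp1, %tmp2
    // via Emit2SetCCsAndLogical.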
    switch (SetCCOpcode) {
    default: assert(0 && "Invalid FP setcc!");
      Opc = X86::SETEr;    // True if ZF = 1
      Opc = X86::SETAr;    // True if CF = 0 and ZF = 0
      Opc = X86::SETAEr;   // True if CF = 0
      Opc = X86::SETBr;    // True if CF = 1
      Opc = X86::SETBEr;   // True if CF = 1 or ZF = 1
      Opc = X86::SETNEr;   // True if ZF = 0
      Opc = X86::SETPr;    // True if PF = 1
      Opc = X86::SETNPr;   // True if PF = 0
    case ISD::SETOEQ:      // !PF & ZF
      Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
    case ISD::SETOLT:      // !PF & CF
      Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
    case ISD::SETOLE:      // !PF & (CF || ZF)
      Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
    case ISD::SETUGT:      // PF | (!ZF & !CF)
      Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
    case ISD::SETUGE:      // PF | !CF
      Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
    case ISD::SETUNE:      // PF | !ZF
      Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);

  BuildMI(BB, Opc, 0, DestReg);
}

/// EmitBranchCC - Emit code into BB that arranges for control to transfer to
/// the Dest block if the Cond condition is true.  If we cannot fold this
/// condition into the branch, return true.
bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
                        SDOperand Cond) {
  // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
  // B) using two conditional branches instead of one condbr, two setcc's, and
  // an or.
  if ((Cond.getOpcode() == ISD::OR ||
       Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
    // AND and OR set the flags for us, so there is no need to emit a TST of the
    // result.  It is only safe to do this if there is only a single use of the
    // AND/OR though, otherwise we don't know it will be emitted here.
    BuildMI(BB, X86::JNE, 1).addMBB(Dest);
    return false;
  }

  // Codegen br not C -> JE.
  if (Cond.getOpcode() == ISD::XOR)
    if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
      if (NC->isAllOnesValue()) {
        unsigned CondR;
        if (getRegPressure(Chain) > getRegPressure(Cond)) {
          Select(Chain);
          CondR = SelectExpr(Cond.Val->getOperand(0));
        } else {
          CondR = SelectExpr(Cond.Val->getOperand(0));
          Select(Chain);
        }
        BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
        BuildMI(BB, X86::JE, 1).addMBB(Dest);
        return false;
      }

  if (Cond.getOpcode() != ISD::SETCC)
    return true;                       // Can only handle simple setcc's so far.
  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
  unsigned Opc;

  // Handle integer conditions first.
  if (MVT::isInteger(Cond.getOperand(0).getValueType())) {
    switch (CC) {
    default: assert(0 && "Illegal integer SetCC!");
    case ISD::SETEQ:  Opc = X86::JE;  break;
    case ISD::SETGT:  Opc = X86::JG;  break;
    case ISD::SETGE:  Opc = X86::JGE; break;
    case ISD::SETLT:  Opc = X86::JL;  break;
    case ISD::SETLE:  Opc = X86::JLE; break;
    case ISD::SETNE:  Opc = X86::JNE; break;
    case ISD::SETULT: Opc = X86::JB;  break;
    case ISD::SETUGT: Opc = X86::JA;  break;
    case ISD::SETULE: Opc = X86::JBE; break;
    case ISD::SETUGE: Opc = X86::JAE; break;
    }

    EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.hasOneUse());
    BuildMI(BB, Opc, 1).addMBB(Dest);
    return false;
  }

  unsigned Opc2 = 0;  // Second branch if needed.

  // On a floating point condition, the flags are set as follows:
  // ZF  PF  CF   op
  //  0 | 0 | 0 | X > Y
  //  0 | 0 | 1 | X < Y
  //  1 | 0 | 0 | X == Y
  //  1 | 1 | 1 | unordered
  switch (CC) {
  default: assert(0 && "Invalid FP setcc!");
  case ISD::SETEQ:  Opc = X86::JE;  break;     // True if ZF = 1
  case ISD::SETGT:  Opc = X86::JA;  break;     // True if CF = 0 and ZF = 0
  case ISD::SETGE:  Opc = X86::JAE; break;     // True if CF = 0
  case ISD::SETLT:  Opc = X86::JB;  break;     // True if CF = 1
  case ISD::SETLE:  Opc = X86::JBE; break;     // True if CF = 1 or ZF = 1
  case ISD::SETNE:  Opc = X86::JNE; break;     // True if ZF = 0
  case ISD::SETUO:  Opc = X86::JP;  break;     // True if PF = 1
  case ISD::SETO:   Opc = X86::JNP; break;     // True if PF = 0
  case ISD::SETUGT:      // PF = 1 | (ZF = 0 & CF = 0)
    Opc  = X86::JA;      // ZF = 0 & CF = 0
    Opc2 = X86::JP;      // PF = 1
    break;
  case ISD::SETUGE:      // PF = 1 | CF = 0
    Opc  = X86::JAE;     // CF = 0
    Opc2 = X86::JP;      // PF = 1
    break;
  case ISD::SETUNE:      // PF = 1 | ZF = 0
    Opc  = X86::JNE;     // ZF = 0
    Opc2 = X86::JP;      // PF = 1
    break;
  case ISD::SETOEQ:      // PF = 0 & ZF = 1
    return true;    // FIXME: Emit more efficient code for this branch.
  case ISD::SETOLT:      // PF = 0 & CF = 1
    return true;    // FIXME: Emit more efficient code for this branch.
  case ISD::SETOLE:      // PF = 0 & (CF = 1 || ZF = 1)
    //X86::JNP, X86::JBE
    return true;    // FIXME: Emit more efficient code for this branch.
  }

  EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.hasOneUse());
  BuildMI(BB, Opc, 1).addMBB(Dest);
  if (Opc2)
    BuildMI(BB, Opc2, 1).addMBB(Dest);
  return false;
}

/// EmitSelectCC - Emit code into BB that performs a select operation between
/// the two registers RTrue and RFalse, generating a result into RDest.
void ISel::EmitSelectCC(SDOperand Cond, SDOperand True, SDOperand False,
                        MVT::ValueType SVT, unsigned RDest) {
  unsigned RTrue, RFalse;
  enum Condition {
    EQ, NE, LT, LE, GT, GE, B, BE, A, AE, P, NP,
    NOT_SET
  } CondCode = NOT_SET;

  static const unsigned CMOVTAB16[] = {
    X86::CMOVE16rr,  X86::CMOVNE16rr, X86::CMOVL16rr,  X86::CMOVLE16rr,
    X86::CMOVG16rr,  X86::CMOVGE16rr, X86::CMOVB16rr,  X86::CMOVBE16rr,
    X86::CMOVA16rr,  X86::CMOVAE16rr, X86::CMOVP16rr,  X86::CMOVNP16rr,
  };
  static const unsigned CMOVTAB32[] = {
    X86::CMOVE32rr,  X86::CMOVNE32rr, X86::CMOVL32rr,  X86::CMOVLE32rr,
    X86::CMOVG32rr,  X86::CMOVGE32rr, X86::CMOVB32rr,  X86::CMOVBE32rr,
    X86::CMOVA32rr,  X86::CMOVAE32rr, X86::CMOVP32rr,  X86::CMOVNP32rr,
  };
  static const unsigned CMOVTABFP[] = {
    X86::FCMOVE ,  X86::FCMOVNE, /*missing*/0, /*missing*/0,
    /*missing*/0,  /*missing*/0, X86::FCMOVB , X86::FCMOVBE,
    X86::FCMOVA ,  X86::FCMOVAE, X86::FCMOVP , X86::FCMOVNP
  };
  static const int SSE_CMOVTAB[] = {
    /*CMPEQ*/   0, /*CMPNEQ*/   4, /*missing*/  0, /*missing*/  0,
    /*missing*/ 0, /*missing*/  0, /*CMPLT*/    1, /*CMPLE*/    2,
    /*CMPNLE*/  6, /*CMPNLT*/   5, /*CMPUNORD*/ 3, /*CMPORD*/   7
  };
1793 if (Cond.getOpcode() == ISD::SETCC) {
1794 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
1795 if (MVT::isInteger(Cond.getOperand(0).getValueType())) {
1797 default: assert(0 && "Unknown integer comparison!");
1798 case ISD::SETEQ: CondCode = EQ; break;
1799 case ISD::SETGT: CondCode = GT; break;
1800 case ISD::SETGE: CondCode = GE; break;
1801 case ISD::SETLT: CondCode = LT; break;
1802 case ISD::SETLE: CondCode = LE; break;
1803 case ISD::SETNE: CondCode = NE; break;
1804 case ISD::SETULT: CondCode = B; break;
1805 case ISD::SETUGT: CondCode = A; break;
1806 case ISD::SETULE: CondCode = BE; break;
1807 case ISD::SETUGE: CondCode = AE; break;
1810 // On a floating point condition, the flags are set as follows:
1811 // ZF | PF | CF | op
1812 //  0 |  0 |  0 | X > Y
1813 //  0 |  0 |  1 | X < Y
1814 //  1 |  0 |  0 | X == Y
1815 //  1 |  1 |  1 | unordered
1818 default: assert(0 && "Unknown FP comparison!");
1820 case ISD::SETEQ: CondCode = EQ; break; // True if ZF = 1
1822 case ISD::SETGT: CondCode = A; break; // True if CF = 0 and ZF = 0
1824 case ISD::SETGE: CondCode = AE; break; // True if CF = 0
1826 case ISD::SETLT: CondCode = B; break; // True if CF = 1
1828 case ISD::SETLE: CondCode = BE; break; // True if CF = 1 or ZF = 1
1830 case ISD::SETNE: CondCode = NE; break; // True if ZF = 0
1831 case ISD::SETUO: CondCode = P; break; // True if PF = 1
1832 case ISD::SETO: CondCode = NP; break; // True if PF = 0
1833 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
1834 case ISD::SETUGE: // PF = 1 | CF = 0
1835 case ISD::SETUNE: // PF = 1 | ZF = 0
1836 case ISD::SETOEQ: // PF = 0 & ZF = 1
1837 case ISD::SETOLT: // PF = 0 & CF = 1
1838 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
1839 // We cannot emit this comparison as a single cmov.
1845 // There's no SSE equivalent of FCMOVE. If we set a condition code above and
1846 // one of the results of the select is +0.0, we can fake it up through a
1847 // clever AND with a mask. Otherwise, we will fall through to
1848 // the code below that will use a PHI node to select the right value.
1849 if (X86ScalarSSE && (SVT == MVT::f32 || SVT == MVT::f64)) {
1850 if (Cond.getOperand(0).getValueType() == SVT &&
1851 NOT_SET != CondCode) {
1852 ConstantFPSDNode *CT = dyn_cast<ConstantFPSDNode>(True);
1853 ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(False);
1854 bool TrueZero = CT && CT->isExactlyValue(0.0);
1855 bool FalseZero = CF && CF->isExactlyValue(0.0);
1856 if (TrueZero || FalseZero) {
1857 SDOperand LHS = Cond.getOperand(0);
1858 SDOperand RHS = Cond.getOperand(1);
1860 // Select the two halves of the condition
1861 unsigned RLHS, RRHS;
1862 if (getRegPressure(LHS) > getRegPressure(RHS)) {
1863 RLHS = SelectExpr(LHS);
1864 RRHS = SelectExpr(RHS);
1866 RRHS = SelectExpr(RHS);
1867 RLHS = SelectExpr(LHS);
1870 // Emit the comparison and generate a mask from it
1871 unsigned MaskReg = MakeReg(SVT);
1872 unsigned Opc = (SVT == MVT::f32) ? X86::CMPSSrr : X86::CMPSDrr;
1873 BuildMI(BB, Opc, 3, MaskReg).addReg(RLHS).addReg(RRHS)
1874 .addImm(SSE_CMOVTAB[CondCode]);
1877 RFalse = SelectExpr(False);
1878 Opc = (SVT == MVT::f32) ? X86::ANDNPSrr : X86::ANDNPDrr;
1879 BuildMI(BB, Opc, 2, RDest).addReg(MaskReg).addReg(RFalse);
1881 RTrue = SelectExpr(True);
1882 Opc = (SVT == MVT::f32) ? X86::ANDPSrr : X86::ANDPDrr;
1883 BuildMI(BB, Opc, 2, RDest).addReg(MaskReg).addReg(RTrue);
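// A sketch of the masking trick used above: CMPSS/CMPSD writes an all-ones or
// all-zeros mask into MaskReg. When the false value is +0.0 the select is just
// (mask & True), computed with ANDPS/ANDPD; when the true value is +0.0 it is
// (~mask & False), computed with ANDNPS/ANDNPD.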
1891 // Select the true and false values for use in both the SSE PHI case, and the
1892 // integer or x87 cmov cases below.
1893 if (getRegPressure(True) > getRegPressure(False)) {
1894 RTrue = SelectExpr(True);
1895 RFalse = SelectExpr(False);
1897 RFalse = SelectExpr(False);
1898 RTrue = SelectExpr(True);
1901 // Since there's no SSE equivalent of FCMOVE, and we couldn't generate an
1902 // AND with mask, we'll have to do the normal RISC thing and generate a PHI
1903 // node to select between the true and false values.
1904 if (X86ScalarSSE && (SVT == MVT::f32 || SVT == MVT::f64)) {
1905 // FIXME: emit a direct compare and branch rather than setting a cond reg
1907 unsigned CondReg = SelectExpr(Cond);
1908 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
1910 // Create an iterator with which to insert the MBB for copying the false
1911 // value and the MBB to hold the PHI instruction for this SetCC.
1912 MachineBasicBlock *thisMBB = BB;
1913 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1914 ilist<MachineBasicBlock>::iterator It = BB;
1920 // cmpTY ccX, r1, r2
1922 // fallthrough --> copy0MBB
1923 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
1924 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
1925 BuildMI(BB, X86::JNE, 1).addMBB(sinkMBB);
1926 MachineFunction *F = BB->getParent();
1927 F->getBasicBlockList().insert(It, copy0MBB);
1928 F->getBasicBlockList().insert(It, sinkMBB);
1929 // Update machine-CFG edges
1930 BB->addSuccessor(copy0MBB);
1931 BB->addSuccessor(sinkMBB);
1934 // %FalseValue = ...
1935 // # fallthrough to sinkMBB
1937 // Update machine-CFG edges
1938 BB->addSuccessor(sinkMBB);
1941 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1944 BuildMI(BB, X86::PHI, 4, RDest).addReg(RFalse)
1945 .addMBB(copy0MBB).addReg(RTrue).addMBB(thisMBB);
1950 if (CondCode != NOT_SET) {
1952 default: assert(0 && "Cannot select this type!");
1953 case MVT::i16: Opc = CMOVTAB16[CondCode]; break;
1954 case MVT::i32: Opc = CMOVTAB32[CondCode]; break;
1955 case MVT::f64: Opc = CMOVTABFP[CondCode]; break;
1959 // Finally, if we weren't able to fold this, just emit the condition and test
1961 if (CondCode == NOT_SET || Opc == 0) {
1962 // Get the condition into the zero flag.
1963 unsigned CondReg = SelectExpr(Cond);
1964 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
1967 default: assert(0 && "Cannot select this type!");
1968 case MVT::i16: Opc = X86::CMOVE16rr; break;
1969 case MVT::i32: Opc = X86::CMOVE32rr; break;
1970 case MVT::f64: Opc = X86::FCMOVE; break;
1973 // FIXME: CMP R, 0 -> TEST R, R
1974 EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.Val->hasOneUse());
1975 std::swap(RTrue, RFalse);
1977 BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
1980 void ISel::EmitCMP(SDOperand LHS, SDOperand RHS, bool HasOneUse) {
1982 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
1984 if (HasOneUse && isFoldableLoad(LHS, RHS)) {
1985 switch (RHS.getValueType()) {
1988 case MVT::i8: Opc = X86::CMP8mi; break;
1989 case MVT::i16: Opc = X86::CMP16mi; break;
1990 case MVT::i32: Opc = X86::CMP32mi; break;
1994 EmitFoldedLoad(LHS, AM);
1995 addFullAddress(BuildMI(BB, Opc, 5), AM).addImm(CN->getValue());
2000 switch (RHS.getValueType()) {
2003 case MVT::i8: Opc = X86::CMP8ri; break;
2004 case MVT::i16: Opc = X86::CMP16ri; break;
2005 case MVT::i32: Opc = X86::CMP32ri; break;
2008 unsigned Tmp1 = SelectExpr(LHS);
2009 BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
2012 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(RHS)) {
2013 if (!X86ScalarSSE && (CN->isExactlyValue(+0.0) ||
2014 CN->isExactlyValue(-0.0))) {
2015 unsigned Reg = SelectExpr(LHS);
2016 BuildMI(BB, X86::FTST, 1).addReg(Reg);
2017 BuildMI(BB, X86::FNSTSW8r, 0);
2018 BuildMI(BB, X86::SAHF, 1);
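// FTST compares ST(0) against 0.0 and sets the x87 condition bits; FNSTSW
// copies the status word into AX and SAHF transfers AH into EFLAGS, so the
// same flag-based SetCC/branch handling used after UCOMI works here as well.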
2024 if (HasOneUse && isFoldableLoad(LHS, RHS)) {
2025 switch (RHS.getValueType()) {
2028 case MVT::i8: Opc = X86::CMP8mr; break;
2029 case MVT::i16: Opc = X86::CMP16mr; break;
2030 case MVT::i32: Opc = X86::CMP32mr; break;
2034 EmitFoldedLoad(LHS, AM);
2035 unsigned Reg = SelectExpr(RHS);
2036 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(Reg);
2041 switch (LHS.getValueType()) {
2042 default: assert(0 && "Cannot compare this value!");
2044 case MVT::i8: Opc = X86::CMP8rr; break;
2045 case MVT::i16: Opc = X86::CMP16rr; break;
2046 case MVT::i32: Opc = X86::CMP32rr; break;
2047 case MVT::f32: Opc = X86::UCOMISSrr; break;
2048 case MVT::f64: Opc = X86ScalarSSE ? X86::UCOMISDrr : X86::FUCOMIr; break;
2050 unsigned Tmp1, Tmp2;
2051 if (getRegPressure(LHS) > getRegPressure(RHS)) {
2052 Tmp1 = SelectExpr(LHS);
2053 Tmp2 = SelectExpr(RHS);
2055 Tmp2 = SelectExpr(RHS);
2056 Tmp1 = SelectExpr(LHS);
2058 BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
2061 /// isFoldableLoad - Return true if this is a load instruction that can safely
2062 /// be folded into an operation that uses it.
2063 bool ISel::isFoldableLoad(SDOperand Op, SDOperand OtherOp, bool FloatPromoteOk){
2064 if (Op.getOpcode() == ISD::LOAD) {
2065 // FIXME: currently can't fold constant pool indexes.
2066 if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
2068 } else if (FloatPromoteOk && Op.getOpcode() == ISD::EXTLOAD &&
2069 cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::f32) {
2070 // FIXME: currently can't fold constant pool indexes.
2071 if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
2077 // If this load has already been emitted, we clearly can't fold it.
2078 assert(Op.ResNo == 0 && "Not a use of the value of the load?");
2079 if (ExprMap.count(Op.getValue(1))) return false;
2080 assert(!ExprMap.count(Op.getValue(0)) && "Value in map but not token chain?");
2081 assert(!ExprMap.count(Op.getValue(1))&&"Token lowered but value not in map?");
2083 // If there is not just one use of its value, we cannot fold.
2084 if (!Op.Val->hasNUsesOfValue(1, 0)) return false;
2086 // Finally, we cannot fold the load into the operation if this would induce a
2087 // cycle into the resultant dag. To check for this, see if OtherOp (the other
2088 // operand of the operation we are folding the load into) can possibly use the
2089 // chain node defined by the load.
2090 if (OtherOp.Val && !Op.Val->hasNUsesOfValue(0, 1)) { // Has uses of chain?
2091 std::set<SDNode*> Visited;
2092 if (NodeTransitivelyUsesValue(OtherOp, Op.getValue(1), Visited))
2099 /// EmitFoldedLoad - Ensure that the arguments of the load are code generated,
2100 /// and compute the address being loaded from, placing it in AM.
2101 void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) {
2102 SDOperand Chain = Op.getOperand(0);
2103 SDOperand Address = Op.getOperand(1);
2105 if (getRegPressure(Chain) > getRegPressure(Address)) {
2107 SelectAddress(Address, AM);
2109 SelectAddress(Address, AM);
2113 // The chain for this load is now lowered.
2114 assert(ExprMap.count(SDOperand(Op.Val, 1)) == 0 &&
2115 "Load emitted more than once?");
2116 if (!ExprMap.insert(std::make_pair(Op.getValue(1), 1)).second)
2117 assert(0 && "Load emitted more than once!");
2120 // EmitOrOpOp - Pattern match the expression (Op1|Op2), where we know that op1
2121 // and op2 are i8/i16/i32 values with one use each (the or). If we can form a
2122 // SHLD or SHRD, emit the instruction (generating the value into DestReg) and
2123 // return true; otherwise return false.
2124 bool ISel::EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg) {
2125 if (Op1.getOpcode() == ISD::SHL && Op2.getOpcode() == ISD::SRL) {
2127 } else if (Op2.getOpcode() == ISD::SHL && Op1.getOpcode() == ISD::SRL) {
2128 std::swap(Op1, Op2); // Op1 is the SHL now.
2130 return false; // No match
2133 SDOperand ShlVal = Op1.getOperand(0);
2134 SDOperand ShlAmt = Op1.getOperand(1);
2135 SDOperand ShrVal = Op2.getOperand(0);
2136 SDOperand ShrAmt = Op2.getOperand(1);
2138 unsigned RegSize = MVT::getSizeInBits(Op1.getValueType());
2140 // Find out if ShrAmt = 32-ShlAmt or ShlAmt = 32-ShrAmt.
2141 if (ShlAmt.getOpcode() == ISD::SUB && ShlAmt.getOperand(1) == ShrAmt)
2142 if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShlAmt.getOperand(0)))
2143 if (SubCST->getValue() == RegSize) {
2144 // (A >> ShrAmt) | (A << (32-ShrAmt)) ==> ROR A, ShrAmt
2145 // (A >> ShrAmt) | (B << (32-ShrAmt)) ==> SHRD A, B, ShrAmt
2146 if (ShrVal == ShlVal) {
2147 unsigned Reg, ShAmt;
2148 if (getRegPressure(ShrVal) > getRegPressure(ShrAmt)) {
2149 Reg = SelectExpr(ShrVal);
2150 ShAmt = SelectExpr(ShrAmt);
2152 ShAmt = SelectExpr(ShrAmt);
2153 Reg = SelectExpr(ShrVal);
2155 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2156 unsigned Opc = RegSize == 8 ? X86::ROR8rCL :
2157 (RegSize == 16 ? X86::ROR16rCL : X86::ROR32rCL);
2158 BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
2160 } else if (RegSize != 8) {
2161 unsigned AReg, BReg;
2162 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
2163 BReg = SelectExpr(ShlVal);
2164 AReg = SelectExpr(ShrVal);
2166 AReg = SelectExpr(ShrVal);
2167 BReg = SelectExpr(ShlVal);
2169 unsigned ShAmt = SelectExpr(ShrAmt);
2170 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2171 unsigned Opc = RegSize == 16 ? X86::SHRD16rrCL : X86::SHRD32rrCL;
2172 BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
2177 if (ShrAmt.getOpcode() == ISD::SUB && ShrAmt.getOperand(1) == ShlAmt)
2178 if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShrAmt.getOperand(0)))
2179 if (SubCST->getValue() == RegSize) {
2180 // (A << ShlAmt) | (A >> (32-ShlAmt)) ==> ROL A, ShlAmt
2181 // (A << ShlAmt) | (B >> (32-ShlAmt)) ==> SHLD A, B, ShlAmt
2182 if (ShrVal == ShlVal) {
2183 unsigned Reg, ShAmt;
2184 if (getRegPressure(ShrVal) > getRegPressure(ShlAmt)) {
2185 Reg = SelectExpr(ShrVal);
2186 ShAmt = SelectExpr(ShlAmt);
2188 ShAmt = SelectExpr(ShlAmt);
2189 Reg = SelectExpr(ShrVal);
2191 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2192 unsigned Opc = RegSize == 8 ? X86::ROL8rCL :
2193 (RegSize == 16 ? X86::ROL16rCL : X86::ROL32rCL);
2194 BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
2196 } else if (RegSize != 8) {
2197 unsigned AReg, BReg;
2198 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
2199 AReg = SelectExpr(ShlVal);
2200 BReg = SelectExpr(ShrVal);
2202 BReg = SelectExpr(ShrVal);
2203 AReg = SelectExpr(ShlVal);
2205 unsigned ShAmt = SelectExpr(ShlAmt);
2206 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2207 unsigned Opc = RegSize == 16 ? X86::SHLD16rrCL : X86::SHLD32rrCL;
2208 BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
2213 if (ConstantSDNode *ShrCst = dyn_cast<ConstantSDNode>(ShrAmt))
2214 if (ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(ShlAmt))
2215 if (ShrCst->getValue() < RegSize && ShlCst->getValue() < RegSize)
2216 if (ShrCst->getValue() == RegSize-ShlCst->getValue()) {
2217 // (A >> 5) | (A << 27) --> ROR A, 5
2218 // (A >> 5) | (B << 27) --> SHRD A, B, 5
2219 if (ShrVal == ShlVal) {
2220 unsigned Reg = SelectExpr(ShrVal);
2221 unsigned Opc = RegSize == 8 ? X86::ROR8ri :
2222 (RegSize == 16 ? X86::ROR16ri : X86::ROR32ri);
2223 BuildMI(BB, Opc, 2, DestReg).addReg(Reg).addImm(ShrCst->getValue());
2225 } else if (RegSize != 8) {
2226 unsigned AReg, BReg;
2227 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
2228 BReg = SelectExpr(ShlVal);
2229 AReg = SelectExpr(ShrVal);
2231 AReg = SelectExpr(ShrVal);
2232 BReg = SelectExpr(ShlVal);
2234 unsigned Opc = RegSize == 16 ? X86::SHRD16rri8 : X86::SHRD32rri8;
2235 BuildMI(BB, Opc, 3, DestReg).addReg(AReg).addReg(BReg)
2236 .addImm(ShrCst->getValue());
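// The RegSize != 8 guards above reflect that x86 has no 8-bit SHLD/SHRD
// forms; for i8 operands only the rotate patterns can be matched.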
2244 unsigned ISel::SelectExpr(SDOperand N) {
2246 unsigned Tmp1, Tmp2, Tmp3;
2248 SDNode *Node = N.Val;
2251 if (Node->getOpcode() == ISD::CopyFromReg) {
2252 if (MRegisterInfo::isVirtualRegister(cast<RegSDNode>(Node)->getReg()) ||
2253 cast<RegSDNode>(Node)->getReg() == X86::ESP) {
2254 // Just use the specified register as our input.
2255 return cast<RegSDNode>(Node)->getReg();
2259 unsigned &Reg = ExprMap[N];
2260 if (Reg) return Reg;
2262 switch (N.getOpcode()) {
2264 Reg = Result = (N.getValueType() != MVT::Other) ?
2265 MakeReg(N.getValueType()) : 1;
2267 case X86ISD::TAILCALL:
2269 // If this is a call instruction, make sure to prepare ALL of the result
2270 // values as well as the chain.
2271 ExprMap[N.getValue(0)] = 1;
2272 if (Node->getNumValues() > 1) {
2273 Result = MakeReg(Node->getValueType(1));
2274 ExprMap[N.getValue(1)] = Result;
2275 for (unsigned i = 2, e = Node->getNumValues(); i != e; ++i)
2276 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
2281 case ISD::ADD_PARTS:
2282 case ISD::SUB_PARTS:
2283 case ISD::SHL_PARTS:
2284 case ISD::SRL_PARTS:
2285 case ISD::SRA_PARTS:
2286 Result = MakeReg(Node->getValueType(0));
2287 ExprMap[N.getValue(0)] = Result;
2288 for (unsigned i = 1, e = N.Val->getNumValues(); i != e; ++i)
2289 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
2293 switch (N.getOpcode()) {
2296 assert(0 && "Node not handled!\n");
2297 case ISD::FP_EXTEND:
2298 assert(X86ScalarSSE && "Scalar SSE FP must be enabled to use f32");
2299 Tmp1 = SelectExpr(N.getOperand(0));
2300 BuildMI(BB, X86::CVTSS2SDrr, 1, Result).addReg(Tmp1);
2303 assert(X86ScalarSSE && "Scalar SSE FP must be enabled to use f32");
2304 Tmp1 = SelectExpr(N.getOperand(0));
2305 BuildMI(BB, X86::CVTSD2SSrr, 1, Result).addReg(Tmp1);
2307 case ISD::CopyFromReg:
2308 Select(N.getOperand(0));
2310 Reg = Result = ExprMap[N.getValue(0)] =
2311 MakeReg(N.getValue(0).getValueType());
2313 switch (Node->getValueType(0)) {
2314 default: assert(0 && "Cannot CopyFromReg this!");
2317 BuildMI(BB, X86::MOV8rr, 1,
2318 Result).addReg(cast<RegSDNode>(Node)->getReg());
2321 BuildMI(BB, X86::MOV16rr, 1,
2322 Result).addReg(cast<RegSDNode>(Node)->getReg());
2325 BuildMI(BB, X86::MOV32rr, 1,
2326 Result).addReg(cast<RegSDNode>(Node)->getReg());
2330 case ISD::FrameIndex:
2331 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
2332 addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
2334 case ISD::ConstantPool:
2335 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
2336 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
2338 case ISD::ConstantFP:
2340 assert(cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) &&
2341 "SSE only supports +0.0");
2342 Opc = (N.getValueType() == MVT::f32) ? X86::FLD0SS : X86::FLD0SD;
2343 BuildMI(BB, Opc, 0, Result);
2346 ContainsFPCode = true;
2347 Tmp1 = Result; // Intermediate Register
2348 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
2349 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
2350 Tmp1 = MakeReg(MVT::f64);
2352 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
2353 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
2354 BuildMI(BB, X86::FLD0, 0, Tmp1);
2355 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
2356 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
2357 BuildMI(BB, X86::FLD1, 0, Tmp1);
2359 assert(0 && "Unexpected constant!");
2361 BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
2364 switch (N.getValueType()) {
2365 default: assert(0 && "Cannot use constants of this type!");
2367 case MVT::i8: Opc = X86::MOV8ri; break;
2368 case MVT::i16: Opc = X86::MOV16ri; break;
2369 case MVT::i32: Opc = X86::MOV32ri; break;
2371 BuildMI(BB, Opc, 1,Result).addImm(cast<ConstantSDNode>(N)->getValue());
2374 if (Node->getValueType(0) == MVT::f64) {
2375 // FIXME: SHOULD TEACH STACKIFIER ABOUT UNDEF VALUES!
2376 BuildMI(BB, X86::FLD0, 0, Result);
2378 BuildMI(BB, X86::IMPLICIT_DEF, 0, Result);
2381 case ISD::GlobalAddress: {
2382 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
2383 // For Darwin, external and weak symbols are indirect, so we want to load
2384 // the value at address GV, not the value of GV itself.
2385 if (Subtarget->getIndirectExternAndWeakGlobals() &&
2386 (GV->hasWeakLinkage() || GV->isExternal())) {
2387 BuildMI(BB, X86::MOV32rm, 4, Result).addReg(0).addZImm(1).addReg(0)
2388 .addGlobalAddress(GV, false, 0);
2390 BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
2394 case ISD::ExternalSymbol: {
2395 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
2396 BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
2399 case ISD::ZERO_EXTEND: {
2400 int DestIs16 = N.getValueType() == MVT::i16;
2401 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
2403 // FIXME: This hack is here for zero extension casts from bool to i8. This
2404 // would not be needed if bools were promoted by Legalize.
2405 if (N.getValueType() == MVT::i8) {
2406 Tmp1 = SelectExpr(N.getOperand(0));
2407 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
2411 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
2412 static const unsigned Opc[3] = {
2413 X86::MOVZX32rm8, X86::MOVZX32rm16, X86::MOVZX16rm8
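// These extension tables (and the MOVSX ones in the SIGN_EXTEND case below)
// are indexed by SrcIs16 + DestIs16*2: 0 = i8->i32, 1 = i16->i32, 2 = i8->i16.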
2417 EmitFoldedLoad(N.getOperand(0), AM);
2418 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
2423 static const unsigned Opc[3] = {
2424 X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
2426 Tmp1 = SelectExpr(N.getOperand(0));
2427 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
2430 case ISD::SIGN_EXTEND: {
2431 int DestIs16 = N.getValueType() == MVT::i16;
2432 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
2434 // FIXME: Legalize should promote bools to i8!
2435 assert(N.getOperand(0).getValueType() != MVT::i1 &&
2436 "Sign extend from bool not implemented!");
2438 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
2439 static const unsigned Opc[3] = {
2440 X86::MOVSX32rm8, X86::MOVSX32rm16, X86::MOVSX16rm8
2444 EmitFoldedLoad(N.getOperand(0), AM);
2445 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
2449 static const unsigned Opc[3] = {
2450 X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
2452 Tmp1 = SelectExpr(N.getOperand(0));
2453 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
2457 // Fold TRUNCATE (LOAD P) into a smaller load from P.
2458 // FIXME: This should be performed by the DAGCombiner.
2459 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
2460 switch (N.getValueType()) {
2461 default: assert(0 && "Unknown truncate!");
2463 case MVT::i8: Opc = X86::MOV8rm; break;
2464 case MVT::i16: Opc = X86::MOV16rm; break;
2467 EmitFoldedLoad(N.getOperand(0), AM);
2468 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
2472 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
2473 // a move out of AX or AL.
2474 switch (N.getOperand(0).getValueType()) {
2475 default: assert(0 && "Unknown truncate!");
2476 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
2477 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
2478 case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
2480 Tmp1 = SelectExpr(N.getOperand(0));
2481 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
2483 switch (N.getValueType()) {
2484 default: assert(0 && "Unknown truncate!");
2486 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
2487 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
2489 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
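// Routing the value through EAX/AX and copying back out of AL/AX is used here
// presumably because only EAX/EBX/ECX/EDX expose 8-bit subregisters on x86-32,
// so the narrow copy out of the A register is always encodable.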
2492 case ISD::SINT_TO_FP: {
2493 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
2494 unsigned PromoteOpcode = 0;
2496 // We can handle any sint to fp with the direct sse conversion instructions.
2498 Opc = (N.getValueType() == MVT::f64) ? X86::CVTSI2SDrr : X86::CVTSI2SSrr;
2499 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2503 ContainsFPCode = true;
2505 // Spill the integer to memory and reload it from there.
2506 MVT::ValueType SrcTy = N.getOperand(0).getValueType();
2507 unsigned Size = MVT::getSizeInBits(SrcTy)/8;
2508 MachineFunction *F = BB->getParent();
2509 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
2513 addFrameReference(BuildMI(BB, X86::MOV32mr, 5), FrameIdx).addReg(Tmp1);
2514 addFrameReference(BuildMI(BB, X86::FILD32m, 5, Result), FrameIdx);
2517 addFrameReference(BuildMI(BB, X86::MOV16mr, 5), FrameIdx).addReg(Tmp1);
2518 addFrameReference(BuildMI(BB, X86::FILD16m, 5, Result), FrameIdx);
2520 default: break; // No promotion required.
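// The spill-and-reload through a stack slot is required because the x87 FILD
// instruction only accepts its integer source operand from memory.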
2524 case ISD::FP_TO_SINT:
2525 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
2527 // If the target supports SSE2 and is performing FP operations in SSE regs
2528 // instead of the FP stack, then we can use the efficient CVTTSS2SI and
2529 // CVTTSD2SI instructions.
2530 assert(X86ScalarSSE);
2531 if (MVT::f32 == N.getOperand(0).getValueType()) {
2532 BuildMI(BB, X86::CVTTSS2SIrr, 1, Result).addReg(Tmp1);
2533 } else if (MVT::f64 == N.getOperand(0).getValueType()) {
2534 BuildMI(BB, X86::CVTTSD2SIrr, 1, Result).addReg(Tmp1);
2536 assert(0 && "Not an f32 or f64?");
2542 Op0 = N.getOperand(0);
2543 Op1 = N.getOperand(1);
2545 if (isFoldableLoad(Op0, Op1, true)) {
2546 std::swap(Op0, Op1);
2550 if (isFoldableLoad(Op1, Op0, true)) {
2552 switch (N.getValueType()) {
2553 default: assert(0 && "Cannot add this type!");
2555 case MVT::i8: Opc = X86::ADD8rm; break;
2556 case MVT::i16: Opc = X86::ADD16rm; break;
2557 case MVT::i32: Opc = X86::ADD32rm; break;
2558 case MVT::f32: Opc = X86::ADDSSrm; break;
2560 // For F64, handle promoted load operations (from F32) as well!
2562 assert(Op1.getOpcode() == ISD::LOAD && "SSE load not promoted");
2565 Opc = Op1.getOpcode() == ISD::LOAD ? X86::FADD64m : X86::FADD32m;
2570 EmitFoldedLoad(Op1, AM);
2571 Tmp1 = SelectExpr(Op0);
2572 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2576 // See if we can codegen this as an LEA to fold operations together.
2577 if (N.getValueType() == MVT::i32) {
2579 X86ISelAddressMode AM;
2580 MatchAddress(N, AM);
2581 ExprMap[N] = Result;
2583 // If this is not just an add, emit the LEA. For a simple add (like
2584 // reg+reg or reg+imm), we just emit an add. It might be a good idea to
2585 // leave this as LEA, then peephole it to 'ADD' after two-address elimination.
2587 if (AM.Scale != 1 || AM.BaseType == X86ISelAddressMode::FrameIndexBase||
2588 AM.GV || (AM.Base.Reg.Val && AM.IndexReg.Val && AM.Disp)) {
2589 X86AddressMode XAM = SelectAddrExprs(AM);
2590 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), XAM);
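// For example (hypothetical values), an expression like
//   add (shl X, 2), (add Y, 12)
// can be emitted as a single "leal 12(%Y,%X,4)" by this path instead of a
// shift and two adds.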
2595 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2597 if (CN->getValue() == 1) { // add X, 1 -> inc X
2598 switch (N.getValueType()) {
2599 default: assert(0 && "Cannot integer add this type!");
2600 case MVT::i8: Opc = X86::INC8r; break;
2601 case MVT::i16: Opc = X86::INC16r; break;
2602 case MVT::i32: Opc = X86::INC32r; break;
2604 } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
2605 switch (N.getValueType()) {
2606 default: assert(0 && "Cannot integer add this type!");
2607 case MVT::i8: Opc = X86::DEC8r; break;
2608 case MVT::i16: Opc = X86::DEC16r; break;
2609 case MVT::i32: Opc = X86::DEC32r; break;
2614 Tmp1 = SelectExpr(Op0);
2615 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2619 switch (N.getValueType()) {
2620 default: assert(0 && "Cannot add this type!");
2621 case MVT::i8: Opc = X86::ADD8ri; break;
2622 case MVT::i16: Opc = X86::ADD16ri; break;
2623 case MVT::i32: Opc = X86::ADD32ri; break;
2626 Tmp1 = SelectExpr(Op0);
2627 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
2632 switch (N.getValueType()) {
2633 default: assert(0 && "Cannot add this type!");
2634 case MVT::i8: Opc = X86::ADD8rr; break;
2635 case MVT::i16: Opc = X86::ADD16rr; break;
2636 case MVT::i32: Opc = X86::ADD32rr; break;
2637 case MVT::f32: Opc = X86::ADDSSrr; break;
2638 case MVT::f64: Opc = X86ScalarSSE ? X86::ADDSDrr : X86::FpADD; break;
2641 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2642 Tmp1 = SelectExpr(Op0);
2643 Tmp2 = SelectExpr(Op1);
2645 Tmp2 = SelectExpr(Op1);
2646 Tmp1 = SelectExpr(Op0);
2649 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
2653 Tmp1 = SelectExpr(Node->getOperand(0));
2655 Opc = (N.getValueType() == MVT::f32) ? X86::SQRTSSrr : X86::SQRTSDrr;
2656 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2658 BuildMI(BB, X86::FSQRT, 1, Result).addReg(Tmp1);
2663 // Once we can spill 16 byte constants into the constant pool, we can
2664 // implement SSE equivalents of FABS and FCHS.
2669 assert(N.getValueType()==MVT::f64 && "Illegal type for this operation");
2670 Tmp1 = SelectExpr(Node->getOperand(0));
2671 switch (N.getOpcode()) {
2672 default: assert(0 && "Unreachable!");
2673 case ISD::FABS: BuildMI(BB, X86::FABS, 1, Result).addReg(Tmp1); break;
2674 case ISD::FNEG: BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1); break;
2675 case ISD::FSIN: BuildMI(BB, X86::FSIN, 1, Result).addReg(Tmp1); break;
2676 case ISD::FCOS: BuildMI(BB, X86::FCOS, 1, Result).addReg(Tmp1); break;
2681 switch (N.getValueType()) {
2682 default: assert(0 && "Unsupported VT!");
2683 case MVT::i8: Tmp2 = X86::MUL8r; break;
2684 case MVT::i16: Tmp2 = X86::MUL16r; break;
2685 case MVT::i32: Tmp2 = X86::MUL32r; break;
2689 unsigned MovOpc, LowReg, HiReg;
2690 switch (N.getValueType()) {
2691 default: assert(0 && "Unsupported VT!");
2693 MovOpc = X86::MOV8rr;
2699 MovOpc = X86::MOV16rr;
2705 MovOpc = X86::MOV32rr;
2711 if (Node->getOpcode() != ISD::MULHS)
2712 Opc = Tmp2; // Get the MULHU opcode.
2714 Op0 = Node->getOperand(0);
2715 Op1 = Node->getOperand(1);
2716 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2717 Tmp1 = SelectExpr(Op0);
2718 Tmp2 = SelectExpr(Op1);
2720 Tmp2 = SelectExpr(Op1);
2721 Tmp1 = SelectExpr(Op0);
2724 // FIXME: Implement folding of loads into the memory operands here!
2725 BuildMI(BB, MovOpc, 1, LowReg).addReg(Tmp1);
2726 BuildMI(BB, Opc, 1).addReg(Tmp2);
2727 BuildMI(BB, MovOpc, 1, Result).addReg(HiReg);
2736 static const unsigned SUBTab[] = {
2737 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
2738 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FSUB32m, X86::FSUB64m,
2739 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB , X86::FpSUB,
2741 static const unsigned SSE_SUBTab[] = {
2742 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
2743 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::SUBSSrm, X86::SUBSDrm,
2744 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::SUBSSrr, X86::SUBSDrr,
2746 static const unsigned MULTab[] = {
2747 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
2748 0, X86::IMUL16rm , X86::IMUL32rm, X86::FMUL32m, X86::FMUL64m,
2749 0, X86::IMUL16rr , X86::IMUL32rr, X86::FpMUL , X86::FpMUL,
2751 static const unsigned SSE_MULTab[] = {
2752 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
2753 0, X86::IMUL16rm , X86::IMUL32rm, X86::MULSSrm, X86::MULSDrm,
2754 0, X86::IMUL16rr , X86::IMUL32rr, X86::MULSSrr, X86::MULSDrr,
2756 static const unsigned ANDTab[] = {
2757 X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, 0,
2758 X86::AND8rm, X86::AND16rm, X86::AND32rm, 0, 0,
2759 X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
2761 static const unsigned ORTab[] = {
2762 X86::OR8ri, X86::OR16ri, X86::OR32ri, 0, 0,
2763 X86::OR8rm, X86::OR16rm, X86::OR32rm, 0, 0,
2764 X86::OR8rr, X86::OR16rr, X86::OR32rr, 0, 0,
2766 static const unsigned XORTab[] = {
2767 X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, 0,
2768 X86::XOR8rm, X86::XOR16rm, X86::XOR32rm, 0, 0,
2769 X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, 0,
2772 Op0 = Node->getOperand(0);
2773 Op1 = Node->getOperand(1);
2775 if (Node->getOpcode() == ISD::OR && Op0.hasOneUse() && Op1.hasOneUse())
2776 if (EmitOrOpOp(Op0, Op1, Result)) // Match SHLD, SHRD, and rotates.
2779 if (Node->getOpcode() == ISD::SUB)
2780 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
2781 if (CN->isNullValue()) { // 0 - N -> neg N
2782 switch (N.getValueType()) {
2783 default: assert(0 && "Cannot sub this type!");
2785 case MVT::i8: Opc = X86::NEG8r; break;
2786 case MVT::i16: Opc = X86::NEG16r; break;
2787 case MVT::i32: Opc = X86::NEG32r; break;
2789 Tmp1 = SelectExpr(N.getOperand(1));
2790 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2794 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2795 if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) {
2797 switch (N.getValueType()) {
2798 default: assert(0 && "Cannot add this type!");
2799 case MVT::i1: break; // Not supported, don't invert upper bits!
2800 case MVT::i8: Opc = X86::NOT8r; break;
2801 case MVT::i16: Opc = X86::NOT16r; break;
2802 case MVT::i32: Opc = X86::NOT32r; break;
2805 Tmp1 = SelectExpr(Op0);
2806 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2811 // Fold common multiplies into LEA instructions.
2812 if (Node->getOpcode() == ISD::MUL && N.getValueType() == MVT::i32) {
2813 switch ((int)CN->getValue()) {
2818 // Remove N from exprmap so SelectAddress doesn't get confused.
2821 SelectAddress(N, AM);
2822 // Restore it to the map.
2823 ExprMap[N] = Result;
2824 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
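// The multiplies that can be turned into an LEA this way are the ones that
// scaled-index addressing can express, e.g. x*3 as "leal (%x,%x,2)" and
// similarly x*5 and x*9; the exact constants accepted are the cases of the
// switch above.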
2829 switch (N.getValueType()) {
2830 default: assert(0 && "Cannot xor this type!");
2832 case MVT::i8: Opc = 0; break;
2833 case MVT::i16: Opc = 1; break;
2834 case MVT::i32: Opc = 2; break;
2836 switch (Node->getOpcode()) {
2837 default: assert(0 && "Unreachable!");
2838 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2839 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
2840 case ISD::AND: Opc = ANDTab[Opc]; break;
2841 case ISD::OR: Opc = ORTab[Opc]; break;
2842 case ISD::XOR: Opc = XORTab[Opc]; break;
2844 if (Opc) { // Can't fold MUL:i8 R, imm
2845 Tmp1 = SelectExpr(Op0);
2846 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
2851 if (isFoldableLoad(Op0, Op1, true))
2852 if (Node->getOpcode() != ISD::SUB) {
2853 std::swap(Op0, Op1);
2856 // For FP, emit a 'reverse' subtract with a memory operand.
2857 if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
2858 if (Op0.getOpcode() == ISD::EXTLOAD)
2859 Opc = X86::FSUBR32m;
2861 Opc = X86::FSUBR64m;
2864 EmitFoldedLoad(Op0, AM);
2865 Tmp1 = SelectExpr(Op1);
2866 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2871 if (isFoldableLoad(Op1, Op0, true)) {
2873 switch (N.getValueType()) {
2874 default: assert(0 && "Cannot operate on this type!");
2876 case MVT::i8: Opc = 5; break;
2877 case MVT::i16: Opc = 6; break;
2878 case MVT::i32: Opc = 7; break;
2879 case MVT::f32: Opc = 8; break;
2880 // For F64, handle promoted load operations (from F32) as well!
2882 assert((!X86ScalarSSE || Op1.getOpcode() == ISD::LOAD) &&
2883 "SSE load should have been promoted");
2884 Opc = Op1.getOpcode() == ISD::LOAD ? 9 : 8; break;
2886 switch (Node->getOpcode()) {
2887 default: assert(0 && "Unreachable!");
2888 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2889 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
2890 case ISD::AND: Opc = ANDTab[Opc]; break;
2891 case ISD::OR: Opc = ORTab[Opc]; break;
2892 case ISD::XOR: Opc = XORTab[Opc]; break;
2896 EmitFoldedLoad(Op1, AM);
2897 Tmp1 = SelectExpr(Op0);
2899 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2901 assert(Node->getOpcode() == ISD::MUL &&
2902 N.getValueType() == MVT::i8 && "Unexpected situation!");
2903 // Must use the MUL instruction, which forces use of AL.
2904 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
2905 addFullAddress(BuildMI(BB, X86::MUL8m, 1), AM);
2906 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
2911 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2912 Tmp1 = SelectExpr(Op0);
2913 Tmp2 = SelectExpr(Op1);
2915 Tmp2 = SelectExpr(Op1);
2916 Tmp1 = SelectExpr(Op0);
2919 switch (N.getValueType()) {
2920 default: assert(0 && "Cannot add this type!");
2922 case MVT::i8: Opc = 10; break;
2923 case MVT::i16: Opc = 11; break;
2924 case MVT::i32: Opc = 12; break;
2925 case MVT::f32: Opc = 13; break;
2926 case MVT::f64: Opc = 14; break;
2928 switch (Node->getOpcode()) {
2929 default: assert(0 && "Unreachable!");
2930 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2931 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
2932 case ISD::AND: Opc = ANDTab[Opc]; break;
2933 case ISD::OR: Opc = ORTab[Opc]; break;
2934 case ISD::XOR: Opc = XORTab[Opc]; break;
2937 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
2939 assert(Node->getOpcode() == ISD::MUL &&
2940 N.getValueType() == MVT::i8 && "Unexpected situation!");
2941 // Must use the MUL instruction, which forces use of AL.
2942 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
2943 BuildMI(BB, X86::MUL8r, 1).addReg(Tmp2);
2944 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
2948 case ISD::ADD_PARTS:
2949 case ISD::SUB_PARTS: {
2950 assert(N.getNumOperands() == 4 && N.getValueType() == MVT::i32 &&
2951 "Not an i64 add/sub!");
2952 // Emit all of the operands.
2953 std::vector<unsigned> InVals;
2954 for (unsigned i = 0, e = N.getNumOperands(); i != e; ++i)
2955 InVals.push_back(SelectExpr(N.getOperand(i)));
2956 if (N.getOpcode() == ISD::ADD_PARTS) {
2957 BuildMI(BB, X86::ADD32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
2958 BuildMI(BB, X86::ADC32rr,2,Result+1).addReg(InVals[1]).addReg(InVals[3]);
2960 BuildMI(BB, X86::SUB32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
2961 BuildMI(BB, X86::SBB32rr, 2,Result+1).addReg(InVals[1]).addReg(InVals[3]);
2963 return Result+N.ResNo;
2966 case ISD::SHL_PARTS:
2967 case ISD::SRA_PARTS:
2968 case ISD::SRL_PARTS: {
2969 assert(N.getNumOperands() == 3 && N.getValueType() == MVT::i32 &&
2970 "Not an i64 shift!");
2971 unsigned ShiftOpLo = SelectExpr(N.getOperand(0));
2972 unsigned ShiftOpHi = SelectExpr(N.getOperand(1));
2973 unsigned TmpReg = MakeReg(MVT::i32);
2974 if (N.getOpcode() == ISD::SRA_PARTS) {
2975 // If this is an arithmetic shift right of a Long, then we need to do funny
2976 // sign extension stuff. TmpReg gets the value to use as the high-part if we
2977 // are shifting by 32 or more bits.
2978 BuildMI(BB, X86::SAR32ri, 2, TmpReg).addReg(ShiftOpHi).addImm(31);
2980 // Other shifts use a fixed zero value if the shift is by 32 or more bits.
2981 BuildMI(BB, X86::MOV32ri, 1, TmpReg).addImm(0);
2984 // Initialize CL with the shift amount.
2985 unsigned ShiftAmountReg = SelectExpr(N.getOperand(2));
2986 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
2988 unsigned TmpReg2 = MakeReg(MVT::i32);
2989 unsigned TmpReg3 = MakeReg(MVT::i32);
2990 if (N.getOpcode() == ISD::SHL_PARTS) {
2991 // TmpReg2 = shld inHi, inLo
2992 BuildMI(BB, X86::SHLD32rrCL, 2,TmpReg2).addReg(ShiftOpHi)
2994 // TmpReg3 = shl inLo, CL
2995 BuildMI(BB, X86::SHL32rCL, 1, TmpReg3).addReg(ShiftOpLo);
2997 // Set the flags to indicate whether the shift was by 32 or more bits.
2998 BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
3000 // DestHi = (>32) ? TmpReg3 : TmpReg2;
3001 BuildMI(BB, X86::CMOVNE32rr, 2,
3002 Result+1).addReg(TmpReg2).addReg(TmpReg3);
3003 // DestLo = (>32) ? TmpReg : TmpReg3;
3004 BuildMI(BB, X86::CMOVNE32rr, 2,
3005 Result).addReg(TmpReg3).addReg(TmpReg);
3007 // TmpReg2 = shrd inLo, inHi
3008 BuildMI(BB, X86::SHRD32rrCL,2,TmpReg2).addReg(ShiftOpLo)
3010 // TmpReg3 = s[ah]r inHi, CL
3011 BuildMI(BB, N.getOpcode() == ISD::SRA_PARTS ? X86::SAR32rCL
3012 : X86::SHR32rCL, 1, TmpReg3)
3015 // Set the flags to indicate whether the shift was by 32 or more bits.
3016 BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
3018 // DestLo = (>32) ? TmpReg3 : TmpReg2;
3019 BuildMI(BB, X86::CMOVNE32rr, 2,
3020 Result).addReg(TmpReg2).addReg(TmpReg3);
3022 // DestHi = (>32) ? TmpReg : TmpReg3;
3023 BuildMI(BB, X86::CMOVNE32rr, 2,
3024 Result+1).addReg(TmpReg3).addReg(TmpReg);
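// In both directions the idea is the same: SHLD/SHRD plus a plain shift
// produce the two halves for shift amounts below 32, and the TEST CL, 32 /
// CMOVNE pair substitutes the corrected halves (the single-register shift
// result plus TmpReg's zero or sign fill) when the amount is 32 or more.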
3026 return Result+N.ResNo;
3030 EmitSelectCC(N.getOperand(0), N.getOperand(1), N.getOperand(2),
3031 N.getValueType(), Result);
3038 assert((N.getOpcode() != ISD::SREM || MVT::isInteger(N.getValueType())) &&
3039 "We don't support this operator!");
3041 if (N.getOpcode() == ISD::SDIV) {
3042 // We can fold loads into FpDIVs, but not really into any others.
3043 if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
3044 // Check for reversed and unreversed DIV.
3045 if (isFoldableLoad(N.getOperand(0), N.getOperand(1), true)) {
3046 if (N.getOperand(0).getOpcode() == ISD::EXTLOAD)
3047 Opc = X86::FDIVR32m;
3049 Opc = X86::FDIVR64m;
3051 EmitFoldedLoad(N.getOperand(0), AM);
3052 Tmp1 = SelectExpr(N.getOperand(1));
3053 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
3055 } else if (isFoldableLoad(N.getOperand(1), N.getOperand(0), true) &&
3056 N.getOperand(1).getOpcode() == ISD::LOAD) {
3057 if (N.getOperand(1).getOpcode() == ISD::EXTLOAD)
3062 EmitFoldedLoad(N.getOperand(1), AM);
3063 Tmp1 = SelectExpr(N.getOperand(0));
3064 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
3069 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3070 // FIXME: These special cases should be handled by the lowering impl!
3071 unsigned RHS = CN->getValue();
3077 if (RHS && (RHS & (RHS-1)) == 0) { // Signed division by power of 2?
3078 unsigned Log = Log2_32(RHS);
3079 unsigned SAROpc, SHROpc, ADDOpc, NEGOpc;
3080 switch (N.getValueType()) {
3081 default: assert(0 && "Unknown type to signed divide!");
3083 SAROpc = X86::SAR8ri;
3084 SHROpc = X86::SHR8ri;
3085 ADDOpc = X86::ADD8rr;
3086 NEGOpc = X86::NEG8r;
3089 SAROpc = X86::SAR16ri;
3090 SHROpc = X86::SHR16ri;
3091 ADDOpc = X86::ADD16rr;
3092 NEGOpc = X86::NEG16r;
3095 SAROpc = X86::SAR32ri;
3096 SHROpc = X86::SHR32ri;
3097 ADDOpc = X86::ADD32rr;
3098 NEGOpc = X86::NEG32r;
3101 unsigned RegSize = MVT::getSizeInBits(N.getValueType());
3102 Tmp1 = SelectExpr(N.getOperand(0));
3105 TmpReg = MakeReg(N.getValueType());
3106 BuildMI(BB, SAROpc, 2, TmpReg).addReg(Tmp1).addImm(Log-1);
3110 unsigned TmpReg2 = MakeReg(N.getValueType());
3111 BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(RegSize-Log);
3112 unsigned TmpReg3 = MakeReg(N.getValueType());
3113 BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
3115 unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
3116 BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
3118 BuildMI(BB, NEGOpc, 1, Result).addReg(TmpReg4);
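// The sequence above is the usual round-toward-zero signed division by 2^Log:
//   t = X + ((X >> (Log-1)) >>u (RegSize-Log));  Result = t >> Log
// with a final NEG when the original divisor was negative.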
3124 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3125 Tmp1 = SelectExpr(N.getOperand(0));
3126 Tmp2 = SelectExpr(N.getOperand(1));
3128 Tmp2 = SelectExpr(N.getOperand(1));
3129 Tmp1 = SelectExpr(N.getOperand(0));
3132 bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
3133 bool isDiv = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
3134 unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
3135 switch (N.getValueType()) {
3136 default: assert(0 && "Cannot sdiv this type!");
3138 DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
3141 MovOpcode = X86::MOV8rr;
3142 ClrOpcode = X86::MOV8ri;
3143 SExtOpcode = X86::CBW;
3146 DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
3149 MovOpcode = X86::MOV16rr;
3150 ClrOpcode = X86::MOV16ri;
3151 SExtOpcode = X86::CWD;
3154 DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
3157 MovOpcode = X86::MOV32rr;
3158 ClrOpcode = X86::MOV32ri;
3159 SExtOpcode = X86::CDQ;
3162 BuildMI(BB, X86::DIVSSrr, 2, Result).addReg(Tmp1).addReg(Tmp2);
3165 Opc = X86ScalarSSE ? X86::DIVSDrr : X86::FpDIV;
3166 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3170 // Set up the low part.
3171 BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);
3174 // Sign extend the low part into the high part.
3175 BuildMI(BB, SExtOpcode, 0);
3177 // Zero out the high part, effectively zero extending the input.
3178 BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
3181 // Emit the DIV/IDIV instruction.
3182 BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
3184 // Get the result of the divide or rem.
3185 BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
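// DIV/IDIV implicitly divide the double-width value in AX / DX:AX / EDX:EAX by
// the register operand, leaving the quotient in the low register and the
// remainder in the high one; that is why only a LoReg/HiReg copy is needed to
// fetch the result.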
3190 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3191 if (CN->getValue() == 1) { // X = SHL Y, 1 -> X = ADD Y, Y
3192 switch (N.getValueType()) {
3193 default: assert(0 && "Cannot shift this type!");
3194 case MVT::i8: Opc = X86::ADD8rr; break;
3195 case MVT::i16: Opc = X86::ADD16rr; break;
3196 case MVT::i32: Opc = X86::ADD32rr; break;
3198 Tmp1 = SelectExpr(N.getOperand(0));
3199 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp1);
3203 switch (N.getValueType()) {
3204 default: assert(0 && "Cannot shift this type!");
3205 case MVT::i8: Opc = X86::SHL8ri; break;
3206 case MVT::i16: Opc = X86::SHL16ri; break;
3207 case MVT::i32: Opc = X86::SHL32ri; break;
3209 Tmp1 = SelectExpr(N.getOperand(0));
3210 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3214 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3215 Tmp1 = SelectExpr(N.getOperand(0));
3216 Tmp2 = SelectExpr(N.getOperand(1));
3218 Tmp2 = SelectExpr(N.getOperand(1));
3219 Tmp1 = SelectExpr(N.getOperand(0));
3222 switch (N.getValueType()) {
3223 default: assert(0 && "Cannot shift this type!");
3224 case MVT::i8 : Opc = X86::SHL8rCL; break;
3225 case MVT::i16: Opc = X86::SHL16rCL; break;
3226 case MVT::i32: Opc = X86::SHL32rCL; break;
3228 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3229 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3232 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3233 switch (N.getValueType()) {
3234 default: assert(0 && "Cannot shift this type!");
3235 case MVT::i8: Opc = X86::SHR8ri; break;
3236 case MVT::i16: Opc = X86::SHR16ri; break;
3237 case MVT::i32: Opc = X86::SHR32ri; break;
3239 Tmp1 = SelectExpr(N.getOperand(0));
3240 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3244 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3245 Tmp1 = SelectExpr(N.getOperand(0));
3246 Tmp2 = SelectExpr(N.getOperand(1));
3248 Tmp2 = SelectExpr(N.getOperand(1));
3249 Tmp1 = SelectExpr(N.getOperand(0));
3252 switch (N.getValueType()) {
3253 default: assert(0 && "Cannot shift this type!");
3254 case MVT::i8 : Opc = X86::SHR8rCL; break;
3255 case MVT::i16: Opc = X86::SHR16rCL; break;
3256 case MVT::i32: Opc = X86::SHR32rCL; break;
3258 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3259 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3262 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3263 switch (N.getValueType()) {
3264 default: assert(0 && "Cannot shift this type!");
3265 case MVT::i8: Opc = X86::SAR8ri; break;
3266 case MVT::i16: Opc = X86::SAR16ri; break;
3267 case MVT::i32: Opc = X86::SAR32ri; break;
3269 Tmp1 = SelectExpr(N.getOperand(0));
3270 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3274 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3275 Tmp1 = SelectExpr(N.getOperand(0));
3276 Tmp2 = SelectExpr(N.getOperand(1));
3278 Tmp2 = SelectExpr(N.getOperand(1));
3279 Tmp1 = SelectExpr(N.getOperand(0));
3282 switch (N.getValueType()) {
3283 default: assert(0 && "Cannot shift this type!");
3284 case MVT::i8 : Opc = X86::SAR8rCL; break;
3285 case MVT::i16: Opc = X86::SAR16rCL; break;
3286 case MVT::i32: Opc = X86::SAR32rCL; break;
3288 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3289 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3293 EmitCMP(N.getOperand(0), N.getOperand(1), Node->hasOneUse());
3294 EmitSetCC(BB, Result, cast<CondCodeSDNode>(N.getOperand(2))->get(),
3295 MVT::isFloatingPoint(N.getOperand(1).getValueType()));
3298 // Make sure we generate both values.
3299 if (Result != 1) { // Generate the token
3300 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3301 assert(0 && "Load already emitted!?");
3303 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3305 switch (Node->getValueType(0)) {
3306 default: assert(0 && "Cannot load this type!");
3308 case MVT::i8: Opc = X86::MOV8rm; break;
3309 case MVT::i16: Opc = X86::MOV16rm; break;
3310 case MVT::i32: Opc = X86::MOV32rm; break;
3311 case MVT::f32: Opc = X86::MOVSSrm; break;
3317 ContainsFPCode = true;
3322 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
3323 Select(N.getOperand(0));
3324 addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
3328 SDOperand Chain = N.getOperand(0);
3329 SDOperand Address = N.getOperand(1);
3330 if (getRegPressure(Chain) > getRegPressure(Address)) {
3332 SelectAddress(Address, AM);
3334 SelectAddress(Address, AM);
3338 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
3341 case X86ISD::FILD64m:
3342 // Make sure we generate both values.
3343 assert(Result != 1 && N.getValueType() == MVT::f64);
3344 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3345 assert(0 && "Load already emitted!?");
3350 SDOperand Chain = N.getOperand(0);
3351 SDOperand Address = N.getOperand(1);
3352 if (getRegPressure(Chain) > getRegPressure(Address)) {
3354 SelectAddress(Address, AM);
3356 SelectAddress(Address, AM);
3360 addFullAddress(BuildMI(BB, X86::FILD64m, 4, Result), AM);
3364 case ISD::EXTLOAD: // Arbitrarily codegen extloads as MOVZX*
3365 case ISD::ZEXTLOAD: {
3366 // Make sure we generate both values.
3368 ExprMap[N.getValue(1)] = 1; // Generate the token
3370 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3372 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1)))
3373 if (Node->getValueType(0) == MVT::f64) {
3374 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::f32 &&
3376 addConstantPoolReference(BuildMI(BB, X86::FLD32m, 4, Result),
3382 if (getRegPressure(Node->getOperand(0)) >
3383 getRegPressure(Node->getOperand(1))) {
3384 Select(Node->getOperand(0)); // chain
3385 SelectAddress(Node->getOperand(1), AM);
3387 SelectAddress(Node->getOperand(1), AM);
3388 Select(Node->getOperand(0)); // chain
3391 switch (Node->getValueType(0)) {
3392 default: assert(0 && "Unknown type to extend to.");
3394 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::f32 &&
3396 addFullAddress(BuildMI(BB, X86::FLD32m, 5, Result), AM);
3399 switch (cast<VTSDNode>(Node->getOperand(3))->getVT()) {
3401 assert(0 && "Bad zero extend!");
3404 addFullAddress(BuildMI(BB, X86::MOVZX32rm8, 5, Result), AM);
3407 addFullAddress(BuildMI(BB, X86::MOVZX32rm16, 5, Result), AM);
3412 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() <= MVT::i8 &&
3413 "Bad zero extend!");
3414 addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM);
3417 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::i1 &&
3418 "Bad zero extend!");
3419 addFullAddress(BuildMI(BB, X86::MOV8rm, 5, Result), AM);
3424 case ISD::SEXTLOAD: {
3425 // Make sure we generate both values.
3427 ExprMap[N.getValue(1)] = 1; // Generate the token
3429 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3432 if (getRegPressure(Node->getOperand(0)) >
3433 getRegPressure(Node->getOperand(1))) {
3434 Select(Node->getOperand(0)); // chain
3435 SelectAddress(Node->getOperand(1), AM);
3437 SelectAddress(Node->getOperand(1), AM);
3438 Select(Node->getOperand(0)); // chain
3441 switch (Node->getValueType(0)) {
3442 case MVT::i8: assert(0 && "Cannot sign extend from bool!");
3443 default: assert(0 && "Unknown type to sign extend to.");
3445 switch (cast<VTSDNode>(Node->getOperand(3))->getVT()) {
3447 case MVT::i1: assert(0 && "Cannot sign extend from bool!");
3449 addFullAddress(BuildMI(BB, X86::MOVSX32rm8, 5, Result), AM);
3452 addFullAddress(BuildMI(BB, X86::MOVSX32rm16, 5, Result), AM);
3457 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::i8 &&
3458 "Cannot sign extend from bool!");
3459 addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM);
3465 case ISD::DYNAMIC_STACKALLOC:
3466 // Generate both result values.
3468 ExprMap[N.getValue(1)] = 1; // Generate the token
3470 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3472 // FIXME: We currently ignore requested alignments greater than the stack
3473 // alignment. This will need to be revisited at some point.
3474 // Align = N.getOperand(2);
3476 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
3477 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
3478 std::cerr << "Cannot allocate stack object with greater alignment than"
3479 << " the stack alignment yet!";
3483 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3484 Select(N.getOperand(0));
3485 BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
3486 .addImm(CN->getValue());
3488 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3489 Select(N.getOperand(0));
3490 Tmp1 = SelectExpr(N.getOperand(1));
3492 Tmp1 = SelectExpr(N.getOperand(1));
3493 Select(N.getOperand(0));
3496 // Subtract size from stack pointer, thereby allocating some space.
3497 BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
3500 // Put a pointer to the space into the result register, by copying the stack
3501 // pointer.
3502 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
3505 case X86ISD::TAILCALL:
3506 case X86ISD::CALL: {
3507 // The chain for this call is now lowered.
3508 ExprMap.insert(std::make_pair(N.getValue(0), 1));
3510 bool isDirect = isa<GlobalAddressSDNode>(N.getOperand(1)) ||
3511 isa<ExternalSymbolSDNode>(N.getOperand(1));
3512 unsigned Callee = 0;
3514 Select(N.getOperand(0));
3516 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3517 Select(N.getOperand(0));
3518 Callee = SelectExpr(N.getOperand(1));
3520 Callee = SelectExpr(N.getOperand(1));
3521 Select(N.getOperand(0));
3525 // If this call has values to pass in registers, do so now.
3526 if (Node->getNumOperands() > 4) {
3527 // The first value is passed in (a part of) EAX, the second in EDX.
3528 unsigned RegOp1 = SelectExpr(N.getOperand(4));
3530 Node->getNumOperands() > 5 ? SelectExpr(N.getOperand(5)) : 0;
3532 switch (N.getOperand(4).getValueType()) {
3533 default: assert(0 && "Bad thing to pass in regs");
3535 case MVT::i8: BuildMI(BB, X86::MOV8rr , 1,X86::AL).addReg(RegOp1); break;
3536 case MVT::i16: BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1); break;
3537 case MVT::i32: BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);break;
3540 switch (N.getOperand(5).getValueType()) {
3541 default: assert(0 && "Bad thing to pass in regs");
3544 BuildMI(BB, X86::MOV8rr , 1, X86::DL).addReg(RegOp2);
3547 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
3550 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
3555 if (GlobalAddressSDNode *GASD =
3556 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
3557 BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
3558 } else if (ExternalSymbolSDNode *ESSDN =
3559 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
3560 BuildMI(BB, X86::CALLpcrel32,
3561 1).addExternalSymbol(ESSDN->getSymbol(), true);
3563 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3564 Select(N.getOperand(0));
3565 Tmp1 = SelectExpr(N.getOperand(1));
3567 Tmp1 = SelectExpr(N.getOperand(1));
3568 Select(N.getOperand(0));
3571 BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
3574 // Get the number of arg bytes the caller pushed and the number the callee pops off the stack.
3575 Tmp1 = cast<ConstantSDNode>(N.getOperand(2))->getValue();
3576 Tmp2 = cast<ConstantSDNode>(N.getOperand(3))->getValue();
3577 BuildMI(BB, X86::ADJCALLSTACKUP, 2).addImm(Tmp1).addImm(Tmp2);
3579 if (Node->getNumValues() != 1)
3580 switch (Node->getValueType(1)) {
3581 default: assert(0 && "Unknown value type for call result!");
3582 case MVT::Other: return 1;
3585 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3588 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3591 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3592 if (Node->getNumValues() == 3 && Node->getValueType(2) == MVT::i32)
3593 BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
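// An i64 result is returned split across EAX (low half) and EDX (high half),
// hence the extra copy of EDX into Result+1 above.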
3595 case MVT::f64: // Floating-point return values live in %ST(0)
3597 ContainsFPCode = true;
3598 BuildMI(BB, X86::FpGETRESULT, 1, X86::FP0);
3600 unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
3601 MachineFunction *F = BB->getParent();
3602 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
3603 addFrameReference(BuildMI(BB, X86::FST64m, 5), FrameIdx).addReg(X86::FP0);
3604 addFrameReference(BuildMI(BB, X86::MOVSDrm, 4, Result), FrameIdx);
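// Even with scalar SSE enabled, the FP return value arrives on the x87 stack
// in ST(0); since there is no direct ST(0)->XMM move, it is stored to a stack
// slot with FST64m and reloaded into an SSE register with MOVSDrm.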
3607 ContainsFPCode = true;
3608 BuildMI(BB, X86::FpGETRESULT, 1, Result);
3612 return Result+N.ResNo-1;
3615 // First, determine that the size of the operand falls within the acceptable
3616 // range for this architecture.
3618 if (Node->getOperand(1).getValueType() != MVT::i16) {
3619 std::cerr << "llvm.readport: Address size is not 16 bits\n";
3623 // Make sure we generate both values.
3624 if (Result != 1) { // Generate the token
3625 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3626 assert(0 && "readport already emitted!?");
3628 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3630 Select(Node->getOperand(0)); // Select the chain.
3632 // If the port is a single-byte constant, use the immediate form.
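// (The immediate form of IN encodes the port in one byte, so it is only usable
// when the port number fits in 0..255, which is what the mask check verifies.)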
3633 if (ConstantSDNode *Port = dyn_cast<ConstantSDNode>(Node->getOperand(1)))
3634 if ((Port->getValue() & 255) == Port->getValue()) {
3635 switch (Node->getValueType(0)) {
3637 BuildMI(BB, X86::IN8ri, 1).addImm(Port->getValue());
3638 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3641 BuildMI(BB, X86::IN16ri, 1).addImm(Port->getValue());
3642 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3645 BuildMI(BB, X86::IN32ri, 1).addImm(Port->getValue());
3646 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3652 // Now, move the I/O port address into the DX register and use the IN
3653 // instruction to get the input data.
3655 Tmp1 = SelectExpr(Node->getOperand(1));
3656 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Tmp1);
3657 switch (Node->getValueType(0)) {
3659 BuildMI(BB, X86::IN8rr, 0);
3660 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3663 BuildMI(BB, X86::IN16rr, 0);
3664 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3667 BuildMI(BB, X86::IN32rr, 0);
3668 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3671 std::cerr << "Cannot do input on this data type";
3680 /// TryToFoldLoadOpStore - Given a store node, try to fold together a
3681 /// load/op/store instruction. If successful return true.
3682 bool ISel::TryToFoldLoadOpStore(SDNode *Node) {
3683 assert(Node->getOpcode() == ISD::STORE && "Can only do this for stores!");
3684 SDOperand Chain = Node->getOperand(0);
3685 SDOperand StVal = Node->getOperand(1);
3686 SDOperand StPtr = Node->getOperand(2);
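// The goal is to rewrite a pattern like "store (op (load [P]), X), [P]" into a
// single read-modify-write instruction, e.g. "addl $4, (P)" for an add of a
// constant, when the checks below show this is safe.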
3688 // The chain has to be a load, and the stored value must be an integer binary
3689 // operation with one use.
3690 if (!StVal.Val->hasOneUse() || StVal.Val->getNumOperands() != 2 ||
3691 MVT::isFloatingPoint(StVal.getValueType()))
3694 // Token chain must either be a factor node or the load to fold.
3695 if (Chain.getOpcode() != ISD::LOAD && Chain.getOpcode() != ISD::TokenFactor)
3700 // Check to see if either operand of the binop is a load from the same
3701 // pointer that we're storing to.
3702 if (StVal.getOperand(0).getOpcode() == ISD::LOAD &&
3703 StVal.getOperand(0).getOperand(1) == StPtr)
3704 TheLoad = StVal.getOperand(0);
3705 else if (StVal.getOperand(1).getOpcode() == ISD::LOAD &&
3706 StVal.getOperand(1).getOperand(1) == StPtr)
3707 TheLoad = StVal.getOperand(1);
3709 return false; // No matching load operand.
3711 // We can only fold the load if there are no intervening side-effecting
3712 // operations. This means that the store uses the load as its token chain, or
3713 // there are only token factor nodes in between the store and load.
3714 if (Chain != TheLoad.getValue(1)) {
3715 // Okay, the other option is that we have a store referring to (possibly
3716 // nested) token factor nodes. For now, just try peeking through one level
3717 // of token factors to see if this is the case.
3718 bool ChainOk = false;
3719 if (Chain.getOpcode() == ISD::TokenFactor) {
3720 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3721 if (Chain.getOperand(i) == TheLoad.getValue(1)) {
3727 if (!ChainOk) return false;
3730 if (TheLoad.getOperand(1) != StPtr)
3733 // Make sure that one of the operands of the binop is the load, and that the
3734 // load folds into the binop.
3735 if (((StVal.getOperand(0) != TheLoad ||
3736 !isFoldableLoad(TheLoad, StVal.getOperand(1))) &&
3737 (StVal.getOperand(1) != TheLoad ||
3738 !isFoldableLoad(TheLoad, StVal.getOperand(0)))))
3741 // Finally, check to see if this is one of the ops we can handle!
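// Each table below holds the memory-destination forms of one binary op,
// indexed by operand size: entries 0-2 are the [mem], imm forms for
// i8/i16/i32, and entries 3-5 are the corresponding [mem], reg forms.  The
// shift tables have no [mem], reg entries because a variable shift amount
// would have to be placed in CL first, which is not handled here.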
3742 static const unsigned ADDTAB[] = {
3743 X86::ADD8mi, X86::ADD16mi, X86::ADD32mi,
3744 X86::ADD8mr, X86::ADD16mr, X86::ADD32mr,
3746 static const unsigned SUBTAB[] = {
3747 X86::SUB8mi, X86::SUB16mi, X86::SUB32mi,
3748 X86::SUB8mr, X86::SUB16mr, X86::SUB32mr,
3750 static const unsigned ANDTAB[] = {
3751 X86::AND8mi, X86::AND16mi, X86::AND32mi,
3752 X86::AND8mr, X86::AND16mr, X86::AND32mr,
3754 static const unsigned ORTAB[] = {
3755 X86::OR8mi, X86::OR16mi, X86::OR32mi,
3756 X86::OR8mr, X86::OR16mr, X86::OR32mr,
3758 static const unsigned XORTAB[] = {
3759 X86::XOR8mi, X86::XOR16mi, X86::XOR32mi,
3760 X86::XOR8mr, X86::XOR16mr, X86::XOR32mr,
3762 static const unsigned SHLTAB[] = {
3763 X86::SHL8mi, X86::SHL16mi, X86::SHL32mi,
3764 /*Have to put the reg in CL*/0, 0, 0,
3766 static const unsigned SARTAB[] = {
3767 X86::SAR8mi, X86::SAR16mi, X86::SAR32mi,
3768 /*Have to put the reg in CL*/0, 0, 0,
3770 static const unsigned SHRTAB[] = {
3771 X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
3772 /*Have to put the reg in CL*/0, 0, 0,
3775 const unsigned *TabPtr = 0;
3776 switch (StVal.getOpcode()) {
3778 std::cerr << "CANNOT [mem] op= val: ";
3779 StVal.Val->dump(); std::cerr << "\n";
3784 case ISD::UREM: return false;
3786 case ISD::ADD: TabPtr = ADDTAB; break;
3787 case ISD::SUB: TabPtr = SUBTAB; break;
3788 case ISD::AND: TabPtr = ANDTAB; break;
3789 case ISD:: OR: TabPtr = ORTAB; break;
3790 case ISD::XOR: TabPtr = XORTAB; break;
3791 case ISD::SHL: TabPtr = SHLTAB; break;
3792 case ISD::SRA: TabPtr = SARTAB; break;
3793 case ISD::SRL: TabPtr = SHRTAB; break;
3796 // Handle: [mem] op= CST
3797 SDOperand Op0 = StVal.getOperand(0);
3798 SDOperand Op1 = StVal.getOperand(1);
3800 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
3801 switch (Op0.getValueType()) { // Use Op0's type because of shifts.
3804 case MVT::i8: Opc = TabPtr[0]; break;
3805 case MVT::i16: Opc = TabPtr[1]; break;
3806 case MVT::i32: Opc = TabPtr[2]; break;
3810 if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
3811 assert(0 && "Already emitted?");
3815 if (getRegPressure(TheLoad.getOperand(0)) >
3816 getRegPressure(TheLoad.getOperand(1))) {
3817 Select(TheLoad.getOperand(0));
3818 SelectAddress(TheLoad.getOperand(1), AM);
3820 SelectAddress(TheLoad.getOperand(1), AM);
3821 Select(TheLoad.getOperand(0));
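// Special-case [mem] += 1 and [mem] += -1 below: they are emitted as INC and
// DEC of the memory operand, presumably because those have shorter encodings
// than the equivalent ADD/SUB with an immediate.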
3824 if (StVal.getOpcode() == ISD::ADD) {
3825 if (CN->getValue() == 1) {
3826 switch (Op0.getValueType()) {
3829 addFullAddress(BuildMI(BB, X86::INC8m, 4), AM);
3831 case MVT::i16: Opc = TabPtr[1];
3832 addFullAddress(BuildMI(BB, X86::INC16m, 4), AM);
3834 case MVT::i32: Opc = TabPtr[2];
3835 addFullAddress(BuildMI(BB, X86::INC32m, 4), AM);
3838 } else if (CN->getValue()+1 == 0) { // [X] += -1 -> DEC [X]
3839 switch (Op0.getValueType()) {
3842 addFullAddress(BuildMI(BB, X86::DEC8m, 4), AM);
3844 case MVT::i16: Opc = TabPtr[1];
3845 addFullAddress(BuildMI(BB, X86::DEC16m, 4), AM);
3847 case MVT::i32: Opc = TabPtr[2];
3848 addFullAddress(BuildMI(BB, X86::DEC32m, 4), AM);
3854 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
3859 // If we have [mem] = V op [mem], try to turn it into:
3860 // [mem] = [mem] op V.
3861 if (Op1 == TheLoad && StVal.getOpcode() != ISD::SUB &&
3862 StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA &&
3863 StVal.getOpcode() != ISD::SRL)
3864 std::swap(Op0, Op1);
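// (SUB and the shifts are excluded from the swap above because they are not
// commutative: [mem] = V - [mem] is not the same operation as [mem] -= V.)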
3866 if (Op0 != TheLoad) return false;
3868 switch (Op0.getValueType()) {
3869 default: return false;
3871 case MVT::i8: Opc = TabPtr[3]; break;
3872 case MVT::i16: Opc = TabPtr[4]; break;
3873 case MVT::i32: Opc = TabPtr[5]; break;
3876 // Table entry doesn't exist?
3877 if (Opc == 0) return false;
3879 if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
3880 assert(0 && "Already emitted?");
3882 Select(TheLoad.getOperand(0));
3885 SelectAddress(TheLoad.getOperand(1), AM);
3886 unsigned Reg = SelectExpr(Op1);
3887 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Reg);
3891 /// If this RET node is fed by a tail call, emit that tail call and return
3892 /// true; otherwise return false.
3894 /// FIXME: This whole thing should be a post-legalize optimization pass which
3895 /// recognizes and transforms the dag. We don't want the selection phase doing this.
3898 bool ISel::EmitPotentialTailCall(SDNode *RetNode) {
3899 assert(RetNode->getOpcode() == ISD::RET && "Not a return");
3901 SDOperand Chain = RetNode->getOperand(0);
3903 // If this is a token factor node where one operand is a call, dig into it.
3904 SDOperand TokFactor;
3905 unsigned TokFactorOperand = 0;
3906 if (Chain.getOpcode() == ISD::TokenFactor) {
3907 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3908 if (Chain.getOperand(i).getOpcode() == ISD::CALLSEQ_END ||
3909 Chain.getOperand(i).getOpcode() == X86ISD::TAILCALL) {
3910 TokFactorOperand = i;
3912 Chain = Chain.getOperand(i);
3915 if (TokFactor.Val == 0) return false; // No call operand.
3918 // Skip the CALLSEQ_END node if present.
3919 if (Chain.getOpcode() == ISD::CALLSEQ_END)
3920 Chain = Chain.getOperand(0);
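// At this point the chain we hope to be looking at is roughly:
//   RET <- [TokenFactor <-] [CALLSEQ_END <-] X86ISD::TAILCALL
// with any returned values coming directly out of that tail call node.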
3922 // Is a tailcall the last control operation that occurs before the return?
3923 if (Chain.getOpcode() != X86ISD::TAILCALL)
3926 // If we return a value, is it the value produced by the call?
3927 if (RetNode->getNumOperands() > 1) {
3928 // Not returning the ret val of the call?
3929 if (Chain.Val->getNumValues() == 1 ||
3930 RetNode->getOperand(1) != Chain.getValue(1))
3933 if (RetNode->getNumOperands() > 2) {
3934 if (Chain.Val->getNumValues() == 2 ||
3935 RetNode->getOperand(2) != Chain.getValue(2))
3938 assert(RetNode->getNumOperands() <= 3);
3941 // CalleeCallArgAmt - The total number of bytes used for the callee arg area.
3942 // For FastCC, this will always be > 0.
3943 unsigned CalleeCallArgAmt =
3944 cast<ConstantSDNode>(Chain.getOperand(2))->getValue();
3946 // CalleeCallArgPopAmt - The number of bytes in the call area popped by the
3947 // callee. For FastCC this will always be > 0, for CCC this is always 0.
3948 unsigned CalleeCallArgPopAmt =
3949 cast<ConstantSDNode>(Chain.getOperand(3))->getValue();
3951 // There are several cases we can handle here. First, if the caller and
3952 // callee are both CCC functions, we can tailcall if the callee takes <= the
3953 // number of argument bytes that the caller does.
3954 if (CalleeCallArgPopAmt == 0 && // Callee is C CallingConv?
3955 X86Lowering.getBytesToPopOnReturn() == 0) { // Caller is C CallingConv?
3956 // Check to see if caller arg area size >= callee arg area size.
3957 if (X86Lowering.getBytesCallerReserves() >= CalleeCallArgAmt) {
3958 //std::cerr << "CCC TAILCALL UNIMP!\n";
3959 // If TokFactor is non-null, emit all operands.
3961 //EmitCCCToCCCTailCall(Chain.Val);
3967 // Second, if both are FastCC functions, we can always perform the tail call.
3968 if (CalleeCallArgPopAmt && X86Lowering.getBytesToPopOnReturn()) {
3969 // If TokFactor is non-null, emit all operands before the call.
3970 if (TokFactor.Val) {
3971 for (unsigned i = 0, e = TokFactor.getNumOperands(); i != e; ++i)
3972 if (i != TokFactorOperand)
3973 Select(TokFactor.getOperand(i));
3976 EmitFastCCToFastCCTailCall(Chain.Val);
3980 // We don't support mixed calls, due to issues with alignment. We could in
3981 // theory handle some mixed calls from CCC -> FastCC if the stack is properly
3982 // aligned (which depends on the number of arguments to the callee). TODO.
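/// GetAdjustedArgumentStores - Walk the chain of stores that sets up the
/// outgoing argument area of a tail call and rewrite each store so that,
/// instead of being relative to the current ESP, it targets a fixed frame
/// index shifted by 'Offset' bytes.  This lets the outgoing arguments be
/// written directly on top of the incoming argument area.  The rewritten
/// chain is returned.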
3986 static SDOperand GetAdjustedArgumentStores(SDOperand Chain, int Offset,
3987 SelectionDAG &DAG) {
3988 MVT::ValueType StoreVT;
3989 switch (Chain.getOpcode()) {
3990 case ISD::CALLSEQ_START:
3991 // If we found the start of the call sequence, we're done. We actually
3992 // strip off the CALLSEQ_START node, to avoid generating the
3993 // ADJCALLSTACKDOWN marker for the tail call.
3994 return Chain.getOperand(0);
3995 case ISD::TokenFactor: {
3996 std::vector<SDOperand> Ops;
3997 Ops.reserve(Chain.getNumOperands());
3998 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3999 Ops.push_back(GetAdjustedArgumentStores(Chain.getOperand(i), Offset,DAG));
4000 return DAG.getNode(ISD::TokenFactor, MVT::Other, Ops);
4002 case ISD::STORE: // Normal store
4003 StoreVT = Chain.getOperand(1).getValueType();
4005 case ISD::TRUNCSTORE: // FLOAT store
4006 StoreVT = cast<VTSDNode>(Chain.getOperand(4))->getVT();
4010 SDOperand OrigDest = Chain.getOperand(2);
4011 unsigned OrigOffset;
4013 if (OrigDest.getOpcode() == ISD::CopyFromReg) {
4015 assert(cast<RegSDNode>(OrigDest)->getReg() == X86::ESP);
4017 // We expect only (ESP+C)
4018 assert(OrigDest.getOpcode() == ISD::ADD &&
4019 isa<ConstantSDNode>(OrigDest.getOperand(1)) &&
4020 OrigDest.getOperand(0).getOpcode() == ISD::CopyFromReg &&
4021 cast<RegSDNode>(OrigDest.getOperand(0))->getReg() == X86::ESP);
4022 OrigOffset = cast<ConstantSDNode>(OrigDest.getOperand(1))->getValue();
4025 // Compute the new offset from the incoming ESP value we wish to use.
4026 unsigned NewOffset = OrigOffset + Offset;
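// For example (hypothetical sizes): if the caller reserved 12 bytes of
// argument space and the callee only needs 8, Offset is 4, so a store that
// originally targeted [ESP+0] is redirected to a fixed object at offset 4 in
// the incoming argument area.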
4028 unsigned OpSize = (MVT::getSizeInBits(StoreVT)+7)/8; // Bits -> Bytes
4029 MachineFunction &MF = DAG.getMachineFunction();
4030 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, NewOffset);
4031 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
4033 SDOperand InChain = GetAdjustedArgumentStores(Chain.getOperand(0), Offset,
4035 if (Chain.getOpcode() == ISD::STORE)
4036 return DAG.getNode(ISD::STORE, MVT::Other, InChain, Chain.getOperand(1),
4038 assert(Chain.getOpcode() == ISD::TRUNCSTORE);
4039 return DAG.getNode(ISD::TRUNCSTORE, MVT::Other, InChain, Chain.getOperand(1),
4040 FIN, DAG.getSrcValue(NULL), DAG.getValueType(StoreVT));
4044 /// EmitFastCCToFastCCTailCall - Given a tail call from a fastcc function to
4045 /// another fastcc function, emit the code for a 'proper' tail call.
4047 void ISel::EmitFastCCToFastCCTailCall(SDNode *TailCallNode) {
4048 unsigned CalleeCallArgSize =
4049 cast<ConstantSDNode>(TailCallNode->getOperand(2))->getValue();
4050 unsigned CallerArgSize = X86Lowering.getBytesToPopOnReturn();
4052 //std::cerr << "****\n*** EMITTING TAIL CALL!\n****\n";
4054 // Adjust argument stores. Instead of storing to [ESP], e.g., store to frame
4055 // indexes that are relative to the incoming ESP. If the incoming and
4056 // outgoing arg sizes are the same, we will store to [InESP] instead of
4057 // [CurESP], i.e. relative to the incoming function's argument area.
4059 int ESPOffset = CallerArgSize-CalleeCallArgSize;
4060 SDOperand AdjustedArgStores =
4061 GetAdjustedArgumentStores(TailCallNode->getOperand(0), ESPOffset, *TheDAG);
4063 // Copy the return address of the caller into a virtual register so we don't clobber it.
4067 SDOperand RetValAddr = X86Lowering.getReturnAddressFrameIndex(*TheDAG);
4068 RetVal = TheDAG->getLoad(MVT::i32, TheDAG->getEntryNode(),
4069 RetValAddr, TheDAG->getSrcValue(NULL));
4073 // Codegen all of the argument stores.
4074 Select(AdjustedArgStores);
4077 // Emit a store of the saved ret value to the new location.
4078 MachineFunction &MF = TheDAG->getMachineFunction();
4079 int ReturnAddrFI = MF.getFrameInfo()->CreateFixedObject(4, ESPOffset-4);
4080 SDOperand RetValAddr = TheDAG->getFrameIndex(ReturnAddrFI, MVT::i32);
4081 Select(TheDAG->getNode(ISD::STORE, MVT::Other, TheDAG->getEntryNode(),
4082 RetVal, RetValAddr));
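// The saved return address is stored at ESPOffset-4, i.e. immediately below
// the relocated outgoing argument area, which is where the callee's RET will
// expect to find it.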
4085 // Get the call destination (the callee).
4086 SDOperand Callee = TailCallNode->getOperand(1);
4087 bool isDirect = isa<GlobalAddressSDNode>(Callee) ||
4088 isa<ExternalSymbolSDNode>(Callee);
4089 unsigned CalleeReg = 0;
4090 if (!isDirect) CalleeReg = SelectExpr(Callee);
4092 unsigned RegOp1 = 0;
4093 unsigned RegOp2 = 0;
4095 if (TailCallNode->getNumOperands() > 4) {
4096 // The first value is passed in (a part of) EAX, the second in EDX.
4097 RegOp1 = SelectExpr(TailCallNode->getOperand(4));
4098 if (TailCallNode->getNumOperands() > 5)
4099 RegOp2 = SelectExpr(TailCallNode->getOperand(5));
4101 switch (TailCallNode->getOperand(4).getValueType()) {
4102 default: assert(0 && "Bad thing to pass in regs");
4105 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(RegOp1);
4109 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(RegOp1);
4113 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(RegOp1);
4118 switch (TailCallNode->getOperand(5).getValueType()) {
4119 default: assert(0 && "Bad thing to pass in regs");
4122 BuildMI(BB, X86::MOV8rr, 1, X86::DL).addReg(RegOp2);
4126 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
4130 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
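// Adjust ESP before the jump: the outgoing arguments and the relocated return
// address were written relative to the incoming argument area, so ESP must be
// moved by ESPOffset for the callee to find them at the addresses it expects.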
4138 BuildMI(BB, X86::ADJSTACKPTRri, 2,
4139 X86::ESP).addReg(X86::ESP).addImm(ESPOffset);
4141 // TODO: handle jmp [mem]
4143 BuildMI(BB, X86::TAILJMPr, 1).addReg(CalleeReg);
4144 } else if (GlobalAddressSDNode *GASD = dyn_cast<GlobalAddressSDNode>(Callee)){
4145 BuildMI(BB, X86::TAILJMPd, 1).addGlobalAddress(GASD->getGlobal(), true);
4147 ExternalSymbolSDNode *ESSDN = cast<ExternalSymbolSDNode>(Callee);
4148 BuildMI(BB, X86::TAILJMPd, 1).addExternalSymbol(ESSDN->getSymbol(), true);
4150 // TODO: add implicit uses of RegOp1/RegOp2 to the tail-jump instructions.
4154 void ISel::Select(SDOperand N) {
4155 unsigned Tmp1, Tmp2, Opc;
4157 if (!ExprMap.insert(std::make_pair(N, 1)).second)
4158 return; // Already selected.
4160 SDNode *Node = N.Val;
4162 switch (Node->getOpcode()) {
4164 Node->dump(); std::cerr << "\n";
4165 assert(0 && "Node not handled yet!");
4166 case ISD::EntryToken: return; // Noop
4167 case ISD::TokenFactor:
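// Select the operands of a token factor in decreasing order of estimated
// register pressure, so that the subtrees needing the most registers are
// computed first (roughly a Sethi-Ullman style ordering heuristic).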
4168 if (Node->getNumOperands() == 2) {
4170 getRegPressure(Node->getOperand(1))>getRegPressure(Node->getOperand(0));
4171 Select(Node->getOperand(OneFirst));
4172 Select(Node->getOperand(!OneFirst));
4174 std::vector<std::pair<unsigned, unsigned> > OpsP;
4175 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
4176 OpsP.push_back(std::make_pair(getRegPressure(Node->getOperand(i)), i));
4177 std::sort(OpsP.begin(), OpsP.end());
4178 std::reverse(OpsP.begin(), OpsP.end());
4179 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
4180 Select(Node->getOperand(OpsP[i].second));
4183 case ISD::CopyToReg:
4184 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4185 Select(N.getOperand(0));
4186 Tmp1 = SelectExpr(N.getOperand(1));
4188 Tmp1 = SelectExpr(N.getOperand(1));
4189 Select(N.getOperand(0));
4191 Tmp2 = cast<RegSDNode>(N)->getReg();
4194 switch (N.getOperand(1).getValueType()) {
4195 default: assert(0 && "Invalid type for operation!");
4197 case MVT::i8: Opc = X86::MOV8rr; break;
4198 case MVT::i16: Opc = X86::MOV16rr; break;
4199 case MVT::i32: Opc = X86::MOV32rr; break;
4200 case MVT::f32: Opc = X86::MOVAPSrr; break;
4203 Opc = X86::MOVAPDrr;
4206 ContainsFPCode = true;
4210 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
4214 if (N.getOperand(0).getOpcode() == ISD::CALLSEQ_END ||
4215 N.getOperand(0).getOpcode() == X86ISD::TAILCALL ||
4216 N.getOperand(0).getOpcode() == ISD::TokenFactor)
4217 if (EmitPotentialTailCall(Node))
4220 switch (N.getNumOperands()) {
4222 assert(0 && "Unknown return instruction!");
4224 assert(N.getOperand(1).getValueType() == MVT::i32 &&
4225 N.getOperand(2).getValueType() == MVT::i32 &&
4226 "Unknown two-register value!");
4227 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
4228 Tmp1 = SelectExpr(N.getOperand(1));
4229 Tmp2 = SelectExpr(N.getOperand(2));
4231 Tmp2 = SelectExpr(N.getOperand(2));
4232 Tmp1 = SelectExpr(N.getOperand(1));
4234 Select(N.getOperand(0));
4236 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4237 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
4240 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4241 Select(N.getOperand(0));
4242 Tmp1 = SelectExpr(N.getOperand(1));
4244 Tmp1 = SelectExpr(N.getOperand(1));
4245 Select(N.getOperand(0));
4247 switch (N.getOperand(1).getValueType()) {
4248 default: assert(0 && "All other types should have been promoted!!");
4251 // Spill the value to memory and reload it into top of stack.
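// (With scalar SSE the value lives in an XMM register, but the x86 C calling
// convention returns floating-point values in ST(0); since there is no direct
// XMM-to-x87 move, the value has to be bounced through memory.)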
4252 unsigned Size = MVT::getSizeInBits(MVT::f32)/8;
4253 MachineFunction *F = BB->getParent();
4254 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
4255 addFrameReference(BuildMI(BB, X86::MOVSSmr, 5), FrameIdx).addReg(Tmp1);
4256 addFrameReference(BuildMI(BB, X86::FLD32m, 4, X86::FP0), FrameIdx);
4257 BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
4258 ContainsFPCode = true;
4260 assert(0 && "MVT::f32 only legal with scalar sse fp");
4266 // Spill the value to memory and reload it into top of stack.
4267 unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
4268 MachineFunction *F = BB->getParent();
4269 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
4270 addFrameReference(BuildMI(BB, X86::MOVSDmr, 5), FrameIdx).addReg(Tmp1);
4271 addFrameReference(BuildMI(BB, X86::FLD64m, 4, X86::FP0), FrameIdx);
4272 BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
4273 ContainsFPCode = true;
4275 BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
4279 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4284 Select(N.getOperand(0));
4287 if (X86Lowering.getBytesToPopOnReturn() == 0)
4288 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
4290 BuildMI(BB, X86::RETI, 1).addImm(X86Lowering.getBytesToPopOnReturn());
4293 Select(N.getOperand(0));
4294 MachineBasicBlock *Dest =
4295 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
4296 BuildMI(BB, X86::JMP, 1).addMBB(Dest);
4301 MachineBasicBlock *Dest =
4302 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
4304 // Try to fold a setcc into the branch. If this fails, emit a test/jne against zero.
4306 if (EmitBranchCC(Dest, N.getOperand(0), N.getOperand(1))) {
4307 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4308 Select(N.getOperand(0));
4309 Tmp1 = SelectExpr(N.getOperand(1));
4311 Tmp1 = SelectExpr(N.getOperand(1));
4312 Select(N.getOperand(0));
4314 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
4315 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
4322 // If this load could be folded into the only using instruction, and if it
4323 // is safe to emit the instruction here, try to do so now.
4324 if (Node->hasNUsesOfValue(1, 0)) {
4325 SDOperand TheVal = N.getValue(0);
4327 for (SDNode::use_iterator UI = Node->use_begin(); ; ++UI) {
4328 assert(UI != Node->use_end() && "Didn't find use!");
4330 for (unsigned i = 0, e = UN->getNumOperands(); i != e; ++i)
4331 if (UN->getOperand(i) == TheVal) {
4337 // Only handle unary operators right now.
4338 if (User->getNumOperands() == 1) {
4340 SelectExpr(SDOperand(User, 0));
4351 case ISD::DYNAMIC_STACKALLOC:
4352 case X86ISD::TAILCALL:
4357 case ISD::CopyFromReg:
4358 case X86ISD::FILD64m:
4360 SelectExpr(N.getValue(0));
4363 case X86ISD::FP_TO_INT16_IN_MEM:
4364 case X86ISD::FP_TO_INT32_IN_MEM:
4365 case X86ISD::FP_TO_INT64_IN_MEM: {
4366 assert(N.getOperand(1).getValueType() == MVT::f64);
4368 Select(N.getOperand(0)); // Select the token chain
4371 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
4372 ValReg = SelectExpr(N.getOperand(1));
4373 SelectAddress(N.getOperand(2), AM);
4375 SelectAddress(N.getOperand(2), AM);
4376 ValReg = SelectExpr(N.getOperand(1));
4379 // Change the floating point control register to use "round towards zero"
4380 // mode when truncating to an integer value.
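// FIST stores using whatever rounding mode is currently in the x87 control
// word (round-to-nearest by default), but FP_TO_SINT requires truncation
// towards zero, so the control word is saved, switched to round-towards-zero
// around the store, and then restored.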
4382 MachineFunction *F = BB->getParent();
4383 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
4384 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
4386 // Load the old value of the control word so it can be restored later...
4387 unsigned OldCW = MakeReg(MVT::i16);
4388 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
4390 // Overwrite the in-memory control word with one that rounds towards zero...
4391 addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
4393 // Reload the modified control word now...
4394 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
4396 // Restore the memory image of control word to original value
4397 addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);
4399 // Get the X86 opcode to use.
4400 switch (N.getOpcode()) {
4401 case X86ISD::FP_TO_INT16_IN_MEM: Tmp1 = X86::FIST16m; break;
4402 case X86ISD::FP_TO_INT32_IN_MEM: Tmp1 = X86::FIST32m; break;
4403 case X86ISD::FP_TO_INT64_IN_MEM: Tmp1 = X86::FISTP64m; break;
4406 addFullAddress(BuildMI(BB, Tmp1, 5), AM).addReg(ValReg);
4408 // Reload the original control word now.
4409 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
4413 case ISD::TRUNCSTORE: { // truncstore chain, val, ptr, SRCVALUE, storety
4415 MVT::ValueType StoredTy = cast<VTSDNode>(N.getOperand(4))->getVT();
4416 assert((StoredTy == MVT::i1 || StoredTy == MVT::f32 ||
4417 StoredTy == MVT::i16 /*FIXME: THIS IS JUST FOR TESTING!*/)
4418 && "Unsupported TRUNCSTORE for this target!");
4420 if (StoredTy == MVT::i16) {
4421 // FIXME: This is here just to allow testing. X86 doesn't really have a
4422 // TRUNCSTORE i16 operation, but this is required for targets that do not
4423 // have 16-bit integer registers. We occasionally disable 16-bit integer
4424 // registers to test the promotion code.
4425 Select(N.getOperand(0));
4426 Tmp1 = SelectExpr(N.getOperand(1));
4427 SelectAddress(N.getOperand(2), AM);
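// Implement the i16 truncating store by copying the value into EAX and then
// storing AX, its low 16-bit subregister.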
4429 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4430 addFullAddress(BuildMI(BB, X86::MOV16mr, 5), AM).addReg(X86::AX);
4434 // Store of constant bool?
4435 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
4436 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4437 Select(N.getOperand(0));
4438 SelectAddress(N.getOperand(2), AM);
4440 SelectAddress(N.getOperand(2), AM);
4441 Select(N.getOperand(0));
4443 addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CN->getValue());
4448 default: assert(0 && "Cannot truncstore this type!");
4449 case MVT::i1: Opc = X86::MOV8mr; break;
4451 assert(!X86ScalarSSE && "Cannot truncstore scalar SSE regs");
4452 Opc = X86::FST32m; break;
4455 std::vector<std::pair<unsigned, unsigned> > RP;
4456 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
4457 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
4458 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
4459 std::sort(RP.begin(), RP.end());
4461 Tmp1 = 0; // Silence a warning.
4462 for (unsigned i = 0; i != 3; ++i)
4463 switch (RP[2-i].second) {
4464 default: assert(0 && "Unknown operand number!");
4465 case 0: Select(N.getOperand(0)); break;
4466 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
4467 case 2: SelectAddress(N.getOperand(2), AM); break;
4470 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
4476 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
4478 switch (CN->getValueType(0)) {
4479 default: assert(0 && "Invalid type for operation!");
4481 case MVT::i8: Opc = X86::MOV8mi; break;
4482 case MVT::i16: Opc = X86::MOV16mi; break;
4483 case MVT::i32: Opc = X86::MOV32mi; break;
4486 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4487 Select(N.getOperand(0));
4488 SelectAddress(N.getOperand(2), AM);
4490 SelectAddress(N.getOperand(2), AM);
4491 Select(N.getOperand(0));
4493 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
4496 } else if (GlobalAddressSDNode *GA =
4497 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
4498 assert(GA->getValueType(0) == MVT::i32 && "Bad pointer operand");
4500 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4501 Select(N.getOperand(0));
4502 SelectAddress(N.getOperand(2), AM);
4504 SelectAddress(N.getOperand(2), AM);
4505 Select(N.getOperand(0));
4507 GlobalValue *GV = GA->getGlobal();
4508 // For Darwin, external and weak symbols are indirect, so we want to load
4509 // the value at address GV, not the value of GV itself.
4510 if (Subtarget->getIndirectExternAndWeakGlobals() &&
4511 (GV->hasWeakLinkage() || GV->isExternal())) {
4512 Tmp1 = MakeReg(MVT::i32);
4513 BuildMI(BB, X86::MOV32rm, 4, Tmp1).addReg(0).addZImm(1).addReg(0)
4514 .addGlobalAddress(GV, false, 0);
4515 addFullAddress(BuildMI(BB, X86::MOV32mr, 4+1), AM).addReg(Tmp1);
4517 addFullAddress(BuildMI(BB, X86::MOV32mi, 4+1), AM).addGlobalAddress(GV);
4522 // Check to see if this is a load/op/store combination.
4523 if (TryToFoldLoadOpStore(Node))
4526 switch (N.getOperand(1).getValueType()) {
4527 default: assert(0 && "Cannot store this type!");
4529 case MVT::i8: Opc = X86::MOV8mr; break;
4530 case MVT::i16: Opc = X86::MOV16mr; break;
4531 case MVT::i32: Opc = X86::MOV32mr; break;
4532 case MVT::f32: Opc = X86::MOVSSmr; break;
4533 case MVT::f64: Opc = X86ScalarSSE ? X86::MOVSDmr : X86::FST64m; break;
4536 std::vector<std::pair<unsigned, unsigned> > RP;
4537 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
4538 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
4539 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
4540 std::sort(RP.begin(), RP.end());
4542 Tmp1 = 0; // Silence a warning.
4543 for (unsigned i = 0; i != 3; ++i)
4544 switch (RP[2-i].second) {
4545 default: assert(0 && "Unknown operand number!");
4546 case 0: Select(N.getOperand(0)); break;
4547 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
4548 case 2: SelectAddress(N.getOperand(2), AM); break;
4551 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
4554 case ISD::CALLSEQ_START:
4555 Select(N.getOperand(0));
4557 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
4558 BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(Tmp1);
4560 case ISD::CALLSEQ_END:
4561 Select(N.getOperand(0));
4564 Select(N.getOperand(0)); // Select the chain.
4566 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
4567 if (Align == 0) Align = 1;
4569 // Turn the byte count into # iterations
4572 if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
4573 unsigned Val = ValC->getValue() & 255;
4575 // If the value is a constant, then we can potentially use wider stores.
4576 switch (Align & 3) {
4577 case 2: // WORD aligned
4578 CountReg = MakeReg(MVT::i32);
4579 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4580 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
4582 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4583 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
4585 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
4586 Opcode = X86::REP_STOSW;
4588 case 0: // DWORD aligned
4589 CountReg = MakeReg(MVT::i32);
4590 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4591 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
4593 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4594 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
4596 Val = (Val << 8) | Val;
4597 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
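// The single memset byte has been replicated across the wider store value,
// e.g. a fill byte of 0x41 becomes 0x4141 for REP_STOSW and 0x41414141 for
// REP_STOSD, so the wider stores write the same pattern a byte-at-a-time
// memset would.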
4598 Opcode = X86::REP_STOSD;
4600 default: // BYTE aligned
4601 CountReg = SelectExpr(Node->getOperand(3));
4602 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
4603 Opcode = X86::REP_STOSB;
4607 // If it's not a constant value we are storing, just fall back. We could
4608 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
4609 unsigned ValReg = SelectExpr(Node->getOperand(2));
4610 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
4611 CountReg = SelectExpr(Node->getOperand(3));
4612 Opcode = X86::REP_STOSB;
4615 // No matter what the alignment is, we put the destination in EDI and the
4616 // count in ECX; the value being stored is already in AL/AX/EAX.
4617 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
4618 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
4619 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
4620 BuildMI(BB, Opcode, 0);
4624 Select(N.getOperand(0)); // Select the chain.
4626 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
4627 if (Align == 0) Align = 1;
4629 // Turn the byte count into # iterations
4632 switch (Align & 3) {
4633 case 2: // WORD aligned
4634 CountReg = MakeReg(MVT::i32);
4635 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4636 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
4638 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4639 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
4641 Opcode = X86::REP_MOVSW;
4643 case 0: // DWORD aligned
4644 CountReg = MakeReg(MVT::i32);
4645 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4646 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
4648 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4649 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
4651 Opcode = X86::REP_MOVSD;
4653 default: // BYTE aligned
4654 CountReg = SelectExpr(Node->getOperand(3));
4655 Opcode = X86::REP_MOVSB;
4659 // No matter what the alignment is, we put the source in ESI, the
4660 // destination in EDI, and the count in ECX.
4661 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
4662 unsigned TmpReg2 = SelectExpr(Node->getOperand(2));
4663 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
4664 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
4665 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
4666 BuildMI(BB, Opcode, 0);
4669 case ISD::WRITEPORT:
4670 if (Node->getOperand(2).getValueType() != MVT::i16) {
4671 std::cerr << "llvm.writeport: Address size is not 16 bits\n";
4674 Select(Node->getOperand(0)); // Emit the chain.
4676 Tmp1 = SelectExpr(Node->getOperand(1));
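// Mirroring READPORT above: the value to write always goes in AL/AX/EAX
// according to its size, and the port number is either encoded as an 8-bit
// immediate (the OUT*ir forms) or placed in DX (the OUT*rr forms).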
4677 switch (Node->getOperand(1).getValueType()) {
4679 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
4680 Tmp2 = X86::OUT8ir; Opc = X86::OUT8rr;
4683 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(Tmp1);
4684 Tmp2 = X86::OUT16ir; Opc = X86::OUT16rr;
4687 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4688 Tmp2 = X86::OUT32ir; Opc = X86::OUT32rr;
4691 std::cerr << "llvm.writeport: invalid data type for X86 target\n";
4695 // If the port is a single-byte constant, use the immediate form.
4696 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Node->getOperand(2)))
4697 if ((CN->getValue() & 255) == CN->getValue()) {
4698 BuildMI(BB, Tmp2, 1).addImm(CN->getValue());
4702 // Otherwise, move the I/O port address into the DX register.
4703 unsigned Reg = SelectExpr(Node->getOperand(2));
4704 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
4705 BuildMI(BB, Opc, 0);
4708 assert(0 && "Should not be reached!");
4712 /// createX86PatternInstructionSelector - This pass converts an LLVM function
4713 /// into a machine code representation using pattern matching and a machine
4714 /// description file.
4716 FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
4717 return new ISel(TM);