#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
-#include <queue>
-#include <set>
using namespace llvm;
-STATISTIC(NumFPKill , "Number of FP_REG_KILL instructions added");
+#include "llvm/Support/CommandLine.h"
+static cl::opt<bool> AvoidDupAddrCompute("x86-avoid-dup-address", cl::Hidden);
+
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
//===----------------------------------------------------------------------===//
int FrameIndex;
} Base;
- bool isRIPRel; // RIP as base?
unsigned Scale;
SDValue IndexReg;
- unsigned Disp;
+ int32_t Disp;
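+  // If non-null, the segment register that overrides the default segment
+  // for this access (e.g. %gs or %fs for thread-local storage).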
+ SDValue Segment;
GlobalValue *GV;
Constant *CP;
const char *ES;
int JT;
unsigned Align; // CP alignment.
+ unsigned char SymbolFlags; // X86II::MO_*
X86ISelAddressMode()
- : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
- GV(0), CP(0), ES(0), JT(-1), Align(0) {
+ : BaseType(RegBase), Scale(1), IndexReg(), Disp(0),
+ Segment(), GV(0), CP(0), ES(0), JT(-1), Align(0), SymbolFlags(0) {
+ }
+
+ bool hasSymbolicDisplacement() const {
+ return GV != 0 || CP != 0 || ES != 0 || JT != -1;
+ }
+
+ bool hasBaseOrIndexReg() const {
+ return IndexReg.getNode() != 0 || Base.Reg.getNode() != 0;
+ }
+
+ /// isRIPRelative - Return true if this addressing mode is already RIP
+ /// relative.
+ bool isRIPRelative() const {
+ if (BaseType != RegBase) return false;
+ if (RegisterSDNode *RegNode =
+ dyn_cast_or_null<RegisterSDNode>(Base.Reg.getNode()))
+ return RegNode->getReg() == X86::RIP;
+ return false;
+ }
+
+ void setBaseReg(SDValue Reg) {
+ BaseType = RegBase;
+ Base.Reg = Reg;
+ }
+
+ void dump() {
+ cerr << "X86ISelAddressMode " << this << "\n";
+ cerr << "Base.Reg ";
+ if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump();
+ else cerr << "nul";
+ cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
+ cerr << " Scale" << Scale << "\n";
+ cerr << "IndexReg ";
+ if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
+ else cerr << "nul";
+ cerr << " Disp " << Disp << "\n";
+ cerr << "GV "; if (GV) GV->dump();
+ else cerr << "nul";
+ cerr << " CP "; if (CP) CP->dump();
+ else cerr << "nul";
+ cerr << "\n";
+ cerr << "ES "; if (ES) cerr << ES; else cerr << "nul";
+ cerr << " JT" << JT << " Align" << Align << "\n";
}
};
}
/// SelectionDAG operations.
///
class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
- /// ContainsFPCode - Every instruction we select that uses or defines a FP
- /// register should set this to true.
- bool ContainsFPCode;
-
- /// TM - Keep a reference to X86TargetMachine.
- ///
- X86TargetMachine &TM;
-
/// X86Lowering - This object fully describes how to lower LLVM code to an
/// X86-specific SelectionDAG.
- X86TargetLowering X86Lowering;
+ X86TargetLowering &X86Lowering;
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
- /// GlobalBaseReg - keeps track of the virtual register mapped onto global
- /// base register.
- unsigned GlobalBaseReg;
-
/// CurBB - Current BB being isel'd.
///
MachineBasicBlock *CurBB;
+ /// OptForSize - If true, selector should try to optimize for code size
+ /// instead of performance.
+ bool OptForSize;
+
public:
- X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
- : SelectionDAGISel(X86Lowering, fast),
- ContainsFPCode(false), TM(tm),
- X86Lowering(*TM.getTargetLowering()),
- Subtarget(&TM.getSubtarget<X86Subtarget>()) {}
-
- virtual bool runOnFunction(Function &Fn) {
- // Make sure we re-emit a set of the global base reg if necessary
- GlobalBaseReg = 0;
- return SelectionDAGISel::runOnFunction(Fn);
- }
-
+ explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
+ : SelectionDAGISel(tm, OptLevel),
+ X86Lowering(*tm.getTargetLowering()),
+ Subtarget(&tm.getSubtarget<X86Subtarget>()),
+ OptForSize(false) {}
+
virtual const char *getPassName() const {
return "X86 DAG->DAG Instruction Selection";
}
/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
- virtual void InstructionSelect(SelectionDAG &DAG);
-
- /// InstructionSelectPostProcessing - Post processing of selected and
- /// scheduled basic blocks.
- virtual void InstructionSelectPostProcessing();
+ virtual void InstructionSelect();
virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
- virtual bool CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const;
+ virtual
+ bool IsLegalAndProfitableToFold(SDNode *N, SDNode *U, SDNode *Root) const;
// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"
private:
SDNode *Select(SDValue N);
+ SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
+ bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM);
+ bool MatchLoad(SDValue N, X86ISelAddressMode &AM);
+ bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
bool MatchAddress(SDValue N, X86ISelAddressMode &AM,
- bool isRoot = true, unsigned Depth = 0);
- bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
- bool isRoot, unsigned Depth);
+ unsigned Depth = 0);
+ bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
bool SelectAddr(SDValue Op, SDValue N, SDValue &Base,
- SDValue &Scale, SDValue &Index, SDValue &Disp);
+ SDValue &Scale, SDValue &Index, SDValue &Disp,
+ SDValue &Segment);
bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp);
+ bool SelectTLSADDRAddr(SDValue Op, SDValue N, SDValue &Base,
+ SDValue &Scale, SDValue &Index, SDValue &Disp);
bool SelectScalarSSELoad(SDValue Op, SDValue Pred,
SDValue N, SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp,
+ SDValue &Segment,
SDValue &InChain, SDValue &OutChain);
bool TryFoldLoad(SDValue P, SDValue N,
SDValue &Base, SDValue &Scale,
- SDValue &Index, SDValue &Disp);
- void PreprocessForRMW(SelectionDAG &DAG);
- void PreprocessForFPConvert(SelectionDAG &DAG);
+ SDValue &Index, SDValue &Disp,
+ SDValue &Segment);
+ void PreprocessForRMW();
+ void PreprocessForFPConvert();
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
- std::vector<SDValue> &OutOps,
- SelectionDAG &DAG);
+ std::vector<SDValue> &OutOps);
void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
SDValue &Scale, SDValue &Index,
- SDValue &Disp) {
+ SDValue &Disp, SDValue &Segment) {
Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
AM.Base.Reg;
// These are 32-bit even in 64-bit mode since RIP relative offset
// is 32-bit.
if (AM.GV)
- Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
+ Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp,
+ AM.SymbolFlags);
else if (AM.CP)
- Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Align, AM.Disp);
+ Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
+ AM.Align, AM.Disp, AM.SymbolFlags);
else if (AM.ES)
- Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
+ Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
else if (AM.JT != -1)
- Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
+ Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
else
- Disp = getI32Imm(AM.Disp);
+ Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
+
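+    // Register 0 here is the sentinel for "no segment override".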
+ if (AM.Segment.getNode())
+ Segment = AM.Segment;
+ else
+ Segment = CurDAG->getRegister(0, MVT::i32);
}
/// getI8Imm - Return a target constant with the specified value, of type
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
- /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
- /// base register. Return the virtual register that holds this value.
+ /// getGlobalBaseReg - Return an SDNode that returns the value of
+ /// the global base register. Output instructions required to
+ /// initialize the global base register, if necessary.
+ ///
SDNode *getGlobalBaseReg();
- /// getTruncate - return an SDNode that implements a subreg based truncate
- /// of the specified operand to the the specified value type.
- SDNode *getTruncate(SDValue N0, MVT VT);
+ /// getTargetMachine - Return a reference to the TargetMachine, casted
+ /// to the target-specific type.
+ const X86TargetMachine &getTargetMachine() {
+ return static_cast<const X86TargetMachine &>(TM);
+ }
+
+ /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
+ /// to the target-specific type.
+ const X86InstrInfo *getInstrInfo() {
+ return getTargetMachine().getInstrInfo();
+ }
#ifndef NDEBUG
unsigned Indent;
};
}
-/// findFlagUse - Return use of MVT::Flag value produced by the specified SDNode.
-///
-static SDNode *findFlagUse(SDNode *N) {
- unsigned FlagResNo = N->getNumValues()-1;
- for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
- SDNode *User = *I;
- for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
- SDValue Op = User->getOperand(i);
- if (Op.Val == N && Op.ResNo == FlagResNo)
- return User;
- }
- }
- return NULL;
-}
-
-/// findNonImmUse - Return true by reference in "found" if "Use" is an
-/// non-immediate use of "Def". This function recursively traversing
-/// up the operand chain ignoring certain nodes.
-static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
- SDNode *Root, SDNode *Skip, bool &found,
- SmallPtrSet<SDNode*, 16> &Visited) {
- if (found ||
- Use->getNodeId() > Def->getNodeId() ||
- !Visited.insert(Use))
- return;
-
- for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
- SDNode *N = Use->getOperand(i).Val;
- if (N == Skip)
- continue;
- if (N == Def) {
- if (Use == ImmedUse)
- continue; // We are not looking for immediate use.
- if (Use == Root) {
- // Must be a chain reading node where it is possible to reach its own
- // chain operand through a path started from another operand.
- assert(Use->getOpcode() == ISD::STORE ||
- Use->getOpcode() == X86ISD::CMP ||
- Use->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
- Use->getOpcode() == ISD::INTRINSIC_VOID);
- continue;
- }
- found = true;
- break;
- }
-
- // Traverse up the operand chain.
- findNonImmUse(N, Def, ImmedUse, Root, Skip, found, Visited);
- }
-}
-
-/// isNonImmUse - Start searching from Root up the DAG to check is Def can
-/// be reached. Return true if that's the case. However, ignore direct uses
-/// by ImmedUse (which would be U in the example illustrated in
-/// CanBeFoldedBy) and by Root (which can happen in the store case).
-/// FIXME: to be really generic, we should allow direct use by any node
-/// that is being folded. But realisticly since we only fold loads which
-/// have one non-chain use, we only need to watch out for load/op/store
-/// and load/op/cmp case where the root (store / cmp) may reach the load via
-/// its chain operand.
-static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
- SDNode *Skip = NULL) {
- SmallPtrSet<SDNode*, 16> Visited;
- bool found = false;
- findNonImmUse(Root, Def, ImmedUse, Root, Skip, found, Visited);
- return found;
-}
-
-bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const {
- if (FastISel) return false;
+bool X86DAGToDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
+ SDNode *Root) const {
+ if (OptLevel == CodeGenOpt::None) return false;
- // If U use can somehow reach N through another path then U can't fold N or
- // it will create a cycle. e.g. In the following diagram, U can reach N
- // through X. If N is folded into into U, then X is both a predecessor and
- // a successor of U.
- //
- // [ N ]
- // ^ ^
- // | |
- // / \---
- // / [X]
- // | ^
- // [U]--------|
-
- if (isNonImmUse(Root, N, U))
- return false;
+ if (U == Root)
+ switch (U->getOpcode()) {
+ default: break;
+ case ISD::ADD:
+ case ISD::ADDC:
+ case ISD::ADDE:
+ case ISD::AND:
+ case ISD::OR:
+ case ISD::XOR: {
+ SDValue Op1 = U->getOperand(1);
+
+      // If the other operand is an 8-bit immediate we should fold the
+      // immediate instead. This reduces code size.
+      // e.g.
+      // movl 4(%esp), %eax
+      // addl $4, %eax
+      // vs.
+      // movl $4, %eax
+      // addl 4(%esp), %eax
+      // The former is 2 bytes shorter. In the case where the increment is 1,
+      // the saving can be 4 bytes (by using incl %eax).
+ if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
+ if (Imm->getAPIntValue().isSignedIntN(8))
+ return false;
- // If U produces a flag, then it gets (even more) interesting. Since it
- // would have been "glued" together with its flag use, we need to check if
- // it might reach N:
- //
- // [ N ]
- // ^ ^
- // | |
- // [U] \--
- // ^ [TF]
- // | ^
- // | |
- // \ /
- // [FU]
- //
- // If FU (flag use) indirectly reach N (the load), and U fold N (call it
- // NU), then TF is a predecessor of FU and a successor of NU. But since
- // NU and FU are flagged together, this effectively creates a cycle.
- bool HasFlagUse = false;
- MVT VT = Root->getValueType(Root->getNumValues()-1);
- while ((VT == MVT::Flag && !Root->use_empty())) {
- SDNode *FU = findFlagUse(Root);
- if (FU == NULL)
- break;
- else {
- Root = FU;
- HasFlagUse = true;
+ // If the other operand is a TLS address, we should fold it instead.
+ // This produces
+ // movl %gs:0, %eax
+ // leal i@NTPOFF(%eax), %eax
+ // instead of
+ // movl $i@NTPOFF, %eax
+ // addl %gs:0, %eax
+      // if the block also has an access to a second TLS address, this will
+      // save a load.
+      // FIXME: This is probably also true for non-TLS addresses.
+ if (Op1.getOpcode() == X86ISD::Wrapper) {
+ SDValue Val = Op1.getOperand(0);
+ if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
+ return false;
+ }
+ }
}
- VT = Root->getValueType(Root->getNumValues()-1);
- }
- if (HasFlagUse)
- return !isNonImmUse(Root, N, Root, U);
- return true;
+ // Proceed to 'generic' cycle finder code
+ return SelectionDAGISel::IsLegalAndProfitableToFold(N, U, Root);
}
/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
/// and move load below the TokenFactor. Replace store's chain operand with
/// load's chain result.
-static void MoveBelowTokenFactor(SelectionDAG &DAG, SDValue Load,
+static void MoveBelowTokenFactor(SelectionDAG *CurDAG, SDValue Load,
SDValue Store, SDValue TF) {
- std::vector<SDValue> Ops;
- for (unsigned i = 0, e = TF.Val->getNumOperands(); i != e; ++i)
- if (Load.Val == TF.Val->getOperand(i).Val)
- Ops.push_back(Load.Val->getOperand(0));
+ SmallVector<SDValue, 4> Ops;
+ for (unsigned i = 0, e = TF.getNode()->getNumOperands(); i != e; ++i)
+ if (Load.getNode() == TF.getOperand(i).getNode())
+ Ops.push_back(Load.getOperand(0));
else
- Ops.push_back(TF.Val->getOperand(i));
- DAG.UpdateNodeOperands(TF, &Ops[0], Ops.size());
- DAG.UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
- DAG.UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
- Store.getOperand(2), Store.getOperand(3));
+ Ops.push_back(TF.getOperand(i));
+ CurDAG->UpdateNodeOperands(TF, &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
+ CurDAG->UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
+ Store.getOperand(2), Store.getOperand(3));
}
/// isRMWLoad - Return true if N is a load that's part of RMW sub-DAG.
if (N.hasOneUse() &&
N.getOperand(1) == Address &&
- N.Val->isOperandOf(Chain.Val)) {
+ N.getNode()->isOperandOf(Chain.getNode())) {
Load = N;
return true;
}
return false;
}
+/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
+/// operand and move load below the call's chain operand.
+static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
+ SDValue Call, SDValue CallSeqStart) {
+ SmallVector<SDValue, 8> Ops;
+ SDValue Chain = CallSeqStart.getOperand(0);
+ if (Chain.getNode() == Load.getNode())
+ Ops.push_back(Load.getOperand(0));
+ else {
+ assert(Chain.getOpcode() == ISD::TokenFactor &&
+ "Unexpected CallSeqStart chain operand");
+ for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
+ if (Chain.getOperand(i).getNode() == Load.getNode())
+ Ops.push_back(Load.getOperand(0));
+ else
+ Ops.push_back(Chain.getOperand(i));
+ SDValue NewChain =
+ CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
+ MVT::Other, &Ops[0], Ops.size());
+ Ops.clear();
+ Ops.push_back(NewChain);
+ }
+ for (unsigned i = 1, e = CallSeqStart.getNumOperands(); i != e; ++i)
+ Ops.push_back(CallSeqStart.getOperand(i));
+ CurDAG->UpdateNodeOperands(CallSeqStart, &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
+ Load.getOperand(1), Load.getOperand(2));
+ Ops.clear();
+ Ops.push_back(SDValue(Load.getNode(), 1));
+ for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
+ Ops.push_back(Call.getOperand(i));
+ CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
+}
+
+/// isCalleeLoad - Return true if call address is a load and it can be
+/// moved below CALLSEQ_START and the chains leading up to the call.
+/// Return the CALLSEQ_START by reference as a second output.
+static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
+ if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
+ return false;
+ LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
+ if (!LD ||
+ LD->isVolatile() ||
+ LD->getAddressingMode() != ISD::UNINDEXED ||
+ LD->getExtensionType() != ISD::NON_EXTLOAD)
+ return false;
+
+ // Now let's find the callseq_start.
+ while (Chain.getOpcode() != ISD::CALLSEQ_START) {
+ if (!Chain.hasOneUse())
+ return false;
+ Chain = Chain.getOperand(0);
+ }
+
+ if (Chain.getOperand(0).getNode() == Callee.getNode())
+ return true;
+ if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
+ Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()))
+ return true;
+ return false;
+}
+
+
/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
-/// This is only run if not in -fast mode (aka -O0).
+/// This is only run if not in -O0 mode.
/// This allows the instruction selector to pick more read-modify-write
/// instructions. This is a common case:
///
/// \ /
/// \ /
/// [Store]
-void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) {
- for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
- E = DAG.allnodes_end(); I != E; ++I) {
+void X86DAGToDAGISel::PreprocessForRMW() {
+ for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
+ E = CurDAG->allnodes_end(); I != E; ++I) {
+ if (I->getOpcode() == X86ISD::CALL) {
+ /// Also try moving call address load from outside callseq_start to just
+ /// before the call to allow it to be folded.
+ ///
+ /// [Load chain]
+ /// ^
+ /// |
+ /// [Load]
+ /// ^ ^
+ /// | |
+ /// / \--
+ /// / |
+ ///[CALLSEQ_START] |
+ /// ^ |
+ /// | |
+ /// [LOAD/C2Reg] |
+ /// | |
+ /// \ /
+ /// \ /
+ /// [CALL]
+ SDValue Chain = I->getOperand(0);
+ SDValue Load = I->getOperand(1);
+ if (!isCalleeLoad(Load, Chain))
+ continue;
+ MoveBelowCallSeqStart(CurDAG, Load, SDValue(I, 0), Chain);
+ ++NumLoadMoved;
+ continue;
+ }
+
if (!ISD::isNON_TRUNCStore(I))
continue;
SDValue Chain = I->getOperand(0);
- if (Chain.Val->getOpcode() != ISD::TokenFactor)
+
+ if (Chain.getNode()->getOpcode() != ISD::TokenFactor)
continue;
SDValue N1 = I->getOperand(1);
bool RModW = false;
SDValue Load;
- unsigned Opcode = N1.Val->getOpcode();
+ unsigned Opcode = N1.getNode()->getOpcode();
switch (Opcode) {
- case ISD::ADD:
- case ISD::MUL:
- case ISD::AND:
- case ISD::OR:
- case ISD::XOR:
- case ISD::ADDC:
- case ISD::ADDE:
- case ISD::VECTOR_SHUFFLE: {
- SDValue N10 = N1.getOperand(0);
- SDValue N11 = N1.getOperand(1);
- RModW = isRMWLoad(N10, Chain, N2, Load);
- if (!RModW)
- RModW = isRMWLoad(N11, Chain, N2, Load);
- break;
- }
- case ISD::SUB:
- case ISD::SHL:
- case ISD::SRA:
- case ISD::SRL:
- case ISD::ROTL:
- case ISD::ROTR:
- case ISD::SUBC:
- case ISD::SUBE:
- case X86ISD::SHLD:
- case X86ISD::SHRD: {
- SDValue N10 = N1.getOperand(0);
- RModW = isRMWLoad(N10, Chain, N2, Load);
- break;
- }
+ case ISD::ADD:
+ case ISD::MUL:
+ case ISD::AND:
+ case ISD::OR:
+ case ISD::XOR:
+ case ISD::ADDC:
+ case ISD::ADDE:
+ case ISD::VECTOR_SHUFFLE: {
+ SDValue N10 = N1.getOperand(0);
+ SDValue N11 = N1.getOperand(1);
+ RModW = isRMWLoad(N10, Chain, N2, Load);
+ if (!RModW)
+ RModW = isRMWLoad(N11, Chain, N2, Load);
+ break;
+ }
+ case ISD::SUB:
+ case ISD::SHL:
+ case ISD::SRA:
+ case ISD::SRL:
+ case ISD::ROTL:
+ case ISD::ROTR:
+ case ISD::SUBC:
+ case ISD::SUBE:
+ case X86ISD::SHLD:
+ case X86ISD::SHRD: {
+ SDValue N10 = N1.getOperand(0);
+ RModW = isRMWLoad(N10, Chain, N2, Load);
+ break;
+ }
}
if (RModW) {
- MoveBelowTokenFactor(DAG, Load, SDValue(I, 0), Chain);
+ MoveBelowTokenFactor(CurDAG, Load, SDValue(I, 0), Chain);
++NumLoadMoved;
}
}
/// hack on these between the call expansion and the node legalization. As such
/// this pass basically does "really late" legalization of these inline with the
/// X86 isel pass.
-void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) {
- for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
- E = DAG.allnodes_end(); I != E; ) {
+void X86DAGToDAGISel::PreprocessForFPConvert() {
+ for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
+ E = CurDAG->allnodes_end(); I != E; ) {
SDNode *N = I++; // Preincrement iterator to avoid invalidation issues.
if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
continue;
else
MemVT = SrcIsSSE ? SrcVT : DstVT;
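+    // Bounce the value through a stack slot of the narrower type: the
+    // truncating store / extending load pair below performs the actual
+    // conversion between the x87 and SSE value domains.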
- SDValue MemTmp = DAG.CreateStackTemporary(MemVT);
+ SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
+ DebugLoc dl = N->getDebugLoc();
// FIXME: optimize the case where the src/dest is a load or store?
- SDValue Store = DAG.getTruncStore(DAG.getEntryNode(), N->getOperand(0),
- MemTmp, NULL, 0, MemVT);
- SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp,
- NULL, 0, MemVT);
+ SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
+ N->getOperand(0),
+ MemTmp, NULL, 0, MemVT);
+ SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
+ NULL, 0, MemVT);
// We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
// extload we created. This will cause general havok on the dag because
// anything below the conversion could be folded into other existing nodes.
// To avoid invalidating 'I', back it up to the convert node.
--I;
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
// Now that we did that, the node is dead. Increment the iterator to the
// next node to process, then delete N.
++I;
- DAG.DeleteNode(N);
+ CurDAG->DeleteNode(N);
}
}
/// InstructionSelect - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
-void X86DAGToDAGISel::InstructionSelect(SelectionDAG &DAG) {
+void X86DAGToDAGISel::InstructionSelect() {
CurBB = BB; // BB can change as result of isel.
+ const Function *F = CurDAG->getMachineFunction().getFunction();
+ OptForSize = F->hasFnAttr(Attribute::OptimizeForSize);
DEBUG(BB->dump());
- if (!FastISel)
- PreprocessForRMW(DAG);
+ if (OptLevel != CodeGenOpt::None)
+ PreprocessForRMW();
- // FIXME: This should only happen when not -fast.
- PreprocessForFPConvert(DAG);
+ // FIXME: This should only happen when not compiled with -O0.
+ PreprocessForFPConvert();
// Codegen the basic block.
#ifndef NDEBUG
DOUT << "===== Instruction selection begins:\n";
Indent = 0;
#endif
- DAG.setRoot(SelectRoot(DAG.getRoot()));
+ SelectRoot(*CurDAG);
#ifndef NDEBUG
DOUT << "===== Instruction selection ends:\n";
#endif
- DAG.RemoveDeadNodes();
-}
-
-void X86DAGToDAGISel::InstructionSelectPostProcessing() {
- // If we are emitting FP stack code, scan the basic block to determine if this
- // block defines any FP values. If so, put an FP_REG_KILL instruction before
- // the terminator of the block.
-
- // Note that FP stack instructions are used in all modes for long double,
- // so we always need to do this check.
- // Also note that it's possible for an FP stack register to be live across
- // an instruction that produces multiple basic blocks (SSE CMOV) so we
- // must check all the generated basic blocks.
-
- // Scan all of the machine instructions in these MBBs, checking for FP
- // stores. (RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.)
- MachineFunction::iterator MBBI = CurBB;
- MachineFunction::iterator EndMBB = BB; ++EndMBB;
- for (; MBBI != EndMBB; ++MBBI) {
- MachineBasicBlock *MBB = MBBI;
-
- // If this block returns, ignore it. We don't want to insert an FP_REG_KILL
- // before the return.
- if (!MBB->empty()) {
- MachineBasicBlock::iterator EndI = MBB->end();
- --EndI;
- if (EndI->getDesc().isReturn())
- continue;
- }
-
- bool ContainsFPCode = false;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
- !ContainsFPCode && I != E; ++I) {
- if (I->getNumOperands() != 0 && I->getOperand(0).isRegister()) {
- const TargetRegisterClass *clas;
- for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
- if (I->getOperand(op).isRegister() && I->getOperand(op).isDef() &&
- TargetRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
- ((clas = RegInfo->getRegClass(I->getOperand(0).getReg())) ==
- X86::RFP32RegisterClass ||
- clas == X86::RFP64RegisterClass ||
- clas == X86::RFP80RegisterClass)) {
- ContainsFPCode = true;
- break;
- }
- }
- }
- }
- // Check PHI nodes in successor blocks. These PHI's will be lowered to have
- // a copy of the input value in this block. In SSE mode, we only care about
- // 80-bit values.
- if (!ContainsFPCode) {
- // Final check, check LLVM BB's that are successors to the LLVM BB
- // corresponding to BB for FP PHI nodes.
- const BasicBlock *LLVMBB = BB->getBasicBlock();
- const PHINode *PN;
- for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
- !ContainsFPCode && SI != E; ++SI) {
- for (BasicBlock::const_iterator II = SI->begin();
- (PN = dyn_cast<PHINode>(II)); ++II) {
- if (PN->getType()==Type::X86_FP80Ty ||
- (!Subtarget->hasSSE1() && PN->getType()->isFloatingPoint()) ||
- (!Subtarget->hasSSE2() && PN->getType()==Type::DoubleTy)) {
- ContainsFPCode = true;
- break;
- }
- }
- }
- }
- // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
- if (ContainsFPCode) {
- BuildMI(*MBB, MBBI->getFirstTerminator(),
- TM.getInstrInfo()->get(X86::FP_REG_KILL));
- ++NumFPKill;
- }
- }
+ CurDAG->RemoveDeadNodes();
}
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
MachineFrameInfo *MFI) {
const TargetInstrInfo *TII = TM.getInstrInfo();
if (Subtarget->isTargetCygMing())
- BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
+ BuildMI(BB, DebugLoc::getUnknownLoc(),
+ TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}
void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}
+
+bool X86DAGToDAGISel::MatchSegmentBaseAddress(SDValue N,
+ X86ISelAddressMode &AM) {
+ assert(N.getOpcode() == X86ISD::SegmentBaseAddress);
+ SDValue Segment = N.getOperand(0);
+
+ if (AM.Segment.getNode() == 0) {
+ AM.Segment = Segment;
+ return false;
+ }
+
+ return true;
+}
+
+bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
+ // This optimization is valid because the GNU TLS model defines that
+ // gs:0 (or fs:0 on X86-64) contains its own address.
+ // For more information see http://people.redhat.com/drepper/tls.pdf
+
+ SDValue Address = N.getOperand(1);
+ if (Address.getOpcode() == X86ISD::SegmentBaseAddress &&
+      !MatchSegmentBaseAddress(Address, AM))
+ return false;
+
+ return true;
+}
+
+/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
+/// into an addressing mode. These wrap things that will resolve down into a
+/// symbol reference. If no match is possible, this returns true, otherwise it
+/// returns false.
+bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
+ // If the addressing mode already has a symbol as the displacement, we can
+ // never match another symbol.
+ if (AM.hasSymbolicDisplacement())
+ return true;
+
+ SDValue N0 = N.getOperand(0);
+
+ // Handle X86-64 rip-relative addresses. We check this before checking direct
+ // folding because RIP is preferable to non-RIP accesses.
+ if (Subtarget->is64Bit() &&
+ // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
+ // they cannot be folded into immediate fields.
+ // FIXME: This can be improved for kernel and other models?
+ TM.getCodeModel() == CodeModel::Small &&
+
+ // Base and index reg must be 0 in order to use %rip as base and lowering
+ // must allow RIP.
+ !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
+
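+    // e.g. this allows a RIP-relative access:  movq foo(%rip), %rax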
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+ int64_t Offset = AM.Disp + G->getOffset();
+ if (!isInt32(Offset)) return true;
+ AM.GV = G->getGlobal();
+ AM.Disp = Offset;
+ AM.SymbolFlags = G->getTargetFlags();
+ } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+ int64_t Offset = AM.Disp + CP->getOffset();
+ if (!isInt32(Offset)) return true;
+ AM.CP = CP->getConstVal();
+ AM.Align = CP->getAlignment();
+ AM.Disp = Offset;
+ AM.SymbolFlags = CP->getTargetFlags();
+ } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
+ AM.ES = S->getSymbol();
+ AM.SymbolFlags = S->getTargetFlags();
+ } else {
+ JumpTableSDNode *J = cast<JumpTableSDNode>(N0);
+ AM.JT = J->getIndex();
+ AM.SymbolFlags = J->getTargetFlags();
+ }
+
+ if (N.getOpcode() == X86ISD::WrapperRIP)
+ AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
+ return false;
+ }
+
+ // Handle the case when globals fit in our immediate field: This is true for
+ // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
+ // mode, this results in a non-RIP-relative computation.
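+  // For example, in -static -mcmodel=small 64-bit mode the symbol can fold
+  // directly into the 32-bit displacement:  movl foo(,%rdx,4), %eax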
+ if (!Subtarget->is64Bit() ||
+ (TM.getCodeModel() == CodeModel::Small &&
+ TM.getRelocationModel() == Reloc::Static)) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+ AM.GV = G->getGlobal();
+ AM.Disp += G->getOffset();
+ AM.SymbolFlags = G->getTargetFlags();
+ } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+ AM.CP = CP->getConstVal();
+ AM.Align = CP->getAlignment();
+ AM.Disp += CP->getOffset();
+ AM.SymbolFlags = CP->getTargetFlags();
+ } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
+ AM.ES = S->getSymbol();
+ AM.SymbolFlags = S->getTargetFlags();
+ } else {
+ JumpTableSDNode *J = cast<JumpTableSDNode>(N0);
+ AM.JT = J->getIndex();
+ AM.SymbolFlags = J->getTargetFlags();
+ }
+ return false;
+ }
+
+ return true;
+}
+
/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
- bool isRoot, unsigned Depth) {
+ unsigned Depth) {
+ bool is64Bit = Subtarget->is64Bit();
+ DebugLoc dl = N.getDebugLoc();
+ DOUT << "MatchAddress: "; DEBUG(AM.dump());
// Limit recursion.
if (Depth > 5)
- return MatchAddressBase(N, AM, isRoot, Depth);
+ return MatchAddressBase(N, AM);
+ // If this is already a %rip relative address, we can only merge immediates
+ // into it. Instead of handling this in every case, we handle it here.
// RIP relative addressing: %rip + 32-bit displacement!
- if (AM.isRIPRel) {
- if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
- int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
- if (isInt32(AM.Disp + Val)) {
- AM.Disp += Val;
+ if (AM.isRIPRelative()) {
+    // FIXME: JumpTable and ExternalSymbol addresses currently don't like
+ // displacements. It isn't very important, but this should be fixed for
+ // consistency.
+ if (!AM.ES && AM.JT != -1) return true;
+
+ if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
+ int64_t Val = AM.Disp + Cst->getSExtValue();
+ if (isInt32(Val)) {
+ AM.Disp = Val;
return false;
}
}
return true;
}
- int id = N.Val->getNodeId();
- bool AlreadySelected = isSelected(id); // Already selected, not yet replaced.
-
switch (N.getOpcode()) {
default: break;
case ISD::Constant: {
- int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
- if (isInt32(AM.Disp + Val)) {
+ uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
+ if (!is64Bit || isInt32(AM.Disp + Val)) {
AM.Disp += Val;
return false;
}
break;
}
- case X86ISD::Wrapper: {
- bool is64Bit = Subtarget->is64Bit();
- // Under X86-64 non-small code model, GV (and friends) are 64-bits.
- // Also, base and index reg must be 0 in order to use rip as base.
- if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
- AM.Base.Reg.Val || AM.IndexReg.Val))
- break;
- if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
- break;
- // If value is available in a register both base and index components have
- // been picked, we can't fit the result available in the register in the
- // addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
- if (!AlreadySelected || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
- SDValue N0 = N.getOperand(0);
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
- GlobalValue *GV = G->getGlobal();
- AM.GV = GV;
- AM.Disp += G->getOffset();
- AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
- Subtarget->isPICStyleRIPRel();
- return false;
- } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
- AM.CP = CP->getConstVal();
- AM.Align = CP->getAlignment();
- AM.Disp += CP->getOffset();
- AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
- Subtarget->isPICStyleRIPRel();
- return false;
- } else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
- AM.ES = S->getSymbol();
- AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
- Subtarget->isPICStyleRIPRel();
- return false;
- } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
- AM.JT = J->getIndex();
- AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
- Subtarget->isPICStyleRIPRel();
- return false;
- }
- }
+ case X86ISD::SegmentBaseAddress:
+ if (!MatchSegmentBaseAddress(N, AM))
+ return false;
+ break;
+
+ case X86ISD::Wrapper:
+ case X86ISD::WrapperRIP:
+ if (!MatchWrapper(N, AM))
+ return false;
+ break;
+
+ case ISD::LOAD:
+ if (!MatchLoad(N, AM))
+ return false;
break;
- }
case ISD::FrameIndex:
- if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
+ if (AM.BaseType == X86ISelAddressMode::RegBase
+ && AM.Base.Reg.getNode() == 0) {
AM.BaseType = X86ISelAddressMode::FrameIndexBase;
AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
return false;
break;
case ISD::SHL:
- if (AlreadySelected || AM.IndexReg.Val != 0 || AM.Scale != 1 || AM.isRIPRel)
+ if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
break;
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
- unsigned Val = CN->getValue();
+    if (ConstantSDNode *CN =
+          dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
+ unsigned Val = CN->getZExtValue();
if (Val == 1 || Val == 2 || Val == 3) {
AM.Scale = 1 << Val;
- SDValue ShVal = N.Val->getOperand(0);
+ SDValue ShVal = N.getNode()->getOperand(0);
// Okay, we know that we have a scale by now. However, if the scaled
// value is an add of something and a constant, we can fold the
// constant into the disp field here.
- if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
- isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
- AM.IndexReg = ShVal.Val->getOperand(0);
+ if (ShVal.getNode()->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
+ isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
+ AM.IndexReg = ShVal.getNode()->getOperand(0);
ConstantSDNode *AddVal =
- cast<ConstantSDNode>(ShVal.Val->getOperand(1));
- uint64_t Disp = AM.Disp + (AddVal->getValue() << Val);
- if (isInt32(Disp))
+ cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
+ uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
+ if (!is64Bit || isInt32(Disp))
AM.Disp = Disp;
else
AM.IndexReg = ShVal;
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI:
// A mul_lohi where we need the low part can be folded as a plain multiply.
- if (N.ResNo != 0) break;
+ if (N.getResNo() != 0) break;
// FALL THROUGH
case ISD::MUL:
+ case X86ISD::MUL_IMM:
// X*[3,5,9] -> X+X*[2,4,8]
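+    // e.g. X*9 is selected as  leal (%eax,%eax,8), %ecx  rather than a
+    // multiply.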
- if (!AlreadySelected &&
- AM.BaseType == X86ISelAddressMode::RegBase &&
- AM.Base.Reg.Val == 0 &&
- AM.IndexReg.Val == 0 &&
- !AM.isRIPRel) {
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
- if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
- AM.Scale = unsigned(CN->getValue())-1;
-
- SDValue MulVal = N.Val->getOperand(0);
+ if (AM.BaseType == X86ISelAddressMode::RegBase &&
+ AM.Base.Reg.getNode() == 0 &&
+ AM.IndexReg.getNode() == 0) {
+      if (ConstantSDNode *CN =
+            dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
+ if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
+ CN->getZExtValue() == 9) {
+ AM.Scale = unsigned(CN->getZExtValue())-1;
+
+ SDValue MulVal = N.getNode()->getOperand(0);
SDValue Reg;
// Okay, we know that we have a scale by now. However, if the scaled
// value is an add of something and a constant, we can fold the
// constant into the disp field here.
- if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
- isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
- Reg = MulVal.Val->getOperand(0);
+ if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
+ isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
+ Reg = MulVal.getNode()->getOperand(0);
ConstantSDNode *AddVal =
- cast<ConstantSDNode>(MulVal.Val->getOperand(1));
- uint64_t Disp = AM.Disp + AddVal->getValue() * CN->getValue();
- if (isInt32(Disp))
+ cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
+ uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
+ CN->getZExtValue();
+ if (!is64Bit || isInt32(Disp))
AM.Disp = Disp;
else
- Reg = N.Val->getOperand(0);
+ Reg = N.getNode()->getOperand(0);
} else {
- Reg = N.Val->getOperand(0);
+ Reg = N.getNode()->getOperand(0);
}
AM.IndexReg = AM.Base.Reg = Reg;
}
break;
- case ISD::ADD:
- if (!AlreadySelected) {
- X86ISelAddressMode Backup = AM;
- if (!MatchAddress(N.Val->getOperand(0), AM, false, Depth+1) &&
- !MatchAddress(N.Val->getOperand(1), AM, false, Depth+1))
- return false;
+  case ISD::SUB: {
+    // Given A-B, if A can be completely folded into the address (leaving
+    // the index field unused), use -B as the index. This is a win if A
+    // has multiple parts that can be folded into the address. Also, this
+    // saves a mov if the base register has other uses, since it avoids a
+    // two-address sub instruction; however, it costs an additional mov if
+    // the index register has other uses.
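+    // For example, for (GV+4)-X the LHS folds entirely into the
+    // displacement and the negation of X becomes the index:
+    //   movl GV+4(,%reg,1), %eax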
+
+ // Test if the LHS of the sub can be folded.
+ X86ISelAddressMode Backup = AM;
+ if (MatchAddress(N.getNode()->getOperand(0), AM, Depth+1)) {
AM = Backup;
- if (!MatchAddress(N.Val->getOperand(1), AM, false, Depth+1) &&
- !MatchAddress(N.Val->getOperand(0), AM, false, Depth+1))
- return false;
+ break;
+ }
+ // Test if the index field is free for use.
+ if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
AM = Backup;
+ break;
+ }
+ int Cost = 0;
+ SDValue RHS = N.getNode()->getOperand(1);
+ // If the RHS involves a register with multiple uses, this
+ // transformation incurs an extra mov, due to the neg instruction
+ // clobbering its operand.
+ if (!RHS.getNode()->hasOneUse() ||
+ RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
+ RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
+ RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
+ (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
+ RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
+ ++Cost;
+ // If the base is a register with multiple uses, this
+ // transformation may save a mov.
+ if ((AM.BaseType == X86ISelAddressMode::RegBase &&
+ AM.Base.Reg.getNode() &&
+ !AM.Base.Reg.getNode()->hasOneUse()) ||
+ AM.BaseType == X86ISelAddressMode::FrameIndexBase)
+ --Cost;
+ // If the folded LHS was interesting, this transformation saves
+ // address arithmetic.
+ if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
+ ((AM.Disp != 0) && (Backup.Disp == 0)) +
+ (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
+ --Cost;
+ // If it doesn't look like it may be an overall win, don't do it.
+ if (Cost >= 0) {
+ AM = Backup;
+ break;
+ }
+
+ // Ok, the transformation is legal and appears profitable. Go for it.
+ SDValue Zero = CurDAG->getConstant(0, N.getValueType());
+ SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
+ AM.IndexReg = Neg;
+ AM.Scale = 1;
+
+ // Insert the new nodes into the topological ordering.
+ if (Zero.getNode()->getNodeId() == -1 ||
+ Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(N.getNode(), Zero.getNode());
+ Zero.getNode()->setNodeId(N.getNode()->getNodeId());
+ }
+ if (Neg.getNode()->getNodeId() == -1 ||
+ Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(N.getNode(), Neg.getNode());
+ Neg.getNode()->setNodeId(N.getNode()->getNodeId());
+ }
+ return false;
+ }
+
+ case ISD::ADD: {
+ X86ISelAddressMode Backup = AM;
+ if (!MatchAddress(N.getNode()->getOperand(0), AM, Depth+1) &&
+ !MatchAddress(N.getNode()->getOperand(1), AM, Depth+1))
+ return false;
+ AM = Backup;
+ if (!MatchAddress(N.getNode()->getOperand(1), AM, Depth+1) &&
+ !MatchAddress(N.getNode()->getOperand(0), AM, Depth+1))
+ return false;
+ AM = Backup;
+
+ // If we couldn't fold both operands into the address at the same time,
+ // see if we can just put each operand into a register and fold at least
+ // the add.
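+    // That is, fall back to Base = Op0, Index = Op1, Scale = 1, so the add
+    // itself is still absorbed into the addressing mode.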
+ if (AM.BaseType == X86ISelAddressMode::RegBase &&
+ !AM.Base.Reg.getNode() &&
+ !AM.IndexReg.getNode()) {
+ AM.Base.Reg = N.getNode()->getOperand(0);
+ AM.IndexReg = N.getNode()->getOperand(1);
+ AM.Scale = 1;
+ return false;
}
break;
+ }
case ISD::OR:
// Handle "X | C" as "X + C" iff X is known to have C bits clear.
- if (AlreadySelected) break;
-
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
X86ISelAddressMode Backup = AM;
+ uint64_t Offset = CN->getSExtValue();
// Start with the LHS as an addr mode.
- if (!MatchAddress(N.getOperand(0), AM, false) &&
+ if (!MatchAddress(N.getOperand(0), AM, Depth+1) &&
// Address could not have picked a GV address for the displacement.
AM.GV == NULL &&
// On x86-64, the resultant disp must fit in 32-bits.
- isInt32(AM.Disp + CN->getSignExtended()) &&
+ (!is64Bit || isInt32(AM.Disp + Offset)) &&
// Check to see if the LHS & C is zero.
CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
- AM.Disp += CN->getValue();
+ AM.Disp += Offset;
return false;
}
AM = Backup;
break;
case ISD::AND: {
- // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
- // allows us to fold the shift into this addressing mode.
- if (AlreadySelected) break;
+ // Perform some heroic transforms on an and of a constant-count shift
+ // with a constant to enable use of the scaled offset field.
+
SDValue Shift = N.getOperand(0);
- if (Shift.getOpcode() != ISD::SHL) break;
-
+ if (Shift.getNumOperands() != 2) break;
+
// Scale must not be used already.
- if (AM.IndexReg.Val != 0 || AM.Scale != 1) break;
+ if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
- // Not when RIP is used as the base.
- if (AM.isRIPRel) break;
-
+ SDValue X = Shift.getOperand(0);
ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
if (!C1 || !C2) break;
+ // Handle "(X >> (8-C1)) & C2" as "(X >> 8) & 0xff)" if safe. This
+ // allows us to convert the shift and and into an h-register extract and
+ // a scaled index.
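+    // For example, with C1 == 5 (so ScaleLog == 3):
+    //   (X >> 5) & (0xff << 3)  ==  ((X >> 8) & 0xff) << 3
+    // i.e. a byte extract of bits 8-15 used with a scale-by-8 index.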
+ if (Shift.getOpcode() == ISD::SRL && Shift.hasOneUse()) {
+ unsigned ScaleLog = 8 - C1->getZExtValue();
+ if (ScaleLog > 0 && ScaleLog < 4 &&
+ C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
+ SDValue Eight = CurDAG->getConstant(8, MVT::i8);
+ SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
+ SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
+ X, Eight);
+ SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
+ Srl, Mask);
+ SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
+ SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
+ And, ShlCount);
+
+ // Insert the new nodes into the topological ordering.
+ if (Eight.getNode()->getNodeId() == -1 ||
+ Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(X.getNode(), Eight.getNode());
+ Eight.getNode()->setNodeId(X.getNode()->getNodeId());
+ }
+ if (Mask.getNode()->getNodeId() == -1 ||
+ Mask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(X.getNode(), Mask.getNode());
+ Mask.getNode()->setNodeId(X.getNode()->getNodeId());
+ }
+ if (Srl.getNode()->getNodeId() == -1 ||
+ Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(Shift.getNode(), Srl.getNode());
+ Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
+ }
+ if (And.getNode()->getNodeId() == -1 ||
+ And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(N.getNode(), And.getNode());
+ And.getNode()->setNodeId(N.getNode()->getNodeId());
+ }
+ if (ShlCount.getNode()->getNodeId() == -1 ||
+ ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(X.getNode(), ShlCount.getNode());
+ ShlCount.getNode()->setNodeId(N.getNode()->getNodeId());
+ }
+ if (Shl.getNode()->getNodeId() == -1 ||
+ Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(N.getNode(), Shl.getNode());
+ Shl.getNode()->setNodeId(N.getNode()->getNodeId());
+ }
+ CurDAG->ReplaceAllUsesWith(N, Shl);
+ AM.IndexReg = And;
+ AM.Scale = (1 << ScaleLog);
+ return false;
+ }
+ }
+
+ // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
+ // allows us to fold the shift into this addressing mode.
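+    // For example, (X << 2) & 0x3c becomes (X & 0xf) << 2, and the final
+    // shift-by-2 is then absorbed as a scale of 4 in the address.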
+ if (Shift.getOpcode() != ISD::SHL) break;
+
// Not likely to be profitable if either the AND or SHIFT node has more
// than one use (unless all uses are for address computation). Besides,
    // the isel mechanism requires their node ids to be reused.
break;
// Verify that the shift amount is something we can fold.
- unsigned ShiftCst = C1->getValue();
+ unsigned ShiftCst = C1->getZExtValue();
if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
break;
// Get the new AND mask, this folds to a constant.
- SDValue NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
- SDValue(C2, 0), SDValue(C1, 0));
- SDValue NewAND = CurDAG->getNode(ISD::AND, N.getValueType(),
- Shift.getOperand(0), NewANDMask);
- NewANDMask.Val->setNodeId(Shift.Val->getNodeId());
- NewAND.Val->setNodeId(N.Val->getNodeId());
+ SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
+ SDValue(C2, 0), SDValue(C1, 0));
+ SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
+ NewANDMask);
+ SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
+ NewAND, SDValue(C1, 0));
+
+ // Insert the new nodes into the topological ordering.
+ if (C1->getNodeId() > X.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(X.getNode(), C1);
+ C1->setNodeId(X.getNode()->getNodeId());
+ }
+ if (NewANDMask.getNode()->getNodeId() == -1 ||
+ NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
+ NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
+ }
+ if (NewAND.getNode()->getNodeId() == -1 ||
+ NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
+ NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
+ }
+ if (NewSHIFT.getNode()->getNodeId() == -1 ||
+ NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
+ CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
+ NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
+ }
+
+ CurDAG->ReplaceAllUsesWith(N, NewSHIFT);
AM.Scale = 1 << ShiftCst;
AM.IndexReg = NewAND;
}
}
- return MatchAddressBase(N, AM, isRoot, Depth);
+ return MatchAddressBase(N, AM);
}
/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
-bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
- bool isRoot, unsigned Depth) {
+bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
// Is the base register already occupied?
- if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
+ if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
// If so, check to see if the scale index register is set.
- if (AM.IndexReg.Val == 0 && !AM.isRIPRel) {
+ if (AM.IndexReg.getNode() == 0) {
AM.IndexReg = N;
AM.Scale = 1;
return false;
/// match by reference.
bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
- SDValue &Disp) {
+ SDValue &Disp, SDValue &Segment) {
X86ISelAddressMode AM;
- if (MatchAddress(N, AM))
+ bool Done = false;
+ if (AvoidDupAddrCompute && !N.hasOneUse()) {
+ unsigned Opcode = N.getOpcode();
+ if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex &&
+ Opcode != X86ISD::Wrapper && Opcode != X86ISD::WrapperRIP) {
+      // If we are able to fold N into the addressing mode, then we'll allow
+      // it even if N has multiple uses. In general, an address computation is
+      // used as an address by all of its uses. But watch out for CopyToReg
+      // uses: that means the address computation is live out. It will be
+      // computed by an LEA, so we want to avoid computing the address twice.
+ for (SDNode::use_iterator UI = N.getNode()->use_begin(),
+ UE = N.getNode()->use_end(); UI != UE; ++UI) {
+ if (UI->getOpcode() == ISD::CopyToReg) {
+ MatchAddressBase(N, AM);
+ Done = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!Done && MatchAddress(N, AM))
return false;
MVT VT = N.getValueType();
if (AM.BaseType == X86ISelAddressMode::RegBase) {
- if (!AM.Base.Reg.Val)
+ if (!AM.Base.Reg.getNode())
AM.Base.Reg = CurDAG->getRegister(0, VT);
}
- if (!AM.IndexReg.Val)
+ if (!AM.IndexReg.getNode())
AM.IndexReg = CurDAG->getRegister(0, VT);
- getAddressOperands(AM, Base, Scale, Index, Disp);
+ getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
return true;
}
-/// isZeroNode - Returns true if Elt is a constant zero or a floating point
-/// constant +0.0.
-static inline bool isZeroNode(SDValue Elt) {
- return ((isa<ConstantSDNode>(Elt) &&
- cast<ConstantSDNode>(Elt)->getValue() == 0) ||
- (isa<ConstantFPSDNode>(Elt) &&
- cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
-}
-
-
/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
- SDValue &Disp, SDValue &InChain,
+ SDValue &Disp, SDValue &Segment,
+ SDValue &InChain,
SDValue &OutChain) {
if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
InChain = N.getOperand(0).getValue(1);
- if (ISD::isNON_EXTLoad(InChain.Val) &&
+ if (ISD::isNON_EXTLoad(InChain.getNode()) &&
InChain.getValue(0).hasOneUse() &&
N.hasOneUse() &&
- CanBeFoldedBy(N.Val, Pred.Val, Op.Val)) {
+ IsLegalAndProfitableToFold(N.getNode(), Pred.getNode(), Op.getNode())) {
LoadSDNode *LD = cast<LoadSDNode>(InChain);
- if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
+ if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
return false;
OutChain = LD->getChain();
return true;
// Also handle the case where we explicitly require zeros in the top
// elements. This is a vector shuffle from the zero vector.
- if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.Val->hasOneUse() &&
+ if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
// Check to see if the top elements are all zeros (or bitcast of zeros).
N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
- N.getOperand(0).Val->hasOneUse() &&
- ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).Val) &&
+ N.getOperand(0).getNode()->hasOneUse() &&
+ ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
N.getOperand(0).getOperand(0).hasOneUse()) {
// Okay, this is a zero extending load. Fold it.
LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
- if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
+ if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
return false;
OutChain = LD->getChain();
InChain = SDValue(LD, 1);
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp) {
X86ISelAddressMode AM;
+
+ // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
+ // segments.
+ SDValue Copy = AM.Segment;
+ SDValue T = CurDAG->getRegister(0, MVT::i32);
+ AM.Segment = T;
if (MatchAddress(N, AM))
return false;
+  assert(T == AM.Segment);
+ AM.Segment = Copy;
MVT VT = N.getValueType();
unsigned Complexity = 0;
if (AM.BaseType == X86ISelAddressMode::RegBase)
- if (AM.Base.Reg.Val)
+ if (AM.Base.Reg.getNode())
Complexity = 1;
else
AM.Base.Reg = CurDAG->getRegister(0, VT);
else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
Complexity = 4;
- if (AM.IndexReg.Val)
+ if (AM.IndexReg.getNode())
Complexity++;
else
AM.IndexReg = CurDAG->getRegister(0, VT);
// optimal (especially for code size consideration). LEA is nice because of
// its three-address nature. Tweak the cost function again when we can run
// convertToThreeAddress() at register allocation time.
- if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
+ if (AM.hasSymbolicDisplacement()) {
// For X86-64, we should always use lea to materialize RIP relative
// addresses.
if (Subtarget->is64Bit())
Complexity += 2;
}
- if (AM.Disp && (AM.Base.Reg.Val || AM.IndexReg.Val))
+ if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
Complexity++;
if (Complexity > 2) {
- getAddressOperands(AM, Base, Scale, Index, Disp);
+ SDValue Segment;
+ getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
return true;
}
return false;
}
+/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
+bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue Op, SDValue N, SDValue &Base,
+ SDValue &Scale, SDValue &Index,
+ SDValue &Disp) {
+ assert(Op.getOpcode() == X86ISD::TLSADDR);
+ assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
+ const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
+
+ X86ISelAddressMode AM;
+ AM.GV = GA->getGlobal();
+ AM.Disp += GA->getOffset();
+ AM.Base.Reg = CurDAG->getRegister(0, N.getValueType());
+ AM.SymbolFlags = GA->getTargetFlags();
+
+ if (N.getValueType() == MVT::i32) {
+ AM.Scale = 1;
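+    // On 32-bit targets the TLSADDR lowering expects the GOT pointer in
+    // %ebx, so reference it as the index register.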
+ AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
+ } else {
+ AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
+ }
+
+ SDValue Segment;
+ getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
+ return true;
+}
+
+
bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
SDValue &Base, SDValue &Scale,
- SDValue &Index, SDValue &Disp) {
- if (ISD::isNON_EXTLoad(N.Val) &&
+ SDValue &Index, SDValue &Disp,
+ SDValue &Segment) {
+ if (ISD::isNON_EXTLoad(N.getNode()) &&
N.hasOneUse() &&
- CanBeFoldedBy(N.Val, P.Val, P.Val))
- return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
+ IsLegalAndProfitableToFold(N.getNode(), P.getNode(), P.getNode()))
+ return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
return false;
}
-/// getGlobalBaseReg - Output the instructions required to put the
-/// base address to use for accessing globals into a register.
+/// getGlobalBaseReg - Return an SDNode that returns the value of
+/// the global base register. Output instructions required to
+/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
- assert(!Subtarget->is64Bit() && "X86-64 PIC uses RIP relative addressing");
- if (!GlobalBaseReg) {
- // Insert the set of GlobalBaseReg into the first MBB of the function
- MachineFunction *MF = BB->getParent();
- MachineBasicBlock &FirstMBB = MF->front();
- MachineBasicBlock::iterator MBBI = FirstMBB.begin();
- MachineRegisterInfo &RegInfo = MF->getRegInfo();
- unsigned PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
-
- const TargetInstrInfo *TII = TM.getInstrInfo();
- // Operand of MovePCtoStack is completely ignored by asm printer. It's
- // only used in JIT code emission as displacement to pc.
- BuildMI(FirstMBB, MBBI, TII->get(X86::MOVPC32r), PC).addImm(0);
-
- // If we're using vanilla 'GOT' PIC style, we should use relative addressing
- // not to pc, but to _GLOBAL_ADDRESS_TABLE_ external
- if (TM.getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT()) {
- GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
- BuildMI(FirstMBB, MBBI, TII->get(X86::ADD32ri), GlobalBaseReg)
- .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
- } else {
- GlobalBaseReg = PC;
- }
-
- }
- return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).Val;
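+ // The code that materializes the base register now lives in
+ // X86InstrInfo::getGlobalBaseReg, which creates the virtual register and
+ // inserts the setup instructions in the entry block on first use.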
+ MachineFunction *MF = CurBB->getParent();
+ unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
+ return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
static SDNode *FindCallStartFromCall(SDNode *Node) {
if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
assert(Node->getOperand(0).getValueType() == MVT::Other &&
"Node doesn't have a token chain argument!");
- return FindCallStartFromCall(Node->getOperand(0).Val);
+ return FindCallStartFromCall(Node->getOperand(0).getNode());
}
-SDNode *X86DAGToDAGISel::getTruncate(SDValue N0, MVT VT) {
- SDValue SRIdx;
- switch (VT.getSimpleVT()) {
- default: assert(0 && "Unknown truncate!");
- case MVT::i8:
- SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
- // Ensure that the source register has an 8-bit subreg on 32-bit targets
- if (!Subtarget->is64Bit()) {
- unsigned Opc;
- MVT N0VT = N0.getValueType();
- switch (N0VT.getSimpleVT()) {
- default: assert(0 && "Unknown truncate!");
- case MVT::i16:
- Opc = X86::MOV16to16_;
- break;
- case MVT::i32:
- Opc = X86::MOV32to32_;
- break;
- }
- N0 = SDValue(CurDAG->getTargetNode(Opc, N0VT, MVT::Flag, N0), 0);
- return CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
- VT, N0, SRIdx, N0.getValue(1));
- }
- break;
- case MVT::i16:
- SRIdx = CurDAG->getTargetConstant(2, MVT::i32); // SubRegSet 2
- break;
- case MVT::i32:
- SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
- break;
- }
- return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, VT, N0, SRIdx);
+SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
+ SDValue Chain = Node->getOperand(0);
+ SDValue In1 = Node->getOperand(1);
+ SDValue In2L = Node->getOperand(2);
+ SDValue In2H = Node->getOperand(3);
+ SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
+ if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
+ return NULL;
+ SDValue LSI = Node->getOperand(4); // MemOperand
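+ // Operand order for the pseudo: the 5-part memory reference (base, scale,
+ // index, disp, segment), the two 32-bit value halves, the memoperand, and
+ // finally the chain.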
+ const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, LSI, Chain};
+ return CurDAG->getTargetNode(Opc, Node->getDebugLoc(),
+ MVT::i32, MVT::i32, MVT::Other, Ops,
+ array_lengthof(Ops));
}
-
SDNode *X86DAGToDAGISel::Select(SDValue N) {
- SDNode *Node = N.Val;
+ SDNode *Node = N.getNode();
MVT NVT = Node->getValueType(0);
unsigned Opc, MOpc;
unsigned Opcode = Node->getOpcode();
-
+ DebugLoc dl = Node->getDebugLoc();
+
#ifndef NDEBUG
DOUT << std::string(Indent, ' ') << "Selecting: ";
DEBUG(Node->dump(CurDAG));
case X86ISD::GlobalBaseReg:
return getGlobalBaseReg();
- case ISD::ADD: {
- // Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
- // code and is matched first so to prevent it from being turned into
- // LEA32r X+c.
- // In 64-bit small code size mode, use LEA to take advantage of
- // RIP-relative addressing.
- if (TM.getCodeModel() != CodeModel::Small)
- break;
- MVT PtrVT = TLI.getPointerTy();
- SDValue N0 = N.getOperand(0);
- SDValue N1 = N.getOperand(1);
- if (N.Val->getValueType(0) == PtrVT &&
- N0.getOpcode() == X86ISD::Wrapper &&
- N1.getOpcode() == ISD::Constant) {
- unsigned Offset = (unsigned)cast<ConstantSDNode>(N1)->getValue();
- SDValue C(0, 0);
- // TODO: handle ExternalSymbolSDNode.
- if (GlobalAddressSDNode *G =
- dyn_cast<GlobalAddressSDNode>(N0.getOperand(0))) {
- C = CurDAG->getTargetGlobalAddress(G->getGlobal(), PtrVT,
- G->getOffset() + Offset);
- } else if (ConstantPoolSDNode *CP =
- dyn_cast<ConstantPoolSDNode>(N0.getOperand(0))) {
- C = CurDAG->getTargetConstantPool(CP->getConstVal(), PtrVT,
- CP->getAlignment(),
- CP->getOffset()+Offset);
- }
-
- if (C.Val) {
- if (Subtarget->is64Bit()) {
- SDValue Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1),
- CurDAG->getRegister(0, PtrVT), C };
- return CurDAG->SelectNodeTo(N.Val, X86::LEA64r, MVT::i64, Ops, 4);
- } else
- return CurDAG->SelectNodeTo(N.Val, X86::MOV32ri, PtrVT, C);
- }
- }
-
- // Other cases are handled by auto-generated code.
- break;
- }
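+ // These pseudos implement 64-bit atomic read-modify-write operations on
+ // 32-bit targets; they are expanded later into CMPXCHG8B loops.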
+ case X86ISD::ATOMOR64_DAG:
+ return SelectAtomic64(Node, X86::ATOMOR6432);
+ case X86ISD::ATOMXOR64_DAG:
+ return SelectAtomic64(Node, X86::ATOMXOR6432);
+ case X86ISD::ATOMADD64_DAG:
+ return SelectAtomic64(Node, X86::ATOMADD6432);
+ case X86ISD::ATOMSUB64_DAG:
+ return SelectAtomic64(Node, X86::ATOMSUB6432);
+ case X86ISD::ATOMNAND64_DAG:
+ return SelectAtomic64(Node, X86::ATOMNAND6432);
+ case X86ISD::ATOMAND64_DAG:
+ return SelectAtomic64(Node, X86::ATOMAND6432);
+ case X86ISD::ATOMSWAP64_DAG:
+ return SelectAtomic64(Node, X86::ATOMSWAP6432);
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI: {
bool isSigned = Opcode == ISD::SMUL_LOHI;
if (!isSigned)
switch (NVT.getSimpleVT()) {
- default: assert(0 && "Unsupported VT!");
+ default: LLVM_UNREACHABLE("Unsupported VT!");
case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
}
else
switch (NVT.getSimpleVT()) {
- default: assert(0 && "Unsupported VT!");
+ default: LLVM_UNREACHABLE("Unsupported VT!");
case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
unsigned LoReg, HiReg;
switch (NVT.getSimpleVT()) {
- default: assert(0 && "Unsupported VT!");
+ default: LLVM_UNREACHABLE("Unsupported VT!");
case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
}
- SDValue Tmp0, Tmp1, Tmp2, Tmp3;
- bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
+ SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
+ bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
// Multiply is commutative.
if (!foldedLoad) {
- foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
+ foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
if (foldedLoad)
std::swap(N0, N1);
}
- AddToISelQueue(N0);
- SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg,
+ SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
N0, SDValue()).getValue(1);
if (foldedLoad) {
- AddToISelQueue(N1.getOperand(0));
- AddToISelQueue(Tmp0);
- AddToISelQueue(Tmp1);
- AddToISelQueue(Tmp2);
- AddToISelQueue(Tmp3);
- SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
+ SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
+ InFlag };
SDNode *CNode =
- CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
+ CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
+ array_lengthof(Ops));
InFlag = SDValue(CNode, 1);
// Update the chain.
ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
} else {
- AddToISelQueue(N1);
InFlag =
- SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
+ SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
}
// Copy the low half of the result, if it is needed.
if (!N.getValue(0).use_empty()) {
- SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
LoReg, NVT, InFlag);
InFlag = Result.getValue(2);
ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
- DEBUG(Result.Val->dump(CurDAG));
+ DEBUG(Result.getNode()->dump(CurDAG));
DOUT << "\n";
#endif
}
if (HiReg == X86::AH && Subtarget->is64Bit()) {
// Prevent use of AH in a REX instruction by referencing AX instead.
// Shift it down 8 bits.
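+ // (A REX prefix re-purposes the AH/BH/CH/DH encodings as SPL/BPL/SIL/DIL,
+ // so AH is unreachable in REX-prefixed instructions.)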
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
+ Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
X86::AX, MVT::i16, InFlag);
InFlag = Result.getValue(2);
- Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
- CurDAG->getTargetConstant(8, MVT::i8)), 0);
+ Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
+ Result,
+ CurDAG->getTargetConstant(8, MVT::i8)), 0);
// Then truncate it down to i8.
- SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
- Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
+ SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
+ Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
MVT::i8, Result, SRIdx), 0);
} else {
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
+ Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
HiReg, NVT, InFlag);
InFlag = Result.getValue(2);
}
ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
- DEBUG(Result.Val->dump(CurDAG));
+ DEBUG(Result.getNode()->dump(CurDAG));
DOUT << "\n";
#endif
}
bool isSigned = Opcode == ISD::SDIVREM;
if (!isSigned)
switch (NVT.getSimpleVT()) {
- default: assert(0 && "Unsupported VT!");
+ default: LLVM_UNREACHABLE("Unsupported VT!");
case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
}
else
switch (NVT.getSimpleVT()) {
- default: assert(0 && "Unsupported VT!");
+ default: LLVM_UNREACHABLE("Unsupported VT!");
case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
unsigned LoReg, HiReg;
unsigned ClrOpcode, SExtOpcode;
switch (NVT.getSimpleVT()) {
- default: assert(0 && "Unsupported VT!");
+ default: LLVM_UNREACHABLE("Unsupported VT!");
case MVT::i8:
LoReg = X86::AL; HiReg = X86::AH;
ClrOpcode = 0;
break;
}
- SDValue Tmp0, Tmp1, Tmp2, Tmp3;
- bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
+ SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
+ bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
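+ // A signed divide of a value whose sign bit is known zero gives the same
+ // result as an unsigned divide, so the cheaper zero-extension setup below
+ // can be used in that case as well.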
+ bool signBitIsZero = CurDAG->SignBitIsZero(N0);
SDValue InFlag;
- if (NVT == MVT::i8 && !isSigned) {
+ if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
// Special case for div8, just use a move with zero extension to AX to
// clear the upper 8 bits (AH).
- SDValue Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
- if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
- SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
- AddToISelQueue(N0.getOperand(0));
- AddToISelQueue(Tmp0);
- AddToISelQueue(Tmp1);
- AddToISelQueue(Tmp2);
- AddToISelQueue(Tmp3);
+ SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
+ if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
+ SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
Move =
- SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
- Ops, 5), 0);
+ SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, dl, MVT::i16,
+ MVT::Other, Ops,
+ array_lengthof(Ops)), 0);
Chain = Move.getValue(1);
ReplaceUses(N0.getValue(1), Chain);
} else {
- AddToISelQueue(N0);
Move =
- SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
+ SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, dl, MVT::i16, N0), 0);
Chain = CurDAG->getEntryNode();
}
- Chain = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDValue());
+ Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, Move, SDValue());
InFlag = Chain.getValue(1);
} else {
- AddToISelQueue(N0);
InFlag =
- CurDAG->getCopyToReg(CurDAG->getEntryNode(),
+ CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
LoReg, N0, SDValue()).getValue(1);
- if (isSigned) {
+ if (isSigned && !signBitIsZero) {
// Sign extend the low part into the high part.
InFlag =
- SDValue(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);
+ SDValue(CurDAG->getTargetNode(SExtOpcode, dl, MVT::Flag, InFlag), 0);
} else {
// Zero out the high part, effectively zero extending the input.
- SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
- InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg,
+ SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, dl, NVT),
+ 0);
+ InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, HiReg,
ClrNode, InFlag).getValue(1);
}
}
if (foldedLoad) {
- AddToISelQueue(N1.getOperand(0));
- AddToISelQueue(Tmp0);
- AddToISelQueue(Tmp1);
- AddToISelQueue(Tmp2);
- AddToISelQueue(Tmp3);
- SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
+ SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
+ InFlag };
SDNode *CNode =
- CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
+ CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
+ array_lengthof(Ops));
InFlag = SDValue(CNode, 1);
// Update the chain.
ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
} else {
- AddToISelQueue(N1);
InFlag =
- SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
+ SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
}
// Copy the division (low) result, if it is needed.
if (!N.getValue(0).use_empty()) {
- SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
LoReg, NVT, InFlag);
InFlag = Result.getValue(2);
ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
- DEBUG(Result.Val->dump(CurDAG));
+ DEBUG(Result.getNode()->dump(CurDAG));
DOUT << "\n";
#endif
}
if (HiReg == X86::AH && Subtarget->is64Bit()) {
// Prevent use of AH in a REX instruction by referencing AX instead.
// Shift it down 8 bits.
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
+ Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
X86::AX, MVT::i16, InFlag);
InFlag = Result.getValue(2);
- Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
- CurDAG->getTargetConstant(8, MVT::i8)), 0);
+ Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
+ Result,
+ CurDAG->getTargetConstant(8, MVT::i8)),
+ 0);
// Then truncate it down to i8.
- SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
- Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
+ SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
+ Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
MVT::i8, Result, SRIdx), 0);
} else {
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
+ Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
HiReg, NVT, InFlag);
InFlag = Result.getValue(2);
}
ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
- DEBUG(Result.Val->dump(CurDAG));
+ DEBUG(Result.getNode()->dump(CurDAG));
DOUT << "\n";
#endif
}
return NULL;
}
- case ISD::ANY_EXTEND: {
- // Check if the type extended to supports subregs.
- if (NVT == MVT::i8)
- break;
-
- SDValue N0 = Node->getOperand(0);
- // Get the subregsiter index for the type to extend.
- MVT N0VT = N0.getValueType();
- unsigned Idx = (N0VT == MVT::i32) ? X86::SUBREG_32BIT :
- (N0VT == MVT::i16) ? X86::SUBREG_16BIT :
- (Subtarget->is64Bit()) ? X86::SUBREG_8BIT : 0;
-
- // If we don't have a subreg Idx, let generated ISel have a try.
- if (Idx == 0)
- break;
-
- // If we have an index, generate an insert_subreg into undef.
- AddToISelQueue(N0);
- SDValue Undef =
- SDValue(CurDAG->getTargetNode(X86::IMPLICIT_DEF, NVT), 0);
- SDValue SRIdx = CurDAG->getTargetConstant(Idx, MVT::i32);
- SDNode *ResNode = CurDAG->getTargetNode(X86::INSERT_SUBREG,
- NVT, Undef, N0, SRIdx);
-
-#ifndef NDEBUG
- DOUT << std::string(Indent-2, ' ') << "=> ";
- DEBUG(ResNode->dump(CurDAG));
- DOUT << "\n";
- Indent -= 2;
-#endif
- return ResNode;
- }
-
- case ISD::SIGN_EXTEND_INREG: {
- SDValue N0 = Node->getOperand(0);
- AddToISelQueue(N0);
-
- MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
- SDValue TruncOp = SDValue(getTruncate(N0, SVT), 0);
- unsigned Opc = 0;
- switch (NVT.getSimpleVT()) {
- default: assert(0 && "Unknown sign_extend_inreg!");
- case MVT::i16:
- if (SVT == MVT::i8) Opc = X86::MOVSX16rr8;
- else assert(0 && "Unknown sign_extend_inreg!");
- break;
- case MVT::i32:
- switch (SVT.getSimpleVT()) {
- default: assert(0 && "Unknown sign_extend_inreg!");
- case MVT::i8: Opc = X86::MOVSX32rr8; break;
- case MVT::i16: Opc = X86::MOVSX32rr16; break;
- }
- break;
- case MVT::i64:
- switch (SVT.getSimpleVT()) {
- default: assert(0 && "Unknown sign_extend_inreg!");
- case MVT::i8: Opc = X86::MOVSX64rr8; break;
- case MVT::i16: Opc = X86::MOVSX64rr16; break;
- case MVT::i32: Opc = X86::MOVSX64rr32; break;
- }
- break;
- }
-
- SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);
-
-#ifndef NDEBUG
- DOUT << std::string(Indent-2, ' ') << "=> ";
- DEBUG(TruncOp.Val->dump(CurDAG));
- DOUT << "\n";
- DOUT << std::string(Indent-2, ' ') << "=> ";
- DEBUG(ResNode->dump(CurDAG));
- DOUT << "\n";
- Indent -= 2;
-#endif
- return ResNode;
- break;
- }
-
- case ISD::TRUNCATE: {
- SDValue Input = Node->getOperand(0);
- AddToISelQueue(Node->getOperand(0));
- SDNode *ResNode = getTruncate(Input, NVT);
-
-#ifndef NDEBUG
- DOUT << std::string(Indent-2, ' ') << "=> ";
- DEBUG(ResNode->dump(CurDAG));
- DOUT << "\n";
- Indent -= 2;
-#endif
- return ResNode;
- break;
- }
-
case ISD::DECLARE: {
// Handle DECLARE nodes here because the second operand may have been
// wrapped in X86ISD::Wrapper.
SDValue Chain = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
SDValue N2 = Node->getOperand(2);
- if (!isa<FrameIndexSDNode>(N1))
- break;
- int FI = cast<FrameIndexSDNode>(N1)->getIndex();
+ FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);
+
+ // FIXME: We need to handle this for VLAs.
+ if (!FINode) {
+ ReplaceUses(N.getValue(0), Chain);
+ return NULL;
+ }
+
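+ // Under PIC the descriptor address may be (GlobalBaseReg + Wrapper(GV));
+ // strip the base add so the Wrapper check below sees the symbol directly.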
if (N2.getOpcode() == ISD::ADD &&
N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
N2 = N2.getOperand(1);
- if (N2.getOpcode() == X86ISD::Wrapper &&
- isa<GlobalAddressSDNode>(N2.getOperand(0))) {
- GlobalValue *GV =
- cast<GlobalAddressSDNode>(N2.getOperand(0))->getGlobal();
- SDValue Tmp1 = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
- AddToISelQueue(Chain);
- SDValue Ops[] = { Tmp1, Tmp2, Chain };
- return CurDAG->getTargetNode(TargetInstrInfo::DECLARE,
- MVT::Other, Ops, 3);
+
+ // If N2 is not Wrapper(descriptor), then the llvm.declare is mangled
+ // somehow; just ignore it.
+ if (N2.getOpcode() != X86ISD::Wrapper &&
+ N2.getOpcode() != X86ISD::WrapperRIP) {
+ ReplaceUses(N.getValue(0), Chain);
+ return NULL;
}
- break;
+ GlobalAddressSDNode *GVNode =
+ dyn_cast<GlobalAddressSDNode>(N2.getOperand(0));
+ if (GVNode == 0) {
+ ReplaceUses(N.getValue(0), Chain);
+ return NULL;
+ }
+ SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
+ TLI.getPointerTy());
+ SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GVNode->getGlobal(),
+ TLI.getPointerTy());
+ SDValue Ops[] = { Tmp1, Tmp2, Chain };
+ return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
+ MVT::Other, Ops,
+ array_lengthof(Ops));
}
}
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
- if (ResNode == NULL || ResNode == N.Val)
- DEBUG(N.Val->dump(CurDAG));
+ if (ResNode == NULL || ResNode == N.getNode())
+ DEBUG(N.getNode()->dump(CurDAG));
else
DEBUG(ResNode->dump(CurDAG));
DOUT << "\n";
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
- std::vector<SDValue> &OutOps, SelectionDAG &DAG){
- SDValue Op0, Op1, Op2, Op3;
+ std::vector<SDValue> &OutOps) {
+ SDValue Op0, Op1, Op2, Op3, Op4;
switch (ConstraintCode) {
case 'o': // offsetable ??
case 'v': // not offsetable ??
default: return true;
case 'm': // memory
- if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
+ if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3, Op4))
return true;
break;
}
OutOps.push_back(Op1);
OutOps.push_back(Op2);
OutOps.push_back(Op3);
- AddToISelQueue(Op0);
- AddToISelQueue(Op1);
- AddToISelQueue(Op2);
- AddToISelQueue(Op3);
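+ // Also push the segment operand that x86 address modes now include.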
+ OutOps.push_back(Op4);
return false;
}
/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
///
-FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
- return new X86DAGToDAGISel(TM, Fast);
+FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
+ llvm::CodeGenOpt::Level OptLevel) {
+ return new X86DAGToDAGISel(TM, OptLevel);
}