#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#include <cstdint>
using namespace llvm;
#define DEBUG_TYPE "x86-isel"
inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp, SDValue &Segment) {
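+  // Materialize the matched X86ISelAddressMode as the five operands
+  // (base, scale, index, displacement, segment) that make up an x86
+  // memory reference.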
- Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
- CurDAG->getTargetFrameIndex(AM.Base_FrameIndex,
- getTargetLowering()->getPointerTy()) :
- AM.Base_Reg;
+ Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
+ ? CurDAG->getTargetFrameIndex(AM.Base_FrameIndex,
+ TLI->getPointerTy())
+ : AM.Base_Reg;
Scale = getI8Imm(AM.Scale);
Index = AM.IndexReg;
// These are 32-bit even in 64-bit mode since RIP relative offset
// is 32 bits.
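+// (RIP-relative addressing encodes a signed disp32, so a target more
+// than +/-2 GiB away cannot be reached this way.)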
const X86InstrInfo *getInstrInfo() const {
return getTargetMachine().getSubtargetImpl()->getInstrInfo();
}
+
+ /// \brief Address-mode matching performs shift-of-and to and-of-shift
+ /// reassociation in order to expose more scaled addressing
+ /// opportunities.
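+ /// For example, (shl (and X, 0xff), 2) may be rewritten as
+ /// (and (shl X, 2), 0x3fc), letting the matcher fold the shift into
+ /// the address as a scale of 4; because the rewrite mutates the DAG
+ /// in place, this hook must return true.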
+ bool ComplexPatternFuncMutatesDAG() const override {
+ return true;
+ }
};
}
// If the source and destination are SSE registers, then this is a legal
// conversion that should not be lowered.
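+// (e.g. an f32 to f64 extension with both values in XMM registers maps
+// to cvtss2sd and needs no x87 stack round trip)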
const X86TargetLowering *X86Lowering =
- static_cast<const X86TargetLowering *>(getTargetLowering());
+ static_cast<const X86TargetLowering *>(TLI);
bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
if (SrcIsSSE && DstIsSSE)
}
}
-// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff)" if safe. This
-// allows us to convert the shift and and into an h-register extract and
-// a scaled index. Returns false if the simplification is performed.
+// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
+// safe. This allows us to convert the shift and the AND into an h-register
+// extract and a scaled index. Returns false if the simplification is
+// performed, following the address-matching convention that false means
+// the node was handled.
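+// For example, with C1 == 2: (X >> 6) & 0x3fc becomes
+// ((X >> 8) & 0xff) << 2, i.e. an h-register extract scaled by 4.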
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
uint64_t Mask,
SDValue Shift, SDValue X,
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
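+  // Wrap the register in a pointer-width register node so it can be used
+  // directly as the base of an address.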
- return CurDAG->getRegister(GlobalBaseReg,
- getTargetLowering()->getPointerTy()).getNode();
+ return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy()).getNode();
}
/// Atomic opcode table
static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
SDLoc dl,
enum AtomicOpc &Op, MVT NVT,
- SDValue Val) {
+ SDValue Val,
+ const X86Subtarget *Subtarget) {
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
int64_t CNVal = CN->getSExtValue();
// Quit if not 32-bit imm.
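+    // (The cast-and-compare catches any value outside
+    // [INT32_MIN, INT32_MAX]; e.g. 0x100000000 truncates to 0.)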
if ((int32_t)CNVal != CNVal)
return Val;
+ // Quit if INT32_MIN: negating it (it is negative) would overflow,
+ // producing an immediate that does not fit in the 32 bits available
+ // for an immediate operand to SUB. It still fits in 32 bits for ADD
+ // (where it is not negated), so we can return a target constant.
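+ // (For instance, -(INT32_MIN) is 2147483648, which needs 33 bits as
+ // a signed value.)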
+ if (CNVal == INT32_MIN)
+ return CurDAG->getTargetConstant(CNVal, NVT);
// For atomic-load-add, we could do some optimizations.
if (Op == ADD) {
// Translate to INC/DEC if ADD by 1 or -1.
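+ // (INC/DEC leave CF unmodified; the resulting partial EFLAGS update
+ // is slow on some cores, which slowIncDec() reports.)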
- if ((CNVal == 1) || (CNVal == -1)) {
+ if (((CNVal == 1) || (CNVal == -1)) && !Subtarget->slowIncDec()) {
Op = (CNVal == 1) ? INC : DEC;
// No more constant operand after being translated into INC/DEC.
return SDValue();
break;
}
- Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val);
+ Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val, Subtarget);
bool isUnOp = !Val.getNode();
bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);
Opc = AtomicOpcTbl[Op][SextConstantI64];
else if (i64immSExt32(Val.getNode()))
Opc = AtomicOpcTbl[Op][ConstantI64];
+ else
+ llvm_unreachable("True 64-bit constant in SelectAtomicLoadArith");
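+ // (True 64-bit constants never reach this point:
+ // getAtomicLoadArithTargetConstant returns them as plain constants,
+ // so isCN is false and this branch is skipped.)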
} else
Opc = AtomicOpcTbl[Op][I64];
break;