setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::BUILD_VECTOR);
setTargetDAGCombine(ISD::SELECT);
- setTargetDAGCombine(ISD::AND);
setTargetDAGCombine(ISD::SHL);
setTargetDAGCombine(ISD::SRA);
setTargetDAGCombine(ISD::SRL);
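// Each setTargetDAGCombine call registers interest in an opcode so that the
// PerformDAGCombine dispatch switch (see the end of this patch) is invoked
// for nodes of that kind.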
MCSymbol *
X86TargetLowering::getPICBaseSymbol(const MachineFunction *MF,
MCContext &Ctx) const {
const MCAsmInfo &MAI = *getTargetMachine().getMCAsmInfo();
- return Ctx.GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix())+
- Twine(MF->getFunctionNumber())+"$pb");
+ return Ctx.GetOrCreateTemporarySymbol(Twine(MAI.getPrivateGlobalPrefix())+
+ Twine(MF->getFunctionNumber())+"$pb");
}
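/// MatchingStackOffset - Return true if the given stack call argument is
/// already available at the same offset in the caller's incoming argument
/// area, so a tail call can leave it in place instead of copying it.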
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
const X86InstrInfo *TII) {
- int FI;
+ unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
+ int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
if (!VR || TargetRegisterInfo::isPhysicalRegister(VR))
if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) &&
Def->getOperand(1).isFI()) {
FI = Def->getOperand(1).getIndex();
- if (MFI->getObjectSize(FI) != Flags.getByValSize())
- return false;
+ Bytes = Flags.getByValSize();
} else
return false;
}
- } else {
- LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg);
- if (!Ld)
+ } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
+ if (Flags.isByVal())
+ // ByVal argument is passed in as a pointer but it's now being
+ // dereferenced. e.g.
+ // define @foo(%struct.X* %A) {
+ //   tail call @bar(%struct.X* byval %A)
+ // }
return false;
SDValue Ptr = Ld->getBasePtr();
FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
if (!FINode)
return false;
FI = FINode->getIndex();
- }
+ } else
+ return false;
+ assert(FI != INT_MAX);
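// Only fixed frame objects (the caller's incoming arguments) can be reused
// in place; both the offset and the byte size must match, since an object of
// a different size at the same offset would not cover the same stack bytes.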
if (!MFI->isFixedObjectIndex(FI))
return false;
- return Offset == MFI->getObjectOffset(FI);
+ return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
}
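// A minimal sketch (not part of this patch) of the expected call site in
// IsEligibleForTailCallOptimization below; VA, Arg and Flags stand for the
// per-argument location, value and flags in the caller's argument loop:
//   if (VA.isMemLoc() &&
//       !MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI, TII))
//     return false; // argument would have to be stored; not a safe tail call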
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
EVT IntPtr = getPointerTy();
EVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));
-
Chain = DAG.getCopyToReg(Chain, dl, X86::EAX, Size, Flag);
Flag = Chain.getValue(1);
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Ops[] = { Chain,
- DAG.getTargetExternalSymbol("_alloca", IntPtr),
- DAG.getRegister(X86::EAX, IntPtr),
- DAG.getRegister(X86StackPtr, SPTy),
- Flag };
- Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops, 5);
- Flag = Chain.getValue(1);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
- Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(0, true),
- DAG.getIntPtrConstant(0, true),
- Flag);
+ Chain = DAG.getNode(X86ISD::MINGW_ALLOCA, dl, NodeTys, Chain, Flag);
+ Flag = Chain.getValue(1);
Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1);
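// Design note: the CALLSEQ_START/CALLSEQ_END bracket and the explicit
// X86ISD::CALL to _alloca are replaced by a single MINGW_ALLOCA pseudo,
// which EmitLoweredMingwAlloca (added below) expands into the real call
// during instruction emission.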
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
case X86ISD::PTEST: return "X86ISD::PTEST";
case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
+ case X86ISD::MINGW_ALLOCA: return "X86ISD::MINGW_ALLOCA";
}
}
return BB;
}
+MachineBasicBlock *
+X86TargetLowering::EmitLoweredMingwAlloca(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ MachineFunction *F = BB->getParent();
+
+ // The lowering is pretty easy: we're just emitting the call to _alloca.
+ // The non-trivial part is the implicit def of ESP: _alloca reads its size
+ // argument from EAX and adjusts ESP itself, so both registers must be
+ // modeled as implicit uses and implicit defs of the call below.
+ // FIXME: The code should be tweaked once we try to do codegen for
+ // mingw-w64.
+
+ BuildMI(BB, DL, TII->get(X86::CALLpcrel32))
+ .addExternalSymbol("_alloca")
+ .addReg(X86::EAX, RegState::Implicit)
+ .addReg(X86::ESP, RegState::Implicit)
+ .addReg(X86::EAX, RegState::Define | RegState::Implicit)
+ .addReg(X86::ESP, RegState::Define | RegState::Implicit);
+
+ F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ return BB;
+}
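// Note: for MINGW_ALLOCA to reach EmitInstrWithCustomInserter, its pseudo
// definition in X86InstrInfo.td (not shown in this patch) is assumed to set
// usesCustomInserter = 1 and to list EAX/ESP in its Uses/Defs.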
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
switch (MI->getOpcode()) {
default: assert(false && "Unexpected instr type to insert");
+ case X86::MINGW_ALLOCA:
+ return EmitLoweredMingwAlloca(MI, BB, EM);
case X86::CMOV_GR8:
case X86::CMOV_V1I64:
case X86::CMOV_FR32:
F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
return BB;
}
+ // DBG_VALUE. Only the frame index case is done here: the raw frame-index
+ // operand is rewritten as a full X86 address mode (frame-index base),
+ // carrying over the original offset and metadata operands.
+ case X86::DBG_VALUE: {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ X86AddressMode AM;
+ MachineFunction *F = BB->getParent();
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = MI->getOperand(0).getImm();
+ addFullAddress(BuildMI(BB, DL, TII->get(X86::DBG_VALUE)), AM).
+ addImm(MI->getOperand(1).getImm()).
+ addMetadata(MI->getOperand(2).getMetadata());
+ F->DeleteMachineInstr(MI); // Remove pseudo.
+ return BB;
+ }
+
// String/text processing lowering.
case X86::PCMPISTRM128REG:
return EmitPCMP(MI, BB, 3, false /* in-mem */);
return SDValue();
}
-/// PerformANDCombine - Look for SSE and instructions of this form:
-/// (and x, (build_vector signbit,signbit,signbit,signbit)). If there
-/// exists a use of a build_vector that's the bitwise complement of the mask,
-/// then transform the node to
-/// (and (xor x, (build_vector -1,-1,-1,-1)), (build_vector ~sb,~sb,~sb,~sb)).
-static SDValue PerformANDCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
- EVT VT = N->getValueType(0);
- if (!VT.isVector() || !VT.isInteger())
- return SDValue();
-
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- if (N0.getOpcode() == ISD::XOR || !N1.hasOneUse())
- return SDValue();
-
- if (N1.getOpcode() == ISD::BUILD_VECTOR) {
- unsigned NumElts = VT.getVectorNumElements();
- EVT EltVT = VT.getVectorElementType();
- SmallVector<SDValue, 8> Mask;
- Mask.reserve(NumElts);
- for (unsigned i = 0; i != NumElts; ++i) {
- SDValue Arg = N1.getOperand(i);
- if (Arg.getOpcode() == ISD::UNDEF) {
- Mask.push_back(Arg);
- continue;
- }
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(Arg);
- if (!C)
- return SDValue();
- if (!C->getAPIntValue().isSignBit() &&
- !C->getAPIntValue().isMaxSignedValue())
- return SDValue();
- Mask.push_back(DAG.getConstant(~C->getAPIntValue(), EltVT));
- }
- N1 = DAG.getNode(ISD::BUILD_VECTOR, N1.getDebugLoc(), VT,
- &Mask[0], NumElts);
- if (!N1.use_empty()) {
- unsigned Bits = EltVT.getSizeInBits();
- Mask.clear();
- for (unsigned i = 0; i != NumElts; ++i)
- Mask.push_back(DAG.getConstant(APInt::getAllOnesValue(Bits), EltVT));
- SDValue NewMask = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
- VT, &Mask[0], NumElts);
- return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
- DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
- N0, NewMask), N1);
- }
- }
-
- return SDValue();
-}
/// PerformMulCombine - Optimize a single multiply with constant into two
/// in order to implement it with two cheaper instructions, e.g.
case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI);
- case ISD::AND: return PerformANDCombine(N, DAG, DCI);
case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
case ISD::SHL:
case ISD::SRA: