//===----------------------------------------------------------------------===//
#include "PPCISelLowering.h"
+#include "PPCMachineFunctionInfo.h"
+#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/VectorExtras.h"
PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
: TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {
- // Fold away setcc operations if possible.
- setSetCCIsExpensive();
setPow2DivIsCheap();
// Use _setjmp/_longjmp instead of setjmp/longjmp.
- setUseUnderscoreSetJmpLongJmp(true);
+ setUseUnderscoreSetJmp(true);
+ setUseUnderscoreLongJmp(true);
// Set up the register classes.
addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
// FIXME - use subtarget debug flags
if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
- setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::LABEL, MVT::Other, Expand);
// We want to legalize GlobalAddress and ConstantPool nodes into the
// appropriate instructions to materialize the address.
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
- setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Expand);
+ setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
// They also have instructions for converting between i64 and fp.
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
-
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
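+ // There are no PPC instructions for these unsigned conversions; expand them.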
+
// FIXME: disable this lowered code. This generates 64-bit register values,
// and we don't model the fact that the top part is clobbered by calls. We
// need to flag these together so that the value isn't live across a call.
case PPCISD::VPERM: return "PPCISD::VPERM";
case PPCISD::Hi: return "PPCISD::Hi";
case PPCISD::Lo: return "PPCISD::Lo";
+ case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
case PPCISD::SRL: return "PPCISD::SRL";
case PPCISD::SRA: return "PPCISD::SRA";
MVT::ValueType VT;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
Ptr = LD->getBasePtr();
- VT = LD->getValueType(0);
+ VT = LD->getLoadedVT();
+
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
ST = ST;
Ptr = ST->getBasePtr();
VT = ST->getStoredVT();
- return false; // TODO: Stores.
} else
return false;
if (MVT::isVector(VT))
return false;
- // TODO: Handle reg+reg.
- if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
- return false;
+ // TODO: Check reg+reg first.
+
+ // LDU/STU use reg+imm*4, others use reg+imm.
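+ // (The DS-form update instructions encode their displacement as a multiple of 4.)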
+ if (VT != MVT::i64) {
+ // reg + imm
+ if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
+ return false;
+ } else {
+ // reg + imm * 4.
+ if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
+ return false;
+ }
- // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
- // sext i32 to i64 when addr mode is r+i.
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+ // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
+ // sext i32 to i64 when addr mode is r+i.
if (LD->getValueType(0) == MVT::i64 && LD->getLoadedVT() == MVT::i32 &&
LD->getExtensionType() == ISD::SEXTLOAD &&
isa<ConstantSDNode>(Offset))
return false;
- }
+ }
AM = ISD::PRE_INC;
return true;
Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
- if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
- (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
+ if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
return Lo;
// If the global is weak or external, we have to go through the lazy
}
// If we have an integer seteq/setne, turn it into a compare against zero
- // by subtracting the rhs from the lhs, which is faster than setting a
- // condition register, reading it back out, and masking the correct bit.
+ // by xor'ing the rhs with the lhs, which is faster than setting a
+ // condition register, reading it back out, and masking the correct bit. The
+ // normal approach here uses sub to do this instead of xor. Using xor exposes
+ // the result to other bit-twiddling opportunities.
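+ // For example, (seteq X, Y) becomes (seteq (xor X, Y), 0).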
MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
MVT::ValueType VT = Op.getValueType();
- SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0),
+ SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
Op.getOperand(1));
return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
}
SmallVector<SDOperand, 8> ArgValues;
SDOperand Root = Op.getOperand(0);
- unsigned ArgOffset = 24;
- const unsigned Num_GPR_Regs = 8;
- const unsigned Num_FPR_Regs = 13;
- const unsigned Num_VR_Regs = 12;
- unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ bool isPPC64 = PtrVT == MVT::i64;
+ unsigned PtrByteSize = isPPC64 ? 8 : 4;
+
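+ // The linkage area is 24 bytes on 32-bit PPC and 48 bytes on 64-bit PPC.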
+ unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64);
static const unsigned GPR_32[] = { // 32-bit registers.
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
- bool isPPC64 = PtrVT == MVT::i64;
+ const unsigned Num_GPR_Regs = sizeof(GPR_32)/sizeof(GPR_32[0]);
+ const unsigned Num_FPR_Regs = sizeof(FPR)/sizeof(FPR[0]);
+ const unsigned Num_VR_Regs = sizeof( VR)/sizeof( VR[0]);
+
+ unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+
const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
// Add DAG nodes to load the arguments or copy them out of registers. On
- // entry to a function on PPC, the arguments start at offset 24, although the
- // first ones are often in registers.
+ // entry to a function on PPC, the arguments start after the linkage area,
+ // although the first ones are often in registers.
for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
SDOperand ArgVal;
bool needsLoad = false;
MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;
+ unsigned ArgSize = ObjSize;
unsigned CurArgOffset = ArgOffset;
switch (ObjectVT) {
default: assert(0 && "Unhandled argument type!");
case MVT::i32:
// All int arguments reserve stack space.
- ArgOffset += isPPC64 ? 8 : 4;
+ ArgOffset += PtrByteSize;
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
++GPR_idx;
} else {
needsLoad = true;
+ ArgSize = PtrByteSize;
}
break;
case MVT::i64: // PPC64
case MVT::f32:
case MVT::f64:
// All FP arguments reserve stack space.
- ArgOffset += ObjSize;
+ ArgOffset += isPPC64 ? 8 : ObjSize;
// Every 4 bytes of argument space consumes one of the GPRs available for
// argument passing on PPC32; on PPC64 each 8-byte slot consumes one GPR.
if (GPR_idx != Num_GPR_Regs) {
++GPR_idx;
- if (ObjSize == 8 && GPR_idx != Num_GPR_Regs)
+ if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
++GPR_idx;
}
if (FPR_idx != Num_FPR_Regs) {
// If the argument is actually used, emit a load from the right stack
// slot.
if (!Op.Val->hasNUsesOfValue(0, ArgNo)) {
- int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset);
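+ // Smaller objects are right-justified within their slot on big-endian PPC.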
+ int FI = MFI->CreateFixedObject(ObjSize,
+ CurArgOffset + (ArgSize - ObjSize));
SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
} else {
// result of va_next.
SmallVector<SDOperand, 8> MemOps;
for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
- unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
+ unsigned VReg;
+ if (isPPC64)
+ VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
+ else
+ VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
+
MF.addLiveIn(GPR[GPR_idx], VReg);
SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
return DAG.getConstant((int)C->getValue() >> 2, MVT::i32).Val;
}
-
static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
SDOperand Chain = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
unsigned PtrByteSize = isPPC64 ? 8 : 4;
-
// args_to_use will accumulate outgoing args for the PPCISD::CALL case in
// SelectExpr to use to put the arguments in the appropriate registers.
// Count how many bytes are to be pushed on the stack, including the linkage
// area, and parameter passing area. We start with 24/48 bytes, which is
// prereserved space for [SP][CR][LR][3 x unused].
- unsigned NumBytes = 6*PtrByteSize;
+ unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64);
// Add up all the space actually used.
- for (unsigned i = 0; i != NumOps; ++i)
- NumBytes += MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8;
+ for (unsigned i = 0; i != NumOps; ++i) {
+ unsigned ArgSize = MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8;
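+ // Round each argument up to at least one full pointer-sized slot.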
+ ArgSize = std::max(ArgSize, PtrByteSize);
+ NumBytes += ArgSize;
+ }
// The prolog code of the callee may store up to 8 GPR argument registers to
// the stack, allowing va_start to index over them in memory if the function is varargs.
// Because we cannot tell if this is needed on the caller side, we have to
// conservatively assume that it is needed. As such, make sure we have at
// least enough stack space for the caller to store the 8 GPRs.
- if (NumBytes < 6*PtrByteSize+8*PtrByteSize)
- NumBytes = 6*PtrByteSize+8*PtrByteSize;
+ NumBytes = std::max(NumBytes, PPCFrameInfo::getMinCallFrameSize(isPPC64));
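+ // (That minimum is 24+32 = 56 bytes on PPC32 and 48+64 = 112 bytes on PPC64.)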
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
// memory. Also, if this is a vararg function, floating point operations
// must be stored to our stack, and loaded into integer regs as well, if
// any integer regs are available for argument passing.
- unsigned ArgOffset = 6*PtrByteSize;
+ unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64);
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+
static const unsigned GPR_32[] = { // 32-bit registers.
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
break;
case MVT::f32:
case MVT::f64:
+ if (isVarArg && isPPC64) {
+ // Float varargs need to be promoted to double.
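+ // (This follows C's default argument promotions.)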
+ if (Arg.getValueType() == MVT::f32)
+ Arg = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Arg);
+ }
+
if (FPR_idx != NumFPRs) {
RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
- if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64) {
+ if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
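+ // On PPC32 an f64 spans two GPRs, so pass the second word as well.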
SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType());
PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour);
SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
}
+static SDOperand LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG,
+ const PPCSubtarget &Subtarget) {
+ // When we pop the dynamic allocation we need to restore the SP link.
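+ // The PowerPC ABI keeps a back-chain word at 0(SP) pointing to the previous frame.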
+
+ // Get the correct type for pointers.
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+
+ // Construct the stack pointer operand.
+ bool IsPPC64 = Subtarget.isPPC64();
+ unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1;
+ SDOperand StackPtr = DAG.getRegister(SP, PtrVT);
+
+ // Get the operands for the STACKRESTORE.
+ SDOperand Chain = Op.getOperand(0);
+ SDOperand SaveSP = Op.getOperand(1);
+
+ // Load the old link SP.
+ SDOperand LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0);
+
+ // Restore the stack pointer.
+ Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP);
+
+ // Store the old link at the new stack top to preserve the back-chain.
+ return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0);
+}
+
+static SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG,
+ const PPCSubtarget &Subtarget) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ bool IsPPC64 = Subtarget.isPPC64();
+
+ // Get current frame pointer save index. The users of this index will be
+ // primarily DYNALLOC instructions.
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+ int FPSI = FI->getFramePointerSaveIndex();
+
+ // If the frame pointer save index hasn't been defined yet, set it up.
+ if (!FPSI) {
+ // Find out the fixed offset of the frame pointer save area.
+ int Offset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64);
+ // Allocate the frame index for the frame pointer save area.
+ FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, Offset);
+ // Save the result.
+ FI->setFramePointerSaveIndex(FPSI);
+ }
+
+ // Get the inputs.
+ SDOperand Chain = Op.getOperand(0);
+ SDOperand Size = Op.getOperand(1);
+
+ // Get the correct type for pointers.
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ // Negate the size.
+ SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT,
+ DAG.getConstant(0, PtrVT), Size);
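+ // The stack grows down, so the allocation is a negative adjustment of SP.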
+ // Construct a node for the frame pointer save index.
+ SDOperand FPSIdx = DAG.getFrameIndex(FPSI, PtrVT);
+ // Build a DYNALLOC node.
+ SDOperand Ops[3] = { Chain, NegSize, FPSIdx };
+ SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
+ return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3);
+}
+
+
/// LowerSELECT_CC - Lower floating point select_cc's into the fsel instruction
/// when possible.
static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT,
SelectionDAG &DAG) {
assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
-
- // Force vspltis[hw] -1 to vspltisb -1.
- if (Val == -1) SplatSize = 1;
-
+
static const MVT::ValueType VTys[] = { // canonical VT to use for each size.
MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
};
+
+ MVT::ValueType ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
+
+ // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
+ if (Val == -1)
+ SplatSize = 1;
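+ // (-1 is all ones at any element width, so the byte form covers every case.)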
+
MVT::ValueType CanonicalVT = VTys[SplatSize-1];
// Build a canonical splat for this value.
Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt);
SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT,
&Ops[0], Ops.size());
- return DAG.getNode(ISD::BIT_CONVERT, VT, Res);
+ return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res);
}
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
-1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
-8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
};
+
for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){
// Indirect through the SplatCsts array so that we favor 'vsplti -1' for
// cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1'
// vsplti + shl self.
if (SextVal == (i << (int)TypeShiftAmt)) {
- Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
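+ // Build the splat in its canonical type; bitcast to the result type below.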
+ SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
Intrinsic::ppc_altivec_vslw
};
- return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
}
// vsplti + srl self.
if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
- Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
+ SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
Intrinsic::ppc_altivec_vsrw
};
- return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
}
// vsplti + sra self.
if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
- Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
+ SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
Intrinsic::ppc_altivec_vsraw
};
- return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
}
// vsplti + rol self.
if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
- Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
+ SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
Intrinsic::ppc_altivec_vrlw
};
- return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
}
// t = vsplti c, result = vsldoi t, t, 1
// Odd, in range [17,31]: (vsplti C)-(vsplti -16).
if (SextVal >= 0 && SextVal <= 31) {
- SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, Op.getValueType(),DAG);
- SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
- return DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS);
+ SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG);
+ SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
+ LHS = DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
}
// Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
if (SextVal >= -31 && SextVal <= 0) {
- SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, Op.getValueType(),DAG);
- SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
- return DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS);
+ SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG);
+ SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
+ LHS = DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
}
}
return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
+ case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
+ case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
+ PPCSubTarget);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
MachineBasicBlock *
PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
MachineBasicBlock *BB) {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
assert((MI->getOpcode() == PPC::SELECT_CC_I4 ||
MI->getOpcode() == PPC::SELECT_CC_I8 ||
MI->getOpcode() == PPC::SELECT_CC_F4 ||
MachineBasicBlock *thisMBB = BB;
MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
- BuildMI(BB, MI->getOperand(4).getImmedValue(), 2)
- .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+ unsigned SelectPred = MI->getOperand(4).getImm();
+ BuildMI(BB, TII->get(PPC::BCC))
+ .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
MachineFunction *F = BB->getParent();
F->getBasicBlockList().insert(It, copy0MBB);
F->getBasicBlockList().insert(It, sinkMBB);
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg())
+ BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);
// Unpack the result based on how the target uses it.
- unsigned CompOpc;
+ PPC::Predicate CompOpc;
switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
default: // Can't happen, don't crash on invalid number though.
case 0: // Branch on the value of the EQ bit of CR6.
- CompOpc = BranchOnWhenPredTrue ? PPC::BEQ : PPC::BNE;
+ CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
break;
case 1: // Branch on the inverted value of the EQ bit of CR6.
- CompOpc = BranchOnWhenPredTrue ? PPC::BNE : PPC::BEQ;
+ CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
break;
case 2: // Branch on the value of the LT bit of CR6.
- CompOpc = BranchOnWhenPredTrue ? PPC::BLT : PPC::BGE;
+ CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
break;
case 3: // Branch on the inverted value of the LT bit of CR6.
- CompOpc = BranchOnWhenPredTrue ? PPC::BGE : PPC::BLT;
+ CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
break;
}
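+ // Note the operand order: the predicate constant now precedes the CR register.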
return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
- DAG.getRegister(PPC::CR6, MVT::i32),
DAG.getConstant(CompOpc, MVT::i32),
+ DAG.getRegister(PPC::CR6, MVT::i32),
N->getOperand(4), CompNode.getValue(1));
}
break;