//===----------------------------------------------------------------------===//
#include "PPCISelLowering.h"
+#include "PPCMachineFunctionInfo.h"
+#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
-PPCTargetLowering::PPCTargetLowering(TargetMachine &TM)
- : TargetLowering(TM) {
+static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc");
+
+PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
+ : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {
- // Fold away setcc operations if possible.
- setSetCCIsExpensive();
setPow2DivIsCheap();
// Use _setjmp/_longjmp instead of setjmp/longjmp.
- setUseUnderscoreSetJmpLongJmp(true);
+ setUseUnderscoreSetJmp(true);
+ setUseUnderscoreLongJmp(true);
// Set up the register classes.
addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
+ // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
+ setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
+ setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
+
+ // PowerPC does not have truncstore for i1.
+ setStoreXAction(MVT::i1, Promote);
+
+ // PowerPC has pre-inc loads and stores.
+ setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
+ setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
+ setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
+ setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
+ setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
+ setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
+ setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
+ setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
+ setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
+ setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
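+ // (Roughly: a load from ptr+4 that also updates ptr to ptr+4 can then be
+ // selected as a single update-form instruction such as lwzu, which both loads
+ // and writes the incremented address back into the base register. This is a
+ // sketch of the intent; the actual matching is done in
+ // getPreIndexedAddressParts below.)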
+
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
setOperationAction(ISD::MEMSET, MVT::Other, Expand);
setOperationAction(ISD::MEMCPY, MVT::Other, Expand);
- // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
- setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
- setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);
-
// PowerPC has no SREM/UREM instructions
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+ setOperationAction(ISD::UREM, MVT::i64, Expand);
// We don't support sin/cos/sqrt/fmod
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
+ setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
+ setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
// PowerPC does not have ROTR
setOperationAction(ISD::ROTR, MVT::i32 , Expand);
// PowerPC does not have Select
setOperationAction(ISD::SELECT, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::i64, Expand);
setOperationAction(ISD::SELECT, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::f64, Expand);
// PowerPC does not have BRCOND which requires SetCC
setOperationAction(ISD::BRCOND, MVT::Other, Expand);
+
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
// PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
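+ // (Roughly: fctiwz converts in an FPR, the result is stored to a stack slot
+ // with stfd, and the integer word is then loaded back with lwz.)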
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
+ setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
+ setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);
- // PowerPC does not have truncstore for i1.
- setOperationAction(ISD::TRUNCSTORE, MVT::i1, Promote);
-
+ // We cannot sextinreg(i1). Expand to shifts.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
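+ // (e.g. sext_inreg(X, i1) on an i32 value is expanded by the legalizer into
+ // roughly (X << 31) >>s 31, which on PPC becomes a slwi/srawi pair.)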
+
+
// Support label based line numbers.
setOperationAction(ISD::LOCATION, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
// FIXME - use subtarget debug flags
if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
- setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::LABEL, MVT::Other, Expand);
// We want to legalize GlobalAddress and ConstantPool nodes into the
// appropriate instructions to materialize the address.
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
-
+ setOperationAction(ISD::JumpTable, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+ setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
+ setOperationAction(ISD::JumpTable, MVT::i64, Custom);
+
// RET must be custom lowered, to meet ABI requirements
setOperationAction(ISD::RET , MVT::Other, Custom);
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
- setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
-
+ setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
+
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
+ if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
// They also have instructions for converting between i64 and fp.
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
-
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
+
// FIXME: disable this lowered code. This generates 64-bit register values,
// and we don't model the fact that the top part is clobbered by calls. We
// need to flag these together so that the value isn't live across a call.
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
}
- if (TM.getSubtarget<PPCSubtarget>().has64BitRegs()) {
+ if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
// 64 bit PowerPC implementations can support i64 types directly
addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
// BUILD_PAIR can't be handled natively, and should be expanded to shl/or
setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
} else {
// 32 bit PowerPC wants to expand i64 shifts itself.
- setOperationAction(ISD::SHL, MVT::i64, Custom);
- setOperationAction(ISD::SRL, MVT::i64, Custom);
- setOperationAction(ISD::SRA, MVT::i64, Custom);
+ setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
}
if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);
setOperationAction(ISD::MUL, MVT::v4f32, Legal);
+ setOperationAction(ISD::MUL, MVT::v4i32, Custom);
+ setOperationAction(ISD::MUL, MVT::v8i16, Custom);
+ setOperationAction(ISD::MUL, MVT::v16i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
}
+ setSetCCResultType(MVT::i32);
+ setShiftAmountType(MVT::i32);
setSetCCResultContents(ZeroOrOneSetCCResult);
- setStackPointerRegisterToSaveRestore(PPC::R1);
+
+ if (TM.getSubtarget<PPCSubtarget>().isPPC64())
+ setStackPointerRegisterToSaveRestore(PPC::X1);
+ else
+ setStackPointerRegisterToSaveRestore(PPC::R1);
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::SINT_TO_FP);
setTargetDAGCombine(ISD::STORE);
+ setTargetDAGCombine(ISD::BR_CC);
+ setTargetDAGCombine(ISD::BSWAP);
computeRegisterProperties();
}
case PPCISD::VPERM: return "PPCISD::VPERM";
case PPCISD::Hi: return "PPCISD::Hi";
case PPCISD::Lo: return "PPCISD::Lo";
+ case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
case PPCISD::SRL: return "PPCISD::SRL";
case PPCISD::SRA: return "PPCISD::SRA";
case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32";
case PPCISD::STD_32: return "PPCISD::STD_32";
case PPCISD::CALL: return "PPCISD::CALL";
+ case PPCISD::MTCTR: return "PPCISD::MTCTR";
+ case PPCISD::BCTRL: return "PPCISD::BCTRL";
case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
case PPCISD::MFCR: return "PPCISD::MFCR";
case PPCISD::VCMP: return "PPCISD::VCMP";
case PPCISD::VCMPo: return "PPCISD::VCMPo";
+ case PPCISD::LBRX: return "PPCISD::LBRX";
+ case PPCISD::STBRX: return "PPCISD::STBRX";
+ case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
}
}
static bool isFloatingPointZero(SDOperand Op) {
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
- else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
+ else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) {
// Maybe this has already been legalized into the constant pool?
if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
}
return false;
return SDOperand();
}
+//===----------------------------------------------------------------------===//
+// Addressing Mode Selection
+//===----------------------------------------------------------------------===//
+
+/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
+/// or 64-bit immediate, and if the value can be accurately represented as a
+/// sign extension from a 16-bit value. If so, this returns true and stores
+/// the immediate value in Imm.
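+/// For example, 0x7FFF qualifies but 0x8000 does not, since sign-extending the
+/// low 16 bits of 0x8000 yields -32768 rather than the original value.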
+static bool isIntS16Immediate(SDNode *N, short &Imm) {
+ if (N->getOpcode() != ISD::Constant)
+ return false;
+
+ Imm = (short)cast<ConstantSDNode>(N)->getValue();
+ if (N->getValueType(0) == MVT::i32)
+ return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue();
+ else
+ return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue();
+}
+static bool isIntS16Immediate(SDOperand Op, short &Imm) {
+ return isIntS16Immediate(Op.Val, Imm);
+}
+
+
+/// SelectAddressRegReg - Given the specified address, check to see if it
+/// can be represented as an indexed [r+r] operation. Returns false if it
+/// can be more efficiently represented with [r+imm].
+bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base,
+ SDOperand &Index,
+ SelectionDAG &DAG) {
+ short imm = 0;
+ if (N.getOpcode() == ISD::ADD) {
+ if (isIntS16Immediate(N.getOperand(1), imm))
+ return false; // r+i
+ if (N.getOperand(1).getOpcode() == PPCISD::Lo)
+ return false; // r+i
+
+ Base = N.getOperand(0);
+ Index = N.getOperand(1);
+ return true;
+ } else if (N.getOpcode() == ISD::OR) {
+ if (isIntS16Immediate(N.getOperand(1), imm))
+ return false; // r+i: let the [r+imm] form fold the constant.
+
+ // If this is an or of disjoint bitfields, we can codegen this as an add
+ // (for better address arithmetic) if the LHS and RHS of the OR are provably
+ // disjoint.
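+ // For example, (X & ~15) | (Y & 15) computes exactly X + (Y & 15), since no
+ // carry can propagate between the disjoint bit ranges.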
+ uint64_t LHSKnownZero, LHSKnownOne;
+ uint64_t RHSKnownZero, RHSKnownOne;
+ ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
+
+ if (LHSKnownZero) {
+ ComputeMaskedBits(N.getOperand(1), ~0U, RHSKnownZero, RHSKnownOne);
+ // If all of the bits are known zero on the LHS or RHS, the add won't
+ // carry.
+ if ((LHSKnownZero | RHSKnownZero) == ~0U) {
+ Base = N.getOperand(0);
+ Index = N.getOperand(1);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/// Returns true if the address N can be represented by a base register plus
+/// a signed 16-bit displacement [r+imm], and if it is not better
+/// represented as reg+reg.
+bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp,
+ SDOperand &Base, SelectionDAG &DAG){
+ // If this can be more profitably realized as r+r, fail.
+ if (SelectAddressRegReg(N, Disp, Base, DAG))
+ return false;
+
+ if (N.getOpcode() == ISD::ADD) {
+ short imm = 0;
+ if (isIntS16Immediate(N.getOperand(1), imm)) {
+ Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
+ Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
+ } else {
+ Base = N.getOperand(0);
+ }
+ return true; // [r+i]
+ } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
+ // Match LOAD (ADD (X, Lo(G))).
+ assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
+ && "Cannot handle constant offsets yet!");
+ Disp = N.getOperand(1).getOperand(0); // The global address.
+ assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
+ Disp.getOpcode() == ISD::TargetConstantPool ||
+ Disp.getOpcode() == ISD::TargetJumpTable);
+ Base = N.getOperand(0);
+ return true; // [&g+r]
+ }
+ } else if (N.getOpcode() == ISD::OR) {
+ short imm = 0;
+ if (isIntS16Immediate(N.getOperand(1), imm)) {
+ // If this is an or of disjoint bitfields, we can codegen this as an add
+ // (for better address arithmetic) if the LHS and RHS of the OR are
+ // provably disjoint.
+ uint64_t LHSKnownZero, LHSKnownOne;
+ ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
+ if ((LHSKnownZero|~(unsigned)imm) == ~0U) {
+ // If all of the bits are known zero on the LHS or RHS, the add won't
+ // carry.
+ Base = N.getOperand(0);
+ Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
+ return true;
+ }
+ }
+ } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
+ // Loading from a constant address.
+
+ // If this address fits entirely in a 16-bit sext immediate field, codegen
+ // this as "d, 0"
+ short Imm;
+ if (isIntS16Immediate(CN, Imm)) {
+ Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
+ Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
+ return true;
+ }
+
+ // FIXME: Handle small sext constant offsets in PPC64 mode also!
+ if (CN->getValueType(0) == MVT::i32) {
+ int Addr = (int)CN->getValue();
+
+ // Otherwise, break this down into an LIS + disp.
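+ // e.g. 0x12348000 becomes Base = 0x12350000 (materialized with lis) and
+ // Disp = -0x8000; the hardware's base+displacement add recreates the
+ // original address.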
+ Disp = DAG.getTargetConstant((short)Addr, MVT::i32);
+ Base = DAG.getConstant(Addr - (signed short)Addr, MVT::i32);
+ return true;
+ }
+ }
+
+ Disp = DAG.getTargetConstant(0, getPointerTy());
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
+ Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
+ else
+ Base = N;
+ return true; // [r+0]
+}
+
+/// SelectAddressRegRegOnly - Given the specified address, force it to be
+/// represented as an indexed [r+r] operation.
+bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base,
+ SDOperand &Index,
+ SelectionDAG &DAG) {
+ // Check to see if we can easily represent this as an [r+r] address. This
+ // will fail if it thinks that the address is more profitably represented as
+ // reg+imm, e.g. where imm = 0.
+ if (SelectAddressRegReg(N, Base, Index, DAG))
+ return true;
+
+ // If the operand is an addition, always emit this as [r+r], since this is
+ // better (for code size, and execution, as the memop does the add for free)
+ // than emitting an explicit add.
+ if (N.getOpcode() == ISD::ADD) {
+ Base = N.getOperand(0);
+ Index = N.getOperand(1);
+ return true;
+ }
+
+ // Otherwise, do it the hard way, using R0 as the base register.
+ Base = DAG.getRegister(PPC::R0, N.getValueType());
+ Index = N;
+ return true;
+}
+
+/// SelectAddressRegImmShift - Returns true if the address N can be
+/// represented by a base register plus a signed 14-bit displacement
+/// [r+imm*4]. Suitable for use by STD and friends.
+bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp,
+ SDOperand &Base,
+ SelectionDAG &DAG) {
+ // If this can be more profitably realized as r+r, fail.
+ if (SelectAddressRegReg(N, Disp, Base, DAG))
+ return false;
+
+ if (N.getOpcode() == ISD::ADD) {
+ short imm = 0;
+ if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
+ Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
+ Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
+ } else {
+ Base = N.getOperand(0);
+ }
+ return true; // [r+i]
+ } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
+ // Match LOAD (ADD (X, Lo(G))).
+ assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
+ && "Cannot handle constant offsets yet!");
+ Disp = N.getOperand(1).getOperand(0); // The global address.
+ assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
+ Disp.getOpcode() == ISD::TargetConstantPool ||
+ Disp.getOpcode() == ISD::TargetJumpTable);
+ Base = N.getOperand(0);
+ return true; // [&g+r]
+ }
+ } else if (N.getOpcode() == ISD::OR) {
+ short imm = 0;
+ if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
+ // If this is an or of disjoint bitfields, we can codegen this as an add
+ // (for better address arithmetic) if the LHS and RHS of the OR are
+ // provably disjoint.
+ uint64_t LHSKnownZero, LHSKnownOne;
+ ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
+ if ((LHSKnownZero|~(unsigned)imm) == ~0U) {
+ // If all of the bits are known zero on the LHS or RHS, the add won't
+ // carry.
+ Base = N.getOperand(0);
+ Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
+ return true;
+ }
+ }
+ } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
+ // Loading from a constant address.
+
+ // If this address fits entirely in a 14-bit sext immediate field, codegen
+ // this as "d, 0"
+ short Imm;
+ if (isIntS16Immediate(CN, Imm)) {
+ Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
+ Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
+ return true;
+ }
+
+ // FIXME: Handle small sext constant offsets in PPC64 mode also!
+ if (CN->getValueType(0) == MVT::i32) {
+ int Addr = (int)CN->getValue();
+
+ // Otherwise, break this down into an LIS + disp.
+ Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
+ Base = DAG.getConstant(Addr - (signed short)Addr, MVT::i32);
+ return true;
+ }
+ }
+
+ Disp = DAG.getTargetConstant(0, getPointerTy());
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
+ Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
+ else
+ Base = N;
+ return true; // [r+0]
+}
+
+
+/// getPreIndexedAddressParts - Returns true if the node's address can be
+/// legally represented as a pre-indexed load / store address, returning the
+/// base pointer, offset, and addressing mode by reference.
+bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
+ SDOperand &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) {
+ // Disabled by default for now.
+ if (!EnablePPCPreinc) return false;
+
+ SDOperand Ptr;
+ MVT::ValueType VT;
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+ Ptr = LD->getBasePtr();
+ VT = LD->getLoadedVT();
+
+ } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+ Ptr = ST->getBasePtr();
+ VT = ST->getStoredVT();
+ } else
+ return false;
+
+ // PowerPC doesn't have preinc load/store instructions for vectors.
+ if (MVT::isVector(VT))
+ return false;
+
+ // TODO: Check reg+reg first.
+
+ // LDU/STDU use reg+imm*4 (DS-form, so the displacement must be a multiple
+ // of 4); the other pre-inc forms use plain reg+imm.
+ if (VT != MVT::i64) {
+ // reg + imm
+ if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
+ return false;
+ } else {
+ // reg + imm * 4.
+ if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
+ return false;
+ }
+
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+ // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
+ // sext i32 to i64 when addr mode is r+i.
+ if (LD->getValueType(0) == MVT::i64 && LD->getLoadedVT() == MVT::i32 &&
+ LD->getExtensionType() == ISD::SEXTLOAD &&
+ isa<ConstantSDNode>(Offset))
+ return false;
+ }
+
+ AM = ISD::PRE_INC;
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// LowerOperation implementation
//===----------------------------------------------------------------------===//
static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
+ MVT::ValueType PtrVT = Op.getValueType();
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
- Constant *C = CP->get();
- SDOperand CPI = DAG.getTargetConstantPool(C, MVT::i32, CP->getAlignment());
- SDOperand Zero = DAG.getConstant(0, MVT::i32);
+ Constant *C = CP->getConstVal();
+ SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
+ SDOperand Zero = DAG.getConstant(0, PtrVT);
const TargetMachine &TM = DAG.getTarget();
+ SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero);
+ SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero);
+
+ // If this is a non-darwin platform, we don't support non-static relo models
+ // yet.
+ if (TM.getRelocationModel() == Reloc::Static ||
+ !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
+ // Generate non-pic code that has direct accesses to the constant pool.
+ // The address of the global is just (hi(&g)+lo(&g)).
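+ // (On 32-bit Darwin this typically materializes as something like
+ // lis r2, ha16(LCPI...) ; la r2, lo16(LCPI...)(r2) -- a sketch only; the
+ // exact instructions are chosen later during instruction selection.)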
+ return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
+ }
+
+ if (TM.getRelocationModel() == Reloc::PIC_) {
+ // With PIC, the first instruction is actually "GR+hi(&G)".
+ Hi = DAG.getNode(ISD::ADD, PtrVT,
+ DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
+ }
+
+ Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
+ return Lo;
+}
+
+static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
+ MVT::ValueType PtrVT = Op.getValueType();
+ JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+ SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
+ SDOperand Zero = DAG.getConstant(0, PtrVT);
+
+ const TargetMachine &TM = DAG.getTarget();
+
+ SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero);
+ SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero);
+
// If this is a non-darwin platform, we don't support non-static relo models
// yet.
if (TM.getRelocationModel() == Reloc::Static ||
!TM.getSubtarget<PPCSubtarget>().isDarwin()) {
// Generate non-pic code that has direct accesses to the constant pool.
// The address of the global is just (hi(&g)+lo(&g)).
- SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
- SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
- return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
+ return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
}
- SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
- if (TM.getRelocationModel() == Reloc::PIC) {
+ if (TM.getRelocationModel() == Reloc::PIC_) {
// With PIC, the first instruction is actually "GR+hi(&G)".
- Hi = DAG.getNode(ISD::ADD, MVT::i32,
- DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
+ Hi = DAG.getNode(ISD::ADD, PtrVT,
+ DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
}
- SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
- Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
+ Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
return Lo;
}
static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
+ MVT::ValueType PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
GlobalValue *GV = GSDN->getGlobal();
- SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i32, GSDN->getOffset());
- SDOperand Zero = DAG.getConstant(0, MVT::i32);
+ SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
+ SDOperand Zero = DAG.getConstant(0, PtrVT);
const TargetMachine &TM = DAG.getTarget();
+ SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero);
+ SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero);
+
// If this is a non-darwin platform, we don't support non-static relo models
// yet.
if (TM.getRelocationModel() == Reloc::Static ||
!TM.getSubtarget<PPCSubtarget>().isDarwin()) {
// Generate non-pic code that has direct accesses to globals.
// The address of the global is just (hi(&g)+lo(&g)).
- SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
- SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
- return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
+ return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
}
- SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
- if (TM.getRelocationModel() == Reloc::PIC) {
+ if (TM.getRelocationModel() == Reloc::PIC_) {
// With PIC, the first instruction is actually "GR+hi(&G)".
- Hi = DAG.getNode(ISD::ADD, MVT::i32,
- DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
+ Hi = DAG.getNode(ISD::ADD, PtrVT,
+ DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
}
- SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
- Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
+ Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
- if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
- (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
+ if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
return Lo;
// If the global is weak or external, we have to go through the lazy
// resolution stub.
- return DAG.getLoad(MVT::i32, DAG.getEntryNode(), Lo, DAG.getSrcValue(0));
+ return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0);
}
static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
}
// If we have an integer seteq/setne, turn it into a compare against zero
- // by subtracting the rhs from the lhs, which is faster than setting a
- // condition register, reading it back out, and masking the correct bit.
+ // by xor'ing the rhs with the lhs, which is faster than setting a
+ // condition register, reading it back out, and masking the correct bit. The
+ // normal approach here uses sub to do this instead of xor. Using xor exposes
+ // the result to other bit-twiddling opportunities.
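+ // For example, (a == b) becomes setcc(a ^ b, 0, seteq); the xor result can
+ // then combine with neighboring bit-twiddling logic.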
MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
MVT::ValueType VT = Op.getValueType();
- SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0),
+ SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
Op.getOperand(1));
return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
}
unsigned VarArgsFrameIndex) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
- SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
- return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
- Op.getOperand(1), Op.getOperand(2));
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
+ SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
+ return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
+ SV->getOffset());
}
-static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
- SDOperand Copy;
- switch(Op.getNumOperands()) {
- default:
- assert(0 && "Do not know how to return this many arguments!");
- abort();
- case 1:
- return SDOperand(); // ret void is legal
- case 2: {
- MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
- unsigned ArgReg;
- if (MVT::isVector(ArgVT))
- ArgReg = PPC::V2;
- else if (MVT::isInteger(ArgVT))
- ArgReg = PPC::R3;
- else {
- assert(MVT::isFloatingPoint(ArgVT));
- ArgReg = PPC::F1;
+static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG,
+ int &VarArgsFrameIndex) {
+ // TODO: add description of PPC stack frame format, or at least some docs.
+ //
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ SSARegMap *RegMap = MF.getSSARegMap();
+ SmallVector<SDOperand, 8> ArgValues;
+ SDOperand Root = Op.getOperand(0);
+
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ bool isPPC64 = PtrVT == MVT::i64;
+ unsigned PtrByteSize = isPPC64 ? 8 : 4;
+
+ unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64);
+
+ static const unsigned GPR_32[] = { // 32-bit registers.
+ PPC::R3, PPC::R4, PPC::R5, PPC::R6,
+ PPC::R7, PPC::R8, PPC::R9, PPC::R10,
+ };
+ static const unsigned GPR_64[] = { // 64-bit registers.
+ PPC::X3, PPC::X4, PPC::X5, PPC::X6,
+ PPC::X7, PPC::X8, PPC::X9, PPC::X10,
+ };
+ static const unsigned FPR[] = {
+ PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
+ PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
+ };
+ static const unsigned VR[] = {
+ PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
+ PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
+ };
+
+ const unsigned Num_GPR_Regs = sizeof(GPR_32)/sizeof(GPR_32[0]);
+ const unsigned Num_FPR_Regs = sizeof(FPR)/sizeof(FPR[0]);
+ const unsigned Num_VR_Regs = sizeof( VR)/sizeof( VR[0]);
+
+ unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+
+ const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
+
+ // Add DAG nodes to load the arguments or copy them out of registers. On
+ // entry to a function on PPC, the arguments start after the linkage area,
+ // although the first ones are often in registers.
+ for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
+ SDOperand ArgVal;
+ bool needsLoad = false;
+ MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
+ unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;
+ unsigned ArgSize = ObjSize;
+
+ unsigned CurArgOffset = ArgOffset;
+ switch (ObjectVT) {
+ default: assert(0 && "Unhandled argument type!");
+ case MVT::i32:
+ // All int arguments reserve stack space.
+ ArgOffset += PtrByteSize;
+
+ if (GPR_idx != Num_GPR_Regs) {
+ unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
+ MF.addLiveIn(GPR[GPR_idx], VReg);
+ ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32);
+ ++GPR_idx;
+ } else {
+ needsLoad = true;
+ ArgSize = PtrByteSize;
+ }
+ break;
+ case MVT::i64: // PPC64
+ // All int arguments reserve stack space.
+ ArgOffset += 8;
+
+ if (GPR_idx != Num_GPR_Regs) {
+ unsigned VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
+ MF.addLiveIn(GPR[GPR_idx], VReg);
+ ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64);
+ ++GPR_idx;
+ } else {
+ needsLoad = true;
+ }
+ break;
+ case MVT::f32:
+ case MVT::f64:
+ // All FP arguments reserve stack space.
+ ArgOffset += isPPC64 ? 8 : ObjSize;
+
+ // Every 4 bytes of argument space consumes one of the GPRs available for
+ // argument passing.
+ if (GPR_idx != Num_GPR_Regs) {
+ ++GPR_idx;
+ if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
+ ++GPR_idx;
+ }
+ if (FPR_idx != Num_FPR_Regs) {
+ unsigned VReg;
+ if (ObjectVT == MVT::f32)
+ VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
+ else
+ VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
+ MF.addLiveIn(FPR[FPR_idx], VReg);
+ ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
+ ++FPR_idx;
+ } else {
+ needsLoad = true;
+ }
+ break;
+ case MVT::v4f32:
+ case MVT::v4i32:
+ case MVT::v8i16:
+ case MVT::v16i8:
+ // Note that vector arguments in registers don't reserve stack space.
+ if (VR_idx != Num_VR_Regs) {
+ unsigned VReg = RegMap->createVirtualRegister(&PPC::VRRCRegClass);
+ MF.addLiveIn(VR[VR_idx], VReg);
+ ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
+ ++VR_idx;
+ } else {
+ // This should be simple, but requires getting 16-byte aligned stack
+ // values.
+ assert(0 && "Loading VR argument not implemented yet!");
+ needsLoad = true;
+ }
+ break;
}
- Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
- SDOperand());
+ // We need to load the argument into a virtual register if we determined
+ // above that we ran out of physical registers of the appropriate type.
+ if (needsLoad) {
+ // If the argument is actually used, emit a load from the right stack
+ // slot.
+ if (!Op.Val->hasNUsesOfValue(0, ArgNo)) {
+ int FI = MFI->CreateFixedObject(ObjSize,
+ CurArgOffset + (ArgSize - ObjSize));
+ SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
+ ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
+ } else {
+ // Don't emit a dead load.
+ ArgVal = DAG.getNode(ISD::UNDEF, ObjectVT);
+ }
+ }
- // If we haven't noted the R3/F1 are live out, do so now.
- if (DAG.getMachineFunction().liveout_empty())
- DAG.getMachineFunction().addLiveOut(ArgReg);
- break;
+ ArgValues.push_back(ArgVal);
}
- case 3:
- Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(2),
- SDOperand());
- Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1));
- // If we haven't noted the R3+R4 are live out, do so now.
- if (DAG.getMachineFunction().liveout_empty()) {
- DAG.getMachineFunction().addLiveOut(PPC::R3);
- DAG.getMachineFunction().addLiveOut(PPC::R4);
+
+ // If the function takes a variable number of arguments, make a frame index for
+ // the start of the first vararg value... for expansion of llvm.va_start.
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
+ if (isVarArg) {
+ VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
+ ArgOffset);
+ SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
+ // If this function is vararg, store any remaining integer argument regs
+ // to their spots on the stack so that they may be loaded by dereferencing the
+ // result of va_next.
+ SmallVector<SDOperand, 8> MemOps;
+ for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
+ unsigned VReg;
+ if (isPPC64)
+ VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
+ else
+ VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
+
+ MF.addLiveIn(GPR[GPR_idx], VReg);
+ SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
+ SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
+ MemOps.push_back(Store);
+ // Increment the address by the pointer size for the next argument to store
+ SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
+ FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
}
- break;
+ if (!MemOps.empty())
+ Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size());
}
- return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
+
+ ArgValues.push_back(Root);
+
+ // Return the new list of results.
+ std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
+ Op.Val->value_end());
+ return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
}
-/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
-/// possible.
-static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
- // Not FP? Not a fsel.
- if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
- !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
- return SDOperand();
+/// isBLACompatibleAddress - Return the immediate to use if the specified
+/// 32-bit value is representable in the immediate field of a BxA instruction.
+static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) {
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
+ if (!C) return 0;
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
+ int Addr = C->getValue();
+ if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
+ (Addr << 6 >> 6) != Addr)
+ return 0; // Top 6 bits have to be sext of immediate.
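+ // (In other words, the target must be a word-aligned address that fits in the
+ // sign-extended 26-bit range of an absolute branch; the value returned below,
+ // Addr >> 2, is the 24-bit LI field.)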
- // Cannot handle SETEQ/SETNE.
- if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();
+ return DAG.getConstant((int)C->getValue() >> 2, MVT::i32).Val;
+}
+
+static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
+ SDOperand Chain = Op.getOperand(0);
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
+ SDOperand Callee = Op.getOperand(4);
+ unsigned NumOps = (Op.getNumOperands() - 5) / 2;
+
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ bool isPPC64 = PtrVT == MVT::i64;
+ unsigned PtrByteSize = isPPC64 ? 8 : 4;
- MVT::ValueType ResVT = Op.getValueType();
- MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
- SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
- SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3);
+ // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
+ // SelectExpr to use to put the arguments in the appropriate registers.
+ std::vector<SDOperand> args_to_use;
- // If the RHS of the comparison is a 0.0, we don't need to do the
- // subtraction at all.
- if (isFloatingPointZero(RHS))
- switch (CC) {
- default: break; // SETUO etc aren't handled by fsel.
- case ISD::SETULT:
- case ISD::SETLT:
- std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
- case ISD::SETUGE:
- case ISD::SETGE:
- if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
+ // Count how many bytes are to be pushed on the stack, including the linkage
+ // area, and parameter passing area. We start with 24/48 bytes, which is
+ // prereserved space for [SP][CR][LR][3 x unused].
+ unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64);
+
+ // Add up all the space actually used.
+ for (unsigned i = 0; i != NumOps; ++i) {
+ unsigned ArgSize =MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8;
+ ArgSize = std::max(ArgSize, PtrByteSize);
+ NumBytes += ArgSize;
+ }
+
+ // The prolog code of the callee may store up to 8 GPR argument registers to
+ // the stack, allowing va_start to index over them in memory if it is varargs.
+ // Because we cannot tell if this is needed on the caller side, we have to
+ // conservatively assume that it is needed. As such, make sure we have at
+ // least enough stack space for the caller to store the 8 GPRs.
+ NumBytes = std::max(NumBytes, PPCFrameInfo::getMinCallFrameSize(isPPC64));
+
+ // Adjust the stack pointer for the new arguments...
+ // These operations are automatically eliminated by the prolog/epilog pass
+ Chain = DAG.getCALLSEQ_START(Chain,
+ DAG.getConstant(NumBytes, PtrVT));
+
+ // Set up a copy of the stack pointer for use loading and storing any
+ // arguments that may not fit in the registers available for argument
+ // passing.
+ SDOperand StackPtr;
+ if (isPPC64)
+ StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
+ else
+ StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
+
+ // Figure out which arguments are going to go in registers, and which in
+ // memory. Also, if this is a vararg function, floating point arguments
+ // must be stored to our stack, and loaded into integer regs as well, if
+ // any integer regs are available for argument passing.
+ unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64);
+ unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+
+ static const unsigned GPR_32[] = { // 32-bit registers.
+ PPC::R3, PPC::R4, PPC::R5, PPC::R6,
+ PPC::R7, PPC::R8, PPC::R9, PPC::R10,
+ };
+ static const unsigned GPR_64[] = { // 64-bit registers.
+ PPC::X3, PPC::X4, PPC::X5, PPC::X6,
+ PPC::X7, PPC::X8, PPC::X9, PPC::X10,
+ };
+ static const unsigned FPR[] = {
+ PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
+ PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
+ };
+ static const unsigned VR[] = {
+ PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
+ PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
+ };
+ const unsigned NumGPRs = sizeof(GPR_32)/sizeof(GPR_32[0]);
+ const unsigned NumFPRs = sizeof(FPR)/sizeof(FPR[0]);
+ const unsigned NumVRs = sizeof( VR)/sizeof( VR[0]);
+
+ const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
+
+ std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
+ SmallVector<SDOperand, 8> MemOpChains;
+ for (unsigned i = 0; i != NumOps; ++i) {
+ SDOperand Arg = Op.getOperand(5+2*i);
+
+ // PtrOff will be used to store the current argument to the stack if a
+ // register cannot be found for it.
+ SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
+ PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff);
+
+ // On PPC64, promote integers to 64-bit values.
+ if (isPPC64 && Arg.getValueType() == MVT::i32) {
+ unsigned ExtOp = ISD::ZERO_EXTEND;
+ if (cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue())
+ ExtOp = ISD::SIGN_EXTEND;
+ Arg = DAG.getNode(ExtOp, MVT::i64, Arg);
+ }
+
+ switch (Arg.getValueType()) {
+ default: assert(0 && "Unexpected ValueType for argument!");
+ case MVT::i32:
+ case MVT::i64:
+ if (GPR_idx != NumGPRs) {
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
+ } else {
+ MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+ }
+ ArgOffset += PtrByteSize;
+ break;
+ case MVT::f32:
+ case MVT::f64:
+ if (isVarArg && isPPC64) {
+ // Float varargs need to be promoted to double.
+ if (Arg.getValueType() == MVT::f32)
+ Arg = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Arg);
+ }
+
+ if (FPR_idx != NumFPRs) {
+ RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
+
+ if (isVarArg) {
+ SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
+ MemOpChains.push_back(Store);
+
+ // Float varargs are always shadowed in available integer registers
+ if (GPR_idx != NumGPRs) {
+ SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
+ MemOpChains.push_back(Load.getValue(1));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
+ }
+ if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
+ SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType());
+ PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour);
+ SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
+ MemOpChains.push_back(Load.getValue(1));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
+ }
+ } else {
+ // If we have any FPRs remaining, we may also have GPRs remaining.
+ // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
+ // GPRs.
+ if (GPR_idx != NumGPRs)
+ ++GPR_idx;
+ if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64)
+ ++GPR_idx;
+ }
+ } else {
+ MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+ }
+ if (isPPC64)
+ ArgOffset += 8;
+ else
+ ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
+ break;
+ case MVT::v4f32:
+ case MVT::v4i32:
+ case MVT::v8i16:
+ case MVT::v16i8:
+ assert(!isVarArg && "Don't support passing vectors to varargs yet!");
+ assert(VR_idx != NumVRs &&
+ "Don't support passing more than 12 vector args yet!");
+ RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
+ break;
+ }
+ }
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+ &MemOpChains[0], MemOpChains.size());
+
+ // Build a sequence of copy-to-reg nodes chained together with token chain
+ // and flag operands which copy the outgoing args into the appropriate regs.
+ SDOperand InFlag;
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
+ InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
+ std::vector<MVT::ValueType> NodeTys;
+ NodeTys.push_back(MVT::Other); // Returns a chain
+ NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
+
+ SmallVector<SDOperand, 8> Ops;
+ unsigned CallOpc = PPCISD::CALL;
+
+ // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
+ // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
+ // node so that legalize doesn't hack it.
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType());
+ else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType());
+ else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
+ // If this is an absolute destination address, use the munged value.
+ Callee = SDOperand(Dest, 0);
+ else {
+ // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
+ // to do the call; we can't use PPCISD::CALL.
+ SDOperand MTCTROps[] = {Chain, Callee, InFlag};
+ Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2+(InFlag.Val!=0));
+ InFlag = Chain.getValue(1);
+
+ // Copy the callee address into R12 on darwin.
+ Chain = DAG.getCopyToReg(Chain, PPC::R12, Callee, InFlag);
+ InFlag = Chain.getValue(1);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::Other);
+ NodeTys.push_back(MVT::Flag);
+ Ops.push_back(Chain);
+ CallOpc = PPCISD::BCTRL;
+ Callee.Val = 0;
+ }
+
+ // If this is a direct call, pass the chain and the callee.
+ if (Callee.Val) {
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+ }
+
+ // Add argument registers to the end of the list so that they are known live
+ // into the call.
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
+
+ if (InFlag.Val)
+ Ops.push_back(InFlag);
+ Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
+ InFlag = Chain.getValue(1);
+
+ SDOperand ResultVals[3];
+ unsigned NumResults = 0;
+ NodeTys.clear();
+
+ // If the call has results, copy the values out of the ret val registers.
+ switch (Op.Val->getValueType(0)) {
+ default: assert(0 && "Unexpected ret value!");
+ case MVT::Other: break;
+ case MVT::i32:
+ if (Op.Val->getValueType(1) == MVT::i32) {
+ Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, InFlag).getValue(1);
+ ResultVals[0] = Chain.getValue(0);
+ Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32,
+ Chain.getValue(2)).getValue(1);
+ ResultVals[1] = Chain.getValue(0);
+ NumResults = 2;
+ NodeTys.push_back(MVT::i32);
+ } else {
+ Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1);
+ ResultVals[0] = Chain.getValue(0);
+ NumResults = 1;
+ }
+ NodeTys.push_back(MVT::i32);
+ break;
+ case MVT::i64:
+ Chain = DAG.getCopyFromReg(Chain, PPC::X3, MVT::i64, InFlag).getValue(1);
+ ResultVals[0] = Chain.getValue(0);
+ NumResults = 1;
+ NodeTys.push_back(MVT::i64);
+ break;
+ case MVT::f32:
+ case MVT::f64:
+ Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0),
+ InFlag).getValue(1);
+ ResultVals[0] = Chain.getValue(0);
+ NumResults = 1;
+ NodeTys.push_back(Op.Val->getValueType(0));
+ break;
+ case MVT::v4f32:
+ case MVT::v4i32:
+ case MVT::v8i16:
+ case MVT::v16i8:
+ Chain = DAG.getCopyFromReg(Chain, PPC::V2, Op.Val->getValueType(0),
+ InFlag).getValue(1);
+ ResultVals[0] = Chain.getValue(0);
+ NumResults = 1;
+ NodeTys.push_back(Op.Val->getValueType(0));
+ break;
+ }
+
+ Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
+ DAG.getConstant(NumBytes, PtrVT));
+ NodeTys.push_back(MVT::Other);
+
+ // If the function returns void, just return the chain.
+ if (NumResults == 0)
+ return Chain;
+
+ // Otherwise, merge everything together with a MERGE_VALUES node.
+ ResultVals[NumResults++] = Chain;
+ SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
+ ResultVals, NumResults);
+ return Res.getValue(Op.ResNo);
+}
+
+static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
+ SDOperand Copy;
+ switch(Op.getNumOperands()) {
+ default:
+ assert(0 && "Do not know how to return this many arguments!");
+ abort();
+ case 1:
+ return SDOperand(); // ret void is legal
+ case 3: {
+ MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
+ unsigned ArgReg;
+ if (ArgVT == MVT::i32) {
+ ArgReg = PPC::R3;
+ } else if (ArgVT == MVT::i64) {
+ ArgReg = PPC::X3;
+ } else if (MVT::isVector(ArgVT)) {
+ ArgReg = PPC::V2;
+ } else {
+ assert(MVT::isFloatingPoint(ArgVT));
+ ArgReg = PPC::F1;
+ }
+
+ Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
+ SDOperand());
+
+ // If we haven't noted the R3/F1 are live out, do so now.
+ if (DAG.getMachineFunction().liveout_empty())
+ DAG.getMachineFunction().addLiveOut(ArgReg);
+ break;
+ }
+ case 5:
+ Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(3),
+ SDOperand());
+ Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1));
+ // If we haven't noted the R3+R4 are live out, do so now.
+ if (DAG.getMachineFunction().liveout_empty()) {
+ DAG.getMachineFunction().addLiveOut(PPC::R3);
+ DAG.getMachineFunction().addLiveOut(PPC::R4);
+ }
+ break;
+ }
+ return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
+}
+
+static SDOperand LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG,
+ const PPCSubtarget &Subtarget) {
+ // When we pop the dynamic allocation we need to restore the SP link.
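+ // The word at *SP is the back chain (the saved SP link). Load it from the
+ // old SP, move SP to SaveSP, then store the link at the new SP so the chain
+ // stays intact.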
+
+ // Get the correct type for pointers.
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+
+ // Construct the stack pointer operand.
+ bool IsPPC64 = Subtarget.isPPC64();
+ unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1;
+ SDOperand StackPtr = DAG.getRegister(SP, PtrVT);
+
+ // Get the operands for the STACKRESTORE.
+ SDOperand Chain = Op.getOperand(0);
+ SDOperand SaveSP = Op.getOperand(1);
+
+ // Load the old link SP.
+ SDOperand LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0);
+
+ // Restore the stack pointer.
+ Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP);
+
+ // Store the old link SP.
+ return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0);
+}
+
+static SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG,
+ const PPCSubtarget &Subtarget) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ bool IsPPC64 = Subtarget.isPPC64();
+
+ // Get current frame pointer save index. The users of this index will be
+ // primarily DYNALLOC instructions.
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+ int FPSI = FI->getFramePointerSaveIndex();
+
+ // If the frame pointer save index hasn't been defined yet, define it now.
+ if (!FPSI) {
+ // Find out the fixed offset of the frame pointer save area.
+ int Offset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64);
+ // Allocate the frame index for frame pointer save area.
+ FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, Offset);
+ // Save the result.
+ FI->setFramePointerSaveIndex(FPSI);
+ }
+
+ // Get the inputs.
+ SDOperand Chain = Op.getOperand(0);
+ SDOperand Size = Op.getOperand(1);
+
+ // Get the correct type for pointers.
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ // Negate the size.
+ SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT,
+ DAG.getConstant(0, PtrVT), Size);
+ // Construct a node for the frame pointer save index.
+ SDOperand FPSIdx = DAG.getFrameIndex(FPSI, PtrVT);
+ // Build a DYNALLOC node.
+ SDOperand Ops[3] = { Chain, NegSize, FPSIdx };
+ SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
+ return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3);
+}
+
+
+/// LowerSELECT_CC - Lower floating point select_cc's into the fsel instruction
+/// when possible.
+static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
+ // Not FP? Not a fsel.
+ if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
+ !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
+ return SDOperand();
+
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
+
+ // Cannot handle SETEQ/SETNE.
+ if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();
+
+ MVT::ValueType ResVT = Op.getValueType();
+ MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
+ SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
+ SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3);
+
+ // If the RHS of the comparison is a 0.0, we don't need to do the
+ // subtraction at all.
+ if (isFloatingPointZero(RHS))
+ switch (CC) {
+ default: break; // SETUO etc aren't handled by fsel.
+ case ISD::SETULT:
+ case ISD::SETOLT:
+ case ISD::SETLT:
+ std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
+ case ISD::SETUGE:
+ case ISD::SETOGE:
+ case ISD::SETGE:
+ if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
case ISD::SETUGT:
+ case ISD::SETOGT:
case ISD::SETGT:
std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
case ISD::SETULE:
+ case ISD::SETOLE:
case ISD::SETLE:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
switch (CC) {
default: break; // SETUO etc aren't handled by fsel.
case ISD::SETULT:
+ case ISD::SETOLT:
case ISD::SETLT:
Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
case ISD::SETUGE:
+ case ISD::SETOGE:
case ISD::SETGE:
Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
case ISD::SETUGT:
+ case ISD::SETOGT:
case ISD::SETGT:
Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
case ISD::SETULE:
+ case ISD::SETOLE:
case ISD::SETLE:
Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
// then lfd it and fcfid it.
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(8, 8);
- SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
Op.getOperand(0));
DAG.getEntryNode(), Ext64, FIdx,
DAG.getSrcValue(NULL));
// Load the value as a double.
- SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, DAG.getSrcValue(NULL));
+ SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0);
// FCFID it and return it.
SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
return FP;
}
-static SDOperand LowerSHL(SDOperand Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i64 &&
+static SDOperand LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) {
+ assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");
- // The generic code does a fine job expanding shift by a constant.
- if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();
- // Otherwise, expand into a bunch of logical ops. Note that these ops
+ // Expand into a bunch of logical ops. Note that these ops
// depend on the PPC behavior for oversized shift amounts.
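+ // A sketch of what this computes (relying on the oversized-shift behavior
+ // noted above, where a 32-bit shift by an amount in 32..63 yields 0):
+ // OutLo = Lo << Amt
+ // OutHi = (Hi << Amt) | (Lo >> (32 - Amt)) | (Lo << (Amt - 32))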
- SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
- SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
- DAG.getConstant(1, MVT::i32));
- SDOperand Amt = Op.getOperand(1);
+ SDOperand Lo = Op.getOperand(0);
+ SDOperand Hi = Op.getOperand(1);
+ SDOperand Amt = Op.getOperand(2);
SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
DAG.getConstant(32, MVT::i32), Amt);
SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5);
SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt);
- return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
+ SDOperand OutOps[] = { OutLo, OutHi };
+ return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32),
+ OutOps, 2);
}
-static SDOperand LowerSRL(SDOperand Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i64 &&
- Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");
- // The generic code does a fine job expanding shift by a constant.
- if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();
+static SDOperand LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) {
+ assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
+ Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!");
- // Otherwise, expand into a bunch of logical ops. Note that these ops
+ // Expand into a bunch of logical ops. Note that these ops
// depend on the PPC behavior for oversized shift amounts.
- SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
- SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
- DAG.getConstant(1, MVT::i32));
- SDOperand Amt = Op.getOperand(1);
+ SDOperand Lo = Op.getOperand(0);
+ SDOperand Hi = Op.getOperand(1);
+ SDOperand Amt = Op.getOperand(2);
SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
DAG.getConstant(32, MVT::i32), Amt);
SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5);
SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt);
- return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
+ SDOperand OutOps[] = { OutLo, OutHi };
+ return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32),
+ OutOps, 2);
}
-static SDOperand LowerSRA(SDOperand Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i64 &&
+static SDOperand LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) {
+ assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!");
- // The generic code does a fine job expanding shift by a constant.
- if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();
- // Otherwise, expand into a bunch of logical ops, followed by a select_cc.
+ // Expand into a bunch of logical ops, followed by a select_cc.
- SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
- SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
- DAG.getConstant(1, MVT::i32));
- SDOperand Amt = Op.getOperand(1);
+ SDOperand Lo = Op.getOperand(0);
+ SDOperand Hi = Op.getOperand(1);
+ SDOperand Amt = Op.getOperand(2);
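+ // OutHi = Hi >>s Amt; OutLo is (Lo >> Amt) | (Hi << (32-Amt)) when
+ // Amt <= 32 and Hi >>s (Amt-32) otherwise, picked by the select_cc below
+ // on Amt-32.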
SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
DAG.getConstant(32, MVT::i32), Amt);
SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt);
SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32),
Tmp4, Tmp6, ISD::SETLE);
- return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
+ SDOperand OutOps[] = { OutLo, OutHi };
+ return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32),
+ OutOps, 2);
}
//===----------------------------------------------------------------------===//
return true;
}
+/// BuildSplatI - Build a canonical splati of Val with an element size of
+/// SplatSize. Cast the result to VT.
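+/// For example, BuildSplatI(-1, 4, MVT::v4i32, DAG) produces a v4i32
+/// BIT_CONVERT of a v16i8 all-ones BUILD_VECTOR, since -1 splats are
+/// canonicalized to the byte form (vspltisb -1).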
+static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT,
+ SelectionDAG &DAG) {
+ assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
+
+ static const MVT::ValueType VTys[] = { // canonical VT to use for each size.
+ MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
+ };
+
+ MVT::ValueType ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
+
+ // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
+ if (Val == -1)
+ SplatSize = 1;
+
+ MVT::ValueType CanonicalVT = VTys[SplatSize-1];
+
+ // Build a canonical splat for this value.
+ SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT));
+ SmallVector<SDOperand, 8> Ops;
+ Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt);
+ SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT,
+ &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res);
+}
+
+/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
+/// specified intrinsic ID.
+static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS,
+ SelectionDAG &DAG,
+ MVT::ValueType DestVT = MVT::Other) {
+ if (DestVT == MVT::Other) DestVT = LHS.getValueType();
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
+ DAG.getConstant(IID, MVT::i32), LHS, RHS);
+}
+
+/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
+/// specified intrinsic ID.
+static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1,
+ SDOperand Op2, SelectionDAG &DAG,
+ MVT::ValueType DestVT = MVT::Other) {
+ if (DestVT == MVT::Other) DestVT = Op0.getValueType();
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
+ DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
+}
+
+
+/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
+/// amount. The result has the specified value type.
+static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt,
+ MVT::ValueType VT, SelectionDAG &DAG) {
+ // Force LHS/RHS to be the right type.
+ LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
+ RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);
+
+ SDOperand Ops[16];
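+ // The mask picks bytes Amt..Amt+15 of the 32-byte LHS:RHS concatenation,
+ // which is exactly what vsldoi computes.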
+ for (unsigned i = 0; i != 16; ++i)
+ Ops[i] = DAG.getConstant(i+Amt, MVT::i32);
+ SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS,
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16));
+ return DAG.getNode(ISD::BIT_CONVERT, VT, T);
+}
+
// If this is a case we can't handle, return null and let the default
// expansion code take care of it. If we CAN select this case, and if it
// selects to a single instruction, return Op. Otherwise, if we can codegen
// If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
- if (SextVal >= -16 && SextVal <= 15) {
- const MVT::ValueType VTys[] = { // canonical VT to use for each size.
- MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
- };
- MVT::ValueType CanonicalVT = VTys[SplatSize-1];
-
- // If this is a non-canonical splat for this value,
- if (Op.getValueType() != CanonicalVT || HasAnyUndefs) {
- SDOperand Elt = DAG.getConstant(SplatBits,
- MVT::getVectorBaseType(CanonicalVT));
- std::vector<SDOperand> Ops(MVT::getVectorNumElements(CanonicalVT), Elt);
- SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, Ops);
- Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
- }
- return Op;
- }
+ if (SextVal >= -16 && SextVal <= 15)
+ return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);
+ // Two instruction sequences.
+
+ // If this value is in the range [-32,30] and is even, use:
+ // tmp = VSPLTI[bhw], result = add tmp, tmp
+ if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
+ Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG);
+ return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op);
+ }
+
// If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
- // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). These are important
+ // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
// for fneg/fabs.
- if (SplatSize == 4 &&
- SplatBits == 0x80000000 || SplatBits == (0x7FFFFFFF&~SplatUndef)) {
+ if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
// Make -1 and vspltisw -1:
- SDOperand OnesI = DAG.getConstant(~0U, MVT::i32);
- SDOperand OnesV = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
- OnesI, OnesI, OnesI, OnesI);
+ SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);
// Make the VSLW intrinsic, computing 0x8000_0000.
- SDOperand Res
- = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, MVT::v4i32,
- DAG.getConstant(Intrinsic::ppc_altivec_vslw, MVT::i32),
- OnesV, OnesV);
-
- // If this is 0x7FFF_FFFF, xor by OnesV to invert it.
- if (SplatBits == 0x80000000)
- Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
+ SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
+ OnesV, DAG);
+ // xor by OnesV to invert it.
+ Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
}
+
+ // Check for a wide variety of vsplti* + binop-on-self cases.
+ unsigned SplatBitSize = SplatSize*8;
+ static const char SplatCsts[] = {
+ -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
+ -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
+ };
+
+ for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){
+ // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
+ // cases which are ambiguous (e.g. formation of 0x8000_0000).
+ int i = SplatCsts[idx];
+
+ // Figure out what shift amount will be used by altivec if shifted by i in
+ // this splat size.
+ unsigned TypeShiftAmt = i & (SplatBitSize-1);
+
+ // vsplti + shl self.
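+ // For example, a splat of the byte 0x80 has i = -1 and TypeShiftAmt = 7:
+ // vspltisb -1 followed by vslb on itself puts 0x80 in every byte.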
+ if (SextVal == (i << (int)TypeShiftAmt)) {
+ SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
+ static const unsigned IIDs[] = { // Intrinsic to use for each size.
+ Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
+ Intrinsic::ppc_altivec_vslw
+ };
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+ }
+
+ // vsplti + srl self.
+ if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
+ SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
+ static const unsigned IIDs[] = { // Intrinsic to use for each size.
+ Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
+ Intrinsic::ppc_altivec_vsrw
+ };
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+ }
+
+ // vsplti + sra self.
+ if (SextVal == (i >> (int)TypeShiftAmt)) {
+ SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
+ static const unsigned IIDs[] = { // Intrinsic to use for each size.
+ Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
+ Intrinsic::ppc_altivec_vsraw
+ };
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+ }
+
+ // vsplti + rol self.
+ if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
+ ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
+ SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
+ static const unsigned IIDs[] = { // Intrinsic to use for each size.
+ Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
+ Intrinsic::ppc_altivec_vrlw
+ };
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+ }
+
+ // t = vsplti c, result = vsldoi t, t, 1
+ if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
+ SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
+ return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
+ }
+ // t = vsplti c, result = vsldoi t, t, 2
+ if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
+ SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
+ return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
+ }
+ // t = vsplti c, result = vsldoi t, t, 3
+ if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
+ SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
+ return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
+ }
+ }
+
+ // Three instruction sequences.
+
+ // Odd, in range [17,31]: (vsplti C)-(vsplti -16).
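+ // For example, a splat of 27 becomes (vsplti 11) - (vsplti -16), since
+ // 11 - (-16) == 27.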
+ if (SextVal >= 0 && SextVal <= 31) {
+ SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG);
+ SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
+ LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
+ }
+ // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
+ if (SextVal >= -31 && SextVal <= 0) {
+ SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG);
+ SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
+ LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS);
+ return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
+ }
}
return SDOperand();
unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
enum {
- OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
+ OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
OP_VMRGHW,
OP_VMRGLW,
OP_VSPLTISW0,
OP_VSPLTISW3,
OP_VSLDOI4,
OP_VSLDOI8,
- OP_VSLDOI12,
+ OP_VSLDOI12
};
if (OpNum == OP_COPY) {
return RHS;
}
+ SDOperand OpLHS, OpRHS;
+ OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
+ OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);
+
unsigned ShufIdxs[16];
switch (OpNum) {
default: assert(0 && "Unknown i32 permute!");
ShufIdxs[i] = (i&3)+12;
break;
case OP_VSLDOI4:
- for (unsigned i = 0; i != 16; ++i)
- ShufIdxs[i] = i+4;
- break;
+ return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
case OP_VSLDOI8:
- for (unsigned i = 0; i != 16; ++i)
- ShufIdxs[i] = i+8;
- break;
+ return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
case OP_VSLDOI12:
- for (unsigned i = 0; i != 16; ++i)
- ShufIdxs[i] = i+12;
- break;
+ return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
}
- std::vector<SDOperand> Ops;
+ SDOperand Ops[16];
for (unsigned i = 0; i != 16; ++i)
- Ops.push_back(DAG.getConstant(ShufIdxs[i], MVT::i32));
- SDOperand OpLHS, OpRHS;
- OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
- OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);
+ Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i32);
return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
}
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType());
unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;
- std::vector<SDOperand> ResultMask;
+ SmallVector<SDOperand, 16> ResultMask;
for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
unsigned SrcElt;
if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
MVT::i8));
}
- SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, ResultMask);
+ SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
+ &ResultMask[0], ResultMask.size());
return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
}
-/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
-/// lower, do it, otherwise return null.
-static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
-
- // If this is a lowered altivec predicate compare, CompareOpc is set to the
- // opcode number of the comparison.
- int CompareOpc = -1;
- bool isDot = false;
- switch (IntNo) {
- default: return SDOperand(); // Don't custom lower most intrinsics.
- // Comparison predicates.
+/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
+/// altivec comparison. If it is, return true and fill in Opc/isDot with
+/// information about the intrinsic.
+static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc,
+ bool &isDot) {
+ unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue();
+ CompareOpc = -1;
+ isDot = false;
+ switch (IntrinsicID) {
+ default: return false;
+ // Comparison predicates.
case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break;
case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break;
case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break;
case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break;
}
+ return true;
+}
+
+/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
+/// lower, do it, otherwise return null.
+static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
+ // If this is a lowered altivec predicate compare, CompareOpc is set to the
+ // opcode number of the comparison.
+ int CompareOpc;
+ bool isDot;
+ if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
+ return SDOperand(); // Don't custom lower most intrinsics.
- assert(CompareOpc>0 && "We only lower altivec predicate compares so far!");
-
- // If this is a non-dot comparison, make the VCMP node.
+ // If this is a non-dot comparison, make the VCMP node and we are done.
if (!isDot) {
SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
Op.getOperand(1), Op.getOperand(2),
}
// Create the PPCISD altivec 'dot' comparison node.
- std::vector<SDOperand> Ops;
+ SDOperand Ops[] = {
+ Op.getOperand(2), // LHS
+ Op.getOperand(3), // RHS
+ DAG.getConstant(CompareOpc, MVT::i32)
+ };
std::vector<MVT::ValueType> VTs;
- Ops.push_back(Op.getOperand(2)); // LHS
- Ops.push_back(Op.getOperand(3)); // RHS
- Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32));
VTs.push_back(Op.getOperand(2).getValueType());
VTs.push_back(MVT::Flag);
- SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops);
+ SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);
// Now that we have the comparison, emit a copy from the CR to a GPR.
// This is flagged to the above dot comparison.
// Create a stack slot that is 16-byte aligned.
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(16, 16);
- SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
// Store the input value into Value#0 of the stack slot.
- SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
- Op.getOperand(0), FIdx,DAG.getSrcValue(NULL));
+ SDOperand Store = DAG.getStore(DAG.getEntryNode(),
+ Op.getOperand(0), FIdx, NULL, 0);
// Load it out.
- return DAG.getLoad(Op.getValueType(), Store, FIdx, DAG.getSrcValue(NULL));
+ return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0);
+}
+
+static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) {
+ if (Op.getValueType() == MVT::v4i32) {
+ SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
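+ // Each 32-bit product is assembled from 16-bit pieces:
+ // a*b = lo16(a)*lo16(b) + ((lo16(a)*hi16(b) + hi16(a)*lo16(b)) << 16).
+ // vmulouh supplies the low-half products; vmsumuhm against the
+ // halfword-swapped RHS supplies the summed cross products.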
+
+ SDOperand Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG);
+ SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt.
+
+ SDOperand RHSSwap = // = vrlw RHS, 16
+ BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);
+
+ // Shrinkify inputs to v8i16.
+ LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
+ RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
+ RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);
+
+ // Low parts multiplied together, generating 32-bit results (we ignore the
+ // top parts).
+ SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
+ LHS, RHS, DAG, MVT::v4i32);
+
+ SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
+ LHS, RHSSwap, Zero, DAG, MVT::v4i32);
+ // Shift the high parts up 16 bits.
+ HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
+ return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
+ } else if (Op.getValueType() == MVT::v8i16) {
+ SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
+
+ SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);
+
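+ // vmladduhm computes (LHS * RHS + Zero) modulo 2^16 per halfword, which
+ // with a zero addend is just the 16-bit multiply we need.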
+ return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
+ LHS, RHS, Zero, DAG);
+ } else if (Op.getValueType() == MVT::v16i8) {
+ SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
+
+ // Multiply the even 8-bit parts, producing 16-bit products.
+ SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
+ LHS, RHS, DAG, MVT::v8i16);
+ EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);
+
+ // Multiply the odd 8-bit parts, producing 16-bit products.
+ SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
+ LHS, RHS, DAG, MVT::v8i16);
+ OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);
+
+ // Merge the results together.
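+ // Take the low byte (the odd byte, big-endian) of each 16-bit product and
+ // interleave the even-part and odd-part results.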
+ SDOperand Ops[16];
+ for (unsigned i = 0; i != 8; ++i) {
+ Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8);
+ Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
+ }
+ return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
+ } else {
+ assert(0 && "Unknown mul to lower!");
+ abort();
+ }
}
/// LowerOperation - Provide custom lowering hooks for some operations.
default: assert(0 && "Wasn't expecting to be able to lower this!");
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
+ case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG, VarArgsFrameIndex);
+ case ISD::FORMAL_ARGUMENTS:
+ return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex);
+ case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
+ case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
+ case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
+ PPCSubTarget);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
// Lower 64-bit shifts.
- case ISD::SHL: return LowerSHL(Op, DAG);
- case ISD::SRL: return LowerSRL(Op, DAG);
- case ISD::SRA: return LowerSRA(Op, DAG);
+ case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
+ case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG);
+ case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG);
// Vector-related lowering.
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
+ case ISD::MUL: return LowerMUL(Op, DAG);
}
return SDOperand();
}
// Other Lowering Code
//===----------------------------------------------------------------------===//
-std::vector<SDOperand>
-PPCTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
- //
- // add beautiful description of PPC stack frame format, or at least some docs
- //
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineBasicBlock& BB = MF.front();
- SSARegMap *RegMap = MF.getSSARegMap();
- std::vector<SDOperand> ArgValues;
-
- unsigned ArgOffset = 24;
- unsigned GPR_remaining = 8;
- unsigned FPR_remaining = 13;
- unsigned GPR_idx = 0, FPR_idx = 0;
- static const unsigned GPR[] = {
- PPC::R3, PPC::R4, PPC::R5, PPC::R6,
- PPC::R7, PPC::R8, PPC::R9, PPC::R10,
- };
- static const unsigned FPR[] = {
- PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
- PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
- };
-
- // Add DAG nodes to load the arguments... On entry to a function on PPC,
- // the arguments start at offset 24, although they are likely to be passed
- // in registers.
- for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
- SDOperand newroot, argt;
- unsigned ObjSize;
- bool needsLoad = false;
- bool ArgLive = !I->use_empty();
- MVT::ValueType ObjectVT = getValueType(I->getType());
-
- switch (ObjectVT) {
- default: assert(0 && "Unhandled argument type!");
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- ObjSize = 4;
- if (!ArgLive) break;
- if (GPR_remaining > 0) {
- unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
- MF.addLiveIn(GPR[GPR_idx], VReg);
- argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
- if (ObjectVT != MVT::i32) {
- unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
- : ISD::AssertZext;
- argt = DAG.getNode(AssertOp, MVT::i32, argt,
- DAG.getValueType(ObjectVT));
- argt = DAG.getNode(ISD::TRUNCATE, ObjectVT, argt);
- }
- } else {
- needsLoad = true;
- }
- break;
- case MVT::i64:
- ObjSize = 8;
- if (!ArgLive) break;
- if (GPR_remaining > 0) {
- SDOperand argHi, argLo;
- unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
- MF.addLiveIn(GPR[GPR_idx], VReg);
- argHi = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
- // If we have two or more remaining argument registers, then both halves
- // of the i64 can be sourced from there. Otherwise, the lower half will
- // have to come off the stack. This can happen when an i64 is preceded
- // by 28 bytes of arguments.
- if (GPR_remaining > 1) {
- unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
- MF.addLiveIn(GPR[GPR_idx+1], VReg);
- argLo = DAG.getCopyFromReg(argHi, VReg, MVT::i32);
- } else {
- int FI = MFI->CreateFixedObject(4, ArgOffset+4);
- SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
- argLo = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
- DAG.getSrcValue(NULL));
- }
- // Build the outgoing arg thingy
- argt = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, argLo, argHi);
- newroot = argLo;
- } else {
- needsLoad = true;
- }
- break;
- case MVT::f32:
- case MVT::f64:
- ObjSize = (ObjectVT == MVT::f64) ? 8 : 4;
- if (!ArgLive) {
- if (FPR_remaining > 0) {
- --FPR_remaining;
- ++FPR_idx;
- }
- break;
- }
- if (FPR_remaining > 0) {
- unsigned VReg;
- if (ObjectVT == MVT::f32)
- VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
- else
- VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
- MF.addLiveIn(FPR[FPR_idx], VReg);
- argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, ObjectVT);
- --FPR_remaining;
- ++FPR_idx;
- } else {
- needsLoad = true;
- }
- break;
- }
-
- // We need to load the argument to a virtual register if we determined above
- // that we ran out of physical registers of the appropriate type
- if (needsLoad) {
- unsigned SubregOffset = 0;
- if (ObjectVT == MVT::i8 || ObjectVT == MVT::i1) SubregOffset = 3;
- if (ObjectVT == MVT::i16) SubregOffset = 2;
- int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
- SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
- FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN,
- DAG.getConstant(SubregOffset, MVT::i32));
- argt = newroot = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
- DAG.getSrcValue(NULL));
- }
-
- // Every 4 bytes of argument space consumes one of the GPRs available for
- // argument passing.
- if (GPR_remaining > 0) {
- unsigned delta = (GPR_remaining > 1 && ObjSize == 8) ? 2 : 1;
- GPR_remaining -= delta;
- GPR_idx += delta;
- }
- ArgOffset += ObjSize;
- if (newroot.Val)
- DAG.setRoot(newroot.getValue(1));
-
- ArgValues.push_back(argt);
- }
-
- // If the function takes variable number of arguments, make a frame index for
- // the start of the first vararg value... for expansion of llvm.va_start.
- if (F.isVarArg()) {
- VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
- SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
- // If this function is vararg, store any remaining integer argument regs
- // to their spots on the stack so that they may be loaded by deferencing the
- // result of va_next.
- std::vector<SDOperand> MemOps;
- for (; GPR_remaining > 0; --GPR_remaining, ++GPR_idx) {
- unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
- MF.addLiveIn(GPR[GPR_idx], VReg);
- SDOperand Val = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
- SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
- Val, FIN, DAG.getSrcValue(NULL));
- MemOps.push_back(Store);
- // Increment the address by four for the next argument to store
- SDOperand PtrOff = DAG.getConstant(4, getPointerTy());
- FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN, PtrOff);
- }
- if (!MemOps.empty()) {
- MemOps.push_back(DAG.getRoot());
- DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps));
- }
- }
-
- return ArgValues;
-}
-
-std::pair<SDOperand, SDOperand>
-PPCTargetLowering::LowerCallTo(SDOperand Chain,
- const Type *RetTy, bool isVarArg,
- unsigned CallingConv, bool isTailCall,
- SDOperand Callee, ArgListTy &Args,
- SelectionDAG &DAG) {
- // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
- // SelectExpr to use to put the arguments in the appropriate registers.
- std::vector<SDOperand> args_to_use;
-
- // Count how many bytes are to be pushed on the stack, including the linkage
- // area, and parameter passing area.
- unsigned NumBytes = 24;
-
- if (Args.empty()) {
- Chain = DAG.getCALLSEQ_START(Chain,
- DAG.getConstant(NumBytes, getPointerTy()));
- } else {
- for (unsigned i = 0, e = Args.size(); i != e; ++i) {
- switch (getValueType(Args[i].second)) {
- default: assert(0 && "Unknown value type!");
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- case MVT::f32:
- NumBytes += 4;
- break;
- case MVT::i64:
- case MVT::f64:
- NumBytes += 8;
- break;
- }
- }
-
- // Just to be safe, we'll always reserve the full 24 bytes of linkage area
- // plus 32 bytes of argument space in case any called code gets funky on us.
- // (Required by ABI to support var arg)
- if (NumBytes < 56) NumBytes = 56;
-
- // Adjust the stack pointer for the new arguments...
- // These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain,
- DAG.getConstant(NumBytes, getPointerTy()));
-
- // Set up a copy of the stack pointer for use loading and storing any
- // arguments that may not fit in the registers available for argument
- // passing.
- SDOperand StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
-
- // Figure out which arguments are going to go in registers, and which in
- // memory. Also, if this is a vararg function, floating point operations
- // must be stored to our stack, and loaded into integer regs as well, if
- // any integer regs are available for argument passing.
- unsigned ArgOffset = 24;
- unsigned GPR_remaining = 8;
- unsigned FPR_remaining = 13;
-
- std::vector<SDOperand> MemOps;
- for (unsigned i = 0, e = Args.size(); i != e; ++i) {
- // PtrOff will be used to store the current argument to the stack if a
- // register cannot be found for it.
- SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
- MVT::ValueType ArgVT = getValueType(Args[i].second);
-
- switch (ArgVT) {
- default: assert(0 && "Unexpected ValueType for argument!");
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- // Promote the integer to 32 bits. If the input type is signed use a
- // sign extend, otherwise use a zero extend.
- if (Args[i].second->isSigned())
- Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
- else
- Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
- // FALL THROUGH
- case MVT::i32:
- if (GPR_remaining > 0) {
- args_to_use.push_back(Args[i].first);
- --GPR_remaining;
- } else {
- MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
- Args[i].first, PtrOff,
- DAG.getSrcValue(NULL)));
- }
- ArgOffset += 4;
- break;
- case MVT::i64:
- // If we have one free GPR left, we can place the upper half of the i64
- // in it, and store the other half to the stack. If we have two or more
- // free GPRs, then we can pass both halves of the i64 in registers.
- if (GPR_remaining > 0) {
- SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
- Args[i].first, DAG.getConstant(1, MVT::i32));
- SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
- Args[i].first, DAG.getConstant(0, MVT::i32));
- args_to_use.push_back(Hi);
- --GPR_remaining;
- if (GPR_remaining > 0) {
- args_to_use.push_back(Lo);
- --GPR_remaining;
- } else {
- SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
- MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
- Lo, PtrOff, DAG.getSrcValue(NULL)));
- }
- } else {
- MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
- Args[i].first, PtrOff,
- DAG.getSrcValue(NULL)));
- }
- ArgOffset += 8;
- break;
- case MVT::f32:
- case MVT::f64:
- if (FPR_remaining > 0) {
- args_to_use.push_back(Args[i].first);
- --FPR_remaining;
- if (isVarArg) {
- SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Chain,
- Args[i].first, PtrOff,
- DAG.getSrcValue(NULL));
- MemOps.push_back(Store);
- // Float varargs are always shadowed in available integer registers
- if (GPR_remaining > 0) {
- SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
- DAG.getSrcValue(NULL));
- MemOps.push_back(Load.getValue(1));
- args_to_use.push_back(Load);
- --GPR_remaining;
- }
- if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
- SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
- SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
- DAG.getSrcValue(NULL));
- MemOps.push_back(Load.getValue(1));
- args_to_use.push_back(Load);
- --GPR_remaining;
- }
- } else {
- // If we have any FPRs remaining, we may also have GPRs remaining.
- // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
- // GPRs.
- if (GPR_remaining > 0) {
- args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
- --GPR_remaining;
- }
- if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
- args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
- --GPR_remaining;
- }
- }
- } else {
- MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
- Args[i].first, PtrOff,
- DAG.getSrcValue(NULL)));
- }
- ArgOffset += (ArgVT == MVT::f32) ? 4 : 8;
- break;
- }
- }
- if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps);
- }
-
- std::vector<MVT::ValueType> RetVals;
- MVT::ValueType RetTyVT = getValueType(RetTy);
- MVT::ValueType ActualRetTyVT = RetTyVT;
- if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i16)
- ActualRetTyVT = MVT::i32; // Promote result to i32.
-
- if (RetTyVT == MVT::i64) {
- RetVals.push_back(MVT::i32);
- RetVals.push_back(MVT::i32);
- } else if (RetTyVT != MVT::isVoid) {
- RetVals.push_back(ActualRetTyVT);
- }
- RetVals.push_back(MVT::Other);
-
- // If the callee is a GlobalAddress node (quite common, every direct call is)
- // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
-
- std::vector<SDOperand> Ops;
- Ops.push_back(Chain);
- Ops.push_back(Callee);
- Ops.insert(Ops.end(), args_to_use.begin(), args_to_use.end());
- SDOperand TheCall = DAG.getNode(PPCISD::CALL, RetVals, Ops);
- Chain = TheCall.getValue(TheCall.Val->getNumValues()-1);
- Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
- DAG.getConstant(NumBytes, getPointerTy()));
- SDOperand RetVal = TheCall;
-
- // If the result is a small value, add a note so that we keep track of the
- // information about whether it is sign or zero extended.
- if (RetTyVT != ActualRetTyVT) {
- RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext : ISD::AssertZext,
- MVT::i32, RetVal, DAG.getValueType(RetTyVT));
- RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
- } else if (RetTyVT == MVT::i64) {
- RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, RetVal, RetVal.getValue(1));
- }
-
- return std::make_pair(RetVal, Chain);
-}
-
MachineBasicBlock *
PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
MachineBasicBlock *BB) {
- assert((MI->getOpcode() == PPC::SELECT_CC_Int ||
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ assert((MI->getOpcode() == PPC::SELECT_CC_I4 ||
+ MI->getOpcode() == PPC::SELECT_CC_I8 ||
MI->getOpcode() == PPC::SELECT_CC_F4 ||
MI->getOpcode() == PPC::SELECT_CC_F8 ||
MI->getOpcode() == PPC::SELECT_CC_VRRC) &&
MachineBasicBlock *thisMBB = BB;
MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
- BuildMI(BB, MI->getOperand(4).getImmedValue(), 2)
- .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+ unsigned SelectPred = MI->getOperand(4).getImm();
+ BuildMI(BB, TII->get(PPC::BCC))
+ .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
MachineFunction *F = BB->getParent();
F->getBasicBlockList().insert(It, copy0MBB);
F->getBasicBlockList().insert(It, sinkMBB);
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg())
+ BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
+ case PPCISD::SHL:
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
+ if (C->getValue() == 0) // 0 << V -> 0.
+ return N->getOperand(0);
+ }
+ break;
+ case PPCISD::SRL:
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
+ if (C->getValue() == 0) // 0 >>u V -> 0.
+ return N->getOperand(0);
+ }
+ break;
+ case PPCISD::SRA:
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
+ if (C->getValue() == 0 || // 0 >>s V -> 0.
+ C->isAllOnesValue()) // -1 >>s V -> -1.
+ return N->getOperand(0);
+ }
+ break;
+
case ISD::SINT_TO_FP:
- if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
+ if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
// Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
// We allow the src/dst to be either f32/f64, but the intermediate
DCI.AddToWorklist(Val.Val);
return Val;
}
+
+ // Turn STORE (BSWAP) -> sthbrx/stwbrx.
+ if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
+ N->getOperand(1).Val->hasOneUse() &&
+ (N->getOperand(1).getValueType() == MVT::i32 ||
+ N->getOperand(1).getValueType() == MVT::i16)) {
+ SDOperand BSwapOp = N->getOperand(1).getOperand(0);
+ // Do an any-extend to 32-bits if this is a half-word input.
+ if (BSwapOp.getValueType() == MVT::i16)
+ BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp);
+
+ return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp,
+ N->getOperand(2), N->getOperand(3),
+ DAG.getValueType(N->getOperand(1).getValueType()));
+ }
+ break;
+ case ISD::BSWAP:
+ // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
+ if (ISD::isNON_EXTLoad(N->getOperand(0).Val) &&
+ N->getOperand(0).hasOneUse() &&
+ (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
+ SDOperand Load = N->getOperand(0);
+ LoadSDNode *LD = cast<LoadSDNode>(Load);
+ // Create the byte-swapping load.
+ std::vector<MVT::ValueType> VTs;
+ VTs.push_back(MVT::i32);
+ VTs.push_back(MVT::Other);
+ SDOperand SV = DAG.getSrcValue(LD->getSrcValue(), LD->getSrcValueOffset());
+ SDOperand Ops[] = {
+ LD->getChain(), // Chain
+ LD->getBasePtr(), // Ptr
+ SV, // SrcValue
+ DAG.getValueType(N->getValueType(0)) // VT
+ };
+ SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4);
+
+ // If this is an i16 load, insert the truncate.
+ SDOperand ResVal = BSLoad;
+ if (N->getValueType(0) == MVT::i16)
+ ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad);
+
+ // First, combine the bswap away. This makes the value produced by the
+ // load dead.
+ DCI.CombineTo(N, ResVal);
+
+ // Next, combine the load away; we give it a bogus result value but a real
+ // chain result. The result value is dead because the bswap is dead.
+ DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1));
+
+ // Return N so it doesn't get rechecked!
+ return SDOperand(N, 0);
+ }
+
break;
case PPCISD::VCMP: {
// If a VCMPo node already exists with exactly the same operands as this
break;
}
- // If there are non-zero uses of the flag value, use the VCMPo node!
- if (VCMPoNode && !VCMPoNode->hasNUsesOfValue(0, 1))
+ // If there is no VCMPo node, or if the flag result is unused, don't
+ // transform this.
+ if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
+ break;
+
+ // Look at the (necessarily single) use of the flag value. If it has a
+ // chain, this transformation is more complex. Note that multiple things
+ // could use the value result, which we should ignore.
+ SDNode *FlagUser = 0;
+ for (SDNode::use_iterator UI = VCMPoNode->use_begin();
+ FlagUser == 0; ++UI) {
+ assert(UI != VCMPoNode->use_end() && "Didn't find user!");
+ SDNode *User = *UI;
+ for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
+ if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) {
+ FlagUser = User;
+ break;
+ }
+ }
+ }
+
+ // If the user is a MFCR instruction, we know this is safe. Otherwise we
+ // give up for right now.
+ if (FlagUser->getOpcode() == PPCISD::MFCR)
return SDOperand(VCMPoNode, 0);
}
break;
}
+ case ISD::BR_CC: {
+ // If this is a branch on an altivec predicate comparison, lower this so
+ // that we don't have to do a MFCR: instead, branch directly on CR6. This
+ // lowering is done pre-legalize, because the legalizer lowers the predicate
+ // compare down to code that is difficult to reassemble.
+ ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
+ SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
+ int CompareOpc;
+ bool isDot;
+
+ if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
+ isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
+ getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
+ assert(isDot && "Can't compare against a vector result!");
+
+ // If this is a comparison against something other than 0/1, then we know
+ // that the condition is never/always true.
+ unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
+ if (Val != 0 && Val != 1) {
+ if (CC == ISD::SETEQ) // Cond never true, remove branch.
+ return N->getOperand(0);
+ // Always !=, turn it into an unconditional branch.
+ return DAG.getNode(ISD::BR, MVT::Other,
+ N->getOperand(0), N->getOperand(4));
+ }
+
+ bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
+
+ // Create the PPCISD altivec 'dot' comparison node.
+ std::vector<MVT::ValueType> VTs;
+ SDOperand Ops[] = {
+ LHS.getOperand(2), // LHS of compare
+ LHS.getOperand(3), // RHS of compare
+ DAG.getConstant(CompareOpc, MVT::i32)
+ };
+ VTs.push_back(LHS.getOperand(2).getValueType());
+ VTs.push_back(MVT::Flag);
+ SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);
+
+ // Unpack the result based on how the target uses it.
+ PPC::Predicate CompOpc;
+ switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
+ default: // Can't happen, don't crash on invalid number though.
+ case 0: // Branch on the value of the EQ bit of CR6.
+ CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
+ break;
+ case 1: // Branch on the inverted value of the EQ bit of CR6.
+ CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
+ break;
+ case 2: // Branch on the value of the LT bit of CR6.
+ CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
+ break;
+ case 3: // Branch on the inverted value of the LT bit of CR6.
+ CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
+ break;
+ }
+
+ return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
+ DAG.getConstant(CompOpc, MVT::i32),
+ DAG.getRegister(PPC::CR6, MVT::i32),
+ N->getOperand(4), CompNode.getValue(1));
+ }
+ break;
+ }
}
return SDOperand();
KnownOne = 0;
switch (Op.getOpcode()) {
default: break;
+ case PPCISD::LBRX: {
+ // lhbrx is known to have the top bits cleared out.
+ if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
+ KnownZero = 0xFFFF0000;
+ break;
+ }
case ISD::INTRINSIC_WO_CHAIN: {
switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
default: break;
return TargetLowering::getConstraintType(ConstraintLetter);
}
-
-std::vector<unsigned> PPCTargetLowering::
-getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const {
+std::pair<unsigned, const TargetRegisterClass*>
+PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
+ MVT::ValueType VT) const {
if (Constraint.size() == 1) {
- switch (Constraint[0]) { // GCC RS6000 Constraint Letters
- default: break; // Unknown constriant letter
- case 'b':
- return make_vector<unsigned>(/*no R0*/ PPC::R1 , PPC::R2 , PPC::R3 ,
- PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
- PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
- PPC::R12, PPC::R13, PPC::R14, PPC::R15,
- PPC::R16, PPC::R17, PPC::R18, PPC::R19,
- PPC::R20, PPC::R21, PPC::R22, PPC::R23,
- PPC::R24, PPC::R25, PPC::R26, PPC::R27,
- PPC::R28, PPC::R29, PPC::R30, PPC::R31,
- 0);
- case 'r':
- return make_vector<unsigned>(PPC::R0 , PPC::R1 , PPC::R2 , PPC::R3 ,
- PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
- PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
- PPC::R12, PPC::R13, PPC::R14, PPC::R15,
- PPC::R16, PPC::R17, PPC::R18, PPC::R19,
- PPC::R20, PPC::R21, PPC::R22, PPC::R23,
- PPC::R24, PPC::R25, PPC::R26, PPC::R27,
- PPC::R28, PPC::R29, PPC::R30, PPC::R31,
- 0);
- case 'f':
- return make_vector<unsigned>(PPC::F0 , PPC::F1 , PPC::F2 , PPC::F3 ,
- PPC::F4 , PPC::F5 , PPC::F6 , PPC::F7 ,
- PPC::F8 , PPC::F9 , PPC::F10, PPC::F11,
- PPC::F12, PPC::F13, PPC::F14, PPC::F15,
- PPC::F16, PPC::F17, PPC::F18, PPC::F19,
- PPC::F20, PPC::F21, PPC::F22, PPC::F23,
- PPC::F24, PPC::F25, PPC::F26, PPC::F27,
- PPC::F28, PPC::F29, PPC::F30, PPC::F31,
- 0);
+ // GCC RS6000 Constraint Letters
+ switch (Constraint[0]) {
+ case 'b': // R1-R31
+ case 'r': // R0-R31
+ if (VT == MVT::i64 && PPCSubTarget.isPPC64())
+ return std::make_pair(0U, PPC::G8RCRegisterClass);
+ return std::make_pair(0U, PPC::GPRCRegisterClass);
+ case 'f':
+ if (VT == MVT::f32)
+ return std::make_pair(0U, PPC::F4RCRegisterClass);
+ else if (VT == MVT::f64)
+ return std::make_pair(0U, PPC::F8RCRegisterClass);
+ break;
case 'v':
- return make_vector<unsigned>(PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 ,
- PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
- PPC::V8 , PPC::V9 , PPC::V10, PPC::V11,
- PPC::V12, PPC::V13, PPC::V14, PPC::V15,
- PPC::V16, PPC::V17, PPC::V18, PPC::V19,
- PPC::V20, PPC::V21, PPC::V22, PPC::V23,
- PPC::V24, PPC::V25, PPC::V26, PPC::V27,
- PPC::V28, PPC::V29, PPC::V30, PPC::V31,
- 0);
- case 'y':
- return make_vector<unsigned>(PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
- PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7,
- 0);
+ return std::make_pair(0U, PPC::VRRCRegisterClass);
+ case 'y': // crrc
+ return std::make_pair(0U, PPC::CRRCRegisterClass);
}
}
- return std::vector<unsigned>();
+ return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
+
// isOperandValidForConstraint
-bool PPCTargetLowering::
-isOperandValidForConstraint(SDOperand Op, char Letter) {
+SDOperand PPCTargetLowering::
+isOperandValidForConstraint(SDOperand Op, char Letter, SelectionDAG &DAG) {
switch (Letter) {
default: break;
case 'I':
case 'N':
case 'O':
case 'P': {
- if (!isa<ConstantSDNode>(Op)) return false; // Must be an immediate.
+ if (!isa<ConstantSDNode>(Op)) return SDOperand(0,0);// Must be an immediate.
unsigned Value = cast<ConstantSDNode>(Op)->getValue();
switch (Letter) {
default: assert(0 && "Unknown constraint letter!");
case 'I': // "I" is a signed 16-bit constant.
- return (short)Value == (int)Value;
+ if ((short)Value == (int)Value) return Op;
+ break;
case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
- return (short)Value == 0;
+ if ((short)Value == 0) return Op;
+ break;
case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
- return (Value >> 16) == 0;
+ if ((Value >> 16) == 0) return Op;
+ break;
case 'M': // "M" is a constant that is greater than 31.
- return Value > 31;
+ if (Value > 31) return Op;
+ break;
case 'N': // "N" is a positive constant that is an exact power of two.
- return (int)Value > 0 && isPowerOf2_32(Value);
+ if ((int)Value > 0 && isPowerOf2_32(Value)) return Op;
+ break;
case 'O': // "O" is the constant zero.
- return Value == 0;
+ if (Value == 0) return Op;
+ break;
case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
- return (short)-Value == (int)-Value;
+ if ((short)-Value == (int)-Value) return Op;
+ break;
}
break;
}
}
// Handle standard constraint letters.
- return TargetLowering::isOperandValidForConstraint(Op, Letter);
+ return TargetLowering::isOperandValidForConstraint(Op, Letter, DAG);
}
/// isLegalAddressImmediate - Return true if the integer value can be used
// PPC allows a sign-extended 16-bit immediate field.
return (V > -(1 << 16) && V < (1 << 16)-1);
}
+
+bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
+ return TargetLowering::isLegalAddressImmediate(GV);
+}