#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
-static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc");
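+// NB: this stays a hidden llc option; assuming the flag name is kept, it can
+// be exercised with e.g. "llc -enable-ppc-preinc" on a bitcode input.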
+static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc",
+  cl::desc("enable preincrement load/store generation on PPC (experimental)"),
+  cl::Hidden);
PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
: TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
+ // Shortening conversions involving ppcf128 get expanded (2 regs -> 1 reg)
+ setConvertAction(MVT::ppcf128, MVT::f64, Expand);
+ setConvertAction(MVT::ppcf128, MVT::f32, Expand);
+ // This is used in the ppcf128->int sequence. Note it has different semantics
+ // from FP_ROUND: that rounds to nearest, this rounds to zero.
+ setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);
+
// PowerPC has no intrinsics for these particular operations
setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
setOperationAction(ISD::MEMSET, MVT::Other, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
setOperationAction(ISD::SREM, MVT::i64, Expand);
setOperationAction(ISD::UREM, MVT::i64, Expand);
-
- // We don't support sin/cos/sqrt/fmod
+
+ // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
+ setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+
+ // We don't support sin/cos/sqrt/fmod/pow
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FREM , MVT::f64, Expand);
+ setOperationAction(ISD::FPOW , MVT::f64, Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FREM , MVT::f32, Expand);
+ setOperationAction(ISD::FPOW , MVT::f32, Expand);
// If we're enabling GP optimizations, use hardware square root
if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
// We want to legalize GlobalAddress and ConstantPool nodes into the
// appropriate instructions to materialize the address.
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
setOperationAction(ISD::JumpTable, MVT::i32, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
setOperationAction(ISD::JumpTable, MVT::i64, Custom);
// RET must be custom lowered, to meet ABI requirements
setOperationAction(ISD::RET , MVT::Other, Custom);
-
+
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
+ // VAARG is custom lowered with ELF 32 ABI
+ if (TM.getSubtarget<PPCSubtarget>().isELF32_ABI())
+ setOperationAction(ISD::VAARG, MVT::Other, Custom);
+ else
+ setOperationAction(ISD::VAARG, MVT::Other, Expand);
+
// Use the default implementation.
- setOperationAction(ISD::VAARG , MVT::Other, Expand);
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
}
if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
- // 64 bit PowerPC implementations can support i64 types directly
+ // 64-bit PowerPC implementations can support i64 types directly
addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
// BUILD_PAIR can't be handled natively, and should be expanded to shl/or
setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
} else {
- // 32 bit PowerPC wants to expand i64 shifts itself.
+ // 32-bit PowerPC wants to expand i64 shifts itself.
setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
+ VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
// add/sub are legal for all supported vector VT's.
setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);
setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
-
+ setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
}
// We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
setTargetDAGCombine(ISD::BR_CC);
setTargetDAGCombine(ISD::BSWAP);
+ // Darwin long double math library functions have $LDBL128 appended.
+ if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
+ setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
+ setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
+ setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
+ }
+
computeRegisterProperties();
}
/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
- return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
+ return CFP->getValueAPF().isZero();
else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) {
// Maybe this has already been legalized into the constant pool?
if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
- return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
+ return CFP->getValueAPF().isZero();
}
return false;
}
return true;
}
+/// isAllNegativeZeroVector - Returns true if all elements of build_vector
+/// are -0.0.
+bool PPC::isAllNegativeZeroVector(SDNode *N) {
+ assert(N->getOpcode() == ISD::BUILD_VECTOR);
+ if (PPC::isSplatShuffleMask(N, N->getNumOperands()))
+    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
+ return CFP->getValueAPF().isNegZero();
+ return false;
+}
+
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
} else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
- Value = FloatToBits(CN->getValue());
+ Value = FloatToBits(CN->getValueAPF().convertToFloat());
ValSizeInBytes = 4;
}
// disjoint.
uint64_t LHSKnownZero, LHSKnownOne;
uint64_t RHSKnownZero, RHSKnownOne;
- ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
+ DAG.ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
if (LHSKnownZero) {
- ComputeMaskedBits(N.getOperand(1), ~0U, RHSKnownZero, RHSKnownOne);
+ DAG.ComputeMaskedBits(N.getOperand(1), ~0U, RHSKnownZero, RHSKnownOne);
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
if ((LHSKnownZero | RHSKnownZero) == ~0U) {
// (for better address arithmetic) if the LHS and RHS of the OR are
// provably disjoint.
uint64_t LHSKnownZero, LHSKnownOne;
- ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
+ DAG.ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
if ((LHSKnownZero|~(unsigned)imm) == ~0U) {
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
// (for better address arithmetic) if the LHS and RHS of the OR are
// provably disjoint.
uint64_t LHSKnownZero, LHSKnownOne;
- ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
+ DAG.ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
if ((LHSKnownZero|~(unsigned)imm) == ~0U) {
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
return Lo;
}
+static SDOperand LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
+ assert(0 && "TLS not implemented for PPC.");
+}
+
static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
MVT::ValueType PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
return SDOperand();
}
+static SDOperand LowerVAARG(SDOperand Op, SelectionDAG &DAG,
+ int VarArgsFrameIndex,
+ int VarArgsStackOffset,
+ unsigned VarArgsNumGPR,
+ unsigned VarArgsNumFPR,
+ const PPCSubtarget &Subtarget) {
+
+ assert(0 && "VAARG in ELF32 ABI not implemented yet!");
+}
+
static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
- unsigned VarArgsFrameIndex) {
- // vastart just stores the address of the VarArgsFrameIndex slot into the
- // memory location argument.
+ int VarArgsFrameIndex,
+ int VarArgsStackOffset,
+ unsigned VarArgsNumGPR,
+ unsigned VarArgsNumFPR,
+ const PPCSubtarget &Subtarget) {
+
+ if (Subtarget.isMachoABI()) {
+ // vastart just stores the address of the VarArgsFrameIndex slot into the
+ // memory location argument.
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
+ SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
+ return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
+ SV->getOffset());
+ }
+
+ // For ELF 32 ABI we follow the layout of the va_list struct.
+ // We suppose the given va_list is already allocated.
+ //
+ // typedef struct {
+ // char gpr; /* index into the array of 8 GPRs
+ // * stored in the register save area
+ // * gpr=0 corresponds to r3,
+ // * gpr=1 to r4, etc.
+ // */
+ // char fpr; /* index into the array of 8 FPRs
+ // * stored in the register save area
+ // * fpr=0 corresponds to f1,
+ // * fpr=1 to f2, etc.
+ // */
+ // char *overflow_arg_area;
+ // /* location on stack that holds
+ // * the next overflow argument
+ // */
+ // char *reg_save_area;
+ // /* where r3:r10 and f1:f8 (if saved)
+ // * are stored
+ // */
+ // } va_list[1];
+
+  SDOperand ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8);
+  SDOperand ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8);
+
MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+
+ SDOperand StackOffset = DAG.getFrameIndex(VarArgsStackOffset, PtrVT);
SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
+
+ SDOperand ConstFrameOffset = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8,
+ PtrVT);
+ SDOperand ConstStackOffset = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8 - 1,
+ PtrVT);
+ SDOperand ConstFPROffset = DAG.getConstant(1, PtrVT);
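+  // Walking the va_list with these constants: ConstFPROffset (1 byte) steps
+  // from the gpr byte to the fpr byte; ConstStackOffset (pointer size - 1)
+  // steps from fpr to the pointer-aligned overflow_arg_area field; and
+  // ConstFrameOffset (pointer size) steps from there to reg_save_area.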
+
SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
- return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
+
+  // Store first byte: number of integer registers used (the gpr field).
+ SDOperand firstStore = DAG.getStore(Op.getOperand(0), ArgGPR,
+ Op.getOperand(1), SV->getValue(),
+ SV->getOffset());
+ SDOperand nextPtr = DAG.getNode(ISD::ADD, PtrVT, Op.getOperand(1),
+ ConstFPROffset);
+
+  // Store second byte: number of FP registers used (the fpr field).
+ SDOperand secondStore = DAG.getStore(firstStore, ArgFPR, nextPtr,
+ SV->getValue(), SV->getOffset());
+ nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstStackOffset);
+
+  // Store second word: pointer to the overflow (on-stack) argument area.
+ SDOperand thirdStore = DAG.getStore(secondStore, StackOffset, nextPtr,
+ SV->getValue(), SV->getOffset());
+ nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstFrameOffset);
+
+  // Store third word: pointer to the register save area.
+ return DAG.getStore(thirdStore, FR, nextPtr, SV->getValue(),
SV->getOffset());
}
#include "PPCGenCallingConv.inc"
static const unsigned FPR[] = {
PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
- PPC::F8, PPC::F9, PPC::F10
+ PPC::F8
};
return FPR;
}
static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG,
int &VarArgsFrameIndex,
+ int &VarArgsStackOffset,
+ unsigned &VarArgsNumGPR,
+ unsigned &VarArgsNumFPR,
const PPCSubtarget &Subtarget) {
// TODO: add description of PPC stack frame format, or at least some docs.
//
MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
bool isMachoABI = Subtarget.isMachoABI();
- bool isELF_ABI = Subtarget.isELF_ABI();
+ bool isELF32_ABI = Subtarget.isELF32_ABI();
unsigned PtrByteSize = isPPC64 ? 8 : 4;
unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
- const unsigned Num_GPR_Regs = sizeof(GPR_32)/sizeof(GPR_32[0]);
- const unsigned Num_FPR_Regs = isMachoABI ? 13 : 10;
- const unsigned Num_VR_Regs = sizeof( VR)/sizeof( VR[0]);
+ const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
+ const unsigned Num_FPR_Regs = isMachoABI ? 13 : 8;
+ const unsigned Num_VR_Regs = array_lengthof( VR);
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
// entry to a function on PPC, the arguments start after the linkage area,
// although the first ones are often in registers.
//
- // In the ELF ABI, GPRs and stack are double word align: an argument
+  // In the ELF 32 ABI, GPRs and the stack are doubleword aligned: an argument
// represented with two words (long long or double) must be copied to an
// even GPR_idx value or to an even ArgOffset value.
default: assert(0 && "Unhandled argument type!");
case MVT::i32:
// Double word align in ELF
- if (Expand && isELF_ABI && !isPPC64) GPR_idx += (GPR_idx % 2);
+ if (Expand && isELF32_ABI) GPR_idx += (GPR_idx % 2);
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
MF.addLiveIn(GPR[GPR_idx], VReg);
ArgSize = PtrByteSize;
}
// Stack align in ELF
- if (needsLoad && Expand && isELF_ABI && !isPPC64)
+ if (needsLoad && Expand && isELF32_ABI)
ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
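+    // (e.g. an ArgOffset of 20 is bumped to 24 so an 8-byte argument stays
+    // 8-byte aligned)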
// All int arguments reserve stack space in Macho ABI.
if (isMachoABI || needsLoad) ArgOffset += PtrByteSize;
}
// Stack align in ELF
- if (needsLoad && Expand && isELF_ABI && !isPPC64)
+ if (needsLoad && Expand && isELF32_ABI)
ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
// All FP arguments reserve stack space in Macho ABI.
if (isMachoABI || needsLoad) ArgOffset += isPPC64 ? 8 : ObjSize;
// the start of the first vararg value... for expansion of llvm.va_start.
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
if (isVarArg) {
+
+ int depth;
+ if (isELF32_ABI) {
+ VarArgsNumGPR = GPR_idx;
+ VarArgsNumFPR = FPR_idx;
+
+ // Make room for Num_GPR_Regs, Num_FPR_Regs and for a possible frame
+ // pointer.
+ depth = -(Num_GPR_Regs * MVT::getSizeInBits(PtrVT)/8 +
+ Num_FPR_Regs * MVT::getSizeInBits(MVT::f64)/8 +
+ MVT::getSizeInBits(PtrVT)/8);
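+      // A negative fixed-object offset places this register save area in the
+      // callee's own frame, below the incoming-argument area.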
+
+ VarArgsStackOffset = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
+ ArgOffset);
+
+ }
+ else
+ depth = ArgOffset;
+
VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
- ArgOffset);
+ depth);
SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
+
+ SmallVector<SDOperand, 8> MemOps;
+
+ // In ELF 32 ABI, the fixed integer arguments of a variadic function are
+ // stored to the VarArgsFrameIndex on the stack.
+ if (isELF32_ABI) {
+ for (GPR_idx = 0; GPR_idx != VarArgsNumGPR; ++GPR_idx) {
+ SDOperand Val = DAG.getRegister(GPR[GPR_idx], PtrVT);
+ SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0);
+ MemOps.push_back(Store);
+ // Increment the address by four for the next argument to store
+ SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
+ FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
+ }
+ }
+
// If this function is vararg, store any remaining integer argument regs
// to their spots on the stack so that they may be loaded by dereferencing the
// result of va_next.
- SmallVector<SDOperand, 8> MemOps;
for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
unsigned VReg;
if (isPPC64)
SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
}
+
+ // In ELF 32 ABI, the double arguments are stored to the VarArgsFrameIndex
+ // on the stack.
+ if (isELF32_ABI) {
+ for (FPR_idx = 0; FPR_idx != VarArgsNumFPR; ++FPR_idx) {
+ SDOperand Val = DAG.getRegister(FPR[FPR_idx], MVT::f64);
+ SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0);
+ MemOps.push_back(Store);
+ // Increment the address by eight for the next argument to store
+ SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8,
+ PtrVT);
+ FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
+ }
+
+ for (; FPR_idx != Num_FPR_Regs; ++FPR_idx) {
+ unsigned VReg;
+ VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
+
+ MF.addLiveIn(FPR[FPR_idx], VReg);
+ SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::f64);
+ SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
+ MemOps.push_back(Store);
+ // Increment the address by eight for the next argument to store
+ SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8,
+ PtrVT);
+ FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
+ }
+ }
+
if (!MemOps.empty())
Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size());
}
unsigned NumOps = (Op.getNumOperands() - 5) / 2;
bool isMachoABI = Subtarget.isMachoABI();
- bool isELF_ABI = Subtarget.isELF_ABI();
+ bool isELF32_ABI = Subtarget.isELF32_ABI();
MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
- const unsigned NumGPRs = sizeof(GPR_32)/sizeof(GPR_32[0]);
- const unsigned NumFPRs = isMachoABI ? 13 : 10;
- const unsigned NumVRs = sizeof( VR)/sizeof( VR[0]);
+ const unsigned NumGPRs = array_lengthof(GPR_32);
+ const unsigned NumFPRs = isMachoABI ? 13 : 8;
+ const unsigned NumVRs = array_lengthof( VR);
const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
// register cannot be found for it.
SDOperand PtrOff;
- // Stack align in ELF
- if (isELF_ABI && Expand && !isPPC64)
+ // Stack align in ELF 32
+ if (isELF32_ABI && Expand)
PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize,
StackPtr.getValueType());
else
case MVT::i32:
case MVT::i64:
// Double word align in ELF
- if (isELF_ABI && Expand && !isPPC64) GPR_idx += (GPR_idx % 2);
+ if (isELF32_ABI && Expand) GPR_idx += (GPR_idx % 2);
if (GPR_idx != NumGPRs) {
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
} else {
}
if (inMem || isMachoABI) {
// Stack align in ELF
- if (isELF_ABI && Expand && !isPPC64)
+ if (isELF32_ABI && Expand)
ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
ArgOffset += PtrByteSize;
}
if (inMem || isMachoABI) {
// Stack align in ELF
- if (isELF_ABI && Expand && !isPPC64)
+ if (isELF32_ABI && Expand)
ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
if (isPPC64)
ArgOffset += 8;
InFlag = Chain.getValue(1);
}
- // With the ELF ABI, set CR6 to true if this is a vararg call.
- if (isVarArg && isELF_ABI) {
+ // With the ELF 32 ABI, set CR6 to true if this is a vararg call.
+ if (isVarArg && isELF32_ABI) {
SDOperand SetCR(DAG.getTargetNode(PPC::SETCR, MVT::i32), 0);
Chain = DAG.getCopyToReg(Chain, PPC::CR6, SetCR, InFlag);
InFlag = Chain.getValue(1);
case MVT::Other: break;
case MVT::i32:
if (Op.Val->getValueType(1) == MVT::i32) {
- Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, InFlag).getValue(1);
+ Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0);
- Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32,
+ Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32,
Chain.getValue(2)).getValue(1);
ResultVals[1] = Chain.getValue(0);
NumResults = 2;
NumResults = 1;
NodeTys.push_back(MVT::i64);
break;
- case MVT::f32:
case MVT::f64:
+ if (Op.Val->getValueType(1) == MVT::f64) {
+ Chain = DAG.getCopyFromReg(Chain, PPC::F1, MVT::f64, InFlag).getValue(1);
+ ResultVals[0] = Chain.getValue(0);
+ Chain = DAG.getCopyFromReg(Chain, PPC::F2, MVT::f64,
+ Chain.getValue(2)).getValue(1);
+ ResultVals[1] = Chain.getValue(0);
+ NumResults = 2;
+ NodeTys.push_back(MVT::f64);
+ NodeTys.push_back(MVT::f64);
+ break;
+ }
+ // else fall through
+ case MVT::f32:
Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0),
InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0);
static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG, TargetMachine &TM) {
SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
- CCState CCInfo(CC, TM, RVLocs);
+ bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
+ CCState CCInfo(CC, isVarArg, TM, RVLocs);
CCInfo.AnalyzeReturn(Op.Val, RetCC_PPC);
// If this is the first return lowered for this function, add the regs to the
DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
}
- SDOperand Cmp;
+ SDOperand Cmp;
switch (CC) {
default: break; // SETUO etc aren't handled by fsel.
case ISD::SETULT:
}
// Convert the FP value to an int value through memory.
- SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
+ SDOperand FIPtr = DAG.CreateStackTemporary(MVT::f64);
+
+ // Emit a store to the stack slot.
+ SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Tmp, FIPtr, NULL, 0);
+
+ // Result is a load from the stack slot. If loading 4 bytes, make sure to
+ // add in a bias.
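+  // (PPC is big-endian, so the low word of the f64 bit pattern sits at byte
+  // offset 4 of the 8-byte stack slot.)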
if (Op.getValueType() == MVT::i32)
- Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
- return Bits;
+ FIPtr = DAG.getNode(ISD::ADD, FIPtr.getValueType(), FIPtr,
+ DAG.getConstant(4, FIPtr.getValueType()));
+ return DAG.getLoad(Op.getValueType(), Chain, FIPtr, NULL, 0);
+}
+
+static SDOperand LowerFP_ROUND_INREG(SDOperand Op, SelectionDAG &DAG) {
+ assert(Op.getValueType() == MVT::ppcf128);
+ SDNode *Node = Op.Val;
+ assert(Node->getOperand(0).getValueType() == MVT::ppcf128);
+ assert(Node->getOperand(0).Val->getOpcode() == ISD::BUILD_PAIR);
+ SDOperand Lo = Node->getOperand(0).Val->getOperand(0);
+ SDOperand Hi = Node->getOperand(0).Val->getOperand(1);
+
+ // This sequence changes FPSCR to do round-to-zero, adds the two halves
+ // of the long double, and puts FPSCR back the way it was. We do not
+ // actually model FPSCR.
+ std::vector<MVT::ValueType> NodeTys;
+ SDOperand Ops[4], Result, MFFSreg, InFlag, FPreg;
+
+ NodeTys.push_back(MVT::f64); // Return register
+ NodeTys.push_back(MVT::Flag); // Returns a flag for later insns
+ Result = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0);
+ MFFSreg = Result.getValue(0);
+ InFlag = Result.getValue(1);
+
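+  // FPSCR bits 30-31 form the rounding-mode (RN) field; setting bit 31 and
+  // clearing bit 30 below selects RN=01, round toward zero.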
+ NodeTys.clear();
+ NodeTys.push_back(MVT::Flag); // Returns a flag
+ Ops[0] = DAG.getConstant(31, MVT::i32);
+ Ops[1] = InFlag;
+ Result = DAG.getNode(PPCISD::MTFSB1, NodeTys, Ops, 2);
+ InFlag = Result.getValue(0);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::Flag); // Returns a flag
+ Ops[0] = DAG.getConstant(30, MVT::i32);
+ Ops[1] = InFlag;
+ Result = DAG.getNode(PPCISD::MTFSB0, NodeTys, Ops, 2);
+ InFlag = Result.getValue(0);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::f64); // result of add
+ NodeTys.push_back(MVT::Flag); // Returns a flag
+ Ops[0] = Lo;
+ Ops[1] = Hi;
+ Ops[2] = InFlag;
+ Result = DAG.getNode(PPCISD::FADDRTZ, NodeTys, Ops, 3);
+ FPreg = Result.getValue(0);
+ InFlag = Result.getValue(1);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::f64);
+ Ops[0] = DAG.getConstant(1, MVT::i32);
+ Ops[1] = MFFSreg;
+ Ops[2] = FPreg;
+ Ops[3] = InFlag;
+ Result = DAG.getNode(PPCISD::MTFSF, NodeTys, Ops, 4);
+ FPreg = Result.getValue(0);
+
+ // We know the low half is about to be thrown away, so just use something
+ // convenient.
+ return DAG.getNode(ISD::BUILD_PAIR, Lo.getValueType(), FPreg, FPreg);
}
static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
} else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
assert(CN->getValueType(0) == MVT::f32 &&
"Only one legal FP vector type!");
- EltBits = FloatToBits(CN->getValue());
+ EltBits = FloatToBits(CN->getValueAPF().convertToFloat());
} else {
// Nonconstant element.
return true;
MVT::ValueType CanonicalVT = VTys[SplatSize-1];
// Build a canonical splat for this value.
- SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT));
+ SDOperand Elt = DAG.getConstant(Val, MVT::getVectorElementType(CanonicalVT));
SmallVector<SDOperand, 8> Ops;
Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt);
SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT,
// Check to see if this is a wide variety of vsplti*, binop self cases.
unsigned SplatBitSize = SplatSize*8;
- static const char SplatCsts[] = {
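+  // 'signed char', not plain 'char': char is unsigned on some hosts (PPC
+  // among them), which would mangle the negative splat constants below.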
+ static const signed char SplatCsts[] = {
-1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
-8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
};
- for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){
+ for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
// Indirect through the SplatCsts array so that we favor 'vsplti -1' for
// cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1'
int i = SplatCsts[idx];
if (SextVal >= 0 && SextVal <= 31) {
SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG);
SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
- LHS = DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS);
+ LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS);
return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
}
// Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
if (SextVal >= -31 && SextVal <= 0) {
SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG);
SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
- LHS = DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS);
+ LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS);
return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
}
}
// The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
// that it is in input element units, not in bytes. Convert now.
- MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType());
+ MVT::ValueType EltVT = MVT::getVectorElementType(V1.getValueType());
unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;
SmallVector<SDOperand, 16> ResultMask;
default: assert(0 && "Wasn't expecting to be able to lower this!");
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
+ case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::SETCC: return LowerSETCC(Op, DAG);
- case ISD::VASTART: return LowerVASTART(Op, DAG, VarArgsFrameIndex);
+ case ISD::VASTART:
+ return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
+ VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
+
+ case ISD::VAARG:
+ return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
+ VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
+
case ISD::FORMAL_ARGUMENTS:
- return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex, PPCSubTarget);
+ return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex,
+ VarArgsStackOffset, VarArgsNumGPR,
+ VarArgsNumFPR, PPCSubTarget);
+
case ISD::CALL: return LowerCALL(Op, DAG, PPCSubTarget);
case ISD::RET: return LowerRET(Op, DAG, getTargetMachine());
case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
+ case ISD::FP_ROUND_INREG: return LowerFP_ROUND_INREG(Op, DAG);
// Lower 64-bit shifts.
case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
uint64_t Mask,
uint64_t &KnownZero,
uint64_t &KnownOne,
+ const SelectionDAG &DAG,
unsigned Depth) const {
KnownZero = 0;
KnownOne = 0;
}
-// isOperandValidForConstraint
-SDOperand PPCTargetLowering::
-isOperandValidForConstraint(SDOperand Op, char Letter, SelectionDAG &DAG) {
+/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+/// vector. If it is invalid, don't add anything to Ops.
+void PPCTargetLowering::LowerAsmOperandForConstraint(SDOperand Op, char Letter,
+ std::vector<SDOperand>&Ops,
+ SelectionDAG &DAG) {
+ SDOperand Result(0,0);
switch (Letter) {
default: break;
case 'I':
case 'N':
case 'O':
case 'P': {
- if (!isa<ConstantSDNode>(Op)) return SDOperand(0,0);// Must be an immediate.
- unsigned Value = cast<ConstantSDNode>(Op)->getValue();
+ ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
+ if (!CST) return; // Must be an immediate to match.
+ unsigned Value = CST->getValue();
switch (Letter) {
default: assert(0 && "Unknown constraint letter!");
case 'I': // "I" is a signed 16-bit constant.
- if ((short)Value == (int)Value) return Op;
+ if ((short)Value == (int)Value)
+ Result = DAG.getTargetConstant(Value, Op.getValueType());
break;
case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
- if ((short)Value == 0) return Op;
+ if ((short)Value == 0)
+ Result = DAG.getTargetConstant(Value, Op.getValueType());
break;
case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
- if ((Value >> 16) == 0) return Op;
+ if ((Value >> 16) == 0)
+ Result = DAG.getTargetConstant(Value, Op.getValueType());
break;
case 'M': // "M" is a constant that is greater than 31.
- if (Value > 31) return Op;
+ if (Value > 31)
+ Result = DAG.getTargetConstant(Value, Op.getValueType());
break;
case 'N': // "N" is a positive constant that is an exact power of two.
- if ((int)Value > 0 && isPowerOf2_32(Value)) return Op;
+ if ((int)Value > 0 && isPowerOf2_32(Value))
+ Result = DAG.getTargetConstant(Value, Op.getValueType());
break;
case 'O': // "O" is the constant zero.
- if (Value == 0) return Op;
+ if (Value == 0)
+ Result = DAG.getTargetConstant(Value, Op.getValueType());
break;
case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
- if ((short)-Value == (int)-Value) return Op;
+ if ((short)-Value == (int)-Value)
+ Result = DAG.getTargetConstant(Value, Op.getValueType());
break;
}
break;
}
}
+ if (Result.Val) {
+ Ops.push_back(Result);
+ return;
+ }
+
// Handle standard constraint letters.
- return TargetLowering::isOperandValidForConstraint(Op, Letter, DAG);
+ TargetLowering::LowerAsmOperandForConstraint(Op, Letter, Ops, DAG);
+}
+
+// isLegalAddressingMode - Return true if the addressing mode represented
+// by AM is legal for this target, for a load/store of the specified type.
+bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ const Type *Ty) const {
+ // FIXME: PPC does not allow r+i addressing modes for vectors!
+
+ // PPC allows a sign-extended 16-bit immediate field.
+ if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
+ return false;
+
+ // No global is ever allowed as a base.
+ if (AM.BaseGV)
+ return false;
+
+  // PPC only supports r+r addressing.
+ switch (AM.Scale) {
+ case 0: // "r+i" or just "i", depending on HasBaseReg.
+ break;
+ case 1:
+ if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
+ return false;
+ // Otherwise we have r+r or r+i.
+ break;
+ case 2:
+ if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
+ return false;
+ // Allow 2*r as r+r.
+ break;
+ default:
+ // No other scales are supported.
+ return false;
+ }
+
+ return true;
}
/// isLegalAddressImmediate - Return true if the integer value can be used
}
bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
- return TargetLowering::isLegalAddressImmediate(GV);
+ return false;
}
SDOperand PPCTargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG)
if (isPPC64)
return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::X31 : PPC::X1,
- MVT::i32);
+ MVT::i64);
else
return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::R31 : PPC::R1,
MVT::i32);