//
// The LLVM Compiler Infrastructure
//
-// This file was developed by Chris Lattner and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
+ // Shortening conversions involving ppcf128 get expanded (2 regs -> 1 reg)
+ setConvertAction(MVT::ppcf128, MVT::f64, Expand);
+ setConvertAction(MVT::ppcf128, MVT::f32, Expand);
+ // This is used in the ppcf128->int sequence. Note it has different semantics
+ // from FP_ROUND: that rounds to nearest, this rounds to zero.
+ setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);
+
// PowerPC has no intrinsics for these particular operations
setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
setOperationAction(ISD::MEMSET, MVT::Other, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
setOperationAction(ISD::SREM, MVT::i64, Expand);
setOperationAction(ISD::UREM, MVT::i64, Expand);
-
- // We don't support sin/cos/sqrt/fmod
+
+ // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
+ setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+
+ // We don't support sin/cos/sqrt/fmod/pow
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FREM , MVT::f64, Expand);
+ setOperationAction(ISD::FPOW , MVT::f64, Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FREM , MVT::f32, Expand);
+ setOperationAction(ISD::FPOW , MVT::f32, Expand);
// If we're enabling GP optimizations, use hardware square root; otherwise expand it
if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
// Support label based line numbers.
setOperationAction(ISD::LOCATION, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
- if (!TM.getSubtarget<PPCSubtarget>().isDarwin()) {
- setOperationAction(ISD::LABEL, MVT::Other, Expand);
- } else {
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
- }
+
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
+
// We want to legalize GlobalAddress and ConstantPool nodes into the
// appropriate instructions to materialize the address.
// RET must be custom lowered, to meet ABI requirements
setOperationAction(ISD::RET , MVT::Other, Custom);
- setOperationAction(ISD::ADJUST_TRAMP, MVT::i32, Expand);
- setOperationAction(ISD::ADJUST_TRAMP, MVT::i64, Expand);
-
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
}
if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
- // 64 bit PowerPC implementations can support i64 types directly
+ // 64-bit PowerPC implementations can support i64 types directly
addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
// BUILD_PAIR can't be handled natively, and should be expanded to shl/or
setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
} else {
- // 32 bit PowerPC wants to expand i64 shifts itself.
+ // 32-bit PowerPC wants to expand i64 shifts itself.
setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
-
+ setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
}
// We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
setTargetDAGCombine(ISD::BR_CC);
setTargetDAGCombine(ISD::BSWAP);
+ // Darwin long double math library functions have $LDBL128 appended.
+ if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
+ setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
+ setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
+ setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
+ setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
+ setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
+ }
+
computeRegisterProperties();
}
//
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- SSARegMap *RegMap = MF.getSSARegMap();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
SmallVector<SDOperand, 8> ArgValues;
SDOperand Root = Op.getOperand(0);
// Double word align in ELF
if (Expand && isELF32_ABI) GPR_idx += (GPR_idx % 2);
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
- MF.addLiveIn(GPR[GPR_idx], VReg);
+ unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
+ RegInfo.addLiveIn(GPR[GPR_idx], VReg);
ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32);
++GPR_idx;
} else {
case MVT::i64: // PPC64
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
- MF.addLiveIn(GPR[GPR_idx], VReg);
+ unsigned VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
+ RegInfo.addLiveIn(GPR[GPR_idx], VReg);
ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64);
++GPR_idx;
} else {
if (FPR_idx != Num_FPR_Regs) {
unsigned VReg;
if (ObjectVT == MVT::f32)
- VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
+ VReg = RegInfo.createVirtualRegister(&PPC::F4RCRegClass);
else
- VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
- MF.addLiveIn(FPR[FPR_idx], VReg);
+ VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
+ RegInfo.addLiveIn(FPR[FPR_idx], VReg);
ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
++FPR_idx;
} else {
case MVT::v16i8:
// Note that vector arguments in registers don't reserve stack space.
if (VR_idx != Num_VR_Regs) {
- unsigned VReg = RegMap->createVirtualRegister(&PPC::VRRCRegClass);
- MF.addLiveIn(VR[VR_idx], VReg);
+ unsigned VReg = RegInfo.createVirtualRegister(&PPC::VRRCRegClass);
+ RegInfo.addLiveIn(VR[VR_idx], VReg);
ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
++VR_idx;
} else {
for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
unsigned VReg;
if (isPPC64)
- VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
+ VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
else
- VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
+ VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
- MF.addLiveIn(GPR[GPR_idx], VReg);
+ RegInfo.addLiveIn(GPR[GPR_idx], VReg);
SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
MemOps.push_back(Store);
for (; FPR_idx != Num_FPR_Regs; ++FPR_idx) {
unsigned VReg;
- VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
+ VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
- MF.addLiveIn(FPR[FPR_idx], VReg);
+ RegInfo.addLiveIn(FPR[FPR_idx], VReg);
SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::f64);
SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
MemOps.push_back(Store);
(Addr << 6 >> 6) != Addr)
return 0; // Top 6 bits have to be sext of immediate.
- return DAG.getConstant((int)C->getValue() >> 2, MVT::i32).Val;
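+ // Give the constant the target pointer type rather than hard-coded i32,
+ // presumably so the callee address also has the right width on 64-bit targets.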
+ return DAG.getConstant((int)C->getValue() >> 2,
+ DAG.getTargetLoweringInfo().getPointerTy()).Val;
}
Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
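+ // Emit CALLSEQ_END right after the call. If the call produces a value,
+ // refresh InFlag so the result copies below are glued to it.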
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getConstant(NumBytes, PtrVT),
+ DAG.getConstant(0, PtrVT),
+ InFlag);
+ if (Op.Val->getValueType(0) != MVT::Other)
+ InFlag = Chain.getValue(1);
+
SDOperand ResultVals[3];
unsigned NumResults = 0;
NodeTys.clear();
NumResults = 1;
NodeTys.push_back(MVT::i64);
break;
- case MVT::f32:
case MVT::f64:
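+ // Two f64 results are the halves of a ppcf128, returned in F1 and F2.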
+ if (Op.Val->getValueType(1) == MVT::f64) {
+ Chain = DAG.getCopyFromReg(Chain, PPC::F1, MVT::f64, InFlag).getValue(1);
+ ResultVals[0] = Chain.getValue(0);
+ Chain = DAG.getCopyFromReg(Chain, PPC::F2, MVT::f64,
+ Chain.getValue(2)).getValue(1);
+ ResultVals[1] = Chain.getValue(0);
+ NumResults = 2;
+ NodeTys.push_back(MVT::f64);
+ NodeTys.push_back(MVT::f64);
+ break;
+ }
+ // else fall through
+ case MVT::f32:
Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0),
InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0);
break;
}
- Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
- DAG.getConstant(NumBytes, PtrVT));
NodeTys.push_back(MVT::Other);
// If the function returns void, just return the chain.
// If this is the first return lowered for this function, add the regs to the
// liveout set for the function.
- if (DAG.getMachineFunction().liveout_empty()) {
+ if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
for (unsigned i = 0; i != RVLocs.size(); ++i)
- DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg());
+ DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
}
SDOperand Chain = Op.getOperand(0);
DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
}
SDOperand Cmp;
switch (CC) {
default: break; // SETUO etc aren't handled by fsel.
case ISD::SETULT:
return SDOperand();
}
+// FIXME: Split this code up when LegalizeDAGTypes lands.
static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
SDOperand Src = Op.getOperand(0);
}
// Convert the FP value to an int value through memory.
- SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
+ SDOperand FIPtr = DAG.CreateStackTemporary(MVT::f64);
+
+ // Emit a store to the stack slot.
+ SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Tmp, FIPtr, NULL, 0);
+
+ // Result is a load from the stack slot. If loading 4 bytes, make sure to
+ // add in a bias.
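+ // (PPC is big-endian, so the i32 result lives in the low word of the f64
+ // stack slot, at offset 4.)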
if (Op.getValueType() == MVT::i32)
- Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
- return Bits;
+ FIPtr = DAG.getNode(ISD::ADD, FIPtr.getValueType(), FIPtr,
+ DAG.getConstant(4, FIPtr.getValueType()));
+ return DAG.getLoad(Op.getValueType(), Chain, FIPtr, NULL, 0);
+}
+
+static SDOperand LowerFP_ROUND_INREG(SDOperand Op, SelectionDAG &DAG) {
+ assert(Op.getValueType() == MVT::ppcf128);
+ SDNode *Node = Op.Val;
+ assert(Node->getOperand(0).getValueType() == MVT::ppcf128);
+ assert(Node->getOperand(0).Val->getOpcode() == ISD::BUILD_PAIR);
+ SDOperand Lo = Node->getOperand(0).Val->getOperand(0);
+ SDOperand Hi = Node->getOperand(0).Val->getOperand(1);
+
+ // This sequence changes FPSCR to do round-to-zero, adds the two halves
+ // of the long double, and puts FPSCR back the way it was. We do not
+ // actually model FPSCR.
+ std::vector<MVT::ValueType> NodeTys;
+ SDOperand Ops[4], Result, MFFSreg, InFlag, FPreg;
+
+ NodeTys.push_back(MVT::f64); // Return register
+ NodeTys.push_back(MVT::Flag); // Returns a flag for later insns
+ Result = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0);
+ MFFSreg = Result.getValue(0);
+ InFlag = Result.getValue(1);
+
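+ // mtfsb1 31 / mtfsb0 30 set FPSCR's rounding-control field RN to 0b01,
+ // i.e. round toward zero.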
+ NodeTys.clear();
+ NodeTys.push_back(MVT::Flag); // Returns a flag
+ Ops[0] = DAG.getConstant(31, MVT::i32);
+ Ops[1] = InFlag;
+ Result = DAG.getNode(PPCISD::MTFSB1, NodeTys, Ops, 2);
+ InFlag = Result.getValue(0);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::Flag); // Returns a flag
+ Ops[0] = DAG.getConstant(30, MVT::i32);
+ Ops[1] = InFlag;
+ Result = DAG.getNode(PPCISD::MTFSB0, NodeTys, Ops, 2);
+ InFlag = Result.getValue(0);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::f64); // result of add
+ NodeTys.push_back(MVT::Flag); // Returns a flag
+ Ops[0] = Lo;
+ Ops[1] = Hi;
+ Ops[2] = InFlag;
+ Result = DAG.getNode(PPCISD::FADDRTZ, NodeTys, Ops, 3);
+ FPreg = Result.getValue(0);
+ InFlag = Result.getValue(1);
+
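+ // Put FPSCR back the way it was, using the value saved by the MFFS above.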
+ NodeTys.clear();
+ NodeTys.push_back(MVT::f64);
+ Ops[0] = DAG.getConstant(1, MVT::i32);
+ Ops[1] = MFFSreg;
+ Ops[2] = FPreg;
+ Ops[3] = InFlag;
+ Result = DAG.getNode(PPCISD::MTFSF, NodeTys, Ops, 4);
+ FPreg = Result.getValue(0);
+
+ // We know the low half is about to be thrown away, so just use something
+ // convenient.
+ return DAG.getNode(ISD::BUILD_PAIR, Lo.getValueType(), FPreg, FPreg);
}
static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
if (SextVal >= 0 && SextVal <= 31) {
SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG);
SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
- LHS = DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS);
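+ // The splats are built in the canonical type for this splat size, which may
+ // differ from Op's type; do the arithmetic there and bitcast afterwards.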
+ LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS);
return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
}
// Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
if (SextVal >= -31 && SextVal <= 0) {
SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG);
SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
- LHS = DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS);
+ LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS);
return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
}
}
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
+ case ISD::FP_ROUND_INREG: return LowerFP_ROUND_INREG(Op, DAG);
// Lower 64-bit shifts.
case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
- // Frame & Return address. Currently unimplemented
- case ISD::RETURNADDR: break;
+ // Frame & Return address.
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
}
return SDOperand();
}
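+// Presumably called when a custom-lowered node's result type needs to be
+// expanded (cf. the LegalizeDAGTypes FIXME above); reuse the FP_TO_SINT
+// lowering for that case.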
+SDNode *PPCTargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
+ switch (N->getOpcode()) {
+ default: assert(0 && "Wasn't expecting to be able to lower this!");
+ case ISD::FP_TO_SINT: return LowerFP_TO_SINT(SDOperand(N, 0), DAG).Val;
+ }
+}
+
+
//===----------------------------------------------------------------------===//
// Other Lowering Code
//===----------------------------------------------------------------------===//
// Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
// We allow the src/dst to be either f32/f64, but the intermediate
// type must be i64.
- if (N->getOperand(0).getValueType() == MVT::i64) {
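+ // (ppcf128 values are excluded: the fctidz/fcfid trick only works on values
+ // that fit in a single FP register.)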
+ if (N->getOperand(0).getValueType() == MVT::i64 &&
+ N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
SDOperand Val = N->getOperand(0).getOperand(0);
if (Val.getValueType() == MVT::f32) {
Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
// Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
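+ // (Again, not for ppcf128 sources.)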
if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
- N->getOperand(1).getValueType() == MVT::i32) {
+ N->getOperand(1).getValueType() == MVT::i32 &&
+ N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
SDOperand Val = N->getOperand(1).getOperand(0);
if (Val.getValueType() == MVT::f32) {
Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
return false;
}
-SDOperand PPCTargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG)
-{
+SDOperand PPCTargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) {
+ // Depths > 0 not supported yet!
+ if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
+ return SDOperand();
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ int RAIdx = FuncInfo->getReturnAddrSaveIndex();
+ if (RAIdx == 0) {
+ bool isPPC64 = PPCSubTarget.isPPC64();
+ int Offset =
+ PPCFrameInfo::getReturnSaveOffset(isPPC64, PPCSubTarget.isMachoABI());
+
+ // Set up a frame object for the return address.
+ RAIdx = MF.getFrameInfo()->CreateFixedObject(isPPC64 ? 8 : 4, Offset);
+
+ // Remember it for next time.
+ FuncInfo->setReturnAddrSaveIndex(RAIdx);
+
+ // Make sure the function really does not optimize away the store of the RA
+ // to the stack.
+ FuncInfo->setLRStoreRequired();
+ }
+
+ // Just load the return address off the stack.
+ SDOperand RetAddrFI = DAG.getFrameIndex(RAIdx, getPointerTy());
+ return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
+}
+
+SDOperand PPCTargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) {
// Depths > 0 not supported yet!
if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
return SDOperand();