X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTarget%2FWebAssembly%2FWebAssemblyISelLowering.cpp;h=6e1283b4d3341ae4df0c79f5b509402c2a248430;hp=f4c2e5bce0de78a78b134bd377afc4cbcf51d29e;hb=b975ecb43f74888e84d14d92ef95c9a86684549f;hpb=456ed8978e251f7e7db36d01acbdaeb1f38a84c0

diff --git a/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index f4c2e5bce0d..6e1283b4d33 100644
--- a/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -20,6 +20,7 @@
 #include "WebAssemblyTargetObjectFile.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/SelectionDAG.h"
 #include "llvm/IR/DiagnosticInfo.h"
@@ -31,15 +32,15 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetOptions.h"
-
 using namespace llvm;
 
 #define DEBUG_TYPE "wasm-lower"
 
 namespace {
 // Diagnostic information for unimplemented or unsupported feature reporting.
-// FIXME copied from BPF and AMDGPU.
-class DiagnosticInfoUnsupported : public DiagnosticInfo {
+// TODO: This code is copied from BPF and AMDGPU; consider factoring it out
+// and sharing code.
+class DiagnosticInfoUnsupported final : public DiagnosticInfo {
 private:
   // Debug location where this diagnostic is triggered.
   DebugLoc DLoc;
@@ -106,14 +107,23 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
   setStackPointerRegisterToSaveRestore(
       Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
   // Set up the register classes.
-  addRegisterClass(MVT::i32, &WebAssembly::Int32RegClass);
-  addRegisterClass(MVT::i64, &WebAssembly::Int64RegClass);
-  addRegisterClass(MVT::f32, &WebAssembly::Float32RegClass);
-  addRegisterClass(MVT::f64, &WebAssembly::Float64RegClass);
+  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
+  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
+  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
+  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
   // Compute derived properties from the register classes.
   computeRegisterProperties(Subtarget->getRegisterInfo());
 
   setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
+  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
+  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
+
+  // Take the default expansion for va_arg, va_copy, and va_end. There is no
+  // default action for va_start, so we do that custom.
+  setOperationAction(ISD::VASTART, MVT::Other, Custom);
+  setOperationAction(ISD::VAARG, MVT::Other, Expand);
+  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+  setOperationAction(ISD::VAEND, MVT::Other, Expand);
 
   for (auto T : {MVT::f32, MVT::f64}) {
     // Don't expand the floating-point types to constant pools.
@@ -123,22 +133,26 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
                     ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
       setCondCodeAction(CC, T, Expand);
     // Expand floating-point library function operators.
-    for (auto Op : {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOWI, ISD::FPOW})
+    for (auto Op : {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOWI, ISD::FPOW,
+                    ISD::FREM})
       setOperationAction(Op, T, Expand);
     // Note supported floating-point library function operators that otherwise
     // default to expand.
-    for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
-                    ISD::FRINT})
+    for (auto Op :
+         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
       setOperationAction(Op, T, Legal);
+    // Support minnan and maxnan, which otherwise default to expand.
+    setOperationAction(ISD::FMINNAN, T, Legal);
+    setOperationAction(ISD::FMAXNAN, T, Legal);
   }
 
   for (auto T : {MVT::i32, MVT::i64}) {
     // Expand unavailable integer operations.
-    for (auto Op : {ISD::BSWAP, ISD::ROTL, ISD::ROTR,
-                    ISD::SMUL_LOHI, ISD::UMUL_LOHI,
-                    ISD::MULHS, ISD::MULHU, ISD::SDIVREM, ISD::UDIVREM,
-                    ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS,
-                    ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
+    for (auto Op :
+         {ISD::BSWAP, ISD::ROTL, ISD::ROTR, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
+          ISD::MULHS, ISD::MULHU, ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS,
+          ISD::SRA_PARTS, ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC,
+          ISD::SUBE}) {
       setOperationAction(Op, T, Expand);
     }
   }
@@ -153,6 +167,14 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
 
+  // Expand these forms; we pattern-match the forms that we can handle in isel.
+  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
+    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
+      setOperationAction(Op, T, Expand);
+
+  // We have custom switch handling.
+  setOperationAction(ISD::BR_JT, MVT::Other, Custom);
+
   // WebAssembly doesn't have:
   //  - Floating-point extending loads.
   //  - Floating-point truncating stores.
@@ -162,6 +184,9 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
   for (auto T : MVT::integer_valuetypes())
     for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
       setLoadExtAction(Ext, T, MVT::i1, Promote);
+
+  // Trap lowers to wasm unreachable
+  setOperationAction(ISD::TRAP, MVT::Other, Legal);
 }
 
 FastISel *WebAssemblyTargetLowering::createFastISel(
@@ -170,13 +195,13 @@ FastISel *WebAssemblyTargetLowering::createFastISel(
 }
 
 bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
-    const GlobalAddressSDNode *GA) const {
+    const GlobalAddressSDNode * /*GA*/) const {
   // The WebAssembly target doesn't support folding offsets into global
   // addresses.
   return false;
 }
 
-MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
+MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                        EVT VT) const {
   return VT.getSimpleVT();
 }
@@ -195,6 +220,40 @@ WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
   return nullptr;
 }
 
+std::pair<unsigned, const TargetRegisterClass *>
+WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
+    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
+  // First, see if this is a constraint that directly corresponds to a
+  // WebAssembly register class.
+  if (Constraint.size() == 1) {
+    switch (Constraint[0]) {
+    case 'r':
+      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
+      if (VT.isInteger() && !VT.isVector()) {
+        if (VT.getSizeInBits() <= 32)
+          return std::make_pair(0U, &WebAssembly::I32RegClass);
+        if (VT.getSizeInBits() <= 64)
+          return std::make_pair(0U, &WebAssembly::I64RegClass);
+      }
+      break;
+    default:
+      break;
+    }
+  }
+
+  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
+
+bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
+  // Assume ctz is a relatively cheap operation.
+  return true;
+}
+
+bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
+  // Assume clz is a relatively cheap operation.
+ return true; +} + //===----------------------------------------------------------------------===// // WebAssembly Lowering private implementation. //===----------------------------------------------------------------------===// @@ -209,6 +268,19 @@ static void fail(SDLoc DL, SelectionDAG &DAG, const char *msg) { DiagnosticInfoUnsupported(DL, *MF.getFunction(), msg, SDValue())); } +// Test whether the given calling convention is supported. +static bool CallingConvSupported(CallingConv::ID CallConv) { + // We currently support the language-independent target-independent + // conventions. We don't yet have a way to annotate calls with properties like + // "cold", and we don't have any call-clobbered registers, so these are mostly + // all handled the same. + return CallConv == CallingConv::C || CallConv == CallingConv::Fast || + CallConv == CallingConv::Cold || + CallConv == CallingConv::PreserveMost || + CallConv == CallingConv::PreserveAll || + CallConv == CallingConv::CXX_FAST_TLS; +} + SDValue WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI, SmallVectorImpl &InVals) const { @@ -219,46 +291,123 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI, MachineFunction &MF = DAG.getMachineFunction(); CallingConv::ID CallConv = CLI.CallConv; - if (CallConv != CallingConv::C) - fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions"); - if (CLI.IsTailCall || MF.getTarget().Options.GuaranteedTailCallOpt) - fail(DL, DAG, "WebAssembly doesn't support tail call yet"); + if (!CallingConvSupported(CallConv)) + fail(DL, DAG, + "WebAssembly doesn't support language-specific or target-specific " + "calling conventions yet"); if (CLI.IsPatchPoint) fail(DL, DAG, "WebAssembly doesn't support patch point yet"); - SmallVectorImpl &Outs = CLI.Outs; + // WebAssembly doesn't currently support explicit tail calls. If they are + // required, fail. Otherwise, just disable them. + if ((CallConv == CallingConv::Fast && CLI.IsTailCall && + MF.getTarget().Options.GuaranteedTailCallOpt) || + (CLI.CS && CLI.CS->isMustTailCall())) + fail(DL, DAG, "WebAssembly doesn't support tail call yet"); + CLI.IsTailCall = false; + SmallVectorImpl &OutVals = CLI.OutVals; - bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); - if (IsStructRet) - fail(DL, DAG, "WebAssembly doesn't support struct return yet"); - if (Outs.size() > 1) - fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet"); SmallVectorImpl &Ins = CLI.Ins; + if (Ins.size() > 1) + fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet"); + + SmallVectorImpl &Outs = CLI.Outs; + for (const ISD::OutputArg &Out : Outs) { + assert(!Out.Flags.isByVal() && "byval is not valid for return values"); + assert(!Out.Flags.isNest() && "nest is not valid for return values"); + if (Out.Flags.isInAlloca()) + fail(DL, DAG, "WebAssembly hasn't implemented inalloca results"); + if (Out.Flags.isInConsecutiveRegs()) + fail(DL, DAG, "WebAssembly hasn't implemented cons regs results"); + if (Out.Flags.isInConsecutiveRegsLast()) + fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results"); + } + bool IsVarArg = CLI.IsVarArg; - if (IsVarArg) - fail(DL, DAG, "WebAssembly doesn't support varargs yet"); + unsigned NumFixedArgs = CLI.NumFixedArgs; + auto PtrVT = getPointerTy(MF.getDataLayout()); + // Analyze operands of the call, assigning locations to each operand. 
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
 
-  unsigned NumBytes = CCInfo.getNextStackOffset();
-
-  auto PtrVT = getPointerTy(MF.getDataLayout());
-  auto Zero = DAG.getConstant(0, DL, PtrVT, true);
+  if (IsVarArg) {
+    // Outgoing non-fixed arguments are placed at the top of the stack. First
+    // compute their offsets and the total amount of argument stack space
+    // needed.
+    for (SDValue Arg :
+         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
+      EVT VT = Arg.getValueType();
+      assert(VT != MVT::iPTR && "Legalized args should be concrete");
+      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
+      unsigned Offset =
+          CCInfo.AllocateStack(MF.getDataLayout().getTypeAllocSize(Ty),
+                               MF.getDataLayout().getABITypeAlignment(Ty));
+      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
+                                        Offset, VT.getSimpleVT(),
+                                        CCValAssign::Full));
+    }
+  }
+
+  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
+
   auto NB = DAG.getConstant(NumBytes, DL, PtrVT, true);
   Chain = DAG.getCALLSEQ_START(Chain, NB, DL);
 
+  if (IsVarArg) {
+    // For non-fixed arguments, next emit stores to store the argument values
+    // to the stack at the offsets computed above.
+    SDValue SP = DAG.getCopyFromReg(
+        Chain, DL, getStackPointerRegisterToSaveRestore(), PtrVT);
+    unsigned ValNo = 0;
+    SmallVector<SDValue, 8> Chains;
+    for (SDValue Arg :
+         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
+      assert(ArgLocs[ValNo].getValNo() == ValNo &&
+             "ArgLocs should remain in order and only hold varargs args");
+      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
+      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, SP,
+                                DAG.getConstant(Offset, DL, PtrVT));
+      Chains.push_back(DAG.getStore(Chain, DL, Arg, Add,
+                                    MachinePointerInfo::getStack(MF, Offset),
+                                    false, false, 0));
+    }
+    if (!Chains.empty())
+      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
+  }
+
+  // Compute the operands for the CALLn node.
   SmallVector<SDValue, 16> Ops;
   Ops.push_back(Chain);
   Ops.push_back(Callee);
-  Ops.append(OutVals.begin(), OutVals.end());
+
+  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
+  // isn't reliable.
+  Ops.append(OutVals.begin(),
+             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
 
   SmallVector<EVT, 8> Tys;
-  for (const auto &In : Ins)
+  for (const auto &In : Ins) {
+    if (In.Flags.isByVal())
+      fail(DL, DAG, "WebAssembly hasn't implemented byval arguments");
+    if (In.Flags.isInAlloca())
+      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
+    if (In.Flags.isNest())
+      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
+    if (In.Flags.isInConsecutiveRegs())
+      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
+    if (In.Flags.isInConsecutiveRegsLast())
+      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
+    // Ignore In.getOrigAlign() because all our arguments are passed in
+    // registers.
     Tys.push_back(In.VT);
+  }
   Tys.push_back(MVT::Other);
   SDVTList TyList = DAG.getVTList(Tys);
-  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, TyList, Ops);
+  SDValue Res =
+      DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
+                  DL, TyList, Ops);
   if (Ins.empty()) {
     Chain = Res;
   } else {
@@ -266,85 +415,88 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
     Chain = Res.getValue(1);
   }
 
-  // FIXME: handle CLI.RetSExt and CLI.RetZExt?
-
-  Chain = DAG.getCALLSEQ_END(Chain, NB, Zero, SDValue(), DL);
+  SDValue Unused = DAG.getUNDEF(PtrVT);
+  Chain = DAG.getCALLSEQ_END(Chain, NB, Unused, SDValue(), DL);
 
   return Chain;
 }
 
 bool WebAssemblyTargetLowering::CanLowerReturn(
-    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
-    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
+    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
+    const SmallVectorImpl<ISD::OutputArg> &Outs,
+    LLVMContext & /*Context*/) const {
   // WebAssembly can't currently handle returning tuples.
   return Outs.size() <= 1;
 }
 
 SDValue WebAssemblyTargetLowering::LowerReturn(
-    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
     const SmallVectorImpl<ISD::OutputArg> &Outs,
     const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
     SelectionDAG &DAG) const {
-  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
-  if (CallConv != CallingConv::C)
+  if (!CallingConvSupported(CallConv))
     fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
-  if (IsVarArg)
-    fail(DL, DAG, "WebAssembly doesn't support varargs yet");
 
   SmallVector<SDValue, 4> RetOps(1, Chain);
   RetOps.append(OutVals.begin(), OutVals.end());
   Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
 
+  // Record the number and types of the return values.
+  for (const ISD::OutputArg &Out : Outs) {
+    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
+    assert(!Out.Flags.isNest() && "nest is not valid for return values");
+    assert(Out.IsFixed && "non-fixed return value is not valid");
+    if (Out.Flags.isInAlloca())
+      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
+    if (Out.Flags.isInConsecutiveRegs())
+      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
+    if (Out.Flags.isInConsecutiveRegsLast())
+      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
+  }
+
   return Chain;
 }
 
 SDValue WebAssemblyTargetLowering::LowerFormalArguments(
-    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
     SmallVectorImpl<SDValue> &InVals) const {
   MachineFunction &MF = DAG.getMachineFunction();
 
-  if (CallConv != CallingConv::C)
+  if (!CallingConvSupported(CallConv))
     fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
-  if (IsVarArg)
-    fail(DL, DAG, "WebAssembly doesn't support varargs yet");
-  if (MF.getFunction()->hasStructRetAttr())
-    fail(DL, DAG, "WebAssembly doesn't support struct return yet");
 
-  unsigned ArgNo = 0;
+  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
+  // of the incoming values before they're represented by virtual registers.
+  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
+
   for (const ISD::InputArg &In : Ins) {
-    if (In.Flags.isZExt())
-      fail(DL, DAG, "WebAssembly hasn't implemented zext arguments");
-    if (In.Flags.isSExt())
-      fail(DL, DAG, "WebAssembly hasn't implemented sext arguments");
-    if (In.Flags.isInReg())
-      fail(DL, DAG, "WebAssembly hasn't implemented inreg arguments");
-    if (In.Flags.isSRet())
-      fail(DL, DAG, "WebAssembly hasn't implemented sret arguments");
     if (In.Flags.isByVal())
       fail(DL, DAG, "WebAssembly hasn't implemented byval arguments");
     if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
     if (In.Flags.isNest())
       fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
-    if (In.Flags.isReturned())
-      fail(DL, DAG, "WebAssembly hasn't implemented returned arguments");
     if (In.Flags.isInConsecutiveRegs())
       fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
     if (In.Flags.isInConsecutiveRegsLast())
       fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
-    if (In.Flags.isSplit())
-      fail(DL, DAG, "WebAssembly hasn't implemented split arguments");
-    // FIXME Do something with In.getOrigAlign()?
+    // Ignore In.getOrigAlign() because all our arguments are passed in
+    // registers.
     InVals.push_back(
         In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
-                              DAG.getTargetConstant(ArgNo, DL, MVT::i32))
-                : DAG.getNode(ISD::UNDEF, DL, In.VT));
-    ++ArgNo;
+                              DAG.getTargetConstant(InVals.size(), DL, MVT::i32))
+                : DAG.getUNDEF(In.VT));
+
+    // Record the number and types of arguments.
+    MF.getInfo<WebAssemblyFunctionInfo>()->addParam(In.VT);
   }
 
+  // Incoming varargs arguments are on the stack and will be accessed through
+  // va_arg, so we don't need to do anything for them here.
+
   return Chain;
 }
 
@@ -360,6 +512,14 @@ SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
     return SDValue();
   case ISD::GlobalAddress:
     return LowerGlobalAddress(Op, DAG);
+  case ISD::ExternalSymbol:
+    return LowerExternalSymbol(Op, DAG);
+  case ISD::JumpTable:
+    return LowerJumpTable(Op, DAG);
+  case ISD::BR_JT:
+    return LowerBR_JT(Op, DAG);
+  case ISD::VASTART:
+    return LowerVASTART(Op, DAG);
   }
 }
 
@@ -377,12 +537,79 @@ SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                      DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT));
 }
 
+SDValue
+WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  const auto *ES = cast<ExternalSymbolSDNode>(Op);
+  EVT VT = Op.getValueType();
+  assert(ES->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
+  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
+                     DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
+}
+
+SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  // There's no need for a Wrapper node because we always incorporate a jump
+  // table operand into a TABLESWITCH instruction, rather than ever
+  // materializing it in a register.
+  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
+                                JT->getTargetFlags());
+}
+
+SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
+                                              SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  SDValue Chain = Op.getOperand(0);
+  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
+  SDValue Index = Op.getOperand(2);
+  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
+
+  SmallVector<SDValue, 8> Ops;
+  Ops.push_back(Chain);
+  Ops.push_back(Index);
+
+  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
+  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
+
+  // TODO: For now, we just pick something arbitrary for a default case. We
+  // really want to sniff out the guard and put in the real default case (and
+  // delete the guard).
+  Ops.push_back(DAG.getBasicBlock(MBBs[0]));
+
+  // Add an operand for each case.
+  for (auto MBB : MBBs)
+    Ops.push_back(DAG.getBasicBlock(MBB));
+
+  return DAG.getNode(WebAssemblyISD::TABLESWITCH, DL, MVT::Other, Ops);
+}
+
+SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
+
+  // The incoming non-fixed arguments are placed on the top of the stack, with
+  // natural alignment, at the point of the call, so the base pointer is just
+  // the current frame pointer.
+  DAG.getMachineFunction().getFrameInfo()->setFrameAddressIsTaken(true);
+  unsigned FP =
+      static_cast<const WebAssemblyRegisterInfo *>(Subtarget->getRegisterInfo())
+          ->getFrameRegister(DAG.getMachineFunction());
+  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FP, PtrVT);
+  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+  return DAG.getStore(Op.getOperand(0), DL, FrameAddr, Op.getOperand(1),
+                      MachinePointerInfo(SV), false, false, 0);
+}
+
 //===----------------------------------------------------------------------===//
 // WebAssembly Optimization Hooks
 //===----------------------------------------------------------------------===//
 
 MCSection *WebAssemblyTargetObjectFile::SelectSectionForGlobal(
-    const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
-    const TargetMachine &TM) const {
-  return getDataSection();
+    const GlobalValue *GV, SectionKind /*Kind*/, Mangler & /*Mang*/,
+    const TargetMachine & /*TM*/) const {
+  // TODO: Be more sophisticated than this.
+  return isa<Function>(GV) ? getTextSection() : getDataSection();
 }
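
Illustrative example (not part of the patch; the function names below are made up): assuming a toolchain targeting wasm32, a small C++ translation unit along these lines exercises the varargs support added above. The variadic call site goes through the new IsVarArg path in LowerCall, which stores the non-fixed arguments to the outgoing argument area; va_start in the callee is handled by the new LowerVASTART hook, while va_arg, va_copy, and va_end use the default expansions configured in the constructor.

#include <cstdarg>

// Variadic callee: reads its non-fixed arguments through va_arg.
int sum(int count, ...) {
  va_list ap;
  va_start(ap, count);        // ISD::VASTART is custom-lowered by LowerVASTART
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(ap, int); // ISD::VAARG takes the default expansion
  va_end(ap);                 // ISD::VAEND expands to nothing
  return total;
}

// Variadic caller: the non-fixed arguments 1, 2, 3 are placed at the top of
// the stack by the vararg handling added to LowerCall.
int caller() { return sum(3, 1, 2, 3); }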
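A second illustrative sketch (also not part of the patch; whether a particular switch becomes a jump table depends on the usual SelectionDAG heuristics): a reasonably dense switch like the one below is the kind of input that produces a BR_JT node, which the new LowerBR_JT lowers to a TABLESWITCH node whose operands are the chain, the index, an arbitrary default block, and one block per jump-table entry.

// A dense switch is typically turned into a jump table; the jump-table
// operand is consumed directly by TABLESWITCH rather than being materialized
// in a register (see LowerJumpTable above).
int classify(int x) {
  switch (x) {
  case 0: return 10;
  case 1: return 11;
  case 2: return 12;
  case 3: return 13;
  case 4: return 14;
  case 5: return 15;
  default: return -1;
  }
}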