X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FPowerPC%2FPPCInstrAltivec.td;h=545a4da505c7a3cc40db680fba7beef237d04378;hb=1ee29257428960fede862fcfdbe80d5d007927e9;hp=f31dfab1873e2a7722b75bda9f6d4e2ead7613ce;hpb=af9136bc0c7ce213271495801033e332b8d38bda;p=oota-llvm.git

diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td
index f31dfab1873..545a4da505c 100644
--- a/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -15,93 +15,210 @@
 // Altivec transformation functions and pattern fragments.
 //
 
-// VSPLT_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
-def VSPLT_get_imm : SDNodeXForm<build_vector, [{
-  return getI32Imm(PPC::getVSPLTImmediate(N));
+def VPKUHUM_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVPKUHUMShuffleMask(N, false);
+}]>;
+def VPKUWUM_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVPKUWUMShuffleMask(N, false);
+}]>;
+
+def VPKUHUM_unary_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVPKUHUMShuffleMask(N, true);
+}]>;
+def VPKUWUM_unary_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVPKUWUMShuffleMask(N, true);
+}]>;
+
+
+def VMRGLB_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGLShuffleMask(N, 1, false);
+}]>;
+def VMRGLH_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGLShuffleMask(N, 2, false);
+}]>;
+def VMRGLW_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGLShuffleMask(N, 4, false);
+}]>;
+def VMRGHB_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGHShuffleMask(N, 1, false);
+}]>;
+def VMRGHH_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGHShuffleMask(N, 2, false);
+}]>;
+def VMRGHW_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGHShuffleMask(N, 4, false);
 }]>;
 
-def VSPLT_shuffle_mask : PatLeaf<(build_vector), [{
-  return PPC::isSplatShuffleMask(N);
-}], VSPLT_get_imm>;
+def VMRGLB_unary_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGLShuffleMask(N, 1, true);
+}]>;
+def VMRGLH_unary_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGLShuffleMask(N, 2, true);
+}]>;
+def VMRGLW_unary_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGLShuffleMask(N, 4, true);
+}]>;
+def VMRGHB_unary_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGHShuffleMask(N, 1, true);
+}]>;
+def VMRGHH_unary_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGHShuffleMask(N, 2, true);
+}]>;
+def VMRGHW_unary_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVMRGHShuffleMask(N, 4, true);
+}]>;
 
-def vecimm0 : PatLeaf<(build_vector), [{
-  return PPC::isZeroVector(N);
+
+def VSLDOI_get_imm : SDNodeXForm<build_vector, [{
+  return getI32Imm(PPC::isVSLDOIShuffleMask(N, false));
 }]>;
+def VSLDOI_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVSLDOIShuffleMask(N, false) != -1;
+}], VSLDOI_get_imm>;
+
+/// VSLDOI_unary* - These are used to match vsldoi(X,X), which is turned into
+/// vector_shuffle(X,undef,mask) by the dag combiner.
+def VSLDOI_unary_get_imm : SDNodeXForm<build_vector, [{
+  return getI32Imm(PPC::isVSLDOIShuffleMask(N, true));
+}]>;
+def VSLDOI_unary_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isVSLDOIShuffleMask(N, true) != -1;
+}], VSLDOI_unary_get_imm>;
+
+
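The PatLeaf predicates above all defer to C++ helpers in the PPC backend
(PPC::isVPKUHUMShuffleMask, PPC::isVMRGLShuffleMask, PPC::isVSLDOIShuffleMask,
and friends) that test a build_vector shuffle mask element by element.  The
self-contained sketch below shows the flavor of such a check for the vpkuhum
case; the helper name and array-based signature are illustrative only (the
in-tree helpers walk SDNode operands instead).  vpkuhum keeps the odd-numbered
(big-endian low) byte of every halfword of the 32-byte concatenation (A,B),
and the unary variant compares indices modulo 16 so that vpkuhum(X,X) can
match vector_shuffle(X,undef,mask):

    #include <cassert>

    // Illustrative stand-in for PPC::isVPKUHUMShuffleMask.  A 16-entry
    // byte-shuffle mask selects vpkuhum when entry i is byte 2*i+1 of the
    // 32-byte concatenation (A,B), i.e. the low byte of each big-endian
    // halfword.  The unary form feeds the same register to both inputs,
    // so indices are compared modulo 16.
    static bool isVPKUHUMMask(const unsigned char Mask[16], bool Unary) {
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Expected = i * 2 + 1;
        if (Unary ? (Mask[i] % 16u != Expected % 16u) : (Mask[i] != Expected))
          return false;
      }
      return true;
    }

    int main() {
      unsigned char M[16];
      for (unsigned i = 0; i != 16; ++i) M[i] = (unsigned char)(2 * i + 1);
      assert(isVPKUHUMMask(M, false));  // binary vpkuhum(A,B)
      assert(isVPKUHUMMask(M, true));   // trivially legal as vpkuhum(X,X) too
      return 0;
    }
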
+// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
+def VSPLTB_get_imm : SDNodeXForm<build_vector, [{
+  return getI32Imm(PPC::getVSPLTImmediate(N, 1));
+}]>;
+def VSPLTB_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isSplatShuffleMask(N, 1);
+}], VSPLTB_get_imm>;
+def VSPLTH_get_imm : SDNodeXForm<build_vector, [{
+  return getI32Imm(PPC::getVSPLTImmediate(N, 2));
+}]>;
+def VSPLTH_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isSplatShuffleMask(N, 2);
+}], VSPLTH_get_imm>;
+def VSPLTW_get_imm : SDNodeXForm<build_vector, [{
+  return getI32Imm(PPC::getVSPLTImmediate(N, 4));
+}]>;
+def VSPLTW_shuffle_mask : PatLeaf<(build_vector), [{
+  return PPC::isSplatShuffleMask(N, 4);
+}], VSPLTW_get_imm>;
 
 // VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
 def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
   return PPC::get_VSPLTI_elt(N, 1, *CurDAG);
 }]>;
 def vecspltisb : PatLeaf<(build_vector), [{
-  return PPC::isVecSplatImm(N, 1);
+  return PPC::get_VSPLTI_elt(N, 1, *CurDAG).Val != 0;
 }], VSPLTISB_get_imm>;
 
 // VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
 def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
   return PPC::get_VSPLTI_elt(N, 2, *CurDAG);
 }]>;
 def vecspltish : PatLeaf<(build_vector), [{
-  return PPC::isVecSplatImm(N, 2);
+  return PPC::get_VSPLTI_elt(N, 2, *CurDAG).Val != 0;
 }], VSPLTISH_get_imm>;
 
 // VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
 def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
   return PPC::get_VSPLTI_elt(N, 4, *CurDAG);
 }]>;
 def vecspltisw : PatLeaf<(build_vector), [{
-  return PPC::isVecSplatImm(N, 4);
+  return PPC::get_VSPLTI_elt(N, 4, *CurDAG).Val != 0;
 }], VSPLTISW_get_imm>;
 
-
+//===----------------------------------------------------------------------===//
+// Helpers for defining instructions that directly correspond to intrinsics.
+
+// VA1a_Int - A VAForm_1a intrinsic definition.
+class VA1a_Int<bits<6> xo, string opc, Intrinsic IntID>
+  : VAForm_1a<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
+              !strconcat(opc, " $vD, $vA, $vB, $vC"), VecFP,
+              [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;
+
+// VX1_Int - A VXForm_1 intrinsic definition.
+class VX1_Int<bits<11> xo, string opc, Intrinsic IntID>
+  : VXForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+             !strconcat(opc, " $vD, $vA, $vB"), VecFP,
+             [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB))]>;
+
+// VX2_Int - A VXForm_2 intrinsic definition.
+class VX2_Int<bits<11> xo, string opc, Intrinsic IntID>
+  : VXForm_2<xo, (ops VRRC:$vD, VRRC:$vB),
+             !strconcat(opc, " $vD, $vB"), VecFP,
+             [(set VRRC:$vD, (IntID VRRC:$vB))]>;
 
 //===----------------------------------------------------------------------===//
 // Instruction Definitions.
 
-def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
-                               [(set VRRC:$rD, (v4f32 (undef)))]>;
+def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; IMPLICIT_DEF_VRRC $rD",
+                               [(set VRRC:$rD, (v4i32 (undef)))]>;
+
+let noResults = 1 in {
+def DSS   : DSS_Form<822, (ops u5imm:$A, u5imm:$STRM,u5imm:$ZERO1,u5imm:$ZERO2),
+                     "dss $STRM, $A", LdStGeneral /*FIXME*/, []>;
+def DST   : DSS_Form<342, (ops u5imm:$T, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
+                     "dst $rA, $rB, $STRM, $T", LdStGeneral /*FIXME*/, []>;
+def DSTST : DSS_Form<374, (ops u5imm:$T, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
+                     "dstst $rA, $rB, $STRM, $T", LdStGeneral /*FIXME*/, []>;
+}
+
+def MFVSCR : VXForm_4<1540, (ops VRRC:$vD),
+                      "mfvscr $vD", LdStGeneral,
+                      [(set VRRC:$vD, (int_ppc_altivec_mfvscr))]>;
+def MTVSCR : VXForm_5<1604, (ops VRRC:$vB),
+                      "mtvscr $vB", LdStGeneral,
+                      [(int_ppc_altivec_mtvscr VRRC:$vB)]>;
 
 let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31, 7, (ops VRRC:$vD, memrr:$src), "lvebx $vD, $src", LdStGeneral, - [(set VRRC:$vD, (v16i8 (PPClve_x xoaddr:$src)))]>; -def LVEHX: XForm_1<31, 39, (ops VRRC:$vD, memrr:$src), + [(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>; +def LVEHX: XForm_1<31, 39, (ops VRRC:$vD, memrr:$src), "lvehx $vD, $src", LdStGeneral, - [(set VRRC:$vD, (v8i16 (PPClve_x xoaddr:$src)))]>; -def LVEWX: XForm_1<31, 71, (ops VRRC:$vD, memrr:$src), + [(set VRRC:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>; +def LVEWX: XForm_1<31, 71, (ops VRRC:$vD, memrr:$src), "lvewx $vD, $src", LdStGeneral, - [(set VRRC:$vD, (v4f32 (PPClve_x xoaddr:$src)))]>; -def LVX : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src), + [(set VRRC:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>; +def LVX : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src), "lvx $vD, $src", LdStGeneral, - [(set VRRC:$vD, (v4f32 (load xoaddr:$src)))]>; + [(set VRRC:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>; +def LVXL : XForm_1<31, 359, (ops VRRC:$vD, memrr:$src), + "lvxl $vD, $src", LdStGeneral, + [(set VRRC:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>; } -def LVSL : XForm_1<31, 6, (ops VRRC:$vD, GPRC:$base, GPRC:$rA), - "lvsl $vD, $base, $rA", LdStGeneral, - []>, PPC970_Unit_LSU; -def LVSR : XForm_1<31, 38, (ops VRRC:$vD, GPRC:$base, GPRC:$rA), - "lvsl $vD, $base, $rA", LdStGeneral, - []>, PPC970_Unit_LSU; +def LVSL : XForm_1<31, 6, (ops VRRC:$vD, memrr:$src), + "lvsl $vD, $src", LdStGeneral, + [(set VRRC:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>, + PPC970_Unit_LSU; +def LVSR : XForm_1<31, 38, (ops VRRC:$vD, memrr:$src), + "lvsr $vD, $src", LdStGeneral, + [(set VRRC:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>, + PPC970_Unit_LSU; let isStore = 1, noResults = 1, PPC970_Unit = 2 in { // Stores. -def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB), - "stvebx $rS, $rA, $rB", LdStGeneral, - []>; -def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB), - "stvehx $rS, $rA, $rB", LdStGeneral, - []>; -def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB), - "stvewx $rS, $rA, $rB", LdStGeneral, - []>; +def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, memrr:$dst), + "stvebx $rS, $dst", LdStGeneral, + [(int_ppc_altivec_stvebx VRRC:$rS, xoaddr:$dst)]>; +def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, memrr:$dst), + "stvehx $rS, $dst", LdStGeneral, + [(int_ppc_altivec_stvehx VRRC:$rS, xoaddr:$dst)]>; +def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, memrr:$dst), + "stvewx $rS, $dst", LdStGeneral, + [(int_ppc_altivec_stvewx VRRC:$rS, xoaddr:$dst)]>; def STVX : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst), "stvx $rS, $dst", LdStGeneral, - [(store (v4f32 VRRC:$rS), xoaddr:$dst)]>; + [(int_ppc_altivec_stvx VRRC:$rS, xoaddr:$dst)]>; +def STVXL : XForm_8<31, 487, (ops VRRC:$rS, memrr:$dst), + "stvxl $rS, $dst", LdStGeneral, + [(int_ppc_altivec_stvxl VRRC:$rS, xoaddr:$dst)]>; } let PPC970_Unit = 5 in { // VALU Operations. 
@@ -117,47 +234,43 @@ def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB), VRRC:$vB)))]>, Requires<[FPContractions]>; -def VPERM : VAForm_1<43, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB), - "vperm $vD, $vA, $vB, $vC", VecPerm, - [(set VRRC:$vD, - (PPCvperm (v4f32 VRRC:$vA), VRRC:$vB, VRRC:$vC))]>; +def VMHADDSHS : VA1a_Int<32, "vmhaddshs", int_ppc_altivec_vmhaddshs>; +def VMHRADDSHS : VA1a_Int<33, "vmhraddshs", int_ppc_altivec_vmhraddshs>; +def VMLADDUHM : VA1a_Int<34, "vmladduhm", int_ppc_altivec_vmladduhm>; +def VPERM : VA1a_Int<43, "vperm", int_ppc_altivec_vperm>; +def VSEL : VA1a_Int<42, "vsel", int_ppc_altivec_vsel>; +// Shuffles. +def VSLDOI : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH), + "vsldoi $vD, $vA, $vB, $SH", VecFP, + [(set VRRC:$vD, + (vector_shuffle (v16i8 VRRC:$vA), VRRC:$vB, + VSLDOI_shuffle_mask:$SH))]>; // VX-Form instructions. AltiVec arithmetic ops. -def VADDCUW : VXForm_1<384, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), - "vaddcuw $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vaddcuw VRRC:$vA, VRRC:$vB))]>; def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), "vaddfp $vD, $vA, $vB", VecFP, [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>; -def VADDSBS : VXForm_1<768, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), - "vaddsbs $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vaddsbs VRRC:$vA, VRRC:$vB))]>; -def VADDSHS : VXForm_1<832, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), - "vaddshs $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vaddshs VRRC:$vA, VRRC:$vB))]>; -def VADDSWS : VXForm_1<896, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), - "vaddsws $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vaddsws VRRC:$vA, VRRC:$vB))]>; -def VADDUBS : VXForm_1<512, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), - "vaddubs $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vaddubs VRRC:$vA, VRRC:$vB))]>; -def VADDUHS : VXForm_1<576, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), - "vadduhs $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vadduhs VRRC:$vA, VRRC:$vB))]>; + +def VADDUBM : VXForm_1<0, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), + "vaddubm $vD, $vA, $vB", VecGeneral, + [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>; +def VADDUHM : VXForm_1<64, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), + "vadduhm $vD, $vA, $vB", VecGeneral, + [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>; def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), "vadduwm $vD, $vA, $vB", VecGeneral, [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>; -def VADDUWS : VXForm_1<640, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), - "vadduws $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vadduws VRRC:$vA, VRRC:$vB))]>; + +def VADDCUW : VX1_Int<384, "vaddcuw", int_ppc_altivec_vaddcuw>; +def VADDSBS : VX1_Int<768, "vaddsbs", int_ppc_altivec_vaddsbs>; +def VADDSHS : VX1_Int<832, "vaddshs", int_ppc_altivec_vaddshs>; +def VADDSWS : VX1_Int<896, "vaddsws", int_ppc_altivec_vaddsws>; +def VADDUBS : VX1_Int<512, "vaddubs", int_ppc_altivec_vaddubs>; +def VADDUHS : VX1_Int<576, "vadduhs", int_ppc_altivec_vadduhs>; +def VADDUWS : VX1_Int<640, "vadduws", int_ppc_altivec_vadduws>; + + def VAND : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), "vand $vD, $vA, $vB", VecFP, [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>; @@ -175,43 +288,112 @@ def VCFUX : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB), (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>; def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB), "vctsxs $vD, $vB, $UIMM", VecFP, - []>; 
+                      [(set VRRC:$vD,
+                            (int_ppc_altivec_vctsxs VRRC:$vB, imm:$UIMM))]>;
 def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                       "vctuxs $vD, $vB, $UIMM", VecFP,
-                      []>;
-def VEXPTEFP : VXForm_2<394, (ops VRRC:$vD, VRRC:$vB),
-                        "vexptefp $vD, $vB", VecFP,
-                        []>;
-def VLOGEFP : VXForm_2<458, (ops VRRC:$vD, VRRC:$vB),
-                       "vlogefp $vD, $vB", VecFP,
-                       []>;
-def VMAXFP : VXForm_1<1034, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                      "vmaxfp $vD, $vA, $vB", VecFP,
-                      []>;
-def VMINFP : VXForm_1<1098, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                      "vminfp $vD, $vA, $vB", VecFP,
-                      []>;
-def VREFP : VXForm_2<266, (ops VRRC:$vD, VRRC:$vB),
-                     "vrefp $vD, $vB", VecFP,
-                     []>;
-def VRFIM : VXForm_2<714, (ops VRRC:$vD, VRRC:$vB),
-                     "vrfim $vD, $vB", VecFP,
-                     []>;
-def VRFIN : VXForm_2<522, (ops VRRC:$vD, VRRC:$vB),
-                     "vrfin $vD, $vB", VecFP,
-                     []>;
-def VRFIP : VXForm_2<650, (ops VRRC:$vD, VRRC:$vB),
-                     "vrfip $vD, $vB", VecFP,
-                     []>;
-def VRFIZ : VXForm_2<586, (ops VRRC:$vD, VRRC:$vB),
-                     "vrfiz $vD, $vB", VecFP,
-                     []>;
-def VRSQRTEFP : VXForm_2<330, (ops VRRC:$vD, VRRC:$vB),
-                         "vrsqrtefp $vD, $vB", VecFP,
-                         [(set VRRC:$vD,(int_ppc_altivec_vrsqrtefp VRRC:$vB))]>;
-def VSUBFP : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                      "vsubfp $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD,
+                            (int_ppc_altivec_vctuxs VRRC:$vB, imm:$UIMM))]>;
+def VEXPTEFP : VX2_Int<394, "vexptefp", int_ppc_altivec_vexptefp>;
+def VLOGEFP  : VX2_Int<458, "vlogefp",  int_ppc_altivec_vlogefp>;
+
+def VAVGSB : VX1_Int<1282, "vavgsb", int_ppc_altivec_vavgsb>;
+def VAVGSH : VX1_Int<1346, "vavgsh", int_ppc_altivec_vavgsh>;
+def VAVGSW : VX1_Int<1410, "vavgsw", int_ppc_altivec_vavgsw>;
+def VAVGUB : VX1_Int<1026, "vavgub", int_ppc_altivec_vavgub>;
+def VAVGUH : VX1_Int<1090, "vavguh", int_ppc_altivec_vavguh>;
+def VAVGUW : VX1_Int<1154, "vavguw", int_ppc_altivec_vavguw>;
+
+def VMAXFP : VX1_Int<1034, "vmaxfp", int_ppc_altivec_vmaxfp>;
+def VMAXSB : VX1_Int< 258, "vmaxsb", int_ppc_altivec_vmaxsb>;
+def VMAXSH : VX1_Int< 322, "vmaxsh", int_ppc_altivec_vmaxsh>;
+def VMAXSW : VX1_Int< 386, "vmaxsw", int_ppc_altivec_vmaxsw>;
+def VMAXUB : VX1_Int<   2, "vmaxub", int_ppc_altivec_vmaxub>;
+def VMAXUH : VX1_Int<  66, "vmaxuh", int_ppc_altivec_vmaxuh>;
+def VMAXUW : VX1_Int< 130, "vmaxuw", int_ppc_altivec_vmaxuw>;
+def VMINFP : VX1_Int<1098, "vminfp", int_ppc_altivec_vminfp>;
+def VMINSB : VX1_Int< 770, "vminsb", int_ppc_altivec_vminsb>;
+def VMINSH : VX1_Int< 834, "vminsh", int_ppc_altivec_vminsh>;
+def VMINSW : VX1_Int< 898, "vminsw", int_ppc_altivec_vminsw>;
+def VMINUB : VX1_Int< 514, "vminub", int_ppc_altivec_vminub>;
+def VMINUH : VX1_Int< 578, "vminuh", int_ppc_altivec_vminuh>;
+def VMINUW : VX1_Int< 642, "vminuw", int_ppc_altivec_vminuw>;
+
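The VMRG*_shuffle_mask leaves defined earlier funnel into
PPC::isVMRGLShuffleMask / PPC::isVMRGHShuffleMask with a unit size of 1, 2, or
4 bytes, and the VMRGH*/VMRGL* instruction definitions that follow match those
masks.  Roughly, vmrgl interleaves the low (big-endian: high-index) 8-byte
halves of the two sources one unit at a time.  A standalone sketch of the
check, with an illustrative name and signature and big-endian element
numbering assumed:

    #include <cassert>

    // Illustrative check in the spirit of PPC::isVMRGLShuffleMask.  In byte
    // indices of the 32-byte concatenation (A,B), with B's bytes at 16..31,
    // the expected vmrgl mask is A[8], B[8], A[9], B[9], ... taken one
    // UnitSize-byte unit at a time.
    static bool isVMRGLMask(const unsigned char Mask[16], unsigned UnitSize,
                            bool Unary) {
      unsigned i = 0;
      for (unsigned Unit = 0; Unit != 8 / UnitSize; ++Unit)
        for (unsigned Half = 0; Half != 2; ++Half)      // A's unit, then B's
          for (unsigned Byte = 0; Byte != UnitSize; ++Byte, ++i) {
            unsigned Expected = 8 + Unit * UnitSize + Byte + (Half ? 16 : 0);
            if (Unary)
              Expected %= 16;                           // vmrgl(X,X)
            if (Mask[i] != Expected)
              return false;
          }
      return true;
    }

    int main() {
      // vmrglw yields words A2 B2 A3 B3: bytes 8-11, 24-27, 12-15, 28-31.
      unsigned char M[16] = { 8,  9, 10, 11, 24, 25, 26, 27,
                             12, 13, 14, 15, 28, 29, 30, 31};
      assert(isVMRGLMask(M, 4, false));
      assert(!isVMRGLMask(M, 2, false));  // not a valid vmrglh mask
      return 0;
    }
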
+def VMRGHB : VXForm_1< 12, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                      "vmrghb $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
+                                      VRRC:$vB, VMRGHB_shuffle_mask))]>;
+def VMRGHH : VXForm_1< 76, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                      "vmrghh $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
+                                      VRRC:$vB, VMRGHH_shuffle_mask))]>;
+def VMRGHW : VXForm_1<140, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                      "vmrghw $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
+                                      VRRC:$vB, VMRGHW_shuffle_mask))]>;
+def VMRGLB : VXForm_1<268, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                      "vmrglb $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
+                                      VRRC:$vB, VMRGLB_shuffle_mask))]>;
+def VMRGLH : VXForm_1<332, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                      "vmrglh $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
+                                      VRRC:$vB, VMRGLH_shuffle_mask))]>;
+def VMRGLW : VXForm_1<396, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                      "vmrglw $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
+                                      VRRC:$vB, VMRGLW_shuffle_mask))]>;
+
+def VMSUMMBM : VA1a_Int<37, "vmsummbm", int_ppc_altivec_vmsummbm>;
+def VMSUMSHM : VA1a_Int<40, "vmsumshm", int_ppc_altivec_vmsumshm>;
+def VMSUMSHS : VA1a_Int<41, "vmsumshs", int_ppc_altivec_vmsumshs>;
+def VMSUMUBM : VA1a_Int<36, "vmsumubm", int_ppc_altivec_vmsumubm>;
+def VMSUMUHM : VA1a_Int<38, "vmsumuhm", int_ppc_altivec_vmsumuhm>;
+def VMSUMUHS : VA1a_Int<39, "vmsumuhs", int_ppc_altivec_vmsumuhs>;
+
+def VMULESB : VX1_Int<776, "vmulesb", int_ppc_altivec_vmulesb>;
+def VMULESH : VX1_Int<840, "vmulesh", int_ppc_altivec_vmulesh>;
+def VMULEUB : VX1_Int<520, "vmuleub", int_ppc_altivec_vmuleub>;
+def VMULEUH : VX1_Int<584, "vmuleuh", int_ppc_altivec_vmuleuh>;
+def VMULOSB : VX1_Int<264, "vmulosb", int_ppc_altivec_vmulosb>;
+def VMULOSH : VX1_Int<328, "vmulosh", int_ppc_altivec_vmulosh>;
+def VMULOUB : VX1_Int<  8, "vmuloub", int_ppc_altivec_vmuloub>;
+def VMULOUH : VX1_Int< 72, "vmulouh", int_ppc_altivec_vmulouh>;
+
+def VREFP     : VX2_Int<266, "vrefp",     int_ppc_altivec_vrefp>;
+def VRFIM     : VX2_Int<714, "vrfim",     int_ppc_altivec_vrfim>;
+def VRFIN     : VX2_Int<522, "vrfin",     int_ppc_altivec_vrfin>;
+def VRFIP     : VX2_Int<650, "vrfip",     int_ppc_altivec_vrfip>;
+def VRFIZ     : VX2_Int<586, "vrfiz",     int_ppc_altivec_vrfiz>;
+def VRSQRTEFP : VX2_Int<330, "vrsqrtefp", int_ppc_altivec_vrsqrtefp>;
+
+def VSUBCUW : VX1_Int<1408, "vsubcuw", int_ppc_altivec_vsubcuw>;
+
+def VSUBFP  : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                      "vsubfp $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;
+def VSUBUBM : VXForm_1<1024, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                      "vsububm $vD, $vA, $vB", VecGeneral,
+                      [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>;
+def VSUBUHM : VXForm_1<1088, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                      "vsubuhm $vD, $vA, $vB", VecGeneral,
+                      [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
+def VSUBUWM : VXForm_1<1152, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                      "vsubuwm $vD, $vA, $vB", VecGeneral,
+                      [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>;
+
+def VSUBSBS : VX1_Int<1792, "vsubsbs" , int_ppc_altivec_vsubsbs>;
+def VSUBSHS : VX1_Int<1856, "vsubshs" , int_ppc_altivec_vsubshs>;
+def VSUBSWS : VX1_Int<1920, "vsubsws" , int_ppc_altivec_vsubsws>;
+def VSUBUBS : VX1_Int<1536, "vsububs" , int_ppc_altivec_vsububs>;
+def VSUBUHS : VX1_Int<1600, "vsubuhs" , int_ppc_altivec_vsubuhs>;
+def VSUBUWS : VX1_Int<1664, "vsubuws" , int_ppc_altivec_vsubuws>;
+def VSUMSWS : VX1_Int<1928, "vsumsws" , int_ppc_altivec_vsumsws>;
+def VSUM2SWS: VX1_Int<1672, "vsum2sws", int_ppc_altivec_vsum2sws>;
+def VSUM4SBS: VX1_Int<1800, "vsum4sbs", int_ppc_altivec_vsum4sbs>;
+def VSUM4SHS: VX1_Int<1608, "vsum4shs", int_ppc_altivec_vsum4shs>;
+def VSUM4UBS: VX1_Int<1544, "vsum4ubs", int_ppc_altivec_vsum4ubs>;
+
 def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vnor $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA),
                                               VRRC:$vB)))]>;
@@ -222,57 +404,151 @@ def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vxor $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;
 
+def VRLB : VX1_Int<   4, "vrlb", int_ppc_altivec_vrlb>;
+def VRLH : VX1_Int<  68, "vrlh", int_ppc_altivec_vrlh>;
+def VRLW : VX1_Int< 132, "vrlw", int_ppc_altivec_vrlw>;
+
+def VSL   : VX1_Int< 452, "vsl" , int_ppc_altivec_vsl >;
+def VSLO : VX1_Int<1036, "vslo", int_ppc_altivec_vslo>; +def VSLB : VX1_Int< 260, "vslb", int_ppc_altivec_vslb>; +def VSLH : VX1_Int< 324, "vslh", int_ppc_altivec_vslh>; +def VSLW : VX1_Int< 388, "vslw", int_ppc_altivec_vslw>; + def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB), "vspltb $vD, $vB, $UIMM", VecPerm, - []>; + [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef), + VSPLTB_shuffle_mask:$UIMM))]>; def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB), "vsplth $vD, $vB, $UIMM", VecPerm, - []>; + [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef), + VSPLTH_shuffle_mask:$UIMM))]>; def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB), "vspltw $vD, $vB, $UIMM", VecPerm, - [(set VRRC:$vD, (vector_shuffle (v4f32 VRRC:$vB), (undef), - VSPLT_shuffle_mask:$UIMM))]>; - -def VSPLTISB : VXForm_1<780, (ops VRRC:$vD, s5imm:$SIMM), - "vspltisb $vD, $SIMM", VecPerm, - [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>; -def VSPLTISH : VXForm_1<844, (ops VRRC:$vD, s5imm:$SIMM), - "vspltish $vD, $SIMM", VecPerm, - [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>; -def VSPLTISW : VXForm_1<908, (ops VRRC:$vD, s5imm:$SIMM), - "vspltisw $vD, $SIMM", VecPerm, - [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>; + [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef), + VSPLTW_shuffle_mask:$UIMM))]>; + +def VSR : VX1_Int< 708, "vsr" , int_ppc_altivec_vsr>; +def VSRO : VX1_Int<1100, "vsro" , int_ppc_altivec_vsro>; +def VSRAB : VX1_Int< 772, "vsrab", int_ppc_altivec_vsrab>; +def VSRAH : VX1_Int< 836, "vsrah", int_ppc_altivec_vsrah>; +def VSRAW : VX1_Int< 900, "vsraw", int_ppc_altivec_vsraw>; +def VSRB : VX1_Int< 516, "vsrb" , int_ppc_altivec_vsrb>; +def VSRH : VX1_Int< 580, "vsrh" , int_ppc_altivec_vsrh>; +def VSRW : VX1_Int< 644, "vsrw" , int_ppc_altivec_vsrw>; + + +def VSPLTISB : VXForm_3<780, (ops VRRC:$vD, s5imm:$SIMM), + "vspltisb $vD, $SIMM", VecPerm, + [(set VRRC:$vD, (v16i8 vecspltisb:$SIMM))]>; +def VSPLTISH : VXForm_3<844, (ops VRRC:$vD, s5imm:$SIMM), + "vspltish $vD, $SIMM", VecPerm, + [(set VRRC:$vD, (v8i16 vecspltish:$SIMM))]>; +def VSPLTISW : VXForm_3<908, (ops VRRC:$vD, s5imm:$SIMM), + "vspltisw $vD, $SIMM", VecPerm, + [(set VRRC:$vD, (v4i32 vecspltisw:$SIMM))]>; + +// Vector Pack. +def VPKPX : VX1_Int<782, "vpkpx", int_ppc_altivec_vpkpx>; +def VPKSHSS : VX1_Int<398, "vpkshss", int_ppc_altivec_vpkshss>; +def VPKSHUS : VX1_Int<270, "vpkshus", int_ppc_altivec_vpkshus>; +def VPKSWSS : VX1_Int<462, "vpkswss", int_ppc_altivec_vpkswss>; +def VPKSWUS : VX1_Int<334, "vpkswus", int_ppc_altivec_vpkswus>; +def VPKUHUM : VXForm_1<14, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), + "vpkuhum $vD, $vA, $vB", VecFP, + [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA), + VRRC:$vB, VPKUHUM_shuffle_mask))]>; +def VPKUHUS : VX1_Int<142, "vpkuhus", int_ppc_altivec_vpkuhus>; +def VPKUWUM : VXForm_1<78, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), + "vpkuwum $vD, $vA, $vB", VecFP, + [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA), + VRRC:$vB, VPKUWUM_shuffle_mask))]>; +def VPKUWUS : VX1_Int<206, "vpkuwus", int_ppc_altivec_vpkuwus>; + +// Vector Unpack. +def VUPKHPX : VX2_Int<846, "vupkhpx", int_ppc_altivec_vupkhpx>; +def VUPKHSB : VX2_Int<526, "vupkhsb", int_ppc_altivec_vupkhsb>; +def VUPKHSH : VX2_Int<590, "vupkhsh", int_ppc_altivec_vupkhsh>; +def VUPKLPX : VX2_Int<974, "vupklpx", int_ppc_altivec_vupklpx>; +def VUPKLSB : VX2_Int<654, "vupklsb", int_ppc_altivec_vupklsb>; +def VUPKLSH : VX2_Int<718, "vupklsh", int_ppc_altivec_vupklsh>; + + +// Altivec Comparisons. 
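Each vcmp* instruction below writes a per-element mask: all ones in every
element where the predicate holds, all zeros elsewhere.  The recording ('.')
forms additionally summarize the outcome in condition register field CR6,
which is what the "let Defs = [CR6]; let RC = 1;" lines in the VCMPo class
below model.  A scalar model of one recording form follows; the names are
illustrative, and the two bools stand in for the relevant bits of the 4-bit
CR6 field:

    #include <cassert>
    #include <cstdint>

    // Scalar model of vcmpgtsw.: each 32-bit lane becomes all ones when
    // a[i] > b[i] and all zeros otherwise; CR6 records whether the
    // predicate held in every lane or in none of them.
    struct VCmpResult {
      uint32_t Lane[4];
      bool AllTrue;   // CR6 "all elements compared true"
      bool NoneTrue;  // CR6 "no element compared true"
    };

    static VCmpResult vcmpgtsw_dot(const int32_t A[4], const int32_t B[4]) {
      VCmpResult R = {{0, 0, 0, 0}, true, true};
      for (int i = 0; i != 4; ++i) {
        bool T = A[i] > B[i];
        R.Lane[i] = T ? 0xFFFFFFFFu : 0u;
        R.AllTrue = R.AllTrue && T;
        R.NoneTrue = R.NoneTrue && !T;
      }
      return R;
    }

    int main() {
      int32_t A[4] = {5, -1, 7, 0};
      int32_t B[4] = {4, -2, 6, -9};
      VCmpResult R = vcmpgtsw_dot(A, B);
      assert(R.AllTrue && !R.NoneTrue && R.Lane[2] == 0xFFFFFFFFu);
      return 0;
    }
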
+
+class VCMP<bits<10> xo, string asmstr, ValueType Ty>
+  : VXRForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), asmstr, VecFPCompare,
+              [(set VRRC:$vD, (Ty (PPCvcmp VRRC:$vA, VRRC:$vB, xo)))]>;
+class VCMPo<bits<10> xo, string asmstr, ValueType Ty>
+  : VXRForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), asmstr, VecFPCompare,
+              [(set VRRC:$vD, (Ty (PPCvcmp_o VRRC:$vA, VRRC:$vB, xo)))]> {
+  let Defs = [CR6];
+  let RC = 1;
+}
+
+// f32 element comparisons.
+def VCMPBFP   : VCMP <966, "vcmpbfp $vD, $vA, $vB"  , v4f32>;
+def VCMPBFPo  : VCMPo<966, "vcmpbfp. $vD, $vA, $vB" , v4f32>;
+def VCMPEQFP  : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>;
+def VCMPEQFPo : VCMPo<198, "vcmpeqfp. $vD, $vA, $vB", v4f32>;
+def VCMPGEFP  : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>;
+def VCMPGEFPo : VCMPo<454, "vcmpgefp. $vD, $vA, $vB", v4f32>;
+def VCMPGTFP  : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>;
+def VCMPGTFPo : VCMPo<710, "vcmpgtfp. $vD, $vA, $vB", v4f32>;
+
+// i8 element comparisons.
+def VCMPEQUB  : VCMP <  6, "vcmpequb $vD, $vA, $vB" , v16i8>;
+def VCMPEQUBo : VCMPo<  6, "vcmpequb. $vD, $vA, $vB", v16i8>;
+def VCMPGTSB  : VCMP <774, "vcmpgtsb $vD, $vA, $vB" , v16i8>;
+def VCMPGTSBo : VCMPo<774, "vcmpgtsb. $vD, $vA, $vB", v16i8>;
+def VCMPGTUB  : VCMP <518, "vcmpgtub $vD, $vA, $vB" , v16i8>;
+def VCMPGTUBo : VCMPo<518, "vcmpgtub. $vD, $vA, $vB", v16i8>;
+
+// i16 element comparisons.
+def VCMPEQUH  : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>;
+def VCMPEQUHo : VCMPo< 70, "vcmpequh. $vD, $vA, $vB", v8i16>;
+def VCMPGTSH  : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>;
+def VCMPGTSHo : VCMPo<838, "vcmpgtsh. $vD, $vA, $vB", v8i16>;
+def VCMPGTUH  : VCMP <582, "vcmpgtuh $vD, $vA, $vB" , v8i16>;
+def VCMPGTUHo : VCMPo<582, "vcmpgtuh. $vD, $vA, $vB", v8i16>;
+
+// i32 element comparisons.
+def VCMPEQUW  : VCMP <134, "vcmpequw $vD, $vA, $vB" , v4i32>;
+def VCMPEQUWo : VCMPo<134, "vcmpequw. $vD, $vA, $vB", v4i32>;
+def VCMPGTSW  : VCMP <902, "vcmpgtsw $vD, $vA, $vB" , v4i32>;
+def VCMPGTSWo : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>;
+def VCMPGTUW  : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>;
+def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;
 
-// VX-Form Pseudo Instructions
-
 def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD), "vxor $vD, $vD, $vD", VecFP,
-              [(set VRRC:$vD, (v4f32 vecimm0))]>;
+              [(set VRRC:$vD, (v4i32 immAllZerosV))]>;
 }
 
 //===----------------------------------------------------------------------===//
 // Additional Altivec Patterns
 //
 
-// Undef/Zero.
-def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
-def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
-def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
-def : Pat<(v16i8 vecimm0), (v16i8 (V_SET0))>;
-def : Pat<(v8i16 vecimm0), (v8i16 (V_SET0))>;
-def : Pat<(v4i32 vecimm0), (v4i32 (V_SET0))>;
+// DS* intrinsics.
+def : Pat<(int_ppc_altivec_dss imm:$STRM), (DSS 0, imm:$STRM, 0, 0)>;
+def : Pat<(int_ppc_altivec_dssall), (DSS 1, 0, 0, 0)>;
+def : Pat<(int_ppc_altivec_dst GPRC:$rA, GPRC:$rB, imm:$STRM),
+          (DST 0, imm:$STRM, GPRC:$rA, GPRC:$rB)>;
+def : Pat<(int_ppc_altivec_dstt GPRC:$rA, GPRC:$rB, imm:$STRM),
+          (DST 1, imm:$STRM, GPRC:$rA, GPRC:$rB)>;
+def : Pat<(int_ppc_altivec_dstst GPRC:$rA, GPRC:$rB, imm:$STRM),
+          (DSTST 0, imm:$STRM, GPRC:$rA, GPRC:$rB)>;
+def : Pat<(int_ppc_altivec_dststt GPRC:$rA, GPRC:$rB, imm:$STRM),
+          (DSTST 1, imm:$STRM, GPRC:$rA, GPRC:$rB)>;
+
+// Undef.
+def : Pat<(v16i8 (undef)), (IMPLICIT_DEF_VRRC)>;
+def : Pat<(v8i16 (undef)), (IMPLICIT_DEF_VRRC)>;
+def : Pat<(v4f32 (undef)), (IMPLICIT_DEF_VRRC)>;
 
 // Loads.
-def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>; -def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>; -def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>; +def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>; // Stores. -def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst), - (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>; -def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst), - (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>; def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst), (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>; @@ -293,35 +569,41 @@ def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>; def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>; def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>; -// Immediate vector formation with vsplti*. -def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>; -def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>; -def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>; - -def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>; -def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>; -def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>; - -def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>; -def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>; -def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>; +// Shuffles. + +// Match vsldoi(x,x), vpkuwum(x,x), vpkuhum(x,x) +def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VSLDOI_unary_shuffle_mask:$in), + (VSLDOI VRRC:$vA, VRRC:$vA, VSLDOI_unary_shuffle_mask:$in)>; +def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef,VPKUWUM_unary_shuffle_mask:$in), + (VPKUWUM VRRC:$vA, VRRC:$vA)>; +def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef,VPKUHUM_unary_shuffle_mask:$in), + (VPKUHUM VRRC:$vA, VRRC:$vA)>; + +// Match vmrg*(x,x) +def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGLB_unary_shuffle_mask:$in), + (VMRGLB VRRC:$vA, VRRC:$vA)>; +def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGLH_unary_shuffle_mask:$in), + (VMRGLH VRRC:$vA, VRRC:$vA)>; +def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGLW_unary_shuffle_mask:$in), + (VMRGLW VRRC:$vA, VRRC:$vA)>; +def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGHB_unary_shuffle_mask:$in), + (VMRGHB VRRC:$vA, VRRC:$vA)>; +def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGHH_unary_shuffle_mask:$in), + (VMRGHH VRRC:$vA, VRRC:$vA)>; +def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGHW_unary_shuffle_mask:$in), + (VMRGHW VRRC:$vA, VRRC:$vA)>; // Logical Operations -def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>; -def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>; -def : Pat<(v16i8 (or VRRC:$A, VRRC:$B)), (v16i8 (VOR VRRC:$A, VRRC:$B))>; -def : Pat<(v8i16 (or VRRC:$A, VRRC:$B)), (v8i16 (VOR VRRC:$A, VRRC:$B))>; -def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>; -def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>; -def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))),(v16i8 (VNOR VRRC:$A, VRRC:$B))>; -def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))),(v8i16 (VNOR VRRC:$A, VRRC:$B))>; -def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))), - (v16i8 (VANDC VRRC:$A, VRRC:$B))>; -def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))), - (v8i16 (VANDC VRRC:$A, VRRC:$B))>; +def : Pat<(v4i32 (vnot VRRC:$vA)), (VNOR VRRC:$vA, VRRC:$vA)>; +def : 
Pat<(v4i32 (vnot_conv VRRC:$vA)), (VNOR VRRC:$vA, VRRC:$vA)>; + +def : Pat<(v4i32 (vnot_conv (or VRRC:$A, VRRC:$B))), + (VNOR VRRC:$A, VRRC:$B)>; +def : Pat<(v4i32 (and VRRC:$A, (vnot_conv VRRC:$B))), + (VANDC VRRC:$A, VRRC:$B)>; def : Pat<(fmul VRRC:$vA, VRRC:$vB), - (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>; + (VMADDFP VRRC:$vA, VRRC:$vB, (v4i32 (V_SET0)))>; // Fused multiply add and multiply sub for packed float. These are represented // separately from the real instructions above, for operations that must have @@ -336,13 +618,5 @@ def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C), def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C), (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>; -def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM), - (v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>; - -def : Pat<(PPCvperm (v4i32 VRRC:$vA), VRRC:$vB, VRRC:$vC), - (v4i32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>; - -def : Pat<(v4i32 (PPClve_x xoaddr:$src)), - (v4i32 (LVEWX xoaddr:$src))>; - - +def : Pat<(PPCvperm (v16i8 VRRC:$vA), VRRC:$vB, VRRC:$vC), + (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC)>;
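The final pattern above lowers the target-specific PPCvperm node directly to
VPERM, the general fallback the backend uses when no cheaper shuffle
instruction matches.  Its semantics are simple to state: each result byte is
chosen from the 32-byte concatenation of the two source registers by the low
five bits of the corresponding control-vector byte.  A small reference model
(the function name is illustrative):

    #include <cassert>
    #include <cstdint>

    // Reference model of vperm: result byte i is byte (C[i] & 0x1F) of the
    // 32-byte concatenation (A,B), byte 0 being the most significant byte
    // of A in big-endian element order.
    static void vperm_ref(const uint8_t A[16], const uint8_t B[16],
                          const uint8_t C[16], uint8_t D[16]) {
      for (int i = 0; i != 16; ++i) {
        unsigned Sel = C[i] & 0x1F;          // only the low 5 bits select
        D[i] = Sel < 16 ? A[Sel] : B[Sel - 16];
      }
    }

    int main() {
      uint8_t A[16], B[16], C[16], D[16];
      for (int i = 0; i != 16; ++i) {
        A[i] = (uint8_t)i;
        B[i] = (uint8_t)(100 + i);
        C[i] = (uint8_t)(31 - i);            // reverse B then A, as a demo
      }
      vperm_ref(A, B, C, D);
      assert(D[0] == 115 && D[15] == 100);   // C[0]=31 -> B[15]; C[15]=16 -> B[0]
      return 0;
    }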