(VMOVAPSYmr addr:$dst, VR256:$src)>;
def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
(VMOVAPSYmr addr:$dst, VR256:$src)>;
+ // v16i16 and v32i8 aligned 256-bit stores are lowered to the same aligned
+ // FP store (VMOVAPSYmr) used for v4i64/v8i32 above, since this block
+ // selects FP moves for all 256-bit integer vector stores.
+ def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
+ (VMOVAPSYmr addr:$dst, VR256:$src)>;
+ def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
+ (VMOVAPSYmr addr:$dst, VR256:$src)>;
def : Pat<(store (v4i64 VR256:$src), addr:$dst),
(VMOVUPSYmr addr:$dst, VR256:$src)>;
def : Pat<(store (v8i32 VR256:$src), addr:$dst),
(VMOVUPSYmr addr:$dst, VR256:$src)>;
+ // Plain (potentially unaligned) stores of the new types use the unaligned
+ // FP store, VMOVUPSYmr, matching the v4i64/v8i32 patterns directly above.
+ def : Pat<(store (v16i16 VR256:$src), addr:$dst),
+ (VMOVUPSYmr addr:$dst, VR256:$src)>;
+ def : Pat<(store (v32i8 VR256:$src), addr:$dst),
+ (VMOVUPSYmr addr:$dst, VR256:$src)>;
}
//===----------------------------------------------------------------------===//
ret <4 x double> %i0
}
+; Aligned (align 32) store of a <16 x i16> vector must select the aligned
+; 256-bit move vmovaps (via the new alignedstore v16i16 pattern).
+; CHECK: vmovaps %ymm
+define void @storev16i16(<16 x i16> %a) nounwind {
+ store <16 x i16> %a, <16 x i16>* undef, align 32
+ unreachable
+}
+
+; Under-aligned (align 4) store of a <16 x i16> vector must fall back to the
+; unaligned 256-bit move vmovups, not vmovaps.
+; CHECK: vmovups %ymm
+define void @storev16i16_01(<16 x i16> %a) nounwind {
+ store <16 x i16> %a, <16 x i16>* undef, align 4
+ unreachable
+}
+
+; Aligned (align 32) store of a <32 x i8> vector must select the aligned
+; 256-bit move vmovaps (via the new alignedstore v32i8 pattern).
+; CHECK: vmovaps %ymm
+define void @storev32i8(<32 x i8> %a) nounwind {
+ store <32 x i8> %a, <32 x i8>* undef, align 32
+ unreachable
+}
+
+; Under-aligned (align 4) store of a <32 x i8> vector must fall back to the
+; unaligned 256-bit move vmovups, not vmovaps.
+; CHECK: vmovups %ymm
+define void @storev32i8_01(<32 x i8> %a) nounwind {
+ store <32 x i8> %a, <32 x i8>* undef, align 4
+ unreachable
+}
+