; SSE2: # BB#0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2f64_00:
; SSE3: # BB#0:
; SSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_00:
; SSSE3: # BB#0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_00:
; SSE41: # BB#0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_00:
; AVX: # BB#0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
  %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 0>
  ret <2 x double> %shuffle
}
; Splat of the low element of %b: mask <2,2> selects operand 1's element 0 twice.
; All five check prefixes and the shuffle mask identify this as shuffle_v2f64_22
; (the stray "_10" name was diff-merge residue and disagreed with every label).
define <2 x double> @shuffle_v2f64_22(<2 x double> %a, <2 x double> %b) {
; SSE2-LABEL: shuffle_v2f64_22:
; SSE2: # BB#0:
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0,0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2f64_22:
; SSE3: # BB#0:
; SSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_22:
; SSSE3: # BB#0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_22:
; SSE41: # BB#0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_22:
; AVX: # BB#0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX-NEXT: retq
  %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 2>
  ret <2 x double> %shuffle
}
; Broadcast a scalar double from a register into both lanes of a v2f64.
; The body (insertelement of `double %a`) and all five check prefixes identify
; this as insert_dup_reg_v2f64(double); the former `<2 x double>` signature was
; diff-merge residue and made the `insertelement ... double %a` line invalid IR.
define <2 x double> @insert_dup_reg_v2f64(double %a) {
; SSE2-LABEL: insert_dup_reg_v2f64:
; SSE2: # BB#0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_dup_reg_v2f64:
; SSE3: # BB#0:
; SSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_dup_reg_v2f64:
; SSSE3: # BB#0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_reg_v2f64:
; SSE41: # BB#0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_dup_reg_v2f64:
; AVX: # BB#0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
  %v = insertelement <2 x double> undef, double %a, i32 0
  %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 0, i32 0>
  ret <2 x double> %shuffle
}
define <2 x double> @insert_dup_mem_v2f64(double* %ptr) {