ret <2 x double> %shuf
}
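+; Splat of a scalar horizontal add: extract both lanes, fadd, then splat the
+; sum. With fast horizontal ops this should select (V)HADDPD; otherwise the
+; shuffle+addsd+movddup sequence is kept. Note that haddpd with identical
+; source operands already writes the sum to both lanes, so the trailing
+; (v)movddup in the *_FAST sequences below is redundant.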
+define <2 x double> @hadd_v2f64_scalar_splat(<2 x double> %a) {
+; SSSE3_SLOW-LABEL: hadd_v2f64_scalar_splat:
+; SSSE3_SLOW: # %bb.0:
+; SSSE3_SLOW-NEXT: movapd %xmm0, %xmm1
+; SSSE3_SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSSE3_SLOW-NEXT: addsd %xmm0, %xmm1
+; SSSE3_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
+; SSSE3_SLOW-NEXT: retq
+;
+; SSSE3_FAST-LABEL: hadd_v2f64_scalar_splat:
+; SSSE3_FAST: # %bb.0:
+; SSSE3_FAST-NEXT: haddpd %xmm0, %xmm0
+; SSSE3_FAST-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
+; SSSE3_FAST-NEXT: retq
+;
+; AVX1_SLOW-LABEL: hadd_v2f64_scalar_splat:
+; AVX1_SLOW: # %bb.0:
+; AVX1_SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX1_SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1_SLOW-NEXT: retq
+;
+; AVX1_FAST-LABEL: hadd_v2f64_scalar_splat:
+; AVX1_FAST: # %bb.0:
+; AVX1_FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX1_FAST-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1_FAST-NEXT: retq
+;
+; AVX2_SLOW-LABEL: hadd_v2f64_scalar_splat:
+; AVX2_SLOW: # %bb.0:
+; AVX2_SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX2_SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX2_SLOW-NEXT: retq
+;
+; AVX2_FAST-LABEL: hadd_v2f64_scalar_splat:
+; AVX2_FAST: # %bb.0:
+; AVX2_FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX2_FAST-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX2_FAST-NEXT: retq
+ %a0 = extractelement <2 x double> %a, i32 0
+ %a1 = extractelement <2 x double> %a, i32 1
+ %hop = fadd double %a0, %a1
+ %ins = insertelement <2 x double> undef, double %hop, i32 0
+ %shuf = shufflevector <2 x double> %ins, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ ret <2 x double> %shuf
+}
+
define <4 x double> @hadd_v4f64(<4 x double> %a) {
; SSSE3_SLOW-LABEL: hadd_v4f64:
; SSSE3_SLOW: # %bb.0: