; 128-bit vectors, float/double, fadd/fsub
-define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32:
+define float @extract_extract01_v4f32_fadd_f32(<4 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v4f32_fadd_f32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32:
+; SSE3-FAST-LABEL: extract_extract01_v4f32_fadd_f32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX-SLOW-LABEL: extract_extract01_v4f32_fadd_f32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX-FAST-LABEL: extract_extract01_v4f32_fadd_f32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: retq
ret float %x01
}
-define float @extract_extract_v4f32_fadd_f32_commute(<4 x float> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_commute:
+define float @extract_extract23_v4f32_fadd_f32(<4 x float> %x) {
+; SSE3-LABEL: extract_extract23_v4f32_fadd_f32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movaps %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: addss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v4f32_fadd_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+; Elements 2 and 3 are currently lowered with shuffles + scalar addss/vaddss;
+; no (h)addps is formed for this pair (CHECK lines above are script-generated).
+ %x0 = extractelement <4 x float> %x, i32 2
+ %x1 = extractelement <4 x float> %x, i32 3
+ %x01 = fadd float %x0, %x1
+ ret float %x01
+}
+
+define float @extract_extract01_v4f32_fadd_f32_commute(<4 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSE3-FAST-LABEL: extract_extract01_v4f32_fadd_f32_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX-FAST-LABEL: extract_extract01_v4f32_fadd_f32_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: retq
ret float %x01
}
-define double @extract_extract_v2f64_fadd_f64(<2 x double> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v2f64_fadd_f64:
+define float @extract_extract23_v4f32_fadd_f32_commute(<4 x float> %x) {
+; SSE3-LABEL: extract_extract23_v4f32_fadd_f32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movaps %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: addss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v4f32_fadd_f32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+; Same as the non-commuted test but with the fadd operands swapped
+; (%x1 + %x0); still shuffles + scalar add, no horizontal-add.
+ %x0 = extractelement <4 x float> %x, i32 2
+ %x1 = extractelement <4 x float> %x, i32 3
+ %x01 = fadd float %x1, %x0
+ ret float %x01
+}
+
+define double @extract_extract01_v2f64_fadd_f64(<2 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v2f64_fadd_f64:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3-FAST-LABEL: extract_extract01_v2f64_fadd_f64:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-SLOW-LABEL: extract_extract01_v2f64_fadd_f64:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-FAST-LABEL: extract_extract01_v2f64_fadd_f64:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: retq
ret double %x01
}
-define double @extract_extract_v2f64_fadd_f64_commute(<2 x double> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v2f64_fadd_f64_commute:
+define double @extract_extract01_v2f64_fadd_f64_commute(<2 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v2f64_fadd_f64_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3-FAST-LABEL: extract_extract01_v2f64_fadd_f64_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-SLOW-LABEL: extract_extract01_v2f64_fadd_f64_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-FAST-LABEL: extract_extract01_v2f64_fadd_f64_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: retq
ret double %x01
}
-define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v4f32_fsub_f32:
+define float @extract_extract01_v4f32_fsub_f32(<4 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v4f32_fsub_f32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: subss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3-FAST-LABEL: extract_extract01_v4f32_fsub_f32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: hsubps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-SLOW-LABEL: extract_extract01_v4f32_fsub_f32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-FAST-LABEL: extract_extract01_v4f32_fsub_f32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhsubps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: retq
ret float %x01
}
-define float @extract_extract_v4f32_fsub_f32_commute(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fsub_f32_commute:
+define float @extract_extract23_v4f32_fsub_f32(<4 x float> %x) {
+; SSE3-LABEL: extract_extract23_v4f32_fsub_f32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movaps %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: subss %xmm0, %xmm1
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v4f32_fsub_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+; fsub of elements 2 - 3: operand order matters for subtraction, so the
+; lowering keeps elt2 in xmm1 and subtracts the shuffled elt3; no hsubps.
+ %x0 = extractelement <4 x float> %x, i32 2
+ %x1 = extractelement <4 x float> %x, i32 3
+ %x01 = fsub float %x0, %x1
+ ret float %x01
+}
+
+define float @extract_extract01_v4f32_fsub_f32_commute(<4 x float> %x) {
+; SSE3-LABEL: extract_extract01_v4f32_fsub_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm0, %xmm1
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v4f32_fsub_f32_commute:
+; AVX-LABEL: extract_extract01_v4f32_fsub_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
ret float %x01
}
-define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v2f64_fsub_f64:
+define float @extract_extract23_v4f32_fsub_f32_commute(<4 x float> %x) {
+; SSE3-LABEL: extract_extract23_v4f32_fsub_f32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movaps %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: subss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v4f32_fsub_f32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+; Commuted subtraction (elt3 - elt2): the subss/vsubss operand order is
+; flipped relative to the non-commuted test above.
+ %x0 = extractelement <4 x float> %x, i32 2
+ %x1 = extractelement <4 x float> %x, i32 3
+ %x01 = fsub float %x1, %x0
+ ret float %x01
+}
+
+define double @extract_extract01_v2f64_fsub_f64(<2 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v2f64_fsub_f64:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT: subsd %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3-FAST-LABEL: extract_extract01_v2f64_fsub_f64:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: hsubpd %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-SLOW-LABEL: extract_extract01_v2f64_fsub_f64:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-FAST-LABEL: extract_extract01_v2f64_fsub_f64:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: retq
ret double %x01
}
-define double @extract_extract_v2f64_fsub_f64_commute(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fsub_f64_commute:
+define double @extract_extract01_v2f64_fsub_f64_commute(<2 x double> %x) {
+; SSE3-LABEL: extract_extract01_v2f64_fsub_f64_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v2f64_fsub_f64_commute:
+; AVX-LABEL: extract_extract01_v2f64_fsub_f64_commute:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; 256-bit vectors, float/double, fadd/fsub
-define float @extract_extract_v8f32_fadd_f32(<8 x float> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8f32_fadd_f32:
+define float @extract_extract01_v8f32_fadd_f32(<8 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8f32_fadd_f32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8f32_fadd_f32:
+; SSE3-FAST-LABEL: extract_extract01_v8f32_fadd_f32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX-SLOW-LABEL: extract_extract01_v8f32_fadd_f32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX-FAST-LABEL: extract_extract01_v8f32_fadd_f32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret float %x01
}
-define float @extract_extract_v8f32_fadd_f32_commute(<8 x float> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8f32_fadd_f32_commute:
+define float @extract_extract23_v8f32_fadd_f32(<8 x float> %x) {
+; SSE3-LABEL: extract_extract23_v8f32_fadd_f32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movaps %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: addss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v8f32_fadd_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+; 256-bit source, but elements 2+3 live in the low 128 bits, so only xmm ops
+; are needed; AVX emits vzeroupper before returning (ymm arg was live).
+ %x0 = extractelement <8 x float> %x, i32 2
+ %x1 = extractelement <8 x float> %x, i32 3
+ %x01 = fadd float %x0, %x1
+ ret float %x01
+}
+
+define float @extract_extract01_v8f32_fadd_f32_commute(<8 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8f32_fadd_f32_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSE3-FAST-LABEL: extract_extract01_v8f32_fadd_f32_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX-SLOW-LABEL: extract_extract01_v8f32_fadd_f32_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX-FAST-LABEL: extract_extract01_v8f32_fadd_f32_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret float %x01
}
-define double @extract_extract_v4f64_fadd_f64(<4 x double> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v4f64_fadd_f64:
+define float @extract_extract23_v8f32_fadd_f32_commute(<8 x float> %x) {
+; SSE3-LABEL: extract_extract23_v8f32_fadd_f32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movaps %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: addss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v8f32_fadd_f32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+; Commuted fadd of elements 2+3 from a 256-bit vector; same shuffle + scalar
+; add lowering, with vzeroupper on the AVX path.
+ %x0 = extractelement <8 x float> %x, i32 2
+ %x1 = extractelement <8 x float> %x, i32 3
+ %x01 = fadd float %x1, %x0
+ ret float %x01
+}
+
+define double @extract_extract01_v4f64_fadd_f64(<4 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v4f64_fadd_f64:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4f64_fadd_f64:
+; SSE3-FAST-LABEL: extract_extract01_v4f64_fadd_f64:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-SLOW-LABEL: extract_extract01_v4f64_fadd_f64:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-FAST-LABEL: extract_extract01_v4f64_fadd_f64:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret double %x01
}
-define double @extract_extract_v4f64_fadd_f64_commute(<4 x double> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v4f64_fadd_f64_commute:
+define double @extract_extract01_v4f64_fadd_f64_commute(<4 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v4f64_fadd_f64_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSE3-FAST-LABEL: extract_extract01_v4f64_fadd_f64_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-SLOW-LABEL: extract_extract01_v4f64_fadd_f64_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-FAST-LABEL: extract_extract01_v4f64_fadd_f64_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret double %x01
}
-define float @extract_extract_v8f32_fsub_f32(<8 x float> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8f32_fsub_f32:
+define float @extract_extract01_v8f32_fsub_f32(<8 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8f32_fsub_f32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: subss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8f32_fsub_f32:
+; SSE3-FAST-LABEL: extract_extract01_v8f32_fsub_f32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: hsubps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX-SLOW-LABEL: extract_extract01_v8f32_fsub_f32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX-FAST-LABEL: extract_extract01_v8f32_fsub_f32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhsubps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret float %x01
}
+define float @extract_extract23_v8f32_fsub_f32(<8 x float> %x) {
+; SSE3-LABEL: extract_extract23_v8f32_fsub_f32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movaps %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: subss %xmm0, %xmm1
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v8f32_fsub_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+; fsub of elements 2 - 3 from a 256-bit vector: shuffles + scalar subss/vsubss
+; (no hsubps), with vzeroupper on the AVX path before returning.
+ %x0 = extractelement <8 x float> %x, i32 2
+ %x1 = extractelement <8 x float> %x, i32 3
+ %x01 = fsub float %x0, %x1
+ ret float %x01
+}
+
; Negative test...or get hoppy and negate?
-define float @extract_extract_v8f32_fsub_f32_commute(<8 x float> %x) {
-; SSE3-LABEL: extract_extract_v8f32_fsub_f32_commute:
+define float @extract_extract01_v8f32_fsub_f32_commute(<8 x float> %x) {
+; SSE3-LABEL: extract_extract01_v8f32_fsub_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm0, %xmm1
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v8f32_fsub_f32_commute:
+; AVX-LABEL: extract_extract01_v8f32_fsub_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
ret float %x01
}
-define double @extract_extract_v4f64_fsub_f64(<4 x double> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v4f64_fsub_f64:
+define double @extract_extract01_v4f64_fsub_f64(<4 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v4f64_fsub_f64:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT: subsd %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4f64_fsub_f64:
+; SSE3-FAST-LABEL: extract_extract01_v4f64_fsub_f64:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: hsubpd %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-SLOW-LABEL: extract_extract01_v4f64_fsub_f64:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-FAST-LABEL: extract_extract01_v4f64_fsub_f64:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
; Negative test...or get hoppy and negate?
-define double @extract_extract_v4f64_fsub_f64_commute(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fsub_f64_commute:
+define double @extract_extract01_v4f64_fsub_f64_commute(<4 x double> %x) {
+; SSE3-LABEL: extract_extract01_v4f64_fsub_f64_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v4f64_fsub_f64_commute:
+; AVX-LABEL: extract_extract01_v4f64_fsub_f64_commute:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; 512-bit vectors, float/double, fadd/fsub
-define float @extract_extract_v16f32_fadd_f32(<16 x float> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v16f32_fadd_f32:
+define float @extract_extract01_v16f32_fadd_f32(<16 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v16f32_fadd_f32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v16f32_fadd_f32:
+; SSE3-FAST-LABEL: extract_extract01_v16f32_fadd_f32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v16f32_fadd_f32:
+; AVX-SLOW-LABEL: extract_extract01_v16f32_fadd_f32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v16f32_fadd_f32:
+; AVX-FAST-LABEL: extract_extract01_v16f32_fadd_f32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret float %x01
}
-define float @extract_extract_v16f32_fadd_f32_commute(<16 x float> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v16f32_fadd_f32_commute:
+define float @extract_extract01_v16f32_fadd_f32_commute(<16 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v16f32_fadd_f32_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; SSE3-FAST-LABEL: extract_extract01_v16f32_fadd_f32_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; AVX-SLOW-LABEL: extract_extract01_v16f32_fadd_f32_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; AVX-FAST-LABEL: extract_extract01_v16f32_fadd_f32_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret float %x01
}
-define double @extract_extract_v8f64_fadd_f64(<8 x double> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8f64_fadd_f64:
+define double @extract_extract01_v8f64_fadd_f64(<8 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8f64_fadd_f64:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8f64_fadd_f64:
+; SSE3-FAST-LABEL: extract_extract01_v8f64_fadd_f64:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8f64_fadd_f64:
+; AVX-SLOW-LABEL: extract_extract01_v8f64_fadd_f64:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8f64_fadd_f64:
+; AVX-FAST-LABEL: extract_extract01_v8f64_fadd_f64:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret double %x01
}
-define double @extract_extract_v8f64_fadd_f64_commute(<8 x double> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8f64_fadd_f64_commute:
+define double @extract_extract01_v8f64_fadd_f64_commute(<8 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8f64_fadd_f64_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; SSE3-FAST-LABEL: extract_extract01_v8f64_fadd_f64_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; AVX-SLOW-LABEL: extract_extract01_v8f64_fadd_f64_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; AVX-FAST-LABEL: extract_extract01_v8f64_fadd_f64_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret double %x01
}
-define float @extract_extract_v16f32_fsub_f32(<16 x float> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v16f32_fsub_f32:
+define float @extract_extract01_v16f32_fsub_f32(<16 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v16f32_fsub_f32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: subss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v16f32_fsub_f32:
+; SSE3-FAST-LABEL: extract_extract01_v16f32_fsub_f32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: hsubps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v16f32_fsub_f32:
+; AVX-SLOW-LABEL: extract_extract01_v16f32_fsub_f32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v16f32_fsub_f32:
+; AVX-FAST-LABEL: extract_extract01_v16f32_fsub_f32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhsubps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret float %x01
}
-define float @extract_extract_v16f32_fsub_f32_commute(<16 x float> %x) {
-; SSE3-LABEL: extract_extract_v16f32_fsub_f32_commute:
+define float @extract_extract01_v16f32_fsub_f32_commute(<16 x float> %x) {
+; SSE3-LABEL: extract_extract01_v16f32_fsub_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm0, %xmm1
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v16f32_fsub_f32_commute:
+; AVX-LABEL: extract_extract01_v16f32_fsub_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
ret float %x01
}
-define double @extract_extract_v8f64_fsub_f64(<8 x double> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8f64_fsub_f64:
+define double @extract_extract01_v8f64_fsub_f64(<8 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8f64_fsub_f64:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT: subsd %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8f64_fsub_f64:
+; SSE3-FAST-LABEL: extract_extract01_v8f64_fsub_f64:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: hsubpd %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8f64_fsub_f64:
+; AVX-SLOW-LABEL: extract_extract01_v8f64_fsub_f64:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8f64_fsub_f64:
+; AVX-FAST-LABEL: extract_extract01_v8f64_fsub_f64:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
ret double %x01
}
-define double @extract_extract_v8f64_fsub_f64_commute(<8 x double> %x) {
-; SSE3-LABEL: extract_extract_v8f64_fsub_f64_commute:
+define double @extract_extract01_v8f64_fsub_f64_commute(<8 x double> %x) {
+; SSE3-LABEL: extract_extract01_v8f64_fsub_f64_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v8f64_fsub_f64_commute:
+; AVX-LABEL: extract_extract01_v8f64_fsub_f64_commute:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; Check output when 1 or both extracts have extra uses.
-define float @extract_extract_v4f32_fadd_f32_uses1(<4 x float> %x, float* %p) {
-; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+define float @extract_extract01_v4f32_fadd_f32_uses1(<4 x float> %x, float* %p) {
+; SSE3-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_uses1:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movss %xmm0, (%rdi)
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; SSE3-FAST-LABEL: extract_extract01_v4f32_fadd_f32_uses1:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: movss %xmm0, (%rdi)
; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; AVX-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_uses1:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovss %xmm0, (%rdi)
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; AVX-FAST-LABEL: extract_extract01_v4f32_fadd_f32_uses1:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vmovss %xmm0, (%rdi)
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
ret float %x01
}
-define float @extract_extract_v4f32_fadd_f32_uses2(<4 x float> %x, float* %p) {
-; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+define float @extract_extract01_v4f32_fadd_f32_uses2(<4 x float> %x, float* %p) {
+; SSE3-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_uses2:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT: movss %xmm1, (%rdi)
; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; SSE3-FAST-LABEL: extract_extract01_v4f32_fadd_f32_uses2:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-FAST-NEXT: movss %xmm1, (%rdi)
; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; AVX-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_uses2:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT: vmovss %xmm1, (%rdi)
; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; AVX-FAST-LABEL: extract_extract01_v4f32_fadd_f32_uses2:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vextractps $1, %xmm0, (%rdi)
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
ret float %x01
}
-define float @extract_extract_v4f32_fadd_f32_uses3(<4 x float> %x, float* %p1, float* %p2) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses3:
+define float @extract_extract01_v4f32_fadd_f32_uses3(<4 x float> %x, float* %p1, float* %p2) {
+; SSE3-LABEL: extract_extract01_v4f32_fadd_f32_uses3:
; SSE3: # %bb.0:
; SSE3-NEXT: movss %xmm0, (%rdi)
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: addss %xmm1, %xmm0
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses3:
+; AVX-LABEL: extract_extract01_v4f32_fadd_f32_uses3:
; AVX: # %bb.0:
; AVX-NEXT: vmovss %xmm0, (%rdi)
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; 128-bit vectors, 16/32-bit, add/sub
-define i32 @extract_extract_v4i32_add_i32(<4 x i32> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32:
+define i32 @extract_extract01_v4i32_add_i32(<4 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v4i32_add_i32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-SLOW-NEXT: addl %ecx, %eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32:
+; SSE3-FAST-LABEL: extract_extract01_v4i32_add_i32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32:
+; AVX-SLOW-LABEL: extract_extract01_v4i32_add_i32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
; AVX-SLOW-NEXT: addl %ecx, %eax
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4i32_add_i32:
+; AVX-FAST-LABEL: extract_extract01_v4i32_add_i32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i32 %x01
}
-define i32 @extract_extract_v4i32_add_i32_commute(<4 x i32> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32_commute:
+define i32 @extract_extract23_v4i32_add_i32(<4 x i32> %x) {
+; SSE3-LABEL: extract_extract23_v4i32_add_i32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm1, %ecx
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v4i32_add_i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractps $2, %xmm0, %ecx
+; AVX-NEXT: vextractps $3, %xmm0, %eax
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x i32> %x, i32 2
+ %x1 = extractelement <4 x i32> %x, i32 3
+ %x01 = add i32 %x0, %x1
+ ret i32 %x01
+}
+
+define i32 @extract_extract01_v4i32_add_i32_commute(<4 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v4i32_add_i32_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-SLOW-NEXT: addl %ecx, %eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32_commute:
+; SSE3-FAST-LABEL: extract_extract01_v4i32_add_i32_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32_commute:
+; AVX-SLOW-LABEL: extract_extract01_v4i32_add_i32_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
; AVX-SLOW-NEXT: addl %ecx, %eax
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4i32_add_i32_commute:
+; AVX-FAST-LABEL: extract_extract01_v4i32_add_i32_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i32 %x01
}
-define i16 @extract_extract_v8i16_add_i16(<8 x i16> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8i16_add_i16:
+define i32 @extract_extract23_v4i32_add_i32_commute(<4 x i32> %x) {
+; SSE3-LABEL: extract_extract23_v4i32_add_i32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm1, %ecx
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v4i32_add_i32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractps $2, %xmm0, %ecx
+; AVX-NEXT: vextractps $3, %xmm0, %eax
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x i32> %x, i32 2
+ %x1 = extractelement <4 x i32> %x, i32 3
+ %x01 = add i32 %x1, %x0
+ ret i32 %x01
+}
+
+define i16 @extract_extract01_v8i16_add_i16(<8 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8i16_add_i16:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8i16_add_i16:
+; SSE3-FAST-LABEL: extract_extract01_v8i16_add_i16:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8i16_add_i16:
+; AVX-SLOW-LABEL: extract_extract01_v8i16_add_i16:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8i16_add_i16:
+; AVX-FAST-LABEL: extract_extract01_v8i16_add_i16:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i16 %x01
}
-define i16 @extract_extract_v8i16_add_i16_commute(<8 x i16> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8i16_add_i16_commute:
+define i16 @extract_extract45_v8i16_add_i16(<8 x i16> %x) {
+; SSE3-LABEL: extract_extract45_v8i16_add_i16:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pextrw $4, %xmm0, %ecx
+; SSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract45_v8i16_add_i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX-NEXT: vpextrw $5, %xmm0, %eax
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x i16> %x, i32 4
+ %x1 = extractelement <8 x i16> %x, i32 5
+ %x01 = add i16 %x0, %x1
+ ret i16 %x01
+}
+
+define i16 @extract_extract01_v8i16_add_i16_commute(<8 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8i16_add_i16_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8i16_add_i16_commute:
+; SSE3-FAST-LABEL: extract_extract01_v8i16_add_i16_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8i16_add_i16_commute:
+; AVX-SLOW-LABEL: extract_extract01_v8i16_add_i16_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8i16_add_i16_commute:
+; AVX-FAST-LABEL: extract_extract01_v8i16_add_i16_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i16 %x01
}
-define i32 @extract_extract_v4i32_sub_i32(<4 x i32> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v4i32_sub_i32:
+define i16 @extract_extract45_v8i16_add_i16_commute(<8 x i16> %x) {
+; SSE3-LABEL: extract_extract45_v8i16_add_i16_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pextrw $4, %xmm0, %ecx
+; SSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract45_v8i16_add_i16_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX-NEXT: vpextrw $5, %xmm0, %eax
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x i16> %x, i32 4
+ %x1 = extractelement <8 x i16> %x, i32 5
+ %x01 = add i16 %x1, %x0
+ ret i16 %x01
+}
+
+define i32 @extract_extract01_v4i32_sub_i32(<4 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v4i32_sub_i32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %eax
; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-SLOW-NEXT: subl %ecx, %eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4i32_sub_i32:
+; SSE3-FAST-LABEL: extract_extract01_v4i32_sub_i32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phsubd %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4i32_sub_i32:
+; AVX-SLOW-LABEL: extract_extract01_v4i32_sub_i32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %eax
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %ecx
; AVX-SLOW-NEXT: subl %ecx, %eax
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4i32_sub_i32:
+; AVX-FAST-LABEL: extract_extract01_v4i32_sub_i32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphsubd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i32 %x01
}
-define i32 @extract_extract_v4i32_sub_i32_commute(<4 x i32> %x) {
-; SSE3-LABEL: extract_extract_v4i32_sub_i32_commute:
+define i32 @extract_extract23_v4i32_sub_i32(<4 x i32> %x) {
+; SSE3-LABEL: extract_extract23_v4i32_sub_i32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm1, %eax
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm0, %ecx
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v4i32_sub_i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractps $2, %xmm0, %eax
+; AVX-NEXT: vextractps $3, %xmm0, %ecx
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x i32> %x, i32 2
+ %x1 = extractelement <4 x i32> %x, i32 3
+ %x01 = sub i32 %x0, %x1
+ ret i32 %x01
+}
+
+define i32 @extract_extract01_v4i32_sub_i32_commute(<4 x i32> %x) {
+; SSE3-LABEL: extract_extract01_v4i32_sub_i32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %ecx
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-NEXT: subl %ecx, %eax
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v4i32_sub_i32_commute:
+; AVX-LABEL: extract_extract01_v4i32_sub_i32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %ecx
; AVX-NEXT: vpextrd $1, %xmm0, %eax
ret i32 %x01
}
-define i16 @extract_extract_v8i16_sub_i16(<8 x i16> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8i16_sub_i16:
+define i32 @extract_extract23_v4i32_sub_i32_commute(<4 x i32> %x) {
+; SSE3-LABEL: extract_extract23_v4i32_sub_i32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm1, %ecx
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v4i32_sub_i32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractps $2, %xmm0, %ecx
+; AVX-NEXT: vextractps $3, %xmm0, %eax
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x i32> %x, i32 2
+ %x1 = extractelement <4 x i32> %x, i32 3
+ %x01 = sub i32 %x1, %x0
+ ret i32 %x01
+}
+
+define i16 @extract_extract01_v8i16_sub_i16(<8 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8i16_sub_i16:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %eax
; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %ecx
; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8i16_sub_i16:
+; SSE3-FAST-LABEL: extract_extract01_v8i16_sub_i16:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phsubw %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8i16_sub_i16:
+; AVX-SLOW-LABEL: extract_extract01_v8i16_sub_i16:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %eax
; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %ecx
; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8i16_sub_i16:
+; AVX-FAST-LABEL: extract_extract01_v8i16_sub_i16:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphsubw %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i16 %x01
}
-define i16 @extract_extract_v8i16_sub_i16_commute(<8 x i16> %x) {
-; SSE3-LABEL: extract_extract_v8i16_sub_i16_commute:
+define i16 @extract_extract23_v8i16_sub_i16(<8 x i16> %x) {
+; SSE3-LABEL: extract_extract23_v8i16_sub_i16:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pextrw $2, %xmm0, %eax
+; SSE3-NEXT: pextrw $3, %xmm0, %ecx
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v8i16_sub_i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrw $2, %xmm0, %eax
+; AVX-NEXT: vpextrw $3, %xmm0, %ecx
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x i16> %x, i32 2
+ %x1 = extractelement <8 x i16> %x, i32 3
+ %x01 = sub i16 %x0, %x1
+ ret i16 %x01
+}
+
+define i16 @extract_extract01_v8i16_sub_i16_commute(<8 x i16> %x) {
+; SSE3-LABEL: extract_extract01_v8i16_sub_i16_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %ecx
; SSE3-NEXT: pextrw $1, %xmm0, %eax
; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v8i16_sub_i16_commute:
+; AVX-LABEL: extract_extract01_v8i16_sub_i16_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %ecx
; AVX-NEXT: vpextrw $1, %xmm0, %eax
ret i16 %x01
}
+define i16 @extract_extract23_v8i16_sub_i16_commute(<8 x i16> %x) {
+; SSE3-LABEL: extract_extract23_v8i16_sub_i16_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pextrw $2, %xmm0, %ecx
+; SSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v8i16_sub_i16_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrw $2, %xmm0, %ecx
+; AVX-NEXT: vpextrw $3, %xmm0, %eax
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x i16> %x, i32 2
+ %x1 = extractelement <8 x i16> %x, i32 3
+ %x01 = sub i16 %x1, %x0
+ ret i16 %x01
+}
+
; 256-bit vectors, i32/i16, add/sub
-define i32 @extract_extract_v8i32_add_i32(<8 x i32> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8i32_add_i32:
+define i32 @extract_extract01_v8i32_add_i32(<8 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8i32_add_i32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-SLOW-NEXT: addl %ecx, %eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8i32_add_i32:
+; SSE3-FAST-LABEL: extract_extract01_v8i32_add_i32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8i32_add_i32:
+; AVX-SLOW-LABEL: extract_extract01_v8i32_add_i32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8i32_add_i32:
+; AVX-FAST-LABEL: extract_extract01_v8i32_add_i32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i32 %x01
}
-define i32 @extract_extract_v8i32_add_i32_commute(<8 x i32> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8i32_add_i32_commute:
+define i32 @extract_extract23_v8i32_add_i32(<8 x i32> %x) {
+; SSE3-LABEL: extract_extract23_v8i32_add_i32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm1, %ecx
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v8i32_add_i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractps $2, %xmm0, %ecx
+; AVX-NEXT: vextractps $3, %xmm0, %eax
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x i32> %x, i32 2
+ %x1 = extractelement <8 x i32> %x, i32 3
+ %x01 = add i32 %x0, %x1
+ ret i32 %x01
+}
+
+define i32 @extract_extract01_v8i32_add_i32_commute(<8 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8i32_add_i32_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-SLOW-NEXT: addl %ecx, %eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8i32_add_i32_commute:
+; SSE3-FAST-LABEL: extract_extract01_v8i32_add_i32_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8i32_add_i32_commute:
+; AVX-SLOW-LABEL: extract_extract01_v8i32_add_i32_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8i32_add_i32_commute:
+; AVX-FAST-LABEL: extract_extract01_v8i32_add_i32_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i32 %x01
}
-define i16 @extract_extract_v16i16_add_i16(<16 x i16> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v16i16_add_i16:
+define i32 @extract_extract23_v8i32_add_i32_commute(<8 x i32> %x) {
+; SSE3-LABEL: extract_extract23_v8i32_add_i32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm1, %ecx
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v8i32_add_i32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractps $2, %xmm0, %ecx
+; AVX-NEXT: vextractps $3, %xmm0, %eax
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x i32> %x, i32 2
+ %x1 = extractelement <8 x i32> %x, i32 3
+ %x01 = add i32 %x1, %x0
+ ret i32 %x01
+}
+
+define i16 @extract_extract01_v16i16_add_i16(<16 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v16i16_add_i16:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v16i16_add_i16:
+; SSE3-FAST-LABEL: extract_extract01_v16i16_add_i16:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v16i16_add_i16:
+; AVX-SLOW-LABEL: extract_extract01_v16i16_add_i16:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v16i16_add_i16:
+; AVX-FAST-LABEL: extract_extract01_v16i16_add_i16:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i16 %x01
}
-define i16 @extract_extract_v16i16_add_i16_commute(<16 x i16> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v16i16_add_i16_commute:
+define i16 @extract_extract23_v16i16_add_i16(<16 x i16> %x) {
+; SSE3-LABEL: extract_extract23_v16i16_add_i16:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pextrw $2, %xmm0, %ecx
+; SSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v16i16_add_i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrw $2, %xmm0, %ecx
+; AVX-NEXT: vpextrw $3, %xmm0, %eax
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <16 x i16> %x, i32 2
+ %x1 = extractelement <16 x i16> %x, i32 3
+ %x01 = add i16 %x0, %x1
+ ret i16 %x01
+}
+
+define i16 @extract_extract01_v16i16_add_i16_commute(<16 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v16i16_add_i16_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v16i16_add_i16_commute:
+; SSE3-FAST-LABEL: extract_extract01_v16i16_add_i16_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v16i16_add_i16_commute:
+; AVX-SLOW-LABEL: extract_extract01_v16i16_add_i16_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v16i16_add_i16_commute:
+; AVX-FAST-LABEL: extract_extract01_v16i16_add_i16_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i16 %x01
}
-define i32 @extract_extract_v8i32_sub_i32(<8 x i32> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v8i32_sub_i32:
+define i16 @extract_extract45_v16i16_add_i16_commute(<16 x i16> %x) {
+; SSE3-LABEL: extract_extract45_v16i16_add_i16_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pextrw $4, %xmm0, %ecx
+; SSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract45_v16i16_add_i16_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX-NEXT: vpextrw $5, %xmm0, %eax
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <16 x i16> %x, i32 4
+ %x1 = extractelement <16 x i16> %x, i32 5
+ %x01 = add i16 %x1, %x0
+ ret i16 %x01
+}
+
+define i32 @extract_extract01_v8i32_sub_i32(<8 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v8i32_sub_i32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %eax
; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-SLOW-NEXT: subl %ecx, %eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v8i32_sub_i32:
+; SSE3-FAST-LABEL: extract_extract01_v8i32_sub_i32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phsubd %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v8i32_sub_i32:
+; AVX-SLOW-LABEL: extract_extract01_v8i32_sub_i32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %eax
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %ecx
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v8i32_sub_i32:
+; AVX-FAST-LABEL: extract_extract01_v8i32_sub_i32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphsubd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i32 %x01
}
+define i32 @extract_extract23_v8i32_sub_i32(<8 x i32> %x) {
+; SSE3-LABEL: extract_extract23_v8i32_sub_i32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm1, %eax
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm0, %ecx
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v8i32_sub_i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractps $2, %xmm0, %eax
+; AVX-NEXT: vextractps $3, %xmm0, %ecx
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x i32> %x, i32 2
+ %x1 = extractelement <8 x i32> %x, i32 3
+ %x01 = sub i32 %x0, %x1
+ ret i32 %x01
+}
+
; Negative test...or get hoppy and negate?
-define i32 @extract_extract_v8i32_sub_i32_commute(<8 x i32> %x) {
-; SSE3-LABEL: extract_extract_v8i32_sub_i32_commute:
+define i32 @extract_extract01_v8i32_sub_i32_commute(<8 x i32> %x) {
+; SSE3-LABEL: extract_extract01_v8i32_sub_i32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %ecx
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-NEXT: subl %ecx, %eax
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v8i32_sub_i32_commute:
+; AVX-LABEL: extract_extract01_v8i32_sub_i32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %ecx
; AVX-NEXT: vpextrd $1, %xmm0, %eax
ret i32 %x01
}
-define i16 @extract_extract_v16i16_sub_i16(<16 x i16> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v16i16_sub_i16:
+define i16 @extract_extract01_v16i16_sub_i16(<16 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v16i16_sub_i16:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %eax
; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %ecx
; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v16i16_sub_i16:
+; SSE3-FAST-LABEL: extract_extract01_v16i16_sub_i16:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phsubw %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v16i16_sub_i16:
+; AVX-SLOW-LABEL: extract_extract01_v16i16_sub_i16:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %eax
; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %ecx
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v16i16_sub_i16:
+; AVX-FAST-LABEL: extract_extract01_v16i16_sub_i16:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphsubw %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
; Negative test...or get hoppy and negate?
-define i16 @extract_extract_v16i16_sub_i16_commute(<16 x i16> %x) {
-; SSE3-LABEL: extract_extract_v16i16_sub_i16_commute:
+define i16 @extract_extract01_v16i16_sub_i16_commute(<16 x i16> %x) {
+; SSE3-LABEL: extract_extract01_v16i16_sub_i16_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %ecx
; SSE3-NEXT: pextrw $1, %xmm0, %eax
; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v16i16_sub_i16_commute:
+; AVX-LABEL: extract_extract01_v16i16_sub_i16_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %ecx
; AVX-NEXT: vpextrw $1, %xmm0, %eax
; 512-bit vectors, i32/i16, add/sub
-define i32 @extract_extract_v16i32_add_i32(<16 x i32> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v16i32_add_i32:
+define i32 @extract_extract01_v16i32_add_i32(<16 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v16i32_add_i32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-SLOW-NEXT: addl %ecx, %eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v16i32_add_i32:
+; SSE3-FAST-LABEL: extract_extract01_v16i32_add_i32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v16i32_add_i32:
+; AVX-SLOW-LABEL: extract_extract01_v16i32_add_i32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v16i32_add_i32:
+; AVX-FAST-LABEL: extract_extract01_v16i32_add_i32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i32 %x01
}
-define i32 @extract_extract_v16i32_add_i32_commute(<16 x i32> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v16i32_add_i32_commute:
+define i32 @extract_extract01_v16i32_add_i32_commute(<16 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v16i32_add_i32_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-SLOW-NEXT: addl %ecx, %eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v16i32_add_i32_commute:
+; SSE3-FAST-LABEL: extract_extract01_v16i32_add_i32_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v16i32_add_i32_commute:
+; AVX-SLOW-LABEL: extract_extract01_v16i32_add_i32_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v16i32_add_i32_commute:
+; AVX-FAST-LABEL: extract_extract01_v16i32_add_i32_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i32 %x01
}
-define i16 @extract_extract_v32i16_add_i16(<32 x i16> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v32i16_add_i16:
+define i16 @extract_extract01_v32i16_add_i16(<32 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v32i16_add_i16:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v32i16_add_i16:
+; SSE3-FAST-LABEL: extract_extract01_v32i16_add_i16:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v32i16_add_i16:
+; AVX-SLOW-LABEL: extract_extract01_v32i16_add_i16:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v32i16_add_i16:
+; AVX-FAST-LABEL: extract_extract01_v32i16_add_i16:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i16 %x01
}
-define i16 @extract_extract_v32i16_add_i16_commute(<32 x i16> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v32i16_add_i16_commute:
+define i16 @extract_extract01_v32i16_add_i16_commute(<32 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v32i16_add_i16_commute:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v32i16_add_i16_commute:
+; SSE3-FAST-LABEL: extract_extract01_v32i16_add_i16_commute:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v32i16_add_i16_commute:
+; AVX-SLOW-LABEL: extract_extract01_v32i16_add_i16_commute:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v32i16_add_i16_commute:
+; AVX-FAST-LABEL: extract_extract01_v32i16_add_i16_commute:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i16 %x01
}
-define i32 @extract_extract_v16i32_sub_i32(<16 x i32> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v16i32_sub_i32:
+define i32 @extract_extract01_v16i32_sub_i32(<16 x i32> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v16i32_sub_i32:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %eax
; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-SLOW-NEXT: subl %ecx, %eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v16i32_sub_i32:
+; SSE3-FAST-LABEL: extract_extract01_v16i32_sub_i32:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phsubd %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v16i32_sub_i32:
+; AVX-SLOW-LABEL: extract_extract01_v16i32_sub_i32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %eax
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %ecx
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v16i32_sub_i32:
+; AVX-FAST-LABEL: extract_extract01_v16i32_sub_i32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphsubd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i32 %x01
}
-define i32 @extract_extract_v16i32_sub_i32_commute(<16 x i32> %x) {
-; SSE3-LABEL: extract_extract_v16i32_sub_i32_commute:
+define i32 @extract_extract01_v16i32_sub_i32_commute(<16 x i32> %x) {
+; SSE3-LABEL: extract_extract01_v16i32_sub_i32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %ecx
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-NEXT: subl %ecx, %eax
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v16i32_sub_i32_commute:
+; AVX-LABEL: extract_extract01_v16i32_sub_i32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %ecx
; AVX-NEXT: vpextrd $1, %xmm0, %eax
ret i32 %x01
}
-define i16 @extract_extract_v32i16_sub_i16(<32 x i16> %x) {
-; SSE3-SLOW-LABEL: extract_extract_v32i16_sub_i16:
+define i16 @extract_extract01_v32i16_sub_i16(<32 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract01_v32i16_sub_i16:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %eax
; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %ecx
; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v32i16_sub_i16:
+; SSE3-FAST-LABEL: extract_extract01_v32i16_sub_i16:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: phsubw %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v32i16_sub_i16:
+; AVX-SLOW-LABEL: extract_extract01_v32i16_sub_i16:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %eax
; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %ecx
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v32i16_sub_i16:
+; AVX-FAST-LABEL: extract_extract01_v32i16_sub_i16:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vphsubw %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vmovd %xmm0, %eax
ret i16 %x01
}
-define i16 @extract_extract_v32i16_sub_i16_commute(<32 x i16> %x) {
-; SSE3-LABEL: extract_extract_v32i16_sub_i16_commute:
+define i16 @extract_extract01_v32i16_sub_i16_commute(<32 x i16> %x) {
+; SSE3-LABEL: extract_extract01_v32i16_sub_i16_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %ecx
; SSE3-NEXT: pextrw $1, %xmm0, %eax
; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v32i16_sub_i16_commute:
+; AVX-LABEL: extract_extract01_v32i16_sub_i16_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %ecx
; AVX-NEXT: vpextrw $1, %xmm0, %eax
; Check output when one or both extracts have extra uses.
-define i32 @extract_extract_v4i32_add_i32_uses1(<4 x i32> %x, i32* %p) {
-; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32_uses1:
+define i32 @extract_extract01_v4i32_add_i32_uses1(<4 x i32> %x, i32* %p) {
+; SSE3-SLOW-LABEL: extract_extract01_v4i32_add_i32_uses1:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: movd %xmm0, (%rdi)
; SSE3-SLOW-NEXT: addl %ecx, %eax
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32_uses1:
+; SSE3-FAST-LABEL: extract_extract01_v4i32_add_i32_uses1:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: movd %xmm0, (%rdi)
; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32_uses1:
+; AVX-SLOW-LABEL: extract_extract01_v4i32_add_i32_uses1:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vmovd %xmm0, (%rdi)
; AVX-SLOW-NEXT: addl %ecx, %eax
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4i32_add_i32_uses1:
+; AVX-FAST-LABEL: extract_extract01_v4i32_add_i32_uses1:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vmovd %xmm0, (%rdi)
; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
ret i32 %x01
}
-define i32 @extract_extract_v4i32_add_i32_uses2(<4 x i32> %x, i32* %p) {
-; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32_uses2:
+define i32 @extract_extract01_v4i32_add_i32_uses2(<4 x i32> %x, i32* %p) {
+; SSE3-SLOW-LABEL: extract_extract01_v4i32_add_i32_uses2:
; SSE3-SLOW: # %bb.0:
; SSE3-SLOW-NEXT: movd %xmm0, %ecx
; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE3-SLOW-NEXT: movd %xmm0, (%rdi)
; SSE3-SLOW-NEXT: retq
;
-; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32_uses2:
+; SSE3-FAST-LABEL: extract_extract01_v4i32_add_i32_uses2:
; SSE3-FAST: # %bb.0:
; SSE3-FAST-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE3-FAST-NEXT: movd %xmm1, (%rdi)
; SSE3-FAST-NEXT: movd %xmm0, %eax
; SSE3-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32_uses2:
+; AVX-SLOW-LABEL: extract_extract01_v4i32_add_i32_uses2:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
; AVX-SLOW-NEXT: vpextrd $1, %xmm0, (%rdi)
; AVX-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: extract_extract_v4i32_add_i32_uses2:
+; AVX-FAST-LABEL: extract_extract01_v4i32_add_i32_uses2:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vpextrd $1, %xmm0, (%rdi)
; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
ret i32 %x01
}
-define i32 @extract_extract_v4i32_add_i32_uses3(<4 x i32> %x, i32* %p1, i32* %p2) {
-; SSE3-LABEL: extract_extract_v4i32_add_i32_uses3:
+define i32 @extract_extract01_v4i32_add_i32_uses3(<4 x i32> %x, i32* %p1, i32* %p2) {
+; SSE3-LABEL: extract_extract01_v4i32_add_i32_uses3:
; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %ecx
; SSE3-NEXT: movd %xmm0, (%rdi)
; SSE3-NEXT: movd %xmm0, (%rsi)
; SSE3-NEXT: retq
;
-; AVX-LABEL: extract_extract_v4i32_add_i32_uses3:
+; AVX-LABEL: extract_extract01_v4i32_add_i32_uses3:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %ecx
; AVX-NEXT: vmovd %xmm0, (%rdi)