; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3 | FileCheck %s --check-prefixes=SSE,SSE-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops | FileCheck %s --check-prefixes=SSE,SSE-FAST
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX1-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512-FAST
; Verify that we correctly fold a horizontal binop even in the presence of UNDEFs.
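;
; As a minimal sketch (illustrative only, not taken from the checks below),
; the fold matches a shuffle-and-add pattern such as:
;
;   %lo  = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
;   %hi  = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
;   %sum = fadd <4 x float> %lo, %hi
;
; and lowers it to a single haddps, whose self-operand form computes
; [x0+x1, x2+x3, x0+x1, x2+x3]; the undef lanes in the masks do not block
; the fold.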
; SSE-FAST-NEXT: haddps %xmm0, %xmm0
; SSE-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: add_ps_007_2:
-; AVX-SLOW: # %bb.0:
-; AVX-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX-SLOW-NEXT: retq
+; AVX1-SLOW-LABEL: add_ps_007_2:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX1-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: add_ps_007_2:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: retq
+;
+; AVX512-SLOW-LABEL: add_ps_007_2:
+; AVX512-SLOW: # %bb.0:
+; AVX512-SLOW-NEXT: vbroadcastss %xmm0, %xmm1
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX512-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX512-SLOW-NEXT: retq
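;
; The two SLOW lowerings differ only in how the first vaddps operand is
; built: plain AVX uses vmovddup (duplicating the low 64 bits, giving
; [x0,x1,x0,x1]), while AVX512F, which implies AVX2, can vbroadcastss from a
; register (giving [x0,x0,x0,x0]). Either way lane 2 of the vaddps result is
; x[0]+x[1]. With fast-hops both targets emit the identical vhaddps, so that
; block stays under the shared AVX-FAST prefix.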
%l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
%r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 undef>
%add = fadd <4 x float> %l, %r
; SSE-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: add_ps_018:
-; AVX-SLOW: # %bb.0:
-; AVX-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX-SLOW-NEXT: retq
+; AVX1-SLOW-LABEL: add_ps_018:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX1-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: add_ps_018:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-FAST-NEXT: retq
+;
+; AVX512-SLOW-LABEL: add_ps_018:
+; AVX512-SLOW: # %bb.0:
+; AVX512-SLOW-NEXT: vbroadcastss %xmm0, %xmm1
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX512-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX512-SLOW-NEXT: retq
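;
; add_ps_018 differs from add_ps_007_2 only in the trailing [0,2,2,3]
; permute, emitted on every path, which repositions the x[0]+x[1] sum held in
; lane 2 of the add (or produced directly by vhaddps).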
%l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
%r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 undef>
%add = fadd <4 x float> %l, %r
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1,AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX2,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX2,AVX2-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512,AVX512-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512,AVX512-FAST
; Verify that we correctly fold a horizontal binop even in the presence of UNDEFs.
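;
; For reference, the AVX2 semantics assumed by these checks: with %ymm0 = a
; and %ymm1 = b, vphaddd %ymm1, %ymm0, %ymm0 adds horizontally within each
; 128-bit lane, giving
;   [a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7]
; so UNDEF inputs simply make some of these sums dead.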
; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test14_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vphaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
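;
; AVX512VL implies AVX2, so test14_undef lowers to the same vphaddd; it still
; gets its own block because the AVX2 and AVX512 RUN lines share no prefix
; more specific than AVX, which also covers the differing AVX1 output.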
%vecext = extractelement <8 x i32> %a, i32 0
%vecext1 = extractelement <8 x i32> %a, i32 1
%add = add i32 %vecext, %vecext1
; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test15_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: retq
%vecext = extractelement <8 x i32> %a, i32 0
%vecext1 = extractelement <8 x i32> %a, i32 1
%add = add i32 %vecext, %vecext1
; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test16_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: retq
%vecext = extractelement <8 x i32> %a, i32 0
%vecext1 = extractelement <8 x i32> %a, i32 1
%add = add i32 %vecext, %vecext1
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test17_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
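;
; test17_undef needs sums from both 128-bit halves in one xmm, so the high
; half is first pulled down with vextracti128; vphaddd %xmm1, %xmm0 then
; packs [a0+a1, a2+a3, a4+a5, a6+a7] into a single register.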
%vecext = extractelement <8 x i32> %a, i32 0
%vecext1 = extractelement <8 x i32> %a, i32 1
%add1 = add i32 %vecext, %vecext1