+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse2 -machine-combiner-verify-pattern-order=true < %s | FileCheck %s --check-prefix=SSE
-; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx2 -machine-combiner-verify-pattern-order=true < %s | FileCheck %s --check-prefix=AVX
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx2 -machine-combiner-verify-pattern-order=true < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx512vl -machine-combiner-verify-pattern-order=true < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX512
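
+; The RUN lines cover SSE2, AVX2, and AVX512VL; the AVX prefix is shared by
+; the AVX2 and AVX512 runs. -machine-combiner-verify-pattern-order additionally
+; verifies that reassociation candidate patterns are evaluated in increasing
+; latency order.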
; Verify that 128-bit vector logical ops are reassociated.

; Verify that 256-bit vector logical ops are reassociated.
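
+; Each function below computes a serial chain t2 = x3 <op> (x2 <op> (x0 + x1)).
+; The machine combiner should reassociate it to (x0 + x1) <op> (x2 <op> x3) so
+; that the logical op of x2 and x3 can issue in parallel with the add; that is
+; the shape the CHECK lines encode.
+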
define <8 x i32> @reassociate_and_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
+; SSE-LABEL: reassociate_and_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: paddd %xmm3, %xmm1
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: pand %xmm7, %xmm5
+; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: retq
+;
; AVX-LABEL: reassociate_and_v8i32:
; AVX: # %bb.0:
; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vpand %ymm3, %ymm2, %ymm1
; AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
  %t0 = add <8 x i32> %x0, %x1
  %t1 = and <8 x i32> %x2, %t0
  %t2 = and <8 x i32> %x3, %t1
  ret <8 x i32> %t2
}

define <8 x i32> @reassociate_or_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
+; SSE-LABEL: reassociate_or_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: paddd %xmm3, %xmm1
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: por %xmm6, %xmm4
+; SSE-NEXT: por %xmm4, %xmm0
+; SSE-NEXT: por %xmm7, %xmm5
+; SSE-NEXT: por %xmm5, %xmm1
+; SSE-NEXT: retq
+;
; AVX-LABEL: reassociate_or_v8i32:
; AVX: # %bb.0:
; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vpor %ymm3, %ymm2, %ymm1
; AVX-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
  %t0 = add <8 x i32> %x0, %x1
  %t1 = or <8 x i32> %x2, %t0
  %t2 = or <8 x i32> %x3, %t1
  ret <8 x i32> %t2
}

define <8 x i32> @reassociate_xor_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
+; SSE-LABEL: reassociate_xor_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: paddd %xmm3, %xmm1
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm6, %xmm4
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm7, %xmm5
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: retq
+;
; AVX-LABEL: reassociate_xor_v8i32:
; AVX: # %bb.0:
; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vpxor %ymm3, %ymm2, %ymm1
; AVX-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
  %t0 = add <8 x i32> %x0, %x1
  %t1 = xor <8 x i32> %x2, %t0
  %t2 = xor <8 x i32> %x3, %t1
  ret <8 x i32> %t2
}
+
+; Verify that 512-bit vector logical ops are reassociated.
+
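+; With SSE2 the 512-bit operations are legalized to four 128-bit halves and
+; %x2/%x3 are passed on the stack; with AVX2 they split into two 256-bit
+; halves; with AVX512VL they stay as single zmm ops (vpandd/vpord/vpxord).
+; The reassociated shape is the same in each case.
+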
+define <16 x i32> @reassociate_and_v16i32(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, <16 x i32> %x3) {
+; SSE-LABEL: reassociate_and_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: paddd %xmm4, %xmm0
+; SSE-NEXT: paddd %xmm5, %xmm1
+; SSE-NEXT: paddd %xmm6, %xmm2
+; SSE-NEXT: paddd %xmm7, %xmm3
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: reassociate_and_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm6, %ymm4, %ymm2
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm7, %ymm5, %ymm2
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: reassociate_and_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpandd %zmm3, %zmm2, %zmm1
+; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+
+ %t0 = add <16 x i32> %x0, %x1
+ %t1 = and <16 x i32> %x2, %t0
+ %t2 = and <16 x i32> %x3, %t1
+ ret <16 x i32> %t2
+}
+
+define <16 x i32> @reassociate_or_v16i32(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, <16 x i32> %x3) {
+; SSE-LABEL: reassociate_or_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: paddd %xmm4, %xmm0
+; SSE-NEXT: paddd %xmm5, %xmm1
+; SSE-NEXT: paddd %xmm6, %xmm2
+; SSE-NEXT: paddd %xmm7, %xmm3
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: reassociate_or_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm6, %ymm4, %ymm2
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm7, %ymm5, %ymm2
+; AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: reassociate_or_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpord %zmm3, %zmm2, %zmm1
+; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+
+ %t0 = add <16 x i32> %x0, %x1
+ %t1 = or <16 x i32> %x2, %t0
+ %t2 = or <16 x i32> %x3, %t1
+ ret <16 x i32> %t2
+}
+
+define <16 x i32> @reassociate_xor_v16i32(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, <16 x i32> %x3) {
+; SSE-LABEL: reassociate_xor_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: paddd %xmm4, %xmm0
+; SSE-NEXT: paddd %xmm5, %xmm1
+; SSE-NEXT: paddd %xmm6, %xmm2
+; SSE-NEXT: paddd %xmm7, %xmm3
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: reassociate_xor_v16i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm6, %ymm4, %ymm2
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm7, %ymm5, %ymm2
+; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: reassociate_xor_v16i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpxord %zmm3, %zmm2, %zmm1
+; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+
+ %t0 = add <16 x i32> %x0, %x1
+ %t1 = xor <16 x i32> %x2, %t0
+ %t2 = xor <16 x i32> %x3, %t1
+ ret <16 x i32> %t2
+}