-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
-; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
;
; Float Comparisons
; Only equal/not-equal/ordered/unordered can be safely commuted
;
; AVX: # BB#0:
; AVX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp oeq <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
; AVX: # BB#0:
; AVX-NEXT: vcmpneqps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp une <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
; AVX: # BB#0:
; AVX-NEXT: vcmpordps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp ord <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
; AVX: # BB#0:
; AVX-NEXT: vcmpunordps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp uno <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
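; ueq and one are expanded into two compares, as the checks below show:
; ueq as cmpunord | cmpeq, one as cmpord & cmpneq of the same operands.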
; AVX-NEXT: vcmpunordps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vorps %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp ueq <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
; AVX-NEXT: vcmpordps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp one <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
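; lt and le are not commuted: the memory operand is loaded with vmovaps
; and the compare keeps its original operand order.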
; AVX-NEXT: vmovaps (%rdi), %xmm1
; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp olt <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
; AVX-NEXT: vmovaps (%rdi), %xmm1
; AVX-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp ole <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
; AVX: # BB#0:
; AVX-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp oeq <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
; AVX: # BB#0:
; AVX-NEXT: vcmpneqps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp une <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
; AVX: # BB#0:
; AVX-NEXT: vcmpordps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp ord <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
; AVX: # BB#0:
; AVX-NEXT: vcmpunordps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp uno <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
; AVX-NEXT: vcmpunordps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vorps %ymm2, %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp ueq <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
; AVX-NEXT: vcmpordps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp one <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp olt <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vcmpleps %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp ole <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
;
; Double Comparisons
; Only equal/not-equal/ordered/unordered can be safely commuted
;
; AVX: # BB#0:
; AVX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp oeq <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
; AVX: # BB#0:
; AVX-NEXT: vcmpneqpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp une <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
; AVX: # BB#0:
; AVX-NEXT: vcmpordpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp ord <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
; AVX-NEXT: vcmpunordpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vorpd %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp ueq <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
; AVX-NEXT: vcmpordpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp one <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
; AVX: # BB#0:
; AVX-NEXT: vcmpunordpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp uno <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
; AVX-NEXT: vmovapd (%rdi), %xmm1
; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp olt <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
; AVX-NEXT: vmovapd (%rdi), %xmm1
; AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp ole <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
; AVX: # BB#0:
; AVX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp oeq <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
; AVX: # BB#0:
; AVX-NEXT: vcmpneqpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp une <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
; AVX: # BB#0:
; AVX-NEXT: vcmpordpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp ord <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
; AVX: # BB#0:
; AVX-NEXT: vcmpunordpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp uno <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
; AVX-NEXT: vcmpunordpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vorpd %ymm2, %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp ueq <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
; AVX-NEXT: vcmpordpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vandpd %ymm2, %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp one <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
; AVX-NEXT: vmovapd (%rdi), %ymm1
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp olt <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
; AVX-NEXT: vmovapd (%rdi), %ymm1
; AVX-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp ole <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>