-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+avx -mtriple=i686-unknown-unknown | FileCheck %s
define void @add18i16(<18 x i16>* nocapture sret %ret, <18 x i16>* %bp) nounwind {
; CHECK-NEXT: vmovups %ymm0, (%eax)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retl $4
-;
%b = load <18 x i16>, <18 x i16>* %bp, align 16
%x = add <18 x i16> zeroinitializer, %b
store <18 x i16> %x, <18 x i16>* %ret, align 16
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i386-apple-darwin < %s | FileCheck %s
; PR30841: https://llvm.org/bugs/show_bug.cgi?id=30841
; CHECK-NEXT: negl %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retl
-;
entry:
%or = or i64 %argc, -4294967296
br label %end
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s
; PR3253
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB0_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB0_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB1_1
-;
+; CHECK-NEXT: # BB#2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB1_1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 1, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB2_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB2_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB3_1
-;
+; CHECK-NEXT: # BB#2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB3_1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 1, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB4_1
-;
+; CHECK-NEXT: # BB#2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB4_1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB5_1
-;
+; CHECK-NEXT: # BB#2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB5_1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB6_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB6_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB7_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB7_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 1, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB8_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB8_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB9_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB9_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 1, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB10_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB10_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB11_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB11_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB12_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB12_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB13_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB13_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 1, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB14_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB14_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB15_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB15_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 1, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB16_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB16_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB17_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB17_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB18_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB18_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB19_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB19_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB20_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB20_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB21_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB21_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 1, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB22_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB22_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB23_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB23_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 1, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB24_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB24_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB25_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB25_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB26_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB26_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB27_2
-;
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .LBB27_2: # %UnifiedReturnBlock
+; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: setb %al
; CHECK-NEXT: retq
-;
%neg = xor i32 %flags, -1
%shl = shl i32 1, %flag
%and = and i32 %shl, %neg
define zeroext i1 @extend(i32 %bit, i64 %bits) {
; CHECK-LABEL: extend:
-; CHECK: # BB#0:
-; CHECK-NEXT: btl %edi, %esi
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: btl %edi, %esi
+; CHECK-NEXT: setb %al
+; CHECK-NEXT: retq
entry:
%and = and i32 %bit, 31
%sh_prom = zext i32 %and to i64
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
; cmp with a single-use load should not form a branch.
; CHECK-NEXT: cmovbel %edx, %esi
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: retq
-;
%load = load double, double* %b, align 8
%cmp = fcmp olt double %load, %a
%cond = select i1 %cmp, i32 %x, i32 %y
; CHECK-NEXT: cmovbel %esi, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
-;
%cmp = fcmp ogt double %a, %b
%cond = select i1 %cmp, i32 %x, i32 %y
ret i32 %cond
; CHECK-NEXT: cmovael %ecx, %edx
; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retq
-;
%load = load i32, i32* %b, align 4
%cmp = icmp ult i32 %load, %a
%cond = select i1 %cmp, i32 %x, i32 %y
; CHECK-NEXT: cmovael %edx, %ecx
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: retq
-;
%load = load i32, i32* %b, align 4
%cmp = icmp ult i32 %load, %a
%cmp1 = icmp ugt i32 %load, %a
; CHECK-NEXT: cmovnel %edi, %esi
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: retq
-;
%cmp = icmp ne i32 %a, 0
%sel = select i1 %cmp, i32 %a, i32 %b, !prof !0
ret i32 %sel
; CHECK-LABEL: weighted_select2:
; CHECK: # BB#0:
; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: jne [[LABEL_BB5:.*]]
-; CHECK: movl %esi, %edi
-; CHECK-NEXT: [[LABEL_BB5]]
+; CHECK-NEXT: jne .LBB5_2
+; CHECK-NEXT: # BB#1: # %select.false
+; CHECK-NEXT: movl %esi, %edi
+; CHECK-NEXT: .LBB5_2: # %select.end
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
-;
%cmp = icmp ne i32 %a, 0
%sel = select i1 %cmp, i32 %a, i32 %b, !prof !1
ret i32 %sel
; CHECK-LABEL: weighted_select3:
; CHECK: # BB#0:
; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je [[LABEL_BB6:.*]]
-; CHECK: movl %edi, %eax
+; CHECK-NEXT: je .LBB6_1
+; CHECK-NEXT: # BB#2: # %select.end
+; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
-; CHECK: [[LABEL_BB6]]
+; CHECK-NEXT: .LBB6_1: # %select.false
; CHECK-NEXT: movl %esi, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
-;
%cmp = icmp ne i32 %a, 0
%sel = select i1 %cmp, i32 %a, i32 %b, !prof !2
ret i32 %sel
; CHECK-NEXT: cmovnel %edi, %esi
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: retq
-;
%cmp = icmp ne i32 %a, 0
%sel = select i1 %cmp, i32 %a, i32 %b, !prof !3
ret i32 %sel
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=SSE41
define double @test1_add(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%add = add <2 x i32> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: paddw %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%add = add <4 x i16> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: paddb %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%add = add <8 x i8> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: psubd %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%sub = sub <2 x i32> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: psubw %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%sub = sub <4 x i16> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: psubb %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%sub = sub <8 x i8> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%mul = mul <2 x i32> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: pmullw %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%mul = mul <4 x i16> %1, %2
; SSE41-NEXT: pmullw %xmm2, %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%mul = mul <8 x i8> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%and = and <2 x i32> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%and = and <4 x i16> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%and = and <8 x i8> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: orps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%or = or <2 x i32> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: orps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%or = or <4 x i16> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: orps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%or = or <8 x i8> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%xor = xor <2 x i32> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%xor = xor <4 x i16> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%xor = xor <8 x i8> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <2 x float>
%2 = bitcast double %B to <2 x float>
%add = fadd <2 x float> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: subps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <2 x float>
%2 = bitcast double %B to <2 x float>
%sub = fsub <2 x float> %1, %2
; SSE41: # BB#0:
; SSE41-NEXT: mulps %xmm1, %xmm0
; SSE41-NEXT: retq
-;
%1 = bitcast double %A to <2 x float>
%2 = bitcast double %B to <2 x float>
%mul = fmul <2 x float> %1, %2
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs -mattr=avx | FileCheck %s --check-prefix=AVX
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp oeq float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp oeq double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ogt float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ogt double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp oge float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp oge double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp olt float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp olt double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ole float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ole double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ord float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ord double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp uno float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp uno double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ugt float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ugt double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp uge float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp uge double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ult float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ult double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ule float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp ule double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp une float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
-;
%1 = fcmp une double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -show-mc-encoding -mattr=+sse2 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -show-mc-encoding -mattr=+avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -show-mc-encoding -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=AVX512DQ
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=sse2 < %s | FileCheck %s
; PR22428: https://llvm.org/bugs/show_bug.cgi?id=22428
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl %edi, %eax
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = and i32 %bc1, %y
ret i32 %and
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl %edi, %eax
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = and i32 %y, %bc1
ret i32 %and
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = and i32 %bc1, 1
ret i32 %and
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl $2, %eax
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = and i32 2, %bc1
ret i32 %and
; CHECK-NEXT: movd %edi, %xmm1
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = and i32 %bc1, %y
%bc2 = bitcast i32 %and to float
; CHECK-NEXT: movd %edi, %xmm1
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = and i32 %y, %bc1
%bc2 = bitcast i32 %and to float
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = and i32 %bc1, 3
%bc2 = bitcast i32 %and to float
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = and i32 4, %bc1
%bc2 = bitcast i32 %and to float
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%bc2 = bitcast float %y to i32
%and = and i32 %bc1, %bc2
; CHECK: # BB#0:
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%bc2 = bitcast float %y to i32
%and = and i32 %bc1, %bc2
; CHECK: # BB#0:
; CHECK-NEXT: orps %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%bc2 = bitcast float %y to i32
%and = or i32 %bc1, %bc2
; CHECK: # BB#0:
; CHECK-NEXT: xorps %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%bc2 = bitcast float %y to i32
%and = xor i32 %bc1, %bc2
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: orps %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = or i32 %bc1, 3
%bc2 = bitcast i32 %and to float
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: xorps %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = xor i32 %bc1, 3
%bc2 = bitcast i32 %and to float
; CHECK: # BB#0:
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast double %x to i64
%bc2 = bitcast double %y to i64
%and = and i64 %bc1, %bc2
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast double %x to i64
%and = and i64 %bc1, 3
%bc2 = bitcast i64 %and to double
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%and = and i32 %bc1, 2147483648
%bc2 = bitcast i32 %and to float
; CHECK: # BB#0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast double %x to i64
%and = and i64 %bc1, 9223372036854775807
%bc2 = bitcast i64 %and to double
; CHECK: # BB#0:
; CHECK-NEXT: xorps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast float %x to i32
%xor = xor i32 %bc1, 2147483648
%bc2 = bitcast i32 %xor to float
; CHECK: # BB#0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast <2 x double> %x to <2 x i64>
%and = and <2 x i64> %bc1, <i64 9223372036854775807, i64 9223372036854775807>
%bc2 = bitcast <2 x i64> %and to <2 x double>
; CHECK: # BB#0:
; CHECK-NEXT: xorps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
-;
%bc1 = bitcast <4 x float> %x to <4 x i32>
%xor = xor <4 x i32> %bc1, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
%bc2 = bitcast <4 x i32> %xor to <4 x float>