From: Simon Pilgrim
Date: Thu, 10 Nov 2016 22:34:12 +0000 (+0000)
Subject: [X86] Updated knownbits vector ADD/SUB test
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=d31cbc45ab39e746ef8d85281ad8debab7ba893f;p=llvm

[X86] Updated knownbits vector ADD/SUB test

In preparation for demandedelts support

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@286513 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/known-bits-vector.ll b/test/CodeGen/X86/known-bits-vector.ll
index c31dff3d3dd..967447cebc8 100644
--- a/test/CodeGen/X86/known-bits-vector.ll
+++ b/test/CodeGen/X86/known-bits-vector.ll
@@ -204,36 +204,56 @@ define <4 x i32> @knownbits_mask_trunc_shuffle_shl(<4 x i64> %a0) nounwind {
   ret <4 x i32> %4
 }
 
-define <4 x i32> @knownbits_add_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
-; X32-LABEL: knownbits_add_lshr:
+define <4 x i32> @knownbits_mask_add_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
+; X32-LABEL: knownbits_mask_add_shuffle_lshr:
 ; X32:       # BB#0:
-; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-NEXT:    vmovdqa {{.*#+}} xmm2 = [32767,4294967295,4294967295,32767]
+; X32-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; X32-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; X32-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X32-NEXT:    vpsrld $17, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
-; X64-LABEL: knownbits_add_lshr:
+; X64-LABEL: knownbits_mask_add_shuffle_lshr:
 ; X64:       # BB#0:
-; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X64-NEXT:    vmovdqa {{.*#+}} xmm2 = [32767,4294967295,4294967295,32767]
+; X64-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; X64-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; X64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X64-NEXT:    vpsrld $17, %xmm0, %xmm0
 ; X64-NEXT:    retq
-  %1 = and <4 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767>
-  %2 = and <4 x i32> %a1, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
+  %2 = and <4 x i32> %a1, <i32 32767, i32 -1, i32 -1, i32 32767>
   %3 = add <4 x i32> %1, %2
-  %4 = lshr <4 x i32> %3, <i32 17, i32 17, i32 17, i32 17>
-  ret <4 x i32> %4
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
+  %5 = lshr <4 x i32> %4, <i32 17, i32 17, i32 17, i32 17>
+  ret <4 x i32> %5
 }
 
-define <4 x i32> @knownbits_sub_lshr(<4 x i32> %a0) nounwind {
-; X32-LABEL: knownbits_sub_lshr:
+define <4 x i32> @knownbits_mask_sub_shuffle_lshr(<4 x i32> %a0) nounwind {
+; X32-LABEL: knownbits_mask_sub_shuffle_lshr:
 ; X32:       # BB#0:
-; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
+; X32-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X32-NEXT:    vpsrld $22, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
-; X64-LABEL: knownbits_sub_lshr:
+; X64-LABEL: knownbits_mask_sub_shuffle_lshr:
 ; X64:       # BB#0:
-; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
+; X64-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X64-NEXT:    vpsrld $22, %xmm0, %xmm0
 ; X64-NEXT:    retq
-  %1 = and <4 x i32> %a0, <i32 15, i32 15, i32 15, i32 15>
+  %1 = and <4 x i32> %a0, <i32 15, i32 -1, i32 -1, i32 15>
   %2 = sub <4 x i32> <i32 255, i32 255, i32 255, i32 255>, %1
-  %3 = lshr <4 x i32> %2, <i32 22, i32 22, i32 22, i32 22>
-  ret <4 x i32> %3
+  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
+  %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
+  ret <4 x i32> %4
 }
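
What the updated tests exercise: the old splat-masked versions folded to zero via plain knownbits, so each function was just vxorps. The new versions leave lanes 1 and 2 unmasked, which defeats a whole-vector knownbits analysis; only an analysis that tracks which elements the shuffle actually demands can restore the fold. A minimal sketch of that reasoning for the add test, in the same IR, assuming a computeKnownBits that honours a demanded-elements mask (the "demandedelts support" the commit message anticipates):

  ; Lanes 1 and 2 are masked with -1, so knownbits computed across all four
  ; lanes learns nothing about the add; this is why the CHECK lines above
  ; still expect the full vpand/vpaddd/vpshufd/vpsrld sequence for now.
  %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
  %2 = and <4 x i32> %a1, <i32 32767, i32 -1, i32 -1, i32 32767>
  ; The shufflevector demands only lanes 0 and 3, where each operand is at
  ; most 15 bits wide, so the sum is at most 0xFFFE (16 bits):
  %3 = add <4 x i32> %1, %2
  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  ; A demandedelts-aware analysis can therefore prove the shift by 17 clears
  ; every demanded lane, folding the whole function back to zero:
  %5 = lshr <4 x i32> %4, <i32 17, i32 17, i32 17, i32 17>
  ret <4 x i32> %5

The sub test follows the same pattern: in the demanded lanes 0 and 3, 255 - (%a0 & 15) always fits in 8 bits, so the lshr by 22 is provably zero once only those lanes are considered.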