From 46cba73063fd8b5a11b155615b2bd801b90b916f Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Mon, 22 Jul 2019 19:58:49 +0000
Subject: [PATCH] [X86] When using AND+PACKUS in lowerV16I8Shuffle, generate
 the build vector directly in v16i8 with the correct 0x00 or 0xFF elements
 rather than using another VT and bitcasting it.

The build_vector will become a constant pool load. Using the desired
type from the start ensures we don't generate a bitcast of the constant
pool load, which would then need to be folded with the load.

While experimenting with another patch, I noticed that
SimplifyDemandedBits can't handle the case where the load type and the
constant pool type don't match. We should probably fix that, but this
was a simple way to fix the issue I saw. A standalone sketch of the mask
layouts the new loop produces follows the patch.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@366732 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp            |  9 ++++---
 test/CodeGen/X86/avg.ll                       |  2 +-
 test/CodeGen/X86/masked_store_trunc.ll        |  4 ++--
 test/CodeGen/X86/mmx-arith.ll                 |  4 ++--
 test/CodeGen/X86/oddshuffles.ll               |  4 ++--
 test/CodeGen/X86/oddsubvector.ll              |  2 +-
 test/CodeGen/X86/psubus.ll                    |  2 +-
 test/CodeGen/X86/sse2-intrinsics-canonical.ll |  2 +-
 test/CodeGen/X86/vector-reduce-and-bool.ll    |  4 ++--
 test/CodeGen/X86/vector-reduce-or-bool.ll     |  4 ++--
 test/CodeGen/X86/vector-reduce-xor-bool.ll    |  4 ++--
 test/CodeGen/X86/vector-shuffle-128-v16.ll    |  2 +-
 test/CodeGen/X86/vector-trunc-math-widen.ll   | 24 +++++++++----------
 test/CodeGen/X86/vector-trunc-math.ll         | 24 +++++++++----------
 test/CodeGen/X86/vector-trunc-widen.ll        |  6 ++---
 test/CodeGen/X86/vector-trunc.ll              |  6 ++---
 16 files changed, 51 insertions(+), 52 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 41f19076a25..367590ac138 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -14157,11 +14157,10 @@ static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
     // First we need to zero all the dropped bytes.
     assert(NumEvenDrops <= 3 &&
            "No support for dropping even elements more than 3 times.");
-    // We use the mask type to pick which bytes are preserved based on how many
-    // elements are dropped.
-    MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
-    SDValue ByteClearMask = DAG.getBitcast(
-        MVT::v16i8, DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1]));
+    SmallVector<SDValue, 16> ByteClearOps(16, DAG.getConstant(0, DL, MVT::i8));
+    for (unsigned i = 0; i != 16; i += 1 << NumEvenDrops)
+      ByteClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i8);
+    SDValue ByteClearMask = DAG.getBuildVector(MVT::v16i8, DL, ByteClearOps);
     V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
     if (!IsSingleInput)
       V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
diff --git a/test/CodeGen/X86/avg.ll b/test/CodeGen/X86/avg.ll
index 86d8628c9ae..8a07b44bd50 100644
--- a/test/CodeGen/X86/avg.ll
+++ b/test/CodeGen/X86/avg.ll
@@ -226,7 +226,7 @@ define void @avg_v48i8(<48 x i8>* %a, <48 x i8>* %b) nounwind {
 ; SSE2-NEXT:    psubd %xmm5, %xmm0
 ; SSE2-NEXT:    psrld $1, %xmm3
 ; SSE2-NEXT:    psrld $1, %xmm8
-; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSE2-NEXT:    pand %xmm7, %xmm8
 ; SSE2-NEXT:    pand %xmm7, %xmm3
 ; SSE2-NEXT:    packuswb %xmm8, %xmm3
diff --git a/test/CodeGen/X86/masked_store_trunc.ll b/test/CodeGen/X86/masked_store_trunc.ll
index 8e73194bfa9..4111e83b599 100644
--- a/test/CodeGen/X86/masked_store_trunc.ll
+++ b/test/CodeGen/X86/masked_store_trunc.ll
@@ -4673,7 +4673,7 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, <32 x i8>* %p, <32 x i8> %ma
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm7, %xmm7
 ; SSE2-NEXT:    pcmpeqb %xmm4, %xmm7
-; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm6, %xmm1
 ; SSE2-NEXT:    pand %xmm6, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
@@ -6209,7 +6209,7 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, <16 x i8>* %p, <16 x i8> %ma
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
 ; SSE2-NEXT:    pcmpeqb %xmm2, %xmm3
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm4, %xmm1
 ; SSE2-NEXT:    pand %xmm4, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
diff --git a/test/CodeGen/X86/mmx-arith.ll b/test/CodeGen/X86/mmx-arith.ll
index 5df75efb1ea..a51fa2ac80e 100644
--- a/test/CodeGen/X86/mmx-arith.ll
+++ b/test/CodeGen/X86/mmx-arith.ll
@@ -42,7 +42,7 @@ define void @test0(x86_mmx* %A, x86_mmx* %B) {
 ; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; X32-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; X32-NEXT:    pmullw %xmm0, %xmm1
-; X32-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
+; X32-NEXT:    movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; X32-NEXT:    movdqa %xmm1, %xmm2
 ; X32-NEXT:    pand %xmm0, %xmm2
 ; X32-NEXT:    packuswb %xmm2, %xmm2
@@ -100,7 +100,7 @@ define void @test0(x86_mmx* %A, x86_mmx* %B) {
 ; X64-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; X64-NEXT:    pmullw %xmm0, %xmm1
-; X64-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
+; X64-NEXT:    movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; X64-NEXT:    movdqa %xmm1, %xmm2
 ; X64-NEXT:    pand %xmm0, %xmm2
 ; X64-NEXT:    packuswb %xmm2, %xmm2
diff --git a/test/CodeGen/X86/oddshuffles.ll b/test/CodeGen/X86/oddshuffles.ll
index 2c36385e714..2e66a22d711 100644
--- a/test/CodeGen/X86/oddshuffles.ll
+++ b/test/CodeGen/X86/oddshuffles.ll
@@ -259,7 +259,7 @@ define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pandn %xmm0, %xmm2
 ; SSE2-NEXT:    por %xmm1, %xmm2
-; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    packuswb %xmm0, %xmm0
 ; SSE2-NEXT:    movdqa %xmm2, -{{[0-9]+}}(%rsp)
@@ -659,7 +659,7 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
 define void @pr29025(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <12 x i8> *%p) nounwind {
 ; SSE2-LABEL: pr29025:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSE2-NEXT:    pand %xmm3, %xmm1
 ; SSE2-NEXT:    pand %xmm3, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
diff --git a/test/CodeGen/X86/oddsubvector.ll b/test/CodeGen/X86/oddsubvector.ll
index 9bc6c0f380a..d9cc5232a83 100644
--- a/test/CodeGen/X86/oddsubvector.ll
+++ b/test/CodeGen/X86/oddsubvector.ll
@@ -17,7 +17,7 @@ define void @insert_v7i8_v2i16_2(<7 x i8> *%a0, <2 x i16> *%a1) nounwind {
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,3]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE2-NEXT:    movaps {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movaps {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    andps %xmm0, %xmm1
 ; SSE2-NEXT:    packuswb %xmm1, %xmm1
 ; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
diff --git a/test/CodeGen/X86/psubus.ll b/test/CodeGen/X86/psubus.ll
index 1009b1ef906..351955fbb03 100644
--- a/test/CodeGen/X86/psubus.ll
+++ b/test/CodeGen/X86/psubus.ll
@@ -622,7 +622,7 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
 ; SSE2-NEXT:    psubd %xmm5, %xmm4
 ; SSE2-NEXT:    por %xmm0, %xmm5
 ; SSE2-NEXT:    pcmpgtd %xmm9, %xmm5
-; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSE2-NEXT:    pand %xmm9, %xmm5
 ; SSE2-NEXT:    movdqa %xmm3, %xmm7
 ; SSE2-NEXT:    pxor %xmm0, %xmm7
diff --git a/test/CodeGen/X86/sse2-intrinsics-canonical.ll b/test/CodeGen/X86/sse2-intrinsics-canonical.ll
index 506fb9eb100..e8309e047b1 100644
--- a/test/CodeGen/X86/sse2-intrinsics-canonical.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-canonical.ll
@@ -93,7 +93,7 @@ define <8 x i16> @test_x86_sse2_psubus_w(<8 x i16> %a0, <8 x i16> %a1) {
 define <8 x i8> @test_x86_sse2_paddus_b_64(<8 x i8> %a0, <8 x i8> %a1) {
 ; SSE-LABEL: test_x86_sse2_paddus_b_64:
 ; SSE:       ## %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    ## encoding: [0x66,0x0f,0x6f,0x15,A,A,A,A]
 ; SSE-NEXT:    ## fixup A - offset: 4, value: LCPI4_0, kind: FK_Data_4
 ; SSE-NEXT:    pand %xmm2, %xmm1 ## encoding: [0x66,0x0f,0xdb,0xca]
diff --git a/test/CodeGen/X86/vector-reduce-and-bool.ll b/test/CodeGen/X86/vector-reduce-and-bool.ll
index b8d57f6954d..91120dfcbd9 100644
--- a/test/CodeGen/X86/vector-reduce-and-bool.ll
+++ b/test/CodeGen/X86/vector-reduce-and-bool.ll
@@ -349,7 +349,7 @@ define i1 @trunc_v8i32_v8i1(<8 x i32>) {
 define i1 @trunc_v16i16_v16i1(<16 x i16>) {
 ; SSE2-LABEL: trunc_v16i16_v16i1:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
@@ -690,7 +690,7 @@ define i1 @trunc_v16i32_v16i1(<16 x i32>) {
 define i1 @trunc_v32i16_v32i1(<32 x i16>) {
 ; SSE2-LABEL: trunc_v32i16_v32i1:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm4, %xmm3
 ; SSE2-NEXT:    pand %xmm4, %xmm2
 ; SSE2-NEXT:    packuswb %xmm3, %xmm2
diff --git a/test/CodeGen/X86/vector-reduce-or-bool.ll b/test/CodeGen/X86/vector-reduce-or-bool.ll
index de5f52ffc92..7a411610384 100644
--- a/test/CodeGen/X86/vector-reduce-or-bool.ll
+++ b/test/CodeGen/X86/vector-reduce-or-bool.ll
@@ -340,7 +340,7 @@ define i1 @trunc_v8i32_v8i1(<8 x i32>) {
 define i1 @trunc_v16i16_v16i1(<16 x i16>) {
 ; SSE2-LABEL: trunc_v16i16_v16i1:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
@@ -681,7 +681,7 @@ define i1 @trunc_v16i32_v16i1(<16 x i32>) {
 define i1 @trunc_v32i16_v32i1(<32 x i16>) {
 ; SSE2-LABEL: trunc_v32i16_v32i1:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm4, %xmm3
 ; SSE2-NEXT:    pand %xmm4, %xmm2
 ; SSE2-NEXT:    packuswb %xmm3, %xmm2
diff --git a/test/CodeGen/X86/vector-reduce-xor-bool.ll b/test/CodeGen/X86/vector-reduce-xor-bool.ll
index eb0ba63fc12..059c0422b0a 100644
--- a/test/CodeGen/X86/vector-reduce-xor-bool.ll
+++ b/test/CodeGen/X86/vector-reduce-xor-bool.ll
@@ -355,7 +355,7 @@ define i1 @trunc_v8i32_v8i1(<8 x i32>) {
 define i1 @trunc_v16i16_v16i1(<16 x i16>) {
 ; SSE2-LABEL: trunc_v16i16_v16i1:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
@@ -767,7 +767,7 @@ define i1 @trunc_v16i32_v16i1(<16 x i32>) {
 define i1 @trunc_v32i16_v32i1(<32 x i16>) {
 ; SSE2-LABEL: trunc_v32i16_v32i1:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm4, %xmm3
 ; SSE2-NEXT:    pand %xmm4, %xmm2
 ; SSE2-NEXT:    packuswb %xmm3, %xmm2
diff --git a/test/CodeGen/X86/vector-shuffle-128-v16.ll b/test/CodeGen/X86/vector-shuffle-128-v16.ll
index 4df979adbdf..5b3ce4f9eab 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v16.ll
@@ -1688,7 +1688,7 @@ define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_01_02_03_04_05_06(
 define <16 x i8> @PR12412(<16 x i8> %inval1, <16 x i8> %inval2) {
 ; SSE2-LABEL: PR12412:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vector-trunc-math-widen.ll b/test/CodeGen/X86/vector-trunc-math-widen.ll
index 5cba4a9342e..1c2813d35f0 100644
--- a/test/CodeGen/X86/vector-trunc-math-widen.ll
+++ b/test/CodeGen/X86/vector-trunc-math-widen.ll
@@ -389,7 +389,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    paddw %xmm2, %xmm0
 ; SSE-NEXT:    paddw %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -812,7 +812,7 @@ define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_add_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -1247,7 +1247,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    psubw %xmm2, %xmm0
 ; SSE-NEXT:    psubw %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -1638,7 +1638,7 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -2240,7 +2240,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pmullw %xmm2, %xmm0
 ; SSE-NEXT:    pmullw %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -2728,7 +2728,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -3135,7 +3135,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
 define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE-LABEL: trunc_and_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm1, %xmm3
 ; SSE-NEXT:    pand %xmm4, %xmm2
@@ -3507,7 +3507,7 @@ define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_and_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -3914,7 +3914,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm2, %xmm0
 ; SSE-NEXT:    pxor %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -4284,7 +4284,7 @@ define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_xor_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -4691,7 +4691,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    por %xmm2, %xmm0
 ; SSE-NEXT:    por %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -5061,7 +5061,7 @@ define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_or_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vector-trunc-math.ll b/test/CodeGen/X86/vector-trunc-math.ll
index 3d2202e385a..b91b2592f46 100644
--- a/test/CodeGen/X86/vector-trunc-math.ll
+++ b/test/CodeGen/X86/vector-trunc-math.ll
@@ -389,7 +389,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    paddw %xmm2, %xmm0
 ; SSE-NEXT:    paddw %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -812,7 +812,7 @@ define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_add_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -1247,7 +1247,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    psubw %xmm2, %xmm0
 ; SSE-NEXT:    psubw %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -1638,7 +1638,7 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -2240,7 +2240,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pmullw %xmm2, %xmm0
 ; SSE-NEXT:    pmullw %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -2728,7 +2728,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -3135,7 +3135,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
 define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE-LABEL: trunc_and_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm1, %xmm3
 ; SSE-NEXT:    pand %xmm4, %xmm2
@@ -3507,7 +3507,7 @@ define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_and_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -3914,7 +3914,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm2, %xmm0
 ; SSE-NEXT:    pxor %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -4284,7 +4284,7 @@ define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_xor_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -4691,7 +4691,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    por %xmm2, %xmm0
 ; SSE-NEXT:    por %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -5061,7 +5061,7 @@ define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_or_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vector-trunc-widen.ll b/test/CodeGen/X86/vector-trunc-widen.ll
index 23a4a978a2d..42bf9a7c049 100644
--- a/test/CodeGen/X86/vector-trunc-widen.ll
+++ b/test/CodeGen/X86/vector-trunc-widen.ll
@@ -1042,7 +1042,7 @@ entry:
 define void @trunc16i16_16i8(<16 x i16> %a) {
 ; SSE2-LABEL: trunc16i16_16i8:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
@@ -1251,7 +1251,7 @@ entry:
 define void @trunc32i16_32i8(<32 x i16> %a) {
 ; SSE2-LABEL: trunc32i16_32i8:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm4, %xmm1
 ; SSE2-NEXT:    pand %xmm4, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
@@ -1722,7 +1722,7 @@ entry:
 define <16 x i8> @trunc2x8i16_16i8(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: trunc2x8i16_16i8:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vector-trunc.ll b/test/CodeGen/X86/vector-trunc.ll
index 35b190b1132..d4874c18870 100644
--- a/test/CodeGen/X86/vector-trunc.ll
+++ b/test/CodeGen/X86/vector-trunc.ll
@@ -1052,7 +1052,7 @@ entry:
 define void @trunc16i16_16i8(<16 x i16> %a) {
 ; SSE2-LABEL: trunc16i16_16i8:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
@@ -1261,7 +1261,7 @@ entry:
 define void @trunc32i16_32i8(<32 x i16> %a) {
 ; SSE2-LABEL: trunc32i16_32i8:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm4, %xmm1
 ; SSE2-NEXT:    pand %xmm4, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
@@ -1728,7 +1728,7 @@ entry:
 define <16 x i8> @trunc2x8i16_16i8(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: trunc2x8i16_16i8:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
-- 
2.40.0
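
The sketch referenced in the commit message: a minimal standalone C++ program (no LLVM dependencies; buildByteClearMask is an illustrative name, not an LLVM API) that mirrors the indexing logic of the new loop. Every (1 << NumEvenDrops)-th byte is kept as 0xFF and the rest are zeroed, so the AND clears exactly the bytes the PACKUS sequence will drop.

#include <array>
#include <cstdio>

// Build the 16-byte clear mask directly in byte elements, the way the
// new code builds the v16i8 build_vector, instead of creating a wider
// 0xFF constant and bitcasting it to v16i8.
static std::array<unsigned char, 16> buildByteClearMask(unsigned NumEvenDrops) {
  std::array<unsigned char, 16> Mask{}; // zero-filled, like getConstant(0, DL, MVT::i8)
  for (unsigned i = 0; i != 16; i += 1u << NumEvenDrops)
    Mask[i] = 0xFF;
  return Mask;
}

int main() {
  // NumEvenDrops is at most 3, matching the assert in the patch.
  for (unsigned Drops = 1; Drops <= 3; ++Drops) {
    std::array<unsigned char, 16> M = buildByteClearMask(Drops);
    std::printf("NumEvenDrops=%u: [", Drops);
    for (unsigned i = 0; i != 16; ++i)
      std::printf("%u%s", static_cast<unsigned>(M[i]), i + 1 != 16 ? "," : "]\n");
  }
  return 0;
}

For NumEvenDrops == 1 this prints [255,0,255,0,...] and for NumEvenDrops == 2 it prints [255,0,0,0,255,0,0,0,...], which are exactly the v16i8 constants that now appear in the updated CHECK lines above in place of the old v8i16/v4i32 all-255 constants.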