From: Sanjay Patel
Date: Sun, 18 Jun 2017 14:45:23 +0000 (+0000)
Subject: [x86] adjust test constants to maintain coverage; NFC
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=5c2d0a5c5e10bd2f11831091f9138aafa4a33877;p=llvm

[x86] adjust test constants to maintain coverage; NFC

Increment (add 1) could be transformed to sub -1, and we'd lose coverage for these patterns.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@305646 91177308-0d34-0410-b5e6-96231b3b80d8
---
diff --git a/test/CodeGen/X86/avx2-vbroadcast.ll b/test/CodeGen/X86/avx2-vbroadcast.ll
index ba47e2ba15c..971d03af377 100644
--- a/test/CodeGen/X86/avx2-vbroadcast.ll
+++ b/test/CodeGen/X86/avx2-vbroadcast.ll
@@ -653,7 +653,7 @@ define <8 x i32> @V111(<8 x i32> %in) nounwind uwtable readnone ssp {
 ; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
 ; X64-AVX512VL-NEXT: retq
 entry:
-  %g = add <8 x i32> %in,
+  %g = add <8 x i32> %in,
   ret <8 x i32> %g
 }
diff --git a/test/CodeGen/X86/avx512-arith.ll b/test/CodeGen/X86/avx512-arith.ll
index 26be2084056..d96b5882556 100644
--- a/test/CodeGen/X86/avx512-arith.ll
+++ b/test/CodeGen/X86/avx512-arith.ll
@@ -348,7 +348,7 @@ define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; CHECK-NEXT: retq
-  %x = add <8 x i64> %i,
+  %x = add <8 x i64> %i,
   ret <8 x i64> %x
 }
@@ -394,7 +394,7 @@ define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
 ; CHECK-NEXT: retq
-  %x = add <16 x i32> %i,
+  %x = add <16 x i32> %i,
   ret <16 x i32> %x
 }
@@ -446,7 +446,7 @@ define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
 ; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1}
 ; CHECK-NEXT: retq
   %mask = icmp ne <16 x i32> %mask1, zeroinitializer
-  %x = add <16 x i32> %i,
+  %x = add <16 x i32> %i,
   %r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %i
   ret <16 x i32> %r
 }
@@ -473,7 +473,7 @@ define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
 ; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT: retq
   %mask = icmp ne <16 x i32> %mask1, zeroinitializer
-  %x = add <16 x i32> %i,
+  %x = add <16 x i32> %i,
   %r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> zeroinitializer
   ret <16 x i32> %r
 }
diff --git a/test/CodeGen/X86/avx512-logic.ll b/test/CodeGen/X86/avx512-logic.ll
index 7153c1ffaaa..6e08753dbbb 100644
--- a/test/CodeGen/X86/avx512-logic.ll
+++ b/test/CodeGen/X86/avx512-logic.ll
@@ -11,8 +11,8 @@ define <16 x i32> @vpandd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnon
 ; ALL-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <16 x i32> %a,
+  %a2 = add <16 x i32> %a,
   %x = and <16 x i32> %a2, %b
   ret <16 x i32> %x
 }
@@ -25,8 +25,8 @@ define <16 x i32> @vpandnd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readno
 ; ALL-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <16 x i32> %a,
+  %a2 = add <16 x i32> %a,
   %b2 = xor <16 x i32> %b,
   %x = and <16 x i32> %a2, %b2
@@ -41,8 +41,8 @@ define <16 x i32> @vpord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone
 ; ALL-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <16 x i32> %a,
+  %a2 = add <16 x i32> %a,
   %x = or <16 x i32> %a2, %b
   ret <16 x i32> %x
 }
@@ -55,8 +55,8 @@ define <16 x i32> @vpxord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnon
 ; ALL-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <16 x i32> %a,
+  %a2 = add <16 x i32> %a,
   %x = xor <16 x i32> %a2, %b
   ret <16 x i32> %x
 }
@@ -69,7 +69,7 @@ define <8 x i64> @vpandq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone s
 ; ALL-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <8 x i64> %a,
+  %a2 = add <8 x i64> %a,
   %x = and <8 x i64> %a2, %b
   ret <8 x i64> %x
 }
@@ -82,7 +82,7 @@ define <8 x i64> @vpandnq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone
 ; ALL-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <8 x i64> %a,
+  %a2 = add <8 x i64> %a,
   %b2 = xor <8 x i64> %b,
   %x = and <8 x i64> %a2, %b2
   ret <8 x i64> %x
@@ -96,7 +96,7 @@ define <8 x i64> @vporq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ss
 ; ALL-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <8 x i64> %a,
+  %a2 = add <8 x i64> %a,
   %x = or <8 x i64> %a2, %b
   ret <8 x i64> %x
 }
@@ -109,7 +109,7 @@ define <8 x i64> @vpxorq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone s
 ; ALL-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <8 x i64> %a,
+  %a2 = add <8 x i64> %a,
   %x = xor <8 x i64> %a2, %b
   ret <8 x i64> %x
 }
diff --git a/test/CodeGen/X86/avx512vl-logic.ll b/test/CodeGen/X86/avx512vl-logic.ll
index 83fa8d4c34c..6e697cf59a4 100644
--- a/test/CodeGen/X86/avx512vl-logic.ll
+++ b/test/CodeGen/X86/avx512vl-logic.ll
@@ -12,7 +12,7 @@ define <8 x i32> @vpandd256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnon
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <8 x i32> %a,
+  %a2 = add <8 x i32> %a,
   %x = and <8 x i32> %a2, %b
   ret <8 x i32> %x
 }
@@ -25,7 +25,7 @@ define <8 x i32> @vpandnd256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readno
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <8 x i32> %a,
+  %a2 = add <8 x i32> %a,
   %b2 = xor <8 x i32> %a,
   %x = and <8 x i32> %a2, %b2
   ret <8 x i32> %x
@@ -39,7 +39,7 @@ define <8 x i32> @vpord256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <8 x i32> %a,
+  %a2 = add <8 x i32> %a,
   %x = or <8 x i32> %a2, %b
   ret <8 x i32> %x
 }
@@ -52,7 +52,7 @@ define <8 x i32> @vpxord256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnon
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <8 x i32> %a,
+  %a2 = add <8 x i32> %a,
   %x = xor <8 x i32> %a2, %b
   ret <8 x i32> %x
 }
@@ -65,7 +65,7 @@ define <4 x i64> @vpandq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnon
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <4 x i64> %a,
+  %a2 = add <4 x i64> %a,
   %x = and <4 x i64> %a2, %b
   ret <4 x i64> %x
 }
@@ -78,7 +78,7 @@ define <4 x i64> @vpandnq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readno
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <4 x i64> %a,
+  %a2 = add <4 x i64> %a,
   %b2 = xor <4 x i64> %b,
   %x = and <4 x i64> %a2, %b2
   ret <4 x i64> %x
@@ -92,7 +92,7 @@ define <4 x i64> @vporq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <4 x i64> %a,
+  %a2 = add <4 x i64> %a,
   %x = or <4 x i64> %a2, %b
   ret <4 x i64> %x
 }
@@ -105,7 +105,7 @@ define <4 x i64> @vpxorq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnon
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <4 x i64> %a,
+  %a2 = add <4 x i64> %a,
   %x = xor <4 x i64> %a2, %b
   ret <4 x i64> %x
 }
@@ -120,7 +120,7 @@ define <4 x i32> @vpandd128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnon
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <4 x i32> %a,
+  %a2 = add <4 x i32> %a,
   %x = and <4 x i32> %a2, %b
   ret <4 x i32> %x
 }
@@ -133,7 +133,7 @@ define <4 x i32> @vpandnd128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readno
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <4 x i32> %a,
+  %a2 = add <4 x i32> %a,
   %b2 = xor <4 x i32> %b,
   %x = and <4 x i32> %a2, %b2
   ret <4 x i32> %x
@@ -147,7 +147,7 @@ define <4 x i32> @vpord128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <4 x i32> %a,
+  %a2 = add <4 x i32> %a,
   %x = or <4 x i32> %a2, %b
   ret <4 x i32> %x
 }
@@ -160,7 +160,7 @@ define <4 x i32> @vpxord128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnon
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <4 x i32> %a,
+  %a2 = add <4 x i32> %a,
   %x = xor <4 x i32> %a2, %b
   ret <4 x i32> %x
 }
@@ -173,7 +173,7 @@ define <2 x i64> @vpandq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnon
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <2 x i64> %a,
+  %a2 = add <2 x i64> %a,
   %x = and <2 x i64> %a2, %b
   ret <2 x i64> %x
 }
@@ -186,7 +186,7 @@ define <2 x i64> @vpandnq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readno
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <2 x i64> %a,
+  %a2 = add <2 x i64> %a,
   %b2 = xor <2 x i64> %b,
   %x = and <2 x i64> %a2, %b2
   ret <2 x i64> %x
@@ -200,7 +200,7 @@ define <2 x i64> @vporq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <2 x i64> %a,
+  %a2 = add <2 x i64> %a,
   %x = or <2 x i64> %a2, %b
   ret <2 x i64> %x
 }
@@ -213,7 +213,7 @@ define <2 x i64> @vpxorq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnon
 ; CHECK-NEXT: retq
 entry:
   ; Force the execution domain with an add.
-  %a2 = add <2 x i64> %a,
+  %a2 = add <2 x i64> %a,
   %x = xor <2 x i64> %a2, %b
   ret <2 x i64> %x
 }
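
For context, a minimal IR sketch (not part of the patch; the function names are made up) of why a splat-of-1 increment is a fragile test constant: adding 1 to every lane and subtracting -1 from every lane compute the same result, so a combine that prefers the subtraction form could rewrite the add and the tests above would stop exercising the vpaddd/vpaddq broadcast patterns they check for.

; These two functions are equivalent lane-for-lane: x + 1 == x - (-1).
; A canonicalization from the first form to the second would change the
; instruction the FileCheck lines have to match.
define <8 x i32> @inc_as_add(<8 x i32> %x) {
  %r = add <8 x i32> %x, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  ret <8 x i32> %r
}

define <8 x i32> @inc_as_sub(<8 x i32> %x) {
  %r = sub <8 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  ret <8 x i32> %r
}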