From: Roman Lebedev
Date: Wed, 18 Jul 2018 16:19:06 +0000 (+0000)
Subject: [NFC][X86][AArch64][DAGCombine] More tests for optimizeSetCCOfSignedTruncationCheck()
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=c279de70f628db9c7fa2534ada27da77bc4a95e2;p=llvm

[NFC][X86][AArch64][DAGCombine] More tests for optimizeSetCCOfSignedTruncationCheck()

At least one of these cases is more canonical, so we really do have to handle it.

https://godbolt.org/g/pkzP3X
https://rise4fun.com/Alive/pQyh

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@337400 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll b/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
index 84b0ba15d0a..9bfac1bdc05 100644
--- a/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
+++ b/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
@@ -11,7 +11,7 @@
 ; trunc + sext + icmp ne <- not canonical
 ; shl + ashr + icmp ne
 ; add + icmp ult
-; add + icmp uge
+; add + icmp uge/ugt
 ; However only the simplest form (with two shifts) gets lowered best.
 ; ---------------------------------------------------------------------------- ;
@@ -253,6 +253,20 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ugtcmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    and w8, w8, #0xffff
+; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ugt i16 %tmp0, 255 ; (1U << 8) - 1
+  ret i1 %tmp1
+}
+
 ; Negative tests
 ; ---------------------------------------------------------------------------- ;
@@ -367,3 +381,14 @@ define i1 @add_ugecmp_bad_i24_i8(i24 %x) nounwind {
   %tmp1 = icmp uge i24 %tmp0, 256 ; 1U << 8
   ret i1 %tmp1
 }
+
+; Slightly more canonical variant
+define i1 @add_ugtcmp_bad_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ugtcmp_bad_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ugt i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
+  ret i1 %tmp1
+}
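For context, the equivalence the new ugt tests exercise can be checked exhaustively. The following C sketch is my illustration of the tests' comments, not part of the commit: for every i16 value, the new form (uint16_t)(x + 128) > 255 agrees with the existing >= 256 form, and both hold exactly when x does NOT survive an i8 round-trip, i.e. when the signed truncation check fails.

#include <assert.h>
#include <stdint.h>

int main(void) {
  for (int32_t i = INT16_MIN; i <= INT16_MAX; ++i) {
    int16_t x = (int16_t)i;
    uint16_t sum = (uint16_t)(x + 128);   /* %tmp0 = add i16 %x, 128       */
    int ugt = sum > 255;                  /* new: icmp ugt, (1U << 8) - 1  */
    int uge = sum >= 256;                 /* old: icmp uge, 1U << 8        */
    int no_fit = x < -128 || x > 127;     /* i.e. sext(trunc(x)) != x      */
    assert(ugt == uge && uge == no_fit);
  }
  return 0;
}

This is why a combine that handles the uge 256 form also has to treat ugt 255 as the same check.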
diff --git a/test/CodeGen/AArch64/signed-truncation-check.ll b/test/CodeGen/AArch64/signed-truncation-check.ll
index 7c8627580da..6b5fffefe36 100644
--- a/test/CodeGen/AArch64/signed-truncation-check.ll
+++ b/test/CodeGen/AArch64/signed-truncation-check.ll
@@ -11,7 +11,7 @@
 ; trunc + sext + icmp eq <- not canonical
 ; shl + ashr + icmp eq
 ; add + icmp uge
-; add + icmp ult
+; add + icmp ult/ule
 ; However only the simplest form (with two shifts) gets lowered best.
 ; ---------------------------------------------------------------------------- ;
@@ -255,6 +255,20 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ulecmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w0
+; CHECK-NEXT:    and w8, w8, #0xffff
+; CHECK-NEXT:    cmp w8, w0, uxth
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ule i16 %tmp0, 255 ; (1U << 8) - 1
+  ret i1 %tmp1
+}
+
 ; Negative tests
 ; ---------------------------------------------------------------------------- ;
@@ -368,3 +382,13 @@ define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
   %tmp1 = icmp ult i24 %tmp0, 256 ; 1U << 8
   ret i1 %tmp1
 }
+
+define i1 @add_ulecmp_bad_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ulecmp_bad_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w0, wzr, #0x1
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ule i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
+  ret i1 %tmp1
+}
diff --git a/test/CodeGen/X86/lack-of-signed-truncation-check.ll b/test/CodeGen/X86/lack-of-signed-truncation-check.ll
index 397b7f98f1d..813c4c58d4c 100644
--- a/test/CodeGen/X86/lack-of-signed-truncation-check.ll
+++ b/test/CodeGen/X86/lack-of-signed-truncation-check.ll
@@ -12,7 +12,7 @@
 ; trunc + sext + icmp ne <- not canonical
 ; shl + ashr + icmp ne
 ; add + icmp ult
-; add + icmp uge
+; add + icmp uge/ugt
 ; However only the simplest form (with two shifts) gets lowered best.
 ; ---------------------------------------------------------------------------- ;
@@ -418,6 +418,29 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: add_ugtcmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $128, %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    cmpl $255, %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugtcmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    subl $-128, %edi
+; X64-NEXT:    movzwl %di, %eax
+; X64-NEXT:    cmpl $255, %eax
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ugt i16 %tmp0, 255 ; (1U << 8) - 1
+  ret i1 %tmp1
+}
+
 ; Negative tests
 ; ---------------------------------------------------------------------------- ;
@@ -602,3 +625,14 @@ define i1 @add_ugecmp_bad_i24_i8(i24 %x) nounwind {
   %tmp1 = icmp uge i24 %tmp0, 256 ; 1U << 8
   ret i1 %tmp1
 }
+
+; Slightly more canonical variant
+define i1 @add_ugtcmp_bad_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ugtcmp_bad_i16_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    ret{{[l|q]}}
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ugt i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
+  ret i1 %tmp1
+}
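The signed-truncation-check files add the flipped ule form of the same check. A companion C sketch, again my illustration rather than part of the commit: (uint16_t)(x + 128) <= 255 holds exactly when x DOES fit in i8, it agrees with the existing ult 256 form, and with the constant -1 (0xFFFF as i16) the ule comparison is vacuously true, which is why the bad variant folds to a constant.

#include <assert.h>
#include <stdint.h>

int main(void) {
  for (int32_t i = INT16_MIN; i <= INT16_MAX; ++i) {
    int16_t x = (int16_t)i;
    uint16_t sum = (uint16_t)(x + 128);   /* %tmp0 = add i16 %x, 128      */
    int fits = x >= -128 && x <= 127;     /* i.e. sext(trunc(x)) == x     */
    assert((sum <= 255) == fits);         /* new: icmp ule, (1U << 8) - 1 */
    assert((sum < 256) == fits);          /* old: icmp ult, 1U << 8       */
    assert(sum <= 0xFFFF);                /* icmp ule -1: always true     */
  }
  return 0;
}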
diff --git a/test/CodeGen/X86/signed-truncation-check.ll b/test/CodeGen/X86/signed-truncation-check.ll
index f18307fbbc7..c41b4e681ed 100644
--- a/test/CodeGen/X86/signed-truncation-check.ll
+++ b/test/CodeGen/X86/signed-truncation-check.ll
@@ -12,7 +12,7 @@
 ; trunc + sext + icmp eq <- not canonical
 ; shl + ashr + icmp eq
 ; add + icmp uge
-; add + icmp ult
+; add + icmp ult/ule
 ; However only the simplest form (with two shifts) gets lowered best.
 ; ---------------------------------------------------------------------------- ;
@@ -422,6 +422,27 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
   ret i1 %tmp1
 }
 
+; Slightly more canonical variant
+define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: add_ulecmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ulecmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movsbl %dil, %eax
+; X64-NEXT:    cmpw %di, %ax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ule i16 %tmp0, 255 ; (1U << 8) - 1
+  ret i1 %tmp1
+}
+
 ; Negative tests
 ; ---------------------------------------------------------------------------- ;
@@ -602,3 +623,13 @@ define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
   %tmp1 = icmp ult i24 %tmp0, 256 ; 1U << 8
   ret i1 %tmp1
 }
+
+define i1 @add_ulecmp_bad_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ulecmp_bad_i16_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    ret{{[l|q]}}
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ule i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
+  ret i1 %tmp1
+}
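The "when we +1 it, it will wrap to 0" comments in the bad tests refer to rewriting a strict predicate into its non-strict neighbor by bumping the constant (ugt C becomes uge C+1, ule C becomes ult C+1). A hedged C sketch of that hazard; the helper name and structure are illustrative, not LLVM's actual code:

#include <assert.h>
#include <stdint.h>

/* Rewrite (x ugt C) as (x uge C+1) on i16, bailing out when C+1
 * would wrap to 0, which would turn "never true" into "always true". */
static int ugt_via_uge(uint16_t x, uint16_t c) {
  if (c == UINT16_MAX)           /* C+1 wraps: x ugt 0xFFFF is simply */
    return 0;                    /* never true, so fold to false      */
  return x >= (uint16_t)(c + 1);
}

int main(void) {
  assert(ugt_via_uge(300, 255) == 1);        /* 300 u> 255            */
  assert(ugt_via_uge(100, 255) == 0);        /* 100 u<= 255           */
  assert(ugt_via_uge(5, UINT16_MAX) == 0);   /* guard path: no wrap   */
  return 0;
}

The bad tests above pin down exactly these degenerate constants, so a combine that adjusts the constant must either guard against the wrap or let the comparison fold to a constant first.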