From: Roman Lebedev
Date: Tue, 23 Jul 2019 12:42:49 +0000 (+0000)
Subject: [InstSimplify][NFC] Tests for skipping 'div-by-0' checks before inverted @llvm.umul...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=392def1d8491cb353095db40dba1abbaa575de98;p=llvm

[InstSimplify][NFC] Tests for skipping 'div-by-0' checks before inverted @llvm.umul.with.overflow

It would already be handled by the non-inverted case if we hoisted the
`not` in InstCombine, but we don't (granted, we don't sink it in this
case either), so this is a separate case.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@366801 91177308-0d34-0410-b5e6-96231b3b80d8
---
(An illustrative C sketch of the source-level pattern these tests model is appended after the diff.)

diff --git a/test/Transforms/InstSimplify/div-by-0-guard-before-smul_ov-not.ll b/test/Transforms/InstSimplify/div-by-0-guard-before-smul_ov-not.ll
new file mode 100644
index 00000000000..a2fd8351d59
--- /dev/null
+++ b/test/Transforms/InstSimplify/div-by-0-guard-before-smul_ov-not.ll
@@ -0,0 +1,106 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instsimplify -S | FileCheck %s
+
+declare { i4, i1 } @llvm.smul.with.overflow.i4(i4, i4) #1
+
+define i1 @t0_umul(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @t0_umul(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp eq i4 %size, 0
+  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
+  %smul.ov = extractvalue { i4, i1 } %smul, 1
+  %phitmp = xor i1 %smul.ov, true
+  %or = or i1 %cmp, %phitmp
+  ret i1 %or
+}
+
+define i1 @t1_commutative(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @t1_commutative(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = or i1 [[PHITMP]], [[CMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp eq i4 %size, 0
+  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
+  %smul.ov = extractvalue { i4, i1 } %smul, 1
+  %phitmp = xor i1 %smul.ov, true
+  %or = or i1 %phitmp, %cmp ; swapped
+  ret i1 %or
+}
+
+define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
+; CHECK-LABEL: @n2_wrong_size(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE1:%.*]], 0
+; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp eq i4 %size1, 0 ; not %size0
+  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size0, i4 %nmemb)
+  %smul.ov = extractvalue { i4, i1 } %smul, 1
+  %phitmp = xor i1 %smul.ov, true
+  %or = or i1 %cmp, %phitmp
+  ret i1 %or
+}
+
+define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n3_wrong_pred(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
+; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp ne i4 %size, 0 ; not 'eq'
+  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
+  %smul.ov = extractvalue { i4, i1 } %smul, 1
+  %phitmp = xor i1 %smul.ov, true
+  %or = or i1 %cmp, %phitmp
+  ret i1 %or
+}
+
+define i1 @n4_not_and(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n4_not_and(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = and i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp eq i4 %size, 0
+  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
+  %smul.ov = extractvalue { i4, i1 } %smul, 1
+  %phitmp = xor i1 %smul.ov, true
+  %or = and i1 %cmp, %phitmp ; not 'or'
+  ret i1 %or
+}
+
+define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n5_not_zero(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 1
+; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp eq i4 %size, 1 ; should be '0'
+  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
+  %smul.ov = extractvalue { i4, i1 } %smul, 1
+  %phitmp = xor i1 %smul.ov, true
+  %or = or i1 %cmp, %phitmp
+  ret i1 %or
+}
diff --git a/test/Transforms/InstSimplify/div-by-0-guard-before-umul_ov-not.ll b/test/Transforms/InstSimplify/div-by-0-guard-before-umul_ov-not.ll
new file mode 100644
index 00000000000..a34b4202836
--- /dev/null
+++ b/test/Transforms/InstSimplify/div-by-0-guard-before-umul_ov-not.ll
@@ -0,0 +1,106 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instsimplify -S | FileCheck %s
+
+declare { i4, i1 } @llvm.umul.with.overflow.i4(i4, i4) #1
+
+define i1 @t0_umul(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @t0_umul(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp eq i4 %size, 0
+  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
+  %umul.ov = extractvalue { i4, i1 } %umul, 1
+  %phitmp = xor i1 %umul.ov, true
+  %or = or i1 %cmp, %phitmp
+  ret i1 %or
+}
+
+define i1 @t1_commutative(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @t1_commutative(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = or i1 [[PHITMP]], [[CMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp eq i4 %size, 0
+  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
+  %umul.ov = extractvalue { i4, i1 } %umul, 1
+  %phitmp = xor i1 %umul.ov, true
+  %or = or i1 %phitmp, %cmp ; swapped
+  ret i1 %or
+}
+
+define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
+; CHECK-LABEL: @n2_wrong_size(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE1:%.*]], 0
+; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp eq i4 %size1, 0 ; not %size0
+  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size0, i4 %nmemb)
+  %umul.ov = extractvalue { i4, i1 } %umul, 1
+  %phitmp = xor i1 %umul.ov, true
+  %or = or i1 %cmp, %phitmp
+  ret i1 %or
+}
+
+define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n3_wrong_pred(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
+; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp ne i4 %size, 0 ; not 'eq'
+  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
+  %umul.ov = extractvalue { i4, i1 } %umul, 1
+  %phitmp = xor i1 %umul.ov, true
+  %or = or i1 %cmp, %phitmp
+  ret i1 %or
+}
+
+define i1 @n4_not_and(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n4_not_and(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = and i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp eq i4 %size, 0
+  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
+  %umul.ov = extractvalue { i4, i1 } %umul, 1
+  %phitmp = xor i1 %umul.ov, true
+  %or = and i1 %cmp, %phitmp ; not 'or'
+  ret i1 %or
+}
+
+define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n5_not_zero(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 1
+; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT:    ret i1 [[OR]]
+;
+  %cmp = icmp eq i4 %size, 1 ; should be '0'
+  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
+  %umul.ov = extractvalue { i4, i1 } %umul, 1
+  %phitmp = xor i1 %umul.ov, true
+  %or = or i1 %cmp, %phitmp
+  ret i1 %or
+}
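
For reference, the IR pattern exercised by these tests typically comes from a
guarded multiply at the source level, where the multiplicand is first tested
against zero purely to keep a later division well-defined. A minimal C sketch,
with illustrative names (`mul_fits`, `size`, `nmemb` are not taken from the
commit):

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative sketch only. Returns true when `size * nmemb` does not
       wrap (or when `size` is zero). The `size == 0` test exists solely to
       guard the division; once the divide-and-compare check is expressed
       via @llvm.umul.with.overflow, as in the tests above, that guard is
       the `icmp eq i4 %size, 0` or'ed with the inverted overflow bit
       (`xor i1 %umul.ov, true`). */
    static bool mul_fits(size_t size, size_t nmemb) {
      return size == 0 || (size * nmemb) / size == nmemb;
    }

Because @llvm.umul.with.overflow (and its signed counterpart) cannot report
overflow when one operand is zero, the zero check adds nothing once the
inverted overflow bit is present, so the whole `or` should be foldable to just
the inverted overflow bit. These NFC tests pin the current, unfolded output,
presumably ahead of teaching InstSimplify that fold, mirroring the existing
non-inverted tests.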