Summary:
Currently, when computing a GEP offset using the function EmitGEPOffset
for the following instruction:
getelementptr inbounds i32, i32* %p, i64 %offs
we get:
mul nuw i64 %offs, 4
Unfortunately, we cannot assume that unsigned wrapping won't happen
here, because %offs is allowed to be negative.
Making such assumptions can lead to miscompilations: see the new test
test24_neg_offs in InstCombine/icmp.ll. Without this patch, InstCombine
would generate the following comparison:
icmp eq i64 %offs, 4611686018427387902 ; 0x3ffffffffffffffe
whereas the correct value to compare with is -2.
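Where does that constant come from? With nuw on the multiply, InstCombine may
solve "4 * %offs == -8" by unsigned division of the bit pattern of -8, whereas
nsw justifies the signed division that yields -2. A minimal standalone C++
sketch of the two divisions (illustrative only, not part of the patch):

  #include "llvm/ADT/APInt.h"
  #include <cstdint>
  #include <iostream>

  int main() {
    llvm::APInt Delta(64, uint64_t(-8)); // byte offset when %offs = -2, scale 4
    llvm::APInt Scale(64, 4);
    // nuw-style reasoning: unsigned division of 0xFFFFFFFFFFFFFFF8 by 4
    std::cout << Delta.udiv(Scale).getZExtValue() << '\n'; // 4611686018427387902
    // nsw-style reasoning: signed division of -8 by 4
    std::cout << Delta.sdiv(Scale).getSExtValue() << '\n'; // -2
  }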
This patch replaces the NUW flag with NSW in the multiplication
instructions generated by EmitGEPOffset and adjusts the test suite.
https://bugs.llvm.org/show_bug.cgi?id=42699
Reviewers: chandlerc, craig.topper, ostannard, lebedev.ri, spatel, efriedma, nlopes, aqjune
Reviewed By: lebedev.ri
Subscribers: reames, lebedev.ri, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68342
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@375089 91177308-0d34-0410-b5e6-96231b3b80d8
Value *Result = Constant::getNullValue(IntPtrTy);
// If the GEP is inbounds, we know that none of the addressing operations will
- // overflow in an unsigned sense.
+ // overflow in a signed sense.
bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
// Build a mask for high order bits.
Constant *Scale = ConstantInt::get(IntPtrTy, Size);
Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
- Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
+ Scale =
+ ConstantExpr::getMul(OC, Scale, false /*NUW*/, isInBounds /*NSW*/);
// Emit an add instruction.
Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
continue;
if (Size != 1) {
// We'll let instcombine(mul) convert this to a shl if possible.
Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
- GEP->getName()+".idx", isInBounds /*NUW*/);
+ GEP->getName() + ".idx", false /*NUW*/,
+ isInBounds /*NSW*/);
}
// Emit an add instruction.
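For reference, a minimal sketch of the CreateMul call shape used above (a
hypothetical helper, not the in-tree code): the scaled index of an inbounds
GEP may be marked nsw, but never nuw, because the index may be negative.

  #include <cstdint>
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/IRBuilder.h"

  // Illustration only: scale a GEP index by the element size in bytes.
  llvm::Value *scaleGEPIndex(llvm::IRBuilder<> &B, llvm::Value *Idx,
                             uint64_t ElemSize, bool IsInBounds) {
    llvm::Constant *Scale = llvm::ConstantInt::get(Idx->getType(), ElemSize);
    return B.CreateMul(Idx, Scale, "idx.scaled",
                       /*HasNUW=*/false, /*HasNSW=*/IsInBounds);
  }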
define <2 x i1> @test6(<2 x i32> %X, <2 x %S*> %P) nounwind {
; CHECK-LABEL: @test6(
-; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i32> [[X:%.*]], <i32 1073741823, i32 1073741823>
+; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i32> %X, <i32 -1, i32 -1>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
%A = getelementptr inbounds %S, <2 x %S*> %P, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 1>, <2 x i32> %X
; Same as above, but indices scalarized.
define <2 x i1> @test6b(<2 x i32> %X, <2 x %S*> %P) nounwind {
; CHECK-LABEL: @test6b(
-; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i32> [[X:%.*]], <i32 1073741823, i32 1073741823>
+; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i32> %X, <i32 -1, i32 -1>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
%A = getelementptr inbounds %S, <2 x %S*> %P, i32 0, i32 1, <2 x i32> %X
; CHECK: %C = icmp eq i64 %X, -1
}
-; This is a test of icmp + shl nuw in disguise - 4611... is 0x3fff...
define <2 x i1> @test13_vector(<2 x i64> %X, <2 x %S*> %P) nounwind {
; CHECK-LABEL: @test13_vector(
-; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i64> %X, <i64 4611686018427387903, i64 4611686018427387903>
+; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i64> %X, <i64 -1, i64 -1>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
%A = getelementptr inbounds %S, <2 x %S*> %P, <2 x i64> zeroinitializer, <2 x i32> <i32 1, i32 1>, <2 x i64> %X
ret <2 x i1> %C
}
-; This is a test of icmp + shl nuw in disguise - 4611... is 0x3fff...
define <2 x i1> @test13_vector2(i64 %X, <2 x %S*> %P) nounwind {
; CHECK-LABEL: @test13_vector2(
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i64> undef, i64 [[X:%.*]], i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = shl nuw <2 x i64> [[DOTSPLATINSERT]], <i64 2, i64 undef>
+; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i64> [[DOTSPLATINSERT]], <i64 2, i64 undef>
; CHECK-NEXT: [[A_IDX:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> undef, <2 x i32> zeroinitializer
; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i64> [[A_IDX]], <i64 -4, i64 -4>
; CHECK-NEXT: ret <2 x i1> [[C]]
define <2 x i1> @test13_vector3(i64 %X, <2 x %S*> %P) nounwind {
; CHECK-LABEL: @test13_vector3(
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i64> undef, i64 [[X:%.*]], i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = shl nuw <2 x i64> [[DOTSPLATINSERT]], <i64 2, i64 undef>
+; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i64> [[DOTSPLATINSERT]], <i64 2, i64 undef>
; CHECK-NEXT: [[A_IDX:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> undef, <2 x i32> zeroinitializer
; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i64> [[A_IDX]], <i64 4, i64 4>
; CHECK-NEXT: ret <2 x i1> [[C]]
ret i1 %C
}
-; This is a test of icmp + shl nuw in disguise - 16383 is 0x3fff.
define <2 x i1> @test13_vector_as1(<2 x i16> %X, <2 x %S addrspace(1)*> %P) {
; CHECK-LABEL: @test13_vector_as1(
-; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i16> %X, <i16 16383, i16 16383>
+; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i16> %X, <i16 -1, i16 -1>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
%A = getelementptr inbounds %S, <2 x %S addrspace(1)*> %P, <2 x i16> <i16 0, i16 0>, <2 x i32> <i32 1, i32 1>, <2 x i16> %X
; CHECK-LABEL: @test60(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i32 [[TMP1]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i32 [[TMP1]], 2
; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i32 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[TMP3]]
;
; CHECK-LABEL: @test60_as1(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 [[TMP1]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[TMP3]]
;
define i1 @test60_addrspacecast_smaller(i8* %foo, i16 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast_smaller(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 [[I:%.*]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[I:%.*]], 2
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i16
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
ret i1 %cmp
}
+; Note: offs can be negative; LLVM used to make an incorrect assumption that
+; unsigned overflow does not happen during offset computation.
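+; (%delta equals -4 * %offs, so with the inbounds guarantee %delta == 8
+; holds exactly when %offs == -2)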
+define i1 @test24_neg_offs(i32* %p, i64 %offs) {
+; CHECK-LABEL: @test24_neg_offs(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[OFFS:%.*]], -2
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %p1 = getelementptr inbounds i32, i32* %p, i64 %offs
+ %conv1 = ptrtoint i32* %p to i64
+ %conv2 = ptrtoint i32* %p1 to i64
+ %delta = sub i64 %conv1, %conv2
+ %cmp = icmp eq i64 %delta, 8
+ ret i1 %cmp
+}
+
@X_as1 = addrspace(1) global [1000 x i32] zeroinitializer
define i1 @test24_as1(i64 %i) {
define i1 @test60(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i64 [[I:%.*]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i64 [[GEP1_IDX]], [[J:%.*]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
; CHECK-LABEL: @test60_as1(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 [[TMP1]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[TMP3]]
;
; bitcast. This uses the same sized addrspace.
define i1 @test60_addrspacecast(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i64 [[I:%.*]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i64 [[GEP1_IDX]], [[J:%.*]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
define i1 @test60_addrspacecast_smaller(i8* %foo, i16 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast_smaller(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 [[I:%.*]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[I:%.*]], 2
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i16
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
define i64 @test24b(i8* %P, i64 %A){
; CHECK-LABEL: @test24b(
-; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw i64 [[A:%.*]], 1
+; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i64 [[A:%.*]], 1
; CHECK-NEXT: ret i64 [[B_IDX]]
;
%B = getelementptr inbounds [42 x i16], [42 x i16]* @Arr, i64 0, i64 %A
define i64 @test25(i8* %P, i64 %A){
; CHECK-LABEL: @test25(
-; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw i64 [[A:%.*]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[B_IDX]], -84
-; CHECK-NEXT: ret i64 [[TMP1]]
+; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i64 [[A:%.*]], 1
+; CHECK-NEXT: [[DIFF_NEG:%.*]] = add i64 [[B_IDX]], -84
+; CHECK-NEXT: ret i64 [[DIFF_NEG]]
;
%B = getelementptr inbounds [42 x i16], [42 x i16]* @Arr, i64 0, i64 %A
%C = ptrtoint i16* %B to i64
define i16 @test25_as1(i8 addrspace(1)* %P, i64 %A) {
; CHECK-LABEL: @test25_as1(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i16
-; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw i16 [[TMP1]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = add i16 [[B_IDX]], -84
-; CHECK-NEXT: ret i16 [[TMP2]]
+; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i16 [[TMP1]], 1
+; CHECK-NEXT: [[DIFF_NEG:%.*]] = add i16 [[B_IDX]], -84
+; CHECK-NEXT: ret i16 [[DIFF_NEG]]
;
%B = getelementptr inbounds [42 x i16], [42 x i16] addrspace(1)* @Arr_as1, i64 0, i64 %A
%C = ptrtoint i16 addrspace(1)* %B to i16
define i64 @test30(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test30(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i64 [[I:%.*]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[GEP1_IDX]], [[J:%.*]]
; CHECK-NEXT: ret i64 [[TMP1]]
;
define i16 @test30_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
; CHECK-LABEL: @test30_as1(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 [[I:%.*]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[I:%.*]], 2
; CHECK-NEXT: [[TMP1:%.*]] = sub i16 [[GEP1_IDX]], [[J:%.*]]
; CHECK-NEXT: ret i16 [[TMP1]]
;