From: Philip Reames
Date: Tue, 19 Feb 2019 23:49:38 +0000 (+0000)
Subject: [GVN] Fix a crash bug w/non-integral pointers and memtransfers
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=3eb92b56d66c92b702ae90eb19500af84f065b98;p=llvm

[GVN] Fix a crash bug w/non-integral pointers and memtransfers

The problem is very similar to the one fixed for memsets in r354399: we try
to coerce a value to a non-integral pointer type, and then crash while trying
to do so. Since we shouldn't be doing such coercions to start with, the fix
is easy. From inspection, I see two other cases which look to be similar; I
will follow up with more test cases and fixes if confirmed.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@354403 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Transforms/Utils/VNCoercion.cpp b/lib/Transforms/Utils/VNCoercion.cpp
index 8e21472f2c1..1aedce75ca6 100644
--- a/lib/Transforms/Utils/VNCoercion.cpp
+++ b/lib/Transforms/Utils/VNCoercion.cpp
@@ -300,6 +300,11 @@ int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
   if (Offset == -1)
     return Offset;
 
+  // Don't coerce non-integral pointers to integers or vice versa.
+  if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
+    // TODO: Can allow nullptrs from constant zeros
+    return -1;
+
   unsigned AS = Src->getType()->getPointerAddressSpace();
   // Otherwise, see if we can constant fold a load from the constant with the
   // offset applied as appropriate.
diff --git a/test/Transforms/GVN/non-integral-pointers.ll b/test/Transforms/GVN/non-integral-pointers.ll
index c5a281e1423..fc7f74d3796 100644
--- a/test/Transforms/GVN/non-integral-pointers.ll
+++ b/test/Transforms/GVN/non-integral-pointers.ll
@@ -154,3 +154,62 @@ define i8 addrspace(4)* @forward_store_zero(i8 addrspace(4)* addrspace(4)* %loc)
   %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
   ret i8 addrspace(4)* %ref
 }
+
+@NonZeroConstant = constant <4 x i64> <i64 3, i64 3, i64 3, i64 3>
+@ZeroConstant = constant <4 x i64> zeroinitializer
+
+
+; Can't forward as the load might be dead.  (Pretend we wrote out the alwaysfalse idiom above.)
+define i8 addrspace(4)* @neg_forward_memcopy(i8 addrspace(4)* addrspace(4)* %loc) {
+; CHECK-LABEL: @neg_forward_memcopy(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
+; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @NonZeroConstant to i8*), i64 8, i1 false)
+; CHECK-NEXT:    [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]]
+; CHECK-NEXT:    ret i8 addrspace(4)* [[REF]]
+;
+entry:
+  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)*
+  %src.bc = bitcast <4 x i64>* @NonZeroConstant to i8*
+  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 8, i1 false)
+  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
+  ret i8 addrspace(4)* %ref
+}
+
+define <1 x i8 addrspace(4)*> @neg_forward_memcpy_vload(<1 x i8 addrspace(4)*> addrspace(4)* %loc) {
+; CHECK-LABEL: @neg_forward_memcpy_vload(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast <1 x i8 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
+; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @NonZeroConstant to i8*), i64 8, i1 false)
+; CHECK-NEXT:    [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]]
+; CHECK-NEXT:    ret <1 x i8 addrspace(4)*> [[REF]]
+;
+entry:
+  %loc.bc = bitcast <1 x i8 addrspace(4)*> addrspace(4)* %loc to i8 addrspace(4)*
+  %src.bc = bitcast <4 x i64>* @NonZeroConstant to i8*
+  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 8, i1 false)
+  %ref = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* %loc
+  ret <1 x i8 addrspace(4)*> %ref
+}
+
+
+; Can forward since we can do so w/o breaking types
+; TODO: missed optimization
+define i8 addrspace(4)* @forward_memcpy_zero(i8 addrspace(4)* addrspace(4)* %loc) {
+; CHECK-LABEL: @forward_memcpy_zero(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
+; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @ZeroConstant to i8*), i64 8, i1 false)
+; CHECK-NEXT:    [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]]
+; CHECK-NEXT:    ret i8 addrspace(4)* [[REF]]
+;
+entry:
+  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)*
+  %src.bc = bitcast <4 x i64>* @ZeroConstant to i8*
+  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 8, i1 false)
+  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
+  ret i8 addrspace(4)* %ref
+}
+
+
+declare void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* nocapture, i8* nocapture, i64, i1) nounwind
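
For context (not part of the patch itself): the new bail-out in
analyzeLoadFromClobberingMemInst only fires when the loaded type is a pointer
into an address space that the module's datalayout declares non-integral. A
minimal sketch of such a declaration, assuming address space 4 is the
non-integral one as in the test above (the exact datalayout string here is
illustrative, not copied from non-integral-pointers.ll):

; "ni:4" marks address space 4 as non-integral: the bit pattern of such
; pointers is not stable, so GVN must not materialize one by reinterpreting
; the integer bytes copied from @NonZeroConstant, which is exactly the
; coercion the guard above now refuses by returning -1 (no forwarding).
target datalayout = "e-p:64:64:64-i64:64:64-ni:4"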