From af5e339c6b385fb8758c0659a18219e45a1eff9e Mon Sep 17 00:00:00 2001 From: Philip Reames Date: Tue, 19 Feb 2019 23:07:15 +0000 Subject: [PATCH] [GVN] Fix a crash bug around non-integral pointers If we encountered a location where we tried to forward the value of a memset to a load of a non-integral pointer, we crashed. Such a forward is not legal in general, but we can forward null pointers. Tests for both cases are included. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@354399 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Transforms/Utils/VNCoercion.cpp | 19 +++++- test/Transforms/GVN/non-integral-pointers.ll | 68 ++++++++++++++++++++ 2 files changed, 84 insertions(+), 3 deletions(-) diff --git a/lib/Transforms/Utils/VNCoercion.cpp b/lib/Transforms/Utils/VNCoercion.cpp index 19e5db4eee2..74572eb2d46 100644 --- a/lib/Transforms/Utils/VNCoercion.cpp +++ b/lib/Transforms/Utils/VNCoercion.cpp @@ -32,9 +32,15 @@ bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, // Don't coerce non-integral pointers to integers or vice versa. if (DL.isNonIntegralPointerType(StoredVal->getType()) != - DL.isNonIntegralPointerType(LoadTy)) + DL.isNonIntegralPointerType(LoadTy)) { + // As a special case, allow coercion of memset used to initialize + // an array w/null. Despite non-integral pointers not generally having a + // specific bit pattern, we do assume null is zero. + if (auto *CI = dyn_cast<ConstantInt>(StoredVal)) + return CI->isZero(); return false; - + } + return true; } @@ -264,9 +270,16 @@ int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, // If this is memset, we just need to see if the offset is valid in the size // of the memset..
- if (MI->getIntrinsicID() == Intrinsic::memset) + if (MI->getIntrinsicID() == Intrinsic::memset) { + Value *StoredVal = cast<MemSetInst>(MI)->getValue(); + if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) { + auto *CI = dyn_cast<ConstantInt>(StoredVal); + if (!CI || !CI->isZero()) + return -1; + } return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(), MemSizeInBits, DL); + } // If we have a memcpy/memmove, the only case we can handle is if this is a // copy from constant memory. In that case, we can read directly from the diff --git a/test/Transforms/GVN/non-integral-pointers.ll b/test/Transforms/GVN/non-integral-pointers.ll index be83034dfe0..c222a8954bd 100644 --- a/test/Transforms/GVN/non-integral-pointers.ll +++ b/test/Transforms/GVN/non-integral-pointers.ll @@ -55,3 +55,71 @@ define i64 @f1(i1 %alwaysFalse, i8 addrspace(4)* %val, i8 addrspace(4)** %loc) { alwaysTaken: ret i64 42 } + +;; Note: For terseness, we stop using the %alwaysfalse trick for the +;; tests below and just exercise the bits of forwarding logic directly. + +declare void @llvm.memset.p4i8.i64(i8 addrspace(4)* nocapture, i8, i64, i1) nounwind + +; Can't forward as the load might be dead. (Pretend we wrote out the alwaysfalse idiom above.)
+define i8 addrspace(4)* @neg_forward_memset(i8 addrspace(4)* addrspace(4)* %loc) { +; CHECK-LABEL: @neg_forward_memset( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)* +; CHECK-NEXT: call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8 7, i64 8, i1 false) +; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]] +; CHECK-NEXT: ret i8 addrspace(4)* [[REF]] +; + entry: + %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)* + call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8 7, i64 8, i1 false) + %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc + ret i8 addrspace(4)* %ref +} + +; Can forward since we can do so w/o breaking types +define i8 addrspace(4)* @forward_memset_zero(i8 addrspace(4)* addrspace(4)* %loc) { +; CHECK-LABEL: @forward_memset_zero( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)* +; CHECK-NEXT: call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8 0, i64 8, i1 false) +; CHECK-NEXT: ret i8 addrspace(4)* null +; + entry: + %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)* + call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8 0, i64 8, i1 false) + %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc + ret i8 addrspace(4)* %ref +} + +; Can't forward as the load might be dead. (Pretend we wrote out the alwaysfalse idiom above.) 
+define i8 addrspace(4)* @neg_forward_store(i8 addrspace(4)* addrspace(4)* %loc) { +; CHECK-LABEL: @neg_forward_store( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i64 addrspace(4)* +; CHECK-NEXT: store i64 5, i64 addrspace(4)* [[LOC_BC]] +; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]] +; CHECK-NEXT: ret i8 addrspace(4)* [[REF]] +; + entry: + %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i64 addrspace(4)* + store i64 5, i64 addrspace(4)* %loc.bc + %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc + ret i8 addrspace(4)* %ref +} + +; TODO: missed optimization, we can forward the null. +define i8 addrspace(4)* @forward_store_zero(i8 addrspace(4)* addrspace(4)* %loc) { +; CHECK-LABEL: @forward_store_zero( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i64 addrspace(4)* +; CHECK-NEXT: store i64 5, i64 addrspace(4)* [[LOC_BC]] +; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]] +; CHECK-NEXT: ret i8 addrspace(4)* [[REF]] +; + entry: + %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i64 addrspace(4)* + store i64 5, i64 addrspace(4)* %loc.bc + %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc + ret i8 addrspace(4)* %ref +} -- 2.40.0