From 099f7f2187bd812012f276f17f78c5eaa6a00b93 Mon Sep 17 00:00:00 2001
From: David Majnemer
Date: Sun, 19 Jun 2016 06:14:56 +0000
Subject: [PATCH] [LoadCombine] Combine Loads formed from GEPs with negative
 indexes

Change the underlying offset and comparisons to use int64_t instead of
uint64_t. Previously a negative constant GEP offset was zero-extended
into a large unsigned value, so loads addressed through a GEP with a
negative index were never recognized as adjacent and could not be
combined.

Patch by River Riddle!

Differential Revision: http://reviews.llvm.org/D21499

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@273105 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Transforms/Scalar/LoadCombine.cpp               | 17 ++++++++++-------
 .../LoadCombine/load-combine-negativegep.ll         | 19 +++++++++++++++++++
 2 files changed, 29 insertions(+), 7 deletions(-)
 create mode 100644 test/Transforms/LoadCombine/load-combine-negativegep.ll

diff --git a/lib/Transforms/Scalar/LoadCombine.cpp b/lib/Transforms/Scalar/LoadCombine.cpp
index 4fe3b0257d5..9e457a19180 100644
--- a/lib/Transforms/Scalar/LoadCombine.cpp
+++ b/lib/Transforms/Scalar/LoadCombine.cpp
@@ -40,7 +40,7 @@ STATISTIC(NumLoadsCombined, "Number of loads combined");
 namespace {
 struct PointerOffsetPair {
   Value *Pointer;
-  uint64_t Offset;
+  int64_t Offset;
 };
 
 struct LoadPOPPair {
@@ -102,7 +102,7 @@ PointerOffsetPair LoadCombine::getPointerOffsetPair(LoadInst &LI) {
       unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
       APInt Offset(BitWidth, 0);
       if (GEP->accumulateConstantOffset(DL, Offset))
-        POP.Offset += Offset.getZExtValue();
+        POP.Offset += Offset.getSExtValue();
       else
         // Can't handle GEPs with variable indices.
         return POP;
@@ -138,28 +138,31 @@ bool LoadCombine::aggregateLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
   LoadInst *BaseLoad = nullptr;
   SmallVector<LoadPOPPair, 8> AggregateLoads;
   bool Combined = false;
-  uint64_t PrevOffset = -1ull;
+  bool ValidPrevOffset = false;
+  int64_t PrevOffset = 0;
   uint64_t PrevSize = 0;
   for (auto &L : Loads) {
-    if (PrevOffset == -1ull) {
+    if (ValidPrevOffset == false) {
       BaseLoad = L.Load;
       PrevOffset = L.POP.Offset;
       PrevSize = L.Load->getModule()->getDataLayout().getTypeStoreSize(
           L.Load->getType());
       AggregateLoads.push_back(L);
+      ValidPrevOffset = true;
       continue;
     }
     if (L.Load->getAlignment() > BaseLoad->getAlignment())
       continue;
-    if (L.POP.Offset > PrevOffset + PrevSize) {
+    int64_t PrevEnd = PrevOffset + PrevSize;
+    if (L.POP.Offset > PrevEnd) {
       // No other load will be combinable
       if (combineLoads(AggregateLoads))
         Combined = true;
       AggregateLoads.clear();
-      PrevOffset = -1;
+      ValidPrevOffset = false;
       continue;
     }
-    if (L.POP.Offset != PrevOffset + PrevSize)
+    if (L.POP.Offset != PrevEnd)
       // This load is offset less than the size of the last load.
       // FIXME: We may want to handle this case.
       continue;
diff --git a/test/Transforms/LoadCombine/load-combine-negativegep.ll b/test/Transforms/LoadCombine/load-combine-negativegep.ll
new file mode 100644
index 00000000000..7c5700b4295
--- /dev/null
+++ b/test/Transforms/LoadCombine/load-combine-negativegep.ll
@@ -0,0 +1,19 @@
+; RUN: opt -basicaa -load-combine -S < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @Load_NegGep(i32* %i){
+  %1 = getelementptr inbounds i32, i32* %i, i64 -1
+  %2 = load i32, i32* %1, align 4
+  %3 = load i32, i32* %i, align 4
+  %4 = add nsw i32 %3, %2
+  ret i32 %4
+; CHECK-LABEL: @Load_NegGep(
+; CHECK: %[[load:.*]] = load i64
+; CHECK: %[[combine_extract_lo:.*]] = trunc i64 %[[load]] to i32
+; CHECK: %[[combine_extract_shift:.*]] = lshr i64 %[[load]], 32
+; CHECK: %[[combine_extract_hi:.*]] = trunc i64 %[[combine_extract_shift]] to i32
+; CHECK: %[[add:.*]] = add nsw i32 %[[combine_extract_hi]], %[[combine_extract_lo]]
+}
+
+
-- 
2.50.1