// str r0, [r5], #4
//
// note that we saved 2 registers here almost "for free".
+//
+// However, merging globals can have tradeoffs:
+// - it confuses debuggers, tools, and users
+// - it makes linker optimizations less useful (order files, LOHs, ...)
+// - it forces usage of indexed addressing (which isn't necessarily "free")
+// - it can increase register pressure when the uses are disparate enough.
+//
+// We use heuristics to discover the best global grouping we can (cf cl::opts).
// ===---------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "global-merge"
cl::desc("Enable the global merge pass"),
cl::init(true));
+static cl::opt<bool> GlobalMergeGroupByUse(
+ "global-merge-group-by-use", cl::Hidden,
+ cl::desc("Improve global merge pass to look at uses"), cl::init(true));
+
+static cl::opt<bool> GlobalMergeIgnoreSingleUse(
+ "global-merge-ignore-single-use", cl::Hidden,
+ cl::desc("Improve global merge pass to ignore globals only used alone"),
+ cl::init(true));
+
static cl::opt<bool>
EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
cl::desc("Enable global merge pass on constants"),
bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst, unsigned AddrSpace) const;
+ /// \brief Merge everything in \p Globals for which the corresponding bit
+ /// in \p GlobalSet is set.
+ bool doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
+ const BitVector &GlobalSet, Module &M, bool isConst,
+ unsigned AddrSpace) const;
/// \brief Check if the given variable has been identified as must keep
/// \pre setMustKeepGlobalVariables must have been called on the Module that
return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
});
+ // If we want to just blindly group all globals together, do so.
+ if (!GlobalMergeGroupByUse) {
+ BitVector AllGlobals(Globals.size());
+ AllGlobals.set();
+ return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
+ }
+
+ // If we want to be smarter, look at all uses of each global, to try to
+ // discover all sets of globals used together, and how many times each of
+  // these sets occurred.
+ //
+ // Keep this reasonably efficient, by having an append-only list of all sets
+ // discovered so far (UsedGlobalSet), and mapping each "together-ness" unit of
+ // code (currently, a Function) to the set of globals seen so far that are
+ // used together in that unit (GlobalUsesByFunction).
+ //
+  // When we look at the Nth global, we know that any new set is either:
+ // - the singleton set {N}, containing this global only, or
+ // - the union of {N} and a previously-discovered set, containing some
+ // combination of the previous N-1 globals.
+ // Using that knowledge, when looking at the Nth global, we can keep:
+ // - a reference to the singleton set {N} (CurGVOnlySetIdx)
+ // - a list mapping each previous set to its union with {N} (EncounteredUGS),
+ // if it actually occurs.
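+  //
+  // As an illustrative (hypothetical) example: take globals a, b, c, with f1
+  // using a and b, and f2 using b and c. Scanning a (index 0) maps f1 to the
+  // singleton {a}. Scanning b (index 1) expands f1's set to {a,b}, and maps
+  // f2 to the singleton {b}. Scanning c (index 2) expands f2's set to {b,c}.
+  // We end up having discovered {a}, {a,b}, {b}, and {b,c}, of which only
+  // {a,b} and {b,c} retain a non-zero usage count.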
+
+ // We keep track of the sets of globals used together "close enough".
+ struct UsedGlobalSet {
+ UsedGlobalSet(size_t Size) : Globals(Size), UsageCount(1) {}
+ BitVector Globals;
+ unsigned UsageCount;
+ };
+
+ // Each set is unique in UsedGlobalSets.
+ std::vector<UsedGlobalSet> UsedGlobalSets;
+
+ // Avoid repeating the create-global-set pattern.
+ auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
+ UsedGlobalSets.emplace_back(Globals.size());
+ return UsedGlobalSets.back();
+ };
+
+ // The first set is the empty set.
+ CreateGlobalSet().UsageCount = 0;
+
+ // We define "close enough" to be "in the same function".
+ // FIXME: Grouping uses by function is way too aggressive, so we should have
+ // a better metric for distance between uses.
+ // The obvious alternative would be to group by BasicBlock, but that's in
+  // turn too conservative.
+ // Anything in between wouldn't be trivial to compute, so just stick with
+ // per-function grouping.
+
+ // The value type is an index into UsedGlobalSets.
+ // The default (0) conveniently points to the empty set.
+ DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;
+
+ // Now, look at each merge-eligible global in turn.
+
+ // Keep track of the sets we already encountered to which we added the
+ // current global.
+ // Each element matches the same-index element in UsedGlobalSets.
+ // This lets us efficiently tell whether a set has already been expanded to
+ // include the current global.
+ std::vector<size_t> EncounteredUGS;
+
+ for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
+ GlobalVariable *GV = Globals[GI];
+
+ // Reset the encountered sets for this global...
+ std::fill(EncounteredUGS.begin(), EncounteredUGS.end(), 0);
+ // ...and grow it in case we created new sets for the previous global.
+ EncounteredUGS.resize(UsedGlobalSets.size());
+
+ // We might need to create a set that only consists of the current global.
+ // Keep track of its index into UsedGlobalSets.
+ size_t CurGVOnlySetIdx = 0;
+
+ // For each global, look at all its Uses.
+ for (auto &U : GV->uses()) {
+      // The user of this Use might be a ConstantExpr. We're interested in
+      // Instruction users, so look through ConstantExpr...
+ Use *UI, *UE;
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
+ UI = &*CE->use_begin();
+ UE = nullptr;
+ } else if (isa<Instruction>(U.getUser())) {
+ UI = &U;
+ UE = UI->getNext();
+ } else {
+ continue;
+ }
+
+ // ...to iterate on all the instruction users of the global.
+ // Note that we iterate on Uses and not on Users to be able to getNext().
+ for (; UI != UE; UI = UI->getNext()) {
+ Instruction *I = dyn_cast<Instruction>(UI->getUser());
+ if (!I)
+ continue;
+
+ Function *ParentFn = I->getParent()->getParent();
+ size_t UGSIdx = GlobalUsesByFunction[ParentFn];
+
+        // If this is the first global this function uses, map the function to
+        // the set consisting of this global only.
+ if (!UGSIdx) {
+ // If that set doesn't exist yet, create it.
+ if (!CurGVOnlySetIdx) {
+ CurGVOnlySetIdx = UsedGlobalSets.size();
+ CreateGlobalSet().Globals.set(GI);
+ } else {
+ ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
+ }
+
+ GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
+ continue;
+ }
+
+        // If the set already contains this global, just increment the counter.
+ if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
+ ++UsedGlobalSets[UGSIdx].UsageCount;
+ continue;
+ }
+
+ // If not, the previous set wasn't actually used in this function.
+ --UsedGlobalSets[UGSIdx].UsageCount;
+
+ // If we already expanded the previous set to include this global, just
+ // reuse that expanded set.
+ if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
+ ++UsedGlobalSets[ExpandedIdx].UsageCount;
+ GlobalUsesByFunction[ParentFn] = ExpandedIdx;
+ continue;
+ }
+
+ // If not, create a new set consisting of the union of the previous set
+ // and this global. Mark it as encountered, so we can reuse it later.
+ GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
+ UsedGlobalSets.size();
+
+ UsedGlobalSet &NewUGS = CreateGlobalSet();
+ NewUGS.Globals.set(GI);
+ NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
+ }
+ }
+ }
+
+  // We have now discovered the sets of globals used together. We accumulated
+  // the number of times we encountered each set (i.e., the number of functions
+  // that use that exact set of globals).
+ //
+ // Multiply that by the size of the set to give us a crude profitability
+ // metric.
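+  //
+  // For instance (hypothetical numbers): a set of 3 globals encountered
+  // together in 2 functions scores 3 * 2 = 6, and is preferred over a set of
+  // 2 globals encountered in 2 functions, which only scores 2 * 2 = 4.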
+ std::sort(UsedGlobalSets.begin(), UsedGlobalSets.end(),
+ [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
+ return UGS1.Globals.count() * UGS1.UsageCount <
+ UGS2.Globals.count() * UGS2.UsageCount;
+ });
+
+  // We can choose to merge all globals together, but ignore globals never used
+  // with another global. This catches the obviously non-profitable case of a
+  // global only ever used by itself, but is otherwise as aggressive as
+  // possible.
+ if (GlobalMergeIgnoreSingleUse) {
+ BitVector AllGlobals(Globals.size());
+ for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
+ const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
+ if (UGS.UsageCount == 0)
+ continue;
+ if (UGS.Globals.count() > 1)
+ AllGlobals |= UGS.Globals;
+ }
+ return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
+ }
+
+ // Starting from the sets with the best (=biggest) profitability, find a
+ // good combination.
+ // The ideal (and expensive) solution can only be found by trying all
+ // combinations, looking for the one with the best profitability.
+ // Don't be smart about it, and just pick the first compatible combination,
+ // starting with the sets with the best profitability.
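+  // For example (hypothetical scores): given candidate sets {a,b,c} scoring 9
+  // and {a,b} scoring 8, we greedily pick {a,b,c}; {a,b} is then skipped,
+  // because it overlaps globals that have already been picked.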
+ BitVector PickedGlobals(Globals.size());
+ bool Changed = false;
+
+ for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
+ const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
+ if (UGS.UsageCount == 0)
+ continue;
+ if (PickedGlobals.anyCommon(UGS.Globals))
+ continue;
+ PickedGlobals |= UGS.Globals;
+ // If the set only contains one global, there's no point in merging.
+ // Ignore the global for inclusion in other sets though, so keep it in
+ // PickedGlobals.
+ if (UGS.Globals.count() < 2)
+ continue;
+ Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
+ }
+
+ return Changed;
+}
+
+bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
+ const BitVector &GlobalSet, Module &M, bool isConst,
+ unsigned AddrSpace) const {
+
Type *Int32Ty = Type::getInt32Ty(M.getContext());
assert(Globals.size() > 1);
- // FIXME: This simple solution merges globals all together as maximum as
- // possible. However, with this solution it would be hard to remove dead
- // global symbols at link-time. An alternative solution could be checking
- // global symbols references function by function, and make the symbols
- // being referred in the same function merged and we would probably need
- // to introduce heuristic algorithm to solve the merge conflict from
- // different functions.
- for (size_t i = 0, e = Globals.size(); i != e; ) {
- size_t j = 0;
+ DEBUG(dbgs() << " Trying to merge set, starts with #"
+ << GlobalSet.find_first() << "\n");
+
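+  // Sketch of the iteration below, on a hypothetical GlobalSet {0, 2, 3}:
+  // find_first() yields 0, then find_next() yields 2 and 3, so exactly
+  // Globals[0], Globals[2], and Globals[3] get merged, at struct field
+  // indices (idx) 0, 1, and 2.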
+ ssize_t i = GlobalSet.find_first();
+ while (i != -1) {
+ ssize_t j = 0;
uint64_t MergedSize = 0;
std::vector<Type*> Tys;
std::vector<Constant*> Inits;
bool HasExternal = false;
GlobalVariable *TheFirstExternal = 0;
- for (j = i; j != e; ++j) {
+ for (j = i; j != -1; j = GlobalSet.find_next(j)) {
Type *Ty = Globals[j]->getType()->getElementType();
MergedSize += DL->getTypeAllocSize(Ty);
if (MergedSize > MaxOffset) {
: "_MergedGlobals",
nullptr, GlobalVariable::NotThreadLocal, AddrSpace);
- for (size_t k = i; k < j; ++k) {
+ for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k)) {
GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
std::string Name = Globals[k]->getName();
Constant *Idx[2] = {
ConstantInt::get(Int32Ty, 0),
- ConstantInt::get(Int32Ty, k-i)
+ ConstantInt::get(Int32Ty, idx++)
};
Constant *GEP =
ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
--- /dev/null
+; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false -aarch64-collect-loh=false \
+; RUN: -aarch64-global-merge -global-merge-group-by-use -global-merge-ignore-single-use=false \
+; RUN: %s -o - | FileCheck %s
+
+; We assume that globals of the same size aren't reordered inside a set.
+
+; Check that we create two MergedGlobal instances for two functions using
+; disjoint sets of globals.
+
+@m1 = internal global i32 0, align 4
+@n1 = internal global i32 0, align 4
+
+; CHECK-LABEL: f1:
+define void @f1(i32 %a1, i32 %a2) #0 {
+; CHECK-NEXT: adrp x8, [[SET1:__MergedGlobals[0-9]*]]@PAGE
+; CHECK-NEXT: add x8, x8, [[SET1]]@PAGEOFF
+; CHECK-NEXT: stp w0, w1, [x8]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m1, align 4
+ store i32 %a2, i32* @n1, align 4
+ ret void
+}
+
+@m2 = internal global i32 0, align 4
+@n2 = internal global i32 0, align 4
+@o2 = internal global i32 0, align 4
+
+; CHECK-LABEL: f2:
+define void @f2(i32 %a1, i32 %a2, i32 %a3) #0 {
+; CHECK-NEXT: adrp x8, [[SET2:__MergedGlobals[0-9]*]]@PAGE
+; CHECK-NEXT: add x8, x8, [[SET2]]@PAGEOFF
+; CHECK-NEXT: stp w0, w1, [x8]
+; CHECK-NEXT: str w2, [x8, #8]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m2, align 4
+ store i32 %a2, i32* @n2, align 4
+ store i32 %a3, i32* @o2, align 4
+ ret void
+}
+
+; Sanity-check (don't worry about cost models) that we pick the biggest subset
+; of all globals used "together", directly or indirectly. Here, that means
+; merging n3, m4, and n4 together, but ignoring m3.
+
+@m3 = internal global i32 0, align 4
+@n3 = internal global i32 0, align 4
+
+; CHECK-LABEL: f3:
+define void @f3(i32 %a1, i32 %a2) #0 {
+; CHECK-NEXT: adrp x8, _m3@PAGE
+; CHECK-NEXT: adrp x9, [[SET3:__MergedGlobals[0-9]*]]@PAGE
+; CHECK-NEXT: str w0, [x8, _m3@PAGEOFF]
+; CHECK-NEXT: str w1, [x9, [[SET3]]@PAGEOFF]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m3, align 4
+ store i32 %a2, i32* @n3, align 4
+ ret void
+}
+
+@m4 = internal global i32 0, align 4
+@n4 = internal global i32 0, align 4
+
+; CHECK-LABEL: f4:
+define void @f4(i32 %a1, i32 %a2, i32 %a3) #0 {
+; CHECK-NEXT: adrp x8, [[SET3]]@PAGE
+; CHECK-NEXT: add x8, x8, [[SET3]]@PAGEOFF
+; CHECK-NEXT: stp w0, w1, [x8, #4]
+; CHECK-NEXT: str w2, [x8]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m4, align 4
+ store i32 %a2, i32* @n4, align 4
+ store i32 %a3, i32* @n3, align 4
+ ret void
+}
+
+; Finally, check that we don't do anything with one-element global sets.
+@o5 = internal global i32 0, align 4
+
+; CHECK-LABEL: f5:
+define void @f5(i32 %a1) #0 {
+; CHECK-NEXT: adrp x8, _o5@PAGE
+; CHECK-NEXT: str w0, [x8, _o5@PAGEOFF]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @o5, align 4
+ ret void
+}
+
+; CHECK-DAG: .zerofill __DATA,__bss,_o5,4,2
+
+; CHECK-DAG: .zerofill __DATA,__bss,[[SET1]],8,3
+; CHECK-DAG: .zerofill __DATA,__bss,[[SET2]],12,3
+; CHECK-DAG: .zerofill __DATA,__bss,[[SET3]],12,3
+
+attributes #0 = { nounwind }
--- /dev/null
+; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false -aarch64-collect-loh=false \
+; RUN: -aarch64-global-merge -global-merge-group-by-use -global-merge-ignore-single-use \
+; RUN: %s -o - | FileCheck %s
+
+; We assume that globals of the same size aren't reordered inside a set.
+
+@m1 = internal global i32 0, align 4
+@n1 = internal global i32 0, align 4
+@o1 = internal global i32 0, align 4
+
+; CHECK-LABEL: f1:
+define void @f1(i32 %a1, i32 %a2) #0 {
+; CHECK-NEXT: adrp x8, [[SET:__MergedGlobals]]@PAGE
+; CHECK-NEXT: add x8, x8, [[SET]]@PAGEOFF
+; CHECK-NEXT: stp w0, w1, [x8]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m1, align 4
+ store i32 %a2, i32* @n1, align 4
+ ret void
+}
+
+@m2 = internal global i32 0, align 4
+@n2 = internal global i32 0, align 4
+
+; CHECK-LABEL: f2:
+define void @f2(i32 %a1, i32 %a2, i32 %a3) #0 {
+; CHECK-NEXT: adrp x8, [[SET]]@PAGE
+; CHECK-NEXT: add x8, x8, [[SET]]@PAGEOFF
+; CHECK-NEXT: stp w0, w1, [x8]
+; CHECK-NEXT: str w2, [x8, #8]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m1, align 4
+ store i32 %a2, i32* @n1, align 4
+ store i32 %a3, i32* @o1, align 4
+ ret void
+}
+
+; CHECK-LABEL: f3:
+define void @f3(i32 %a1, i32 %a2) #0 {
+; CHECK-NEXT: adrp x8, [[SET]]@PAGE
+; CHECK-NEXT: add x8, x8, [[SET]]@PAGEOFF
+; CHECK-NEXT: stp w0, w1, [x8, #12]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m2, align 4
+ store i32 %a2, i32* @n2, align 4
+ ret void
+}
+
+@o2 = internal global i32 0, align 4
+
+; CHECK-LABEL: f4:
+define void @f4(i32 %a1) #0 {
+; CHECK-NEXT: adrp x8, _o2@PAGE
+; CHECK-NEXT: str w0, [x8, _o2@PAGEOFF]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @o2, align 4
+ ret void
+}
+
+; CHECK-DAG: .zerofill __DATA,__bss,[[SET]],20,4
+; CHECK-DAG: .zerofill __DATA,__bss,_o2,4,2
+
+attributes #0 = { nounwind }