/// the one reasoning about the "captured" state for the argument or the one
/// reasoning on the memory access behavior of the function as a whole.
template <typename AAType>
- const AAType *getAAFor(const AbstractAttribute &QueryingAA,
+ const AAType &getAAFor(const AbstractAttribute &QueryingAA,
const IRPosition &IRP) {
static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
"Cannot query an attribute with a type not derived from "
"'AbstractAttribute'!");
- // Let's try an equivalent position if available, see
- // SubsumingPositionIterator for more information.
- for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
- // Lookup the abstract attribute of type AAType. If found, return it after
- // registering a dependence of QueryingAA on the one returned attribute.
- const auto &KindToAbstractAttributeMap =
- AAMap.lookup(const_cast<IRPosition &>(EquivIRP));
- if (AAType *AA = static_cast<AAType *>(
- KindToAbstractAttributeMap.lookup(&AAType::ID))) {
- // Do not return an attribute with an invalid state. This minimizes
- // checks at the calls sites and allows the fallback below to kick in.
- if (AA->getState().isValidState()) {
- QueryMap[AA].insert(const_cast<AbstractAttribute *>(&QueryingAA));
- return AA;
- }
- }
+ // Lookup the abstract attribute of type AAType. If found, return it after
+ // registering a dependence of QueryingAA on the one returned attribute.
+ const auto &KindToAbstractAttributeMap =
+ AAMap.lookup(const_cast<IRPosition &>(IRP));
+ if (AAType *AA = static_cast<AAType *>(
+ KindToAbstractAttributeMap.lookup(&AAType::ID))) {
 + // Do not register a dependence on an attribute with an invalid state.
+ if (AA->getState().isValidState())
+ QueryMap[AA].insert(const_cast<AbstractAttribute *>(&QueryingAA));
+ return *AA;
}
- // No matching attribute found
- return nullptr;
+ // No matching attribute found, create one.
+ auto &AA = AAType::createForPosition(IRP, *this);
+ registerAA(AA);
+ if (AA.getState().isValidState())
+ QueryMap[&AA].insert(const_cast<AbstractAttribute *>(&QueryingAA));
+ return AA;
}
/// Introduce a new abstract attribute into the fixpoint analysis.
virtual size_t getNumReturnValues() const = 0;
virtual const SmallPtrSetImpl<CallBase *> &getUnresolvedCalls() const = 0;
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAReturnedValues &createForPosition(const IRPosition &IRP,
+ Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// Returns true if nounwind is known.
bool isKnownNoUnwind() const { return getKnown(); }
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoUnwind &createForPosition(const IRPosition &IRP, Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// Returns true if "nosync" is known.
bool isKnownNoSync() const { return getKnown(); }
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoSync &createForPosition(const IRPosition &IRP, Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// Return true if we know that underlying value is nonnull.
bool isKnownNonNull() const { return getKnown(); }
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANonNull &createForPosition(const IRPosition &IRP, Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// Return true if "norecurse" is known.
bool isKnownNoRecurse() const { return getKnown(); }
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoRecurse &createForPosition(const IRPosition &IRP, Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// Return true if "willreturn" is known.
bool isKnownWillReturn() const { return getKnown(); }
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAWillReturn &createForPosition(const IRPosition &IRP, Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// Return true if we know that underlying value is noalias.
bool isKnownNoAlias() const { return getKnown(); }
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoAlias &createForPosition(const IRPosition &IRP, Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// Return true if "nofree" is known.
bool isKnownNoFree() const { return getKnown(); }
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoFree &createForPosition(const IRPosition &IRP, Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// Return true if the underlying object is known to never return.
bool isKnownNoReturn() const { return getKnown(); }
+ /// Create an abstract attribute view for the position \p IRP.
+ static AANoReturn &createForPosition(const IRPosition &IRP, Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// of instructions is live.
template <typename T> bool isLiveInstSet(T begin, T end) const {
for (const auto &I : llvm::make_range(begin, end)) {
- assert(I->getFunction() == getIRPosition().getAnchorScope() &&
+ assert(I->getFunction() == getIRPosition().getAssociatedFunction() &&
"Instruction must be in the same anchor scope function.");
if (!isAssumedDead(I))
const IRPosition &getIRPosition() const { return *this; }
///}
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAIsDead &createForPosition(const IRPosition &IRP, Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// Return known dereferenceable bytes.
virtual uint32_t getKnownDereferenceableBytes() const = 0;
+ /// Create an abstract attribute view for the position \p IRP.
+ static AADereferenceable &createForPosition(const IRPosition &IRP,
+ Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
/// Return known alignemnt.
unsigned getKnownAlign() const { return getKnown(); }
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAAlign &createForPosition(const IRPosition &IRP, Attributor &A);
+
/// Unique ID (due to the unique address)
static const char ID;
};
const AAIsDead *LivenessAA = nullptr;
if (IRP.getAnchorScope())
- LivenessAA = A.getAAFor<AAIsDead>(
+ LivenessAA = &A.getAAFor<AAIsDead>(
QueryingAA, IRPosition::function(*IRP.getAnchorScope()));
// TODO: Use Positions here to allow context sensitivity in VisitValueCB
// Look through phi nodes, visit all live operands.
if (auto *PHI = dyn_cast<PHINode>(V)) {
+ assert(LivenessAA &&
+ "Expected liveness in the presence of instructions!");
for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
- if (!LivenessAA ||
- !LivenessAA->isAssumedDead(IncomingBB->getTerminator()))
+ if (!LivenessAA->isAssumedDead(IncomingBB->getTerminator()))
Worklist.push_back(PHI->getIncomingValue(u));
}
continue;
ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, IRPosition &IRP,
const ArrayRef<Attribute> &DeducedAttrs) {
- ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
-
Function *ScopeFn = IRP.getAssociatedFunction();
IRPosition::Kind PK = IRP.getPositionKind();
switch (PK) {
case IRPosition::IRP_INVALID:
case IRPosition::IRP_FLOAT:
- llvm_unreachable("Cannot manifest at a floating or invalid position!");
+ return ChangeStatus::UNCHANGED;
case IRPosition::IRP_ARGUMENT:
case IRPosition::IRP_FUNCTION:
case IRPosition::IRP_RETURNED:
break;
}
+ ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
LLVMContext &Ctx = IRP.getAnchorValue().getContext();
for (const Attribute &Attr : DeducedAttrs) {
if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
// Callback for each possibly returned value.
auto CheckReturnValue = [&](Value &RV) -> bool {
const IRPosition &RVPos = IRPosition::value(RV);
- const AAType *AA = A.getAAFor<AAType>(QueryingAA, RVPos);
- LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV
- << " AA: " << (AA ? AA->getAsStr() : "n/a") << " @ "
- << RVPos << "\n");
- // TODO: We should create abstract attributes on-demand, patches are already
- // prepared, pending approval.
- if (!AA || AA->getIRPosition() != RVPos)
- return false;
- const StateType &AAS = static_cast<const StateType &>(AA->getState());
+ const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
+ LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
+ << " @ " << RVPos << "\n");
+ const StateType &AAS = static_cast<const StateType &>(AA.getState());
if (T.hasValue())
*T &= AAS;
else
}
/// Helper class for generic deduction: return value -> returned position.
-template <typename AAType, typename StateType = typename AAType::StateType>
-struct AAReturnedFromReturnedValues : public AAType {
- AAReturnedFromReturnedValues(const IRPosition &IRP) : AAType(IRP) {}
+template <typename AAType, typename Base,
+ typename StateType = typename AAType::StateType>
+struct AAReturnedFromReturnedValues : public Base {
+ AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto CallSiteCheck = [&](CallSite CS) {
const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
- const AAType *AA = A.getAAFor<AAType>(QueryingAA, CSArgPos);
+ const AAType &AA = A.getAAFor<AAType>(QueryingAA, CSArgPos);
LLVM_DEBUG(dbgs() << "[Attributor] CS: " << *CS.getInstruction()
- << " AA: " << (AA ? AA->getAsStr() : "n/a") << " @"
- << CSArgPos << "\n");
- // TODO: We should create abstract attributes on-demand, patches are already
- // prepared, pending approval.
- if (!AA || AA->getIRPosition() != CSArgPos)
- return false;
- const StateType &AAS = static_cast<const StateType &>(AA->getState());
+ << " AA: " << AA.getAsStr() << " @" << CSArgPos << "\n");
+ const StateType &AAS = static_cast<const StateType &>(AA.getState());
if (T.hasValue())
*T &= AAS;
else
}
/// Helper class for generic deduction: call site argument -> argument position.
-template <typename AAType, typename StateType = typename AAType::StateType>
-struct AAArgumentFromCallSiteArguments : public AAType {
- AAArgumentFromCallSiteArguments(const IRPosition &IRP) : AAType(IRP) {}
+template <typename AAType, typename Base,
+ typename StateType = typename AAType::StateType>
+struct AAArgumentFromCallSiteArguments : public Base {
+ AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
};
/// Helper class for generic replication: function returned -> cs returned.
-template <typename AAType>
-struct AACallSiteReturnedFromReturned : public AAType {
- AACallSiteReturnedFromReturned(const IRPosition &IRP) : AAType(IRP) {}
+template <typename AAType, typename Base>
+struct AACallSiteReturnedFromReturned : public Base {
+ AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
return S.indicatePessimisticFixpoint();
IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
- // TODO: We should create abstract attributes on-demand, patches are already
- // prepared, pending approval.
- const AAType *AA = A.getAAFor<AAType>(*this, FnPos);
- if (!AA)
- return S.indicatePessimisticFixpoint();
+ const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
return clampStateAndIndicateChange(
- S, static_cast<const typename AAType::StateType &>(AA->getState()));
+ S, static_cast<const typename AAType::StateType &>(AA.getState()));
}
};
if (!I.mayThrow())
return true;
- auto *NoUnwindAA = A.getAAFor<AANoUnwind>(*this, IRPosition::value(I));
- return NoUnwindAA && NoUnwindAA->isAssumedNoUnwind();
+ if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
+ const auto &NoUnwindAA =
+ A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
+ return NoUnwindAA.isAssumedNoUnwind();
+ }
+ return false;
};
if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
if (!CB || UnresolvedCalls.count(CB))
continue;
- const auto *RetValAAPtr =
+ const auto &RetValAA =
A.getAAFor<AAReturnedValues>(*this, IRPosition::callsite_function(*CB));
+ LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
+ << static_cast<const AbstractAttribute &>(RetValAA)
+ << "\n");
// Skip dead ends, thus if we do not know anything about the returned
// call we mark it as unresolved and it will stay that way.
- if (!RetValAAPtr || !RetValAAPtr->getState().isValidState()) {
+ if (!RetValAA.getState().isValidState()) {
LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
<< "\n");
UnresolvedCalls.insert(CB);
continue;
}
- const auto &RetValAA = *RetValAAPtr;
- LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
- << static_cast<const AbstractAttribute &>(RetValAA)
- << "\n");
-
// Do not try to learn partial information. If the callee has unresolved
// return values we will treat the call as unresolved/opaque.
auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
if (ICS.hasFnAttr(Attribute::NoSync))
return true;
- auto *NoSyncAA =
- A.getAAFor<AANoSyncImpl>(*this, IRPosition::callsite_function(ICS));
- if (NoSyncAA && NoSyncAA->isAssumedNoSync())
+ const auto &NoSyncAA =
+ A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
+ if (NoSyncAA.isAssumedNoSync())
return true;
return false;
}
if (ICS.hasFnAttr(Attribute::NoFree))
return true;
- auto *NoFreeAA =
- A.getAAFor<AANoFreeImpl>(*this, IRPosition::callsite_function(ICS));
- return NoFreeAA && NoFreeAA->isAssumedNoFree();
+ const auto &NoFreeAA =
+ A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
+ return NoFreeAA.isAssumedNoFree();
};
if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
bool Stripped) -> bool {
- if (isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr,
+ const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
+ if (!Stripped && this == &AA) {
+ if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr,
/* TODO: CtxI */ nullptr,
- /* TODO: DT */ nullptr)) {
- // Known non-zero, all good.
- } else if (const auto *AA =
- A.getAAFor<AANonNull>(*this, IRPosition::value(V))) {
- // Try to use abstract attribute information.
- if (!AA->isAssumedNonNull())
+ /* TODO: DT */ nullptr))
T.indicatePessimisticFixpoint();
} else {
- // IR information was not sufficient and we did not find an abstract
- // attribute to use. TODO: on-demand attribute creation!
- T.indicatePessimisticFixpoint();
+ // Use abstract attribute information.
+ const AANonNull::StateType &NS =
+ static_cast<const AANonNull::StateType &>(AA.getState());
+ T ^= NS;
}
return T.isValidState();
};
};
/// NonNull attribute for function return value.
-struct AANonNullReturned final : AAReturnedFromReturnedValues<AANonNullImpl> {
+struct AANonNullReturned final
+ : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
AANonNullReturned(const IRPosition &IRP)
- : AAReturnedFromReturnedValues<AANonNullImpl>(IRP) {}
+ : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
/// NonNull attribute for function argument.
struct AANonNullArgument final
- : AAArgumentFromCallSiteArguments<AANonNullImpl> {
+ : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
AANonNullArgument(const IRPosition &IRP)
- : AAArgumentFromCallSiteArguments<AANonNullImpl>(IRP) {}
+ : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
- : AACallSiteReturnedFromReturned<AANonNullImpl> {
+ : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
AANonNullCallSiteReturned(const IRPosition &IRP)
- : AACallSiteReturnedFromReturned<AANonNullImpl>(IRP) {}
+ : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto CheckForWillReturn = [&](Instruction &I) {
- ImmutableCallSite ICS(&I);
- if (ICS.hasFnAttr(Attribute::WillReturn))
+ IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
+ const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
+ if (WillReturnAA.isKnownWillReturn())
return true;
-
- IRPosition IPos = IRPosition::callsite_function(ICS);
- auto *WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
- if (!WillReturnAA || !WillReturnAA->isAssumedWillReturn())
+ if (!WillReturnAA.isAssumedWillReturn())
return false;
-
- // FIXME: Prohibit any recursion for now.
- if (ICS.hasFnAttr(Attribute::NoRecurse))
- return true;
-
- auto *NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
- return NoRecurseAA && NoRecurseAA->isAssumedNoRecurse();
+ const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
+ return NoRecurseAA.isAssumedNoRecurse();
};
if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
if (!ICS)
return false;
- if (!ICS.returnDoesNotAlias()) {
- auto *NoAliasAA =
- A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(ICS));
- if (!NoAliasAA || !NoAliasAA->isAssumedNoAlias())
- return false;
- }
+ const auto &NoAliasAA =
+ A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(ICS));
+ if (!NoAliasAA.isAssumedNoAlias())
+ return false;
/// FIXME: We can improve capture check in two ways:
/// 1. Use the AANoCapture facilities.
/// and only place an unreachable in the normal successor.
if (Invoke2CallAllowed) {
if (Function *Callee = II->getCalledFunction()) {
- auto *AANoUnw =
- A.getAAFor<AANoUnwind>(*this, IRPosition::function(*Callee));
- if (Callee->hasFnAttribute(Attribute::NoUnwind) ||
- (AANoUnw && AANoUnw->isAssumedNoUnwind())) {
+ const IRPosition &IPos = IRPosition::callsite_function(*II);
+ const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
+ if (AANoUnw.isAssumedNoUnwind()) {
LLVM_DEBUG(dbgs()
<< "[AAIsDead] Replace invoke with call inst\n");
// We do not need an invoke (II) but instead want a call followed
// instruction but the unwind block might still be.
if (auto *Invoke = dyn_cast<InvokeInst>(I)) {
// Use nounwind to justify the unwind block is dead as well.
- auto *AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
- if (!Invoke2CallAllowed ||
- (!AANoUnw || !AANoUnw->isAssumedNoUnwind())) {
+ const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
+ if (!Invoke2CallAllowed || !AANoUnw.isAssumedNoUnwind()) {
AssumedLiveBlocks.insert(Invoke->getUnwindDest());
ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front());
}
}
- auto *NoReturnAA = A.getAAFor<AANoReturn>(*this, IPos);
- if (ICS.hasFnAttr(Attribute::NoReturn) ||
- (NoReturnAA && NoReturnAA->isAssumedNoReturn()))
+ const auto &NoReturnAA = A.getAAFor<AANoReturn>(*this, IPos);
+ if (NoReturnAA.isAssumedNoReturn())
return I;
}
for (const Attribute &Attr : Attrs)
takeKnownDerefBytesMaximum(Attr.getValueAsInt());
- NonNullAA = A.getAAFor<AANonNull>(*this, getIRPosition());
+ NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
}
/// See AbstractAttribute::getState()
const Value *Base =
V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
- const auto *AA =
- A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
+ const auto &AA =
+ A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
int64_t DerefBytes = 0;
- if (!AA || (!Stripped &&
- getIRPosition().getPositionKind() == IRPosition::IRP_FLOAT)) {
+ if (!Stripped && this == &AA) {
// Use IR information if we did not strip anything.
// TODO: track globally.
bool CanBeNull;
DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
T.GlobalState.indicatePessimisticFixpoint();
} else {
- const DerefState &DS = static_cast<const DerefState &>(AA->getState());
+ const DerefState &DS = static_cast<const DerefState &>(AA.getState());
DerefBytes = DS.DerefBytesState.getAssumed();
T.GlobalState &= DS.GlobalState;
}
T.takeAssumedDerefBytesMinimum(
std::max(int64_t(0), DerefBytes - Offset.getSExtValue()));
- if (!Stripped &&
- getIRPosition().getPositionKind() == IRPosition::IRP_FLOAT) {
+ if (!Stripped && this == &AA) {
T.takeKnownDerefBytesMaximum(
std::max(int64_t(0), DerefBytes - Offset.getSExtValue()));
T.indicatePessimisticFixpoint();
/// Dereferenceable attribute for a return value.
struct AADereferenceableReturned final
- : AAReturnedFromReturnedValues<AADereferenceableImpl> {
+ : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
+ DerefState> {
AADereferenceableReturned(const IRPosition &IRP)
- : AAReturnedFromReturnedValues<AADereferenceableImpl>(IRP) {}
+ : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
+ DerefState>(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
/// Dereferenceable attribute for an argument
struct AADereferenceableArgument final
- : AAArgumentFromCallSiteArguments<AADereferenceableImpl> {
+ : AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl,
+ DerefState> {
AADereferenceableArgument(const IRPosition &IRP)
- : AAArgumentFromCallSiteArguments<AADereferenceableImpl>(IRP) {}
+ : AAArgumentFromCallSiteArguments<AADereferenceable,
+ AADereferenceableImpl, DerefState>(
+ IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override{
auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
bool Stripped) -> bool {
- if (!Stripped &&
- getIRPosition().getPositionKind() == IRPosition::IRP_FLOAT) {
+ const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
+ if (!Stripped && this == &AA) {
// Use only IR information if we did not strip anything.
T.takeKnownMaximum(V.getPointerAlignment(DL));
T.indicatePessimisticFixpoint();
- } else if (const auto *AA =
- A.getAAFor<AAAlign>(*this, IRPosition::value(V))) {
- // Try to use abstract attribute information.
- const AAAlign::StateType &DS =
- static_cast<const AAAlign::StateType &>(AA->getState());
- T.takeAssumedMinimum(DS.getAssumed());
} else {
- // Last resort, look into the IR.
- T.takeKnownMaximum(V.getPointerAlignment(DL));
- T.indicatePessimisticFixpoint();
+ // Use abstract attribute information.
+ const AAAlign::StateType &DS =
+ static_cast<const AAAlign::StateType &>(AA.getState());
+ T ^= DS;
}
return T.isValidState();
};
};
/// Align attribute for function return value.
-struct AAAlignReturned final : AAReturnedFromReturnedValues<AAAlignImpl> {
+struct AAAlignReturned final
+ : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
AAAlignReturned(const IRPosition &IRP)
- : AAReturnedFromReturnedValues<AAAlignImpl>(IRP) {}
+ : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
};
/// Align attribute for function argument.
-struct AAAlignArgument final : AAArgumentFromCallSiteArguments<AAAlignImpl> {
+struct AAAlignArgument final
+ : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
AAAlignArgument(const IRPosition &IRP)
- : AAArgumentFromCallSiteArguments<AAAlignImpl>(IRP) {}
+ : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>(IRP) {}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
if (!LivenessAA)
LivenessAA =
- getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()));
- if (!LivenessAA || !LivenessAA->isAssumedDead(CtxI))
+ &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()));
+ if (!LivenessAA->isAssumedDead(CtxI))
return false;
// TODO: Do not track dependences automatically but add it here as only a
Instruction *I = cast<Instruction>(U.getUser());
Function *Caller = I->getFunction();
- auto *LivenessAA =
+ const auto &LivenessAA =
getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*Caller));
// Skip dead calls.
- if (LivenessAA && LivenessAA->isAssumedDead(I))
+ if (LivenessAA.isAssumedDead(I))
continue;
CallSite CS(U.getUser());
// and liveness information.
const IRPosition &QueryIRP = IRPosition::function_scope(IRP);
const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
- if (!AARetVal || !AARetVal->getState().isValidState())
+ if (!AARetVal.getState().isValidState())
return false;
- return AARetVal->checkForAllReturnedValuesAndReturnInsts(Pred);
+ return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
}
bool Attributor::checkForAllReturnedValues(
const IRPosition &QueryIRP = IRPosition::function_scope(IRP);
const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
- if (!AARetVal || !AARetVal->getState().isValidState())
+ if (!AARetVal.getState().isValidState())
return false;
- return AARetVal->checkForAllReturnedValuesAndReturnInsts(
+ return AARetVal.checkForAllReturnedValuesAndReturnInsts(
[&](Value &RV, const SmallPtrSetImpl<ReturnInst *> &) {
return Pred(RV);
});
for (unsigned Opcode : Opcodes) {
for (Instruction *I : OpcodeInstMap[Opcode]) {
// Skip dead instructions.
- if (LivenessAA && LivenessAA->isAssumedDead(I))
+ if (LivenessAA.isAssumedDead(I))
continue;
if (!Pred(*I))
for (Instruction *I :
InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
// Skip dead instructions.
- if (LivenessAA && LivenessAA->isAssumedDead(I))
+ if (LivenessAA.isAssumedDead(I))
continue;
if (!Pred(*I))
}
ChangeStatus Attributor::run() {
- // Initialize all abstract attributes.
- for (AbstractAttribute *AA : AllAbstractAttributes)
- AA->initialize(*this);
+ // Initialize all abstract attributes, allow new ones to be created.
+ for (unsigned u = 0; u < AllAbstractAttributes.size(); u++)
+ AllAbstractAttributes[u]->initialize(*this);
LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
<< AllAbstractAttributes.size()
Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
do {
+ // Remember the size to determine new attributes.
+ size_t NumAAs = AllAbstractAttributes.size();
LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
<< ", Worklist size: " << Worklist.size() << "\n");
Worklist.clear();
Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
+ // Add attributes to the worklist that have been created in the last
+ // iteration.
+ Worklist.insert(AllAbstractAttributes.begin() + NumAAs,
+ AllAbstractAttributes.end());
+
} while (!Worklist.empty() && ++IterationCounter < MaxFixpointIterations);
+ size_t NumFinalAAs = AllAbstractAttributes.size();
+
LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
<< IterationCounter << "/" << MaxFixpointIterations
<< " iterations\n");
NumAttributesManifested += NumManifested;
NumAttributesValidFixpoint += NumAtFixpoint;
+ assert(
+ NumFinalAAs == AllAbstractAttributes.size() &&
+ "Expected the final number of abstract attributes to remain unchanged!");
return ManifestChange;
}
///
/// \returns The created abstract argument, or nullptr if none was created.
template <typename AAType>
-static AAType *checkAndRegisterAA(const IRPosition &IRP, Attributor &A,
- DenseSet<const char *> *Whitelist) {
+static const AAType *checkAndRegisterAA(const IRPosition &IRP, Attributor &A,
+ DenseSet<const char *> *Whitelist) {
if (Whitelist && !Whitelist->count(&AAType::ID))
return nullptr;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
+// Macro magic to create the static generator function for attributes that
+// follow the naming scheme.
+
+#define SWITCH_PK_INV(CLASS, PK, POS_NAME) \
+ case IRPosition::PK: \
+ llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
+
+#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \
+ case IRPosition::PK: \
+ AA = new CLASS##SUFFIX(IRP); \
+ break;
+
+#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
+ CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
+ CLASS *AA = nullptr; \
+ switch (IRP.getPositionKind()) { \
+ SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
+ SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
+ SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
+ SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
+ SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
+ SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
+ SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
+ SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
+ } \
+ AA->initialize(A); \
+ return *AA; \
+ }
+
+#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
+ CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
+ CLASS *AA = nullptr; \
+ switch (IRP.getPositionKind()) { \
+ SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
+ SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \
+ SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
+ SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
+ SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
+ SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
+ SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
+ SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
+ } \
+ AA->initialize(A); \
+ return *AA; \
+ }
+
+CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
+CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
+CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
+CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
+CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
+CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
+CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
+CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
+
+CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
+CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
+CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
+CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
+
+#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
+#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
+#undef SWITCH_PK_CREATE
+#undef SWITCH_PK_INV
+
INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
"Deduce and propagate attributes", false, false)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",