/// Flags indicating whether a memory access modifies or references memory.
///
/// These are specifically structured so that they form a two bit matrix and
/// bit-tests for 'mod' or 'ref' work with any of the possible values.
enum class ModRefInfo {
  /// The access neither references nor modifies the value stored in memory.
  NoModRef = 0,
  /// The access may reference the value stored in memory.
  Ref = 1,
  /// The access may modify the value stored in memory.
  Mod = 2,
  /// The access may reference and may modify the value stored in memory.
  ModRef = Ref | Mod,
};
/// Returns true if this access neither modifies nor references memory.
LLVM_NODISCARD inline bool isNoModRef(const ModRefInfo MRI) {
  return MRI == ModRefInfo::NoModRef;
}
/// Returns true if either the Mod or the Ref bit is set.
LLVM_NODISCARD inline bool isModOrRefSet(const ModRefInfo MRI) {
  // Any bit of the two-bit mod/ref matrix being set means some access occurs.
  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::ModRef);
}
/// Returns true if both the Mod and the Ref bits are set.
LLVM_NODISCARD inline bool isModAndRefSet(const ModRefInfo MRI) {
  return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::ModRef)) ==
         static_cast<int>(ModRefInfo::ModRef);
}
/// Returns true if the Mod bit is set (the access may modify memory).
LLVM_NODISCARD inline bool isModSet(const ModRefInfo MRI) {
  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod);
}
/// Returns true if the Ref bit is set (the access may reference memory).
LLVM_NODISCARD inline bool isRefSet(const ModRefInfo MRI) {
  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref);
}
/// Returns a copy of MRI with the Mod bit additionally set.
LLVM_NODISCARD inline ModRefInfo setMod(const ModRefInfo MRI) {
  return ModRefInfo(static_cast<int>(MRI) | static_cast<int>(ModRefInfo::Mod));
}
/// Returns a copy of MRI with the Ref bit additionally set.
LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
  return ModRefInfo(static_cast<int>(MRI) | static_cast<int>(ModRefInfo::Ref));
}
/// Returns a copy of MRI with both the Mod and Ref bits set.
LLVM_NODISCARD inline ModRefInfo setModAndRef(const ModRefInfo MRI) {
  return ModRefInfo(static_cast<int>(MRI) |
                    static_cast<int>(ModRefInfo::ModRef));
}
/// Returns a copy of MRI with the Mod bit cleared (only the Ref bit kept).
LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) {
  return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref));
}
/// Returns a copy of MRI with the Ref bit cleared (only the Mod bit kept).
LLVM_NODISCARD inline ModRefInfo clearRef(const ModRefInfo MRI) {
  return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod));
}
/// Returns the union (bitwise OR) of two mod/ref sets.
LLVM_NODISCARD inline ModRefInfo unionModRef(const ModRefInfo MRI1,
                                             const ModRefInfo MRI2) {
  return ModRefInfo(static_cast<int>(MRI1) | static_cast<int>(MRI2));
}
/// Returns the intersection (bitwise AND) of two mod/ref sets.
LLVM_NODISCARD inline ModRefInfo intersectModRef(const ModRefInfo MRI1,
                                                 const ModRefInfo MRI2) {
  return ModRefInfo(static_cast<int>(MRI1) & static_cast<int>(MRI2));
}
/// The locations at which a function might access memory.
/// This property corresponds to the GCC 'const' attribute.
/// This property corresponds to the LLVM IR 'readnone' attribute.
/// This property corresponds to the IntrNoMem LLVM intrinsic flag.
- FMRB_DoesNotAccessMemory = FMRL_Nowhere | MRI_NoModRef,
+ FMRB_DoesNotAccessMemory =
+ FMRL_Nowhere | static_cast<int>(ModRefInfo::NoModRef),
/// The only memory references in this function (if it has any) are
/// non-volatile loads from objects pointed to by its pointer-typed
/// arguments, with arbitrary offsets.
///
/// This property corresponds to the IntrReadArgMem LLVM intrinsic flag.
- FMRB_OnlyReadsArgumentPointees = FMRL_ArgumentPointees | MRI_Ref,
+ FMRB_OnlyReadsArgumentPointees =
+ FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::Ref),
/// The only memory references in this function (if it has any) are
/// non-volatile loads and stores from objects pointed to by its
/// pointer-typed arguments, with arbitrary offsets.
///
/// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag.
- FMRB_OnlyAccessesArgumentPointees = FMRL_ArgumentPointees | MRI_ModRef,
+ FMRB_OnlyAccessesArgumentPointees =
+ FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::ModRef),
/// The only memory references in this function (if it has any) are
/// references of memory that is otherwise inaccessible via LLVM IR.
///
/// This property corresponds to the LLVM IR inaccessiblememonly attribute.
- FMRB_OnlyAccessesInaccessibleMem = FMRL_InaccessibleMem | MRI_ModRef,
+ FMRB_OnlyAccessesInaccessibleMem =
+ FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::ModRef),
/// The function may perform non-volatile loads and stores of objects
/// pointed to by its pointer-typed arguments, with arbitrary offsets, and
/// This property corresponds to the LLVM IR
/// inaccessiblemem_or_argmemonly attribute.
FMRB_OnlyAccessesInaccessibleOrArgMem = FMRL_InaccessibleMem |
- FMRL_ArgumentPointees | MRI_ModRef,
+ FMRL_ArgumentPointees |
+ static_cast<int>(ModRefInfo::ModRef),
/// This function does not perform any non-local stores or volatile loads,
/// but may read from any memory location.
/// This property corresponds to the GCC 'pure' attribute.
/// This property corresponds to the LLVM IR 'readonly' attribute.
/// This property corresponds to the IntrReadMem LLVM intrinsic flag.
- FMRB_OnlyReadsMemory = FMRL_Anywhere | MRI_Ref,
+ FMRB_OnlyReadsMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Ref),
// This function does not read from memory anywhere, but may write to any
// memory location.
//
// This property corresponds to the LLVM IR 'writeonly' attribute.
// This property corresponds to the IntrWriteMem LLVM intrinsic flag.
- FMRB_DoesNotReadMemory = FMRL_Anywhere | MRI_Mod,
+ FMRB_DoesNotReadMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Mod),
/// This indicates that the function could not be classified into one of the
/// behaviors above.
- FMRB_UnknownModRefBehavior = FMRL_Anywhere | MRI_ModRef
+ FMRB_UnknownModRefBehavior =
+ FMRL_Anywhere | static_cast<int>(ModRefInfo::ModRef)
};
/// Strips the bits significant only in FunctionModRefBehavior, keeping just
/// the two low mod/ref bits, and returns them as a ModRefInfo.
LLVM_NODISCARD inline ModRefInfo
createModRefInfo(const FunctionModRefBehavior FMRB) {
  return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::ModRef));
}
class AAResults {
case Instruction::CatchRet:
return getModRefInfo((const CatchReturnInst *)I, Loc);
default:
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
}
}
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
}
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
};
}
ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
- ModRefInfo Result = MRI_ModRef;
+ ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
Result = intersectModRef(Result, AA->getArgModRefInfo(CS, ArgIdx));
// Check if the two calls modify the same memory
return getModRefInfo(CS, Call);
} else if (I->isFenceLike()) {
- // If this is a fence, just return MRI_ModRef.
- return MRI_ModRef;
+ // If this is a fence, just return ModRef.
+ return ModRefInfo::ModRef;
} else {
// Otherwise, check if the call modifies or references the
// location this memory access defines. The best we can say
if (isModOrRefSet(MR))
return setModAndRef(MR);
}
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) {
- ModRefInfo Result = MRI_ModRef;
+ ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
Result = intersectModRef(Result, AA->getModRefInfo(CS, Loc));
auto MRB = getModRefBehavior(CS);
if (MRB == FMRB_DoesNotAccessMemory ||
MRB == FMRB_OnlyAccessesInaccessibleMem)
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
if (onlyReadsMemory(MRB))
Result = clearMod(Result);
if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) {
bool DoesAlias = false;
- ModRefInfo AllArgsMask = MRI_NoModRef;
+ ModRefInfo AllArgsMask = ModRefInfo::NoModRef;
if (doesAccessArgPointees(MRB)) {
for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
const Value *Arg = *AI;
}
}
}
- // Return MRI_NoModRef if no alias found with any argument.
+ // Return NoModRef if no alias found with any argument.
if (!DoesAlias)
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// Logical & between other AA analyses and argument analysis.
Result = intersectModRef(Result, AllArgsMask);
}
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) {
- ModRefInfo Result = MRI_ModRef;
+ ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
Result = intersectModRef(Result, AA->getModRefInfo(CS1, CS2));
// If CS1 or CS2 are readnone, they don't interact.
auto CS1B = getModRefBehavior(CS1);
if (CS1B == FMRB_DoesNotAccessMemory)
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
auto CS2B = getModRefBehavior(CS2);
if (CS2B == FMRB_DoesNotAccessMemory)
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// If they both only read from memory, there is no dependence.
if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// If CS1 only reads memory, the only dependence on CS2 can be
// from CS1 reading memory written by CS2.
// information from CS1's references to the memory referenced by
// CS2's arguments.
if (onlyAccessesArgPointees(CS2B)) {
- ModRefInfo R = MRI_NoModRef;
+ ModRefInfo R = ModRefInfo::NoModRef;
if (doesAccessArgPointees(CS2B)) {
for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
const Value *Arg = *I;
// - If CS2 modifies location, dependence exists if CS1 reads or writes.
// - If CS2 only reads location, dependence exists if CS1 writes.
ModRefInfo ArgModRefCS2 = getArgModRefInfo(CS2, CS2ArgIdx);
- ModRefInfo ArgMask = MRI_NoModRef;
+ ModRefInfo ArgMask = ModRefInfo::NoModRef;
if (isModSet(ArgModRefCS2))
- ArgMask = MRI_ModRef;
+ ArgMask = ModRefInfo::ModRef;
else if (isRefSet(ArgModRefCS2))
- ArgMask = MRI_Mod;
+ ArgMask = ModRefInfo::Mod;
// ModRefCS1 indicates what CS1 might do to CS2ArgLoc, and we use
// above ArgMask to update dependence info.
// If CS1 only accesses memory through arguments, check if CS2 references
// any of the memory referenced by CS1's arguments. If not, return NoModRef.
if (onlyAccessesArgPointees(CS1B)) {
- ModRefInfo R = MRI_NoModRef;
+ ModRefInfo R = ModRefInfo::NoModRef;
if (doesAccessArgPointees(CS1B)) {
for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
const Value *Arg = *I;
const MemoryLocation &Loc) {
// Be conservative in the face of atomic.
if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// Otherwise, a load just reads.
- return MRI_Ref;
+ return ModRefInfo::Ref;
}
ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
const MemoryLocation &Loc) {
// Be conservative in the face of atomic.
if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
if (Loc.Ptr) {
// If the store address cannot alias the pointer in question, then the
// specified memory cannot be modified by the store.
if (!alias(MemoryLocation::get(S), Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this store.
if (pointsToConstantMemory(Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
// Otherwise, a store just writes.
- return MRI_Mod;
+ return ModRefInfo::Mod;
}
ModRefInfo AAResults::getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) {
// If we know that the location is a constant memory location, the fence
// cannot modify this location.
if (Loc.Ptr && pointsToConstantMemory(Loc))
- return MRI_Ref;
- return MRI_ModRef;
+ return ModRefInfo::Ref;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
// If the va_arg address cannot alias the pointer in question, then the
// specified memory cannot be accessed by the va_arg.
if (!alias(MemoryLocation::get(V), Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this va_arg.
if (pointsToConstantMemory(Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
// Otherwise, a va_arg reads and writes.
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
// If the pointer is a pointer to constant memory,
// then it could not have been modified by this catchpad.
if (pointsToConstantMemory(Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
// Otherwise, a catchpad reads and writes.
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
// If the pointer is a pointer to constant memory,
// then it could not have been modified by this catchpad.
if (pointsToConstantMemory(Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
// Otherwise, a catchret reads and writes.
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
const MemoryLocation &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
// If the cmpxchg address does not alias the location, it does not access it.
if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
const MemoryLocation &Loc) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
if (isStrongerThanMonotonic(RMW->getOrdering()))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
// If the atomicrmw address does not alias the location, it does not access it.
if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
/// \brief Return information about whether a particular call site modifies
DominatorTree *DT,
OrderedBasicBlock *OBB) {
if (!DT)
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
const Value *Object =
GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
ImmutableCallSite CS(I);
if (!CS.getInstruction() || CS.getInstruction() == Object)
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
/* StoreCaptures */ true, I, DT,
/* include Object */ true,
/* OrderedBasicBlock */ OBB))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
unsigned ArgNo = 0;
- ModRefInfo R = MRI_NoModRef;
+ ModRefInfo R = ModRefInfo::NoModRef;
for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
CI != CE; ++CI, ++ArgNo) {
// Only look at the no-capture or byval pointer arguments. If this
if (CS.doesNotAccessMemory(ArgNo))
continue;
if (CS.onlyReadsMemory(ArgNo)) {
- R = MRI_Ref;
+ R = ModRefInfo::Ref;
continue;
}
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
return R;
}
///
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
const MemoryLocation &Loc) {
- return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
+ return canInstructionRangeModRef(BB.front(), BB.back(), Loc, ModRefInfo::Mod);
}
/// canInstructionRangeModRef - Return true if it is possible for the
if (ElTy->isSized()) Size = DL.getTypeStoreSize(ElTy);
switch (AA.getModRefInfo(C, Pointer, Size)) {
- case MRI_NoModRef:
+ case ModRefInfo::NoModRef:
PrintModRefResults("NoModRef", PrintNoModRef, I, Pointer,
F.getParent());
++NoModRefCount;
break;
- case MRI_Mod:
+ case ModRefInfo::Mod:
PrintModRefResults("Just Mod", PrintMod, I, Pointer, F.getParent());
++ModCount;
break;
- case MRI_Ref:
+ case ModRefInfo::Ref:
PrintModRefResults("Just Ref", PrintRef, I, Pointer, F.getParent());
++RefCount;
break;
- case MRI_ModRef:
+ case ModRefInfo::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, I, Pointer,
F.getParent());
++ModRefCount;
if (D == C)
continue;
switch (AA.getModRefInfo(*C, *D)) {
- case MRI_NoModRef:
+ case ModRefInfo::NoModRef:
PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent());
++NoModRefCount;
break;
- case MRI_Mod:
+ case ModRefInfo::Mod:
PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent());
++ModCount;
break;
- case MRI_Ref:
+ case ModRefInfo::Ref:
PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent());
++RefCount;
break;
- case MRI_ModRef:
+ case ModRefInfo::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent());
++ModRefCount;
break;
unsigned ArgIdx) {
// Checking for known builtin intrinsics and target library functions.
if (isWriteOnlyParam(CS, ArgIdx, TLI))
- return MRI_Mod;
+ return ModRefInfo::Mod;
if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
- return MRI_Ref;
+ return ModRefInfo::Ref;
if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}
if (isa<AllocaInst>(Object))
if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
if (CI->isTailCall())
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// If the pointer is to a locally allocated object that does not escape,
// then the call can not mod/ref the pointer unless the call takes the pointer
// Optimistically assume that call doesn't touch Object and check this
// assumption in the following loop.
- ModRefInfo Result = MRI_NoModRef;
+ ModRefInfo Result = ModRefInfo::NoModRef;
unsigned OperandNo = 0;
for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
continue;
}
// This operand aliases 'Object' and call reads and writes into it.
- Result = MRI_ModRef;
+ Result = ModRefInfo::ModRef;
break;
}
// Be conservative if the accessed pointer may alias the allocation -
// fallback to the generic handling below.
if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
// The semantics of memcpy intrinsics forbid overlap between their respective
if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
Loc)) == MustAlias)
// Loc is exactly the memcpy source thus disjoint from memcpy dest.
- return MRI_Ref;
+ return ModRefInfo::Ref;
if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
Loc)) == MustAlias)
// The converse case.
- return MRI_Mod;
+ return ModRefInfo::Mod;
// It's also possible for Loc to alias both src and dest, or neither.
- ModRefInfo rv = MRI_NoModRef;
+ ModRefInfo rv = ModRefInfo::NoModRef;
if (SrcAA != NoAlias)
rv = setRef(rv);
if (DestAA != NoAlias)
// proper control dependencies will be maintained, it never aliases any
// particular memory location.
if (isIntrinsicCall(CS, Intrinsic::assume))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// Like assumes, guard intrinsics are also marked as arbitrarily writing so
// that proper control dependencies are maintained but they never mods any
// heap state at the point the guard is issued needs to be consistent in case
// the guard invokes the "deopt" continuation.
if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
- return MRI_Ref;
+ return ModRefInfo::Ref;
// Like assumes, invariant.start intrinsics were also marked as arbitrarily
// writing so that proper control dependencies are maintained but they never
// rules of invariant.start) and print 40, while the first program always
// prints 50.
if (isIntrinsicCall(CS, Intrinsic::invariant_start))
- return MRI_Ref;
+ return ModRefInfo::Ref;
// The AAResultBase base class has some smarts, lets use them.
return AAResultBase::getModRefInfo(CS, Loc);
// particular memory location.
if (isIntrinsicCall(CS1, Intrinsic::assume) ||
isIntrinsicCall(CS2, Intrinsic::assume))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// Like assumes, guard intrinsics are also marked as arbitrarily writing so
// that proper control dependencies are maintained but they never mod any
// possibilities for guard intrinsics.
if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
- return isModSet(createModRefInfo(getModRefBehavior(CS2))) ? MRI_Ref
- : MRI_NoModRef;
+ return isModSet(createModRefInfo(getModRefBehavior(CS2)))
+ ? ModRefInfo::Ref
+ : ModRefInfo::NoModRef;
if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
- return isModSet(createModRefInfo(getModRefBehavior(CS1))) ? MRI_Mod
- : MRI_NoModRef;
+ return isModSet(createModRefInfo(getModRefBehavior(CS1)))
+ ? ModRefInfo::Mod
+ : ModRefInfo::NoModRef;
// The AAResultBase base class has some smarts, lets use them.
return AAResultBase::getModRefInfo(CS1, CS2);
enum { MayReadAnyGlobal = 4 };
/// Checks to document the invariants of the bit packing here.
- static_assert((MayReadAnyGlobal & MRI_ModRef) == 0,
+ static_assert((MayReadAnyGlobal & static_cast<int>(ModRefInfo::ModRef)) == 0,
"ModRef and the MayReadAnyGlobal flag bits overlap.");
- static_assert(((MayReadAnyGlobal | MRI_ModRef) >>
+ static_assert(((MayReadAnyGlobal | static_cast<int>(ModRefInfo::ModRef)) >>
AlignedMapPointerTraits::NumLowBitsAvailable) == 0,
"Insufficient low bits to store our flag and ModRef info.");
/// Returns the \c ModRefInfo info for this function.
ModRefInfo getModRefInfo() const {
- return ModRefInfo(Info.getInt() & MRI_ModRef);
+ return ModRefInfo(Info.getInt() & static_cast<int>(ModRefInfo::ModRef));
}
/// Adds new \c ModRefInfo for this function to its state.
void addModRefInfo(ModRefInfo NewMRI) {
- Info.setInt(Info.getInt() | NewMRI);
+ Info.setInt(Info.getInt() | static_cast<int>(NewMRI));
}
/// Returns whether this function may read any global variable, and we don't
/// Returns the \c ModRefInfo info for this function w.r.t. a particular
/// global, which may be more precise than the general information above.
ModRefInfo getModRefInfoForGlobal(const GlobalValue &GV) const {
- ModRefInfo GlobalMRI = mayReadAnyGlobal() ? MRI_Ref : MRI_NoModRef;
+ ModRefInfo GlobalMRI =
+ mayReadAnyGlobal() ? ModRefInfo::Ref : ModRefInfo::NoModRef;
if (AlignedMap *P = Info.getPointer()) {
auto I = P->Map.find(&GV);
if (I != P->Map.end())
}
/// Add mod/ref info from another function into ours, saturating towards
- /// MRI_ModRef.
+ /// ModRef.
void addFunctionInfo(const FunctionInfo &FI) {
addModRefInfo(FI.getModRefInfo());
Handles.emplace_front(*this, Reader);
Handles.front().I = Handles.begin();
}
- FunctionInfos[Reader].addModRefInfoForGlobal(GV, MRI_Ref);
+ FunctionInfos[Reader].addModRefInfoForGlobal(GV, ModRefInfo::Ref);
}
if (!GV.isConstant()) // No need to keep track of writers to constants
Handles.emplace_front(*this, Writer);
Handles.front().I = Handles.begin();
}
- FunctionInfos[Writer].addModRefInfoForGlobal(GV, MRI_Mod);
+ FunctionInfos[Writer].addModRefInfoForGlobal(GV, ModRefInfo::Mod);
}
++NumNonAddrTakenGlobalVars;
if (F->doesNotAccessMemory()) {
// Can't do better than that!
} else if (F->onlyReadsMemory()) {
- FI.addModRefInfo(MRI_Ref);
+ FI.addModRefInfo(ModRefInfo::Ref);
if (!F->isIntrinsic() && !F->onlyAccessesArgMemory())
// This function might call back into the module and read a global -
// consider every global as possibly being read by this function.
FI.setMayReadAnyGlobal();
} else {
- FI.addModRefInfo(MRI_ModRef);
+ FI.addModRefInfo(ModRefInfo::ModRef);
// Can't say anything useful unless it's an intrinsic - they don't
// read or write global variables of the kind considered here.
KnowNothing = !F->isIntrinsic();
if (isAllocationFn(&I, &TLI) || isFreeCall(&I, &TLI)) {
// FIXME: It is completely unclear why this is necessary and not
// handled by the above graph code.
- FI.addModRefInfo(MRI_ModRef);
+ FI.addModRefInfo(ModRefInfo::ModRef);
} else if (Function *Callee = CS.getCalledFunction()) {
// The callgraph doesn't include intrinsic calls.
if (Callee->isIntrinsic()) {
// All non-call instructions we use the primary predicates for whether
// thay read or write memory.
if (I.mayReadFromMemory())
- FI.addModRefInfo(MRI_Ref);
+ FI.addModRefInfo(ModRefInfo::Ref);
if (I.mayWriteToMemory())
- FI.addModRefInfo(MRI_Mod);
+ FI.addModRefInfo(ModRefInfo::Mod);
}
}
ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
const GlobalValue *GV) {
if (CS.doesNotAccessMemory())
- return MRI_NoModRef;
- ModRefInfo ConservativeResult = CS.onlyReadsMemory() ? MRI_Ref : MRI_ModRef;
+ return ModRefInfo::NoModRef;
+ ModRefInfo ConservativeResult =
+ CS.onlyReadsMemory() ? ModRefInfo::Ref : ModRefInfo::ModRef;
// Iterate through all the arguments to the called function. If any argument
// is based on GV, return the conservative result.
}
// We identified all objects in the argument list, and none of them were GV.
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) {
- ModRefInfo Known = MRI_ModRef;
+ ModRefInfo Known = ModRefInfo::ModRef;
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
getModRefInfoForArgument(CS, GV));
if (!isModOrRefSet(Known))
- return MRI_NoModRef; // No need to query other mod/ref analyses
+ return ModRefInfo::NoModRef; // No need to query other mod/ref analyses
return intersectModRef(Known, AAResultBase::getModRefInfo(CS, Loc));
}
if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
if (LI->isUnordered()) {
Loc = MemoryLocation::get(LI);
- return MRI_Ref;
+ return ModRefInfo::Ref;
}
if (LI->getOrdering() == AtomicOrdering::Monotonic) {
Loc = MemoryLocation::get(LI);
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
Loc = MemoryLocation();
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (SI->isUnordered()) {
Loc = MemoryLocation::get(SI);
- return MRI_Mod;
+ return ModRefInfo::Mod;
}
if (SI->getOrdering() == AtomicOrdering::Monotonic) {
Loc = MemoryLocation::get(SI);
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
Loc = MemoryLocation();
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
Loc = MemoryLocation::get(V);
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
// calls to free() deallocate the entire structure
Loc = MemoryLocation(CI->getArgOperand(0));
- return MRI_Mod;
+ return ModRefInfo::Mod;
}
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
- return MRI_Mod;
+ return ModRefInfo::Mod;
case Intrinsic::invariant_end:
II->getAAMetadata(AAInfo);
Loc = MemoryLocation(
cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
- return MRI_Mod;
+ return ModRefInfo::Mod;
default:
break;
}
// Otherwise, just do the coarse-grained thing that always works.
if (Inst->mayWriteToMemory())
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
if (Inst->mayReadFromMemory())
- return MRI_Ref;
- return MRI_NoModRef;
+ return ModRefInfo::Ref;
+ return ModRefInfo::NoModRef;
}
/// Private helper for finding the local dependencies of a call site.
if (isModAndRefSet(MR))
MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB);
switch (MR) {
- case MRI_NoModRef:
+ case ModRefInfo::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;
- case MRI_Mod:
+ case ModRefInfo::Mod:
return MemDepResult::getClobber(Inst);
- case MRI_Ref:
+ case ModRefInfo::Ref:
// If the call is known to never store to the pointer, and if this is a
// load query, we can safely ignore it (scan past it).
if (isLoad)
// These functions don't access any memory visible to the compiler.
// Note that this doesn't include objc_retainBlock, because it updates
// pointers when it copies block data.
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
default:
break;
}
if (!mayAliasInScopes(Loc.AATags.Scope, CS.getInstruction()->getMetadata(
LLVMContext::MD_noalias)))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
if (!mayAliasInScopes(
CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
Loc.AATags.NoAlias))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS, Loc);
}
if (!mayAliasInScopes(
CS1.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
CS2.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
if (!mayAliasInScopes(
CS2.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
CS1.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS1, CS2);
}
if (const MDNode *M =
CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
if (!Aliases(L, M))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS, Loc);
}
if (const MDNode *M2 =
CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
if (!Aliases(M1, M2))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS1, CS2);
}
SmallPtrSet<Instruction*, 2> Ignore1;
Ignore1.insert(SI);
- if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
+ if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
StoreSize, *AA, Ignore1)) {
// Check if the load is the offending instruction.
Ignore1.insert(LI);
- if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
- StoreSize, *AA, Ignore1)) {
+ if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop,
+ BECount, StoreSize, *AA, Ignore1)) {
// Still bad. Nothing we can do.
goto CleanupAndExit;
}
SmallPtrSet<Instruction*, 2> Ignore2;
Ignore2.insert(SI);
- if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
- *AA, Ignore2))
+ if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
+ StoreSize, *AA, Ignore2))
goto CleanupAndExit;
// Check the stride.
BasicBlock *BB = Load->getParent();
MemoryLocation Loc = MemoryLocation::get(Load);
- if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, MRI_Mod))
+ if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, ModRefInfo::Mod))
return false; // Pointer is invalidated!
// Now check every path from the entry block to the load for transparency.
// base pointer and checking the region.
Value *BasePtr =
Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
- if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize,
- *AA, Stores)) {
+ if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
+ StoreSize, *AA, Stores)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
SmallPtrSet<Instruction *, 1> Stores;
Stores.insert(SI);
- if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
+ if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
StoreSize, *AA, Stores)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
Value *LoadBasePtr = Expander.expandCodeFor(
LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
- if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
- *AA, Stores)) {
+ if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
+ StoreSize, *AA, Stores)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
make_range(Start.getIterator(), End.getIterator()))
if (Inst.mayThrow())
return true;
- return AA->canInstructionRangeModRef(Start, End, Loc, MRI_ModRef);
+ return AA->canInstructionRangeModRef(Start, End, Loc, ModRefInfo::ModRef);
}
///
auto &AA = getAAResults(*F);
// Check basic results
- EXPECT_EQ(AA.getModRefInfo(Store1, MemoryLocation()), MRI_Mod);
- EXPECT_EQ(AA.getModRefInfo(Store1, None), MRI_Mod);
- EXPECT_EQ(AA.getModRefInfo(Load1, MemoryLocation()), MRI_Ref);
- EXPECT_EQ(AA.getModRefInfo(Load1, None), MRI_Ref);
- EXPECT_EQ(AA.getModRefInfo(Add1, MemoryLocation()), MRI_NoModRef);
- EXPECT_EQ(AA.getModRefInfo(Add1, None), MRI_NoModRef);
- EXPECT_EQ(AA.getModRefInfo(VAArg1, MemoryLocation()), MRI_ModRef);
- EXPECT_EQ(AA.getModRefInfo(VAArg1, None), MRI_ModRef);
- EXPECT_EQ(AA.getModRefInfo(CmpXChg1, MemoryLocation()), MRI_ModRef);
- EXPECT_EQ(AA.getModRefInfo(CmpXChg1, None), MRI_ModRef);
- EXPECT_EQ(AA.getModRefInfo(AtomicRMW, MemoryLocation()), MRI_ModRef);
- EXPECT_EQ(AA.getModRefInfo(AtomicRMW, None), MRI_ModRef);
+ EXPECT_EQ(AA.getModRefInfo(Store1, MemoryLocation()), ModRefInfo::Mod);
+ EXPECT_EQ(AA.getModRefInfo(Store1, None), ModRefInfo::Mod);
+ EXPECT_EQ(AA.getModRefInfo(Load1, MemoryLocation()), ModRefInfo::Ref);
+ EXPECT_EQ(AA.getModRefInfo(Load1, None), ModRefInfo::Ref);
+ EXPECT_EQ(AA.getModRefInfo(Add1, MemoryLocation()), ModRefInfo::NoModRef);
+ EXPECT_EQ(AA.getModRefInfo(Add1, None), ModRefInfo::NoModRef);
+ EXPECT_EQ(AA.getModRefInfo(VAArg1, MemoryLocation()), ModRefInfo::ModRef);
+ EXPECT_EQ(AA.getModRefInfo(VAArg1, None), ModRefInfo::ModRef);
+ EXPECT_EQ(AA.getModRefInfo(CmpXChg1, MemoryLocation()), ModRefInfo::ModRef);
+ EXPECT_EQ(AA.getModRefInfo(CmpXChg1, None), ModRefInfo::ModRef);
+ EXPECT_EQ(AA.getModRefInfo(AtomicRMW, MemoryLocation()), ModRefInfo::ModRef);
+ EXPECT_EQ(AA.getModRefInfo(AtomicRMW, None), ModRefInfo::ModRef);
}
class AAPassInfraTest : public testing::Test {