}
};
-enum class Reorderability { Always, IfNoAlias, Never };
-
} // end namespace llvm
/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
-static Reorderability getLoadReorderability(const LoadInst *Use,
- const LoadInst *MayClobber) {
+static bool areLoadsReorderable(const LoadInst *Use,
+ const LoadInst *MayClobber) {
bool VolatileUse = Use->isVolatile();
bool VolatileClobber = MayClobber->isVolatile();
// Volatile operations may never be reordered with other volatile operations.
if (VolatileUse && VolatileClobber)
- return Reorderability::Never;
-
- // The lang ref allows reordering of volatile and non-volatile operations.
- // Whether an aliasing nonvolatile load and volatile load can be reordered,
- // though, is ambiguous. Because it may not be best to exploit this ambiguity,
- // we only allow volatile/non-volatile reordering if the volatile and
- // non-volatile operations don't alias.
- Reorderability Result = VolatileUse || VolatileClobber
- ? Reorderability::IfNoAlias
- : Reorderability::Always;
+ return false;
+ // Otherwise, volatile doesn't matter here. From the language reference:
+ // 'optimizers may change the order of volatile operations relative to
+ // non-volatile operations.'
// If a load is seq_cst, it cannot be moved above other loads. If its ordering
// is weaker, it can be moved above other loads. We just need to be sure that
// MayClobber isn't an acquire load, because loads can't be moved above
// acquire loads.
bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
AtomicOrdering::Acquire);
- if (SeqCstUse || MayClobberIsAcquire)
- return Reorderability::Never;
- return Result;
+ return !(SeqCstUse || MayClobberIsAcquire);
}
static bool instructionClobbersQuery(MemoryDef *MD,
return isModOrRefSet(I);
}
- if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) {
- if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) {
- switch (getLoadReorderability(UseLoad, DefLoad)) {
- case Reorderability::Always:
- return false;
- case Reorderability::Never:
- return true;
- case Reorderability::IfNoAlias:
- return !AA.isNoAlias(UseLoc, MemoryLocation::get(DefLoad));
- }
- }
- }
+ if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
+ if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
+ return !areLoadsReorderable(UseLoad, DefLoad);
return isModSet(AA.getModRefInfo(DefInst, UseLoc));
}
ret i32 %4
}
-; Ensuring that we don't automatically hoist nonvolatile loads around volatile
-; loads
+; Ensuring we allow hoisting nonvolatile loads around volatile loads.
; CHECK-LABEL: define void @volatile_only
define void @volatile_only(i32* %arg1, i32* %arg2) {
; Trivially NoAlias/MustAlias
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: load i32, i32* %b
load i32, i32* %b
-; CHECK: MemoryUse(1)
+; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: load i32, i32* %a
load i32, i32* %a
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: load volatile i32, i32* %arg1
load volatile i32, i32* %arg1
-; CHECK: MemoryUse(2)
+; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: load i32, i32* %arg2
load i32, i32* %arg2
; CHECK: MemoryUse(1)
; CHECK-NEXT: load atomic i32, i32* %b unordered, align 4
load atomic i32, i32* %b unordered, align 4
-; CHECK: MemoryUse(2)
+; CHECK: MemoryUse(1)
; CHECK-NEXT: load atomic i32, i32* %a unordered, align 4
load atomic i32, i32* %a unordered, align 4
-; CHECK: MemoryUse(2)
+; CHECK: MemoryUse(1)
; CHECK-NEXT: load i32, i32* %a
load i32, i32* %a
; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: load atomic volatile i32, i32* %arg1 monotonic, align 4
load atomic volatile i32, i32* %arg1 monotonic, align 4
-; CHECK: MemoryUse(3)
+; CHECK: MemoryUse(1)
; CHECK-NEXT: load i32, i32* %arg2
load i32, i32* %arg2