LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
- LI.getAlignment(), LI.getName() + Suffix);
+ LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
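+ // Carry the atomic ordering and synchronization scope over to the new load.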
+ NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
MDBuilder MDB(NewLoad->getContext());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
StoreInst *NewStore = IC.Builder->CreateAlignedStore(
V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
- SI.getAlignment());
+ SI.getAlignment(), SI.isVolatile());
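+ // Carry the atomic ordering and synchronization scope over to the new store.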
+ NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
MDNode *N = MDPair.second;
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
- // FIXME: We could probably with some care handle both volatile and atomic
- // loads here but it isn't clear that this is important.
- if (!LI.isSimple())
+ // FIXME: With some care we could probably handle both volatile and ordered
+ // atomic loads here, but it isn't clear that this is important.
+ if (!LI.isUnordered())
return nullptr;
if (LI.use_empty())
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
- // FIXME: We could probably with some care handle both volatile and atomic
- // stores here but it isn't clear that this is important.
- if (!SI.isSimple())
+ // FIXME: With some care we could probably handle both volatile and ordered
+ // atomic stores here, but it isn't clear that this is important.
+ if (!SI.isUnordered())
return false;
Value *V = SI.getValueOperand();
merge:
ret i32 0
}
+
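+; The following tests check that instcombine preserves unordered atomicity and
+; volatility when canonicalizing the type of a load or store (including the
+; PR27490 cases), and that ordered atomics (monotonic or stronger) are left
+; untouched.
+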
+declare void @clobber()
+
+define i32 @test18(float* %p) {
+; CHECK-LABEL: define i32 @test18(
+; CHECK: load atomic i32, i32* [[A:%.*]] unordered, align 4
+; CHECK: store atomic i32 [[B:%.*]], i32* [[C:%.*]] unordered, align 4
+ %x = load atomic float, float* %p unordered, align 4
+ call void @clobber() ;; keep the load around
+ store atomic float %x, float* %p unordered, align 4
+ ret i32 0
+}
+
+; TODO: This transform is probably also legal for seq_cst atomics, but is not
+; performed yet.
+define i32 @test19(float* %p) {
+; CHECK-LABEL: define i32 @test19(
+; CHECK: load atomic float, float* %p seq_cst, align 4
+; CHECK: store atomic float %x, float* %p seq_cst, align 4
+ %x = load atomic float, float* %p seq_cst, align 4
+ call void @clobber() ;; keep the load around
+ store atomic float %x, float* %p seq_cst, align 4
+ ret i32 0
+}
+
+define i32 @test20(i32** %p, i8* %v) {
+; CHECK-LABEL: define i32 @test20(
+; CHECK: store atomic i8* %v, i8** [[D:%.*]] unordered, align 4
+ %cast = bitcast i8* %v to i32*
+ store atomic i32* %cast, i32** %p unordered, align 4
+ ret i32 0
+}
+
+define i32 @test21(i32** %p, i8* %v) {
+; CHECK-LABEL: define i32 @test21(
+; CHECK: store atomic i32* %cast, i32** %p monotonic, align 4
+ %cast = bitcast i8* %v to i32*
+ store atomic i32* %cast, i32** %p monotonic, align 4
+ ret i32 0
+}
+
+define void @pr27490a(i8** %p1, i8** %p2) {
+; CHECK-LABEL: define void @pr27490a(
+; CHECK: %1 = bitcast i8** %p1 to i64*
+; CHECK: %l1 = load i64, i64* %1, align 8
+; CHECK: %2 = bitcast i8** %p2 to i64*
+; CHECK: store volatile i64 %l1, i64* %2, align 8
+ %l = load i8*, i8** %p1
+ store volatile i8* %l, i8** %p2
+ ret void
+}
+
+define void @pr27490b(i8** %p1, i8** %p2) {
+; CHECK-LABEL: define void @pr27490b(
+; CHECK: %1 = bitcast i8** %p1 to i64*
+; CHECK: %l1 = load i64, i64* %1, align 8
+; CHECK: %2 = bitcast i8** %p2 to i64*
+; CHECK: store atomic i64 %l1, i64* %2 seq_cst, align 8
+ %l = load i8*, i8** %p1
+ store atomic i8* %l, i8** %p2 seq_cst, align 8
+ ret void
+}