From: Tim Northover
Date: Mon, 13 Feb 2017 22:14:16 +0000 (+0000)
Subject: GlobalISel: represent atomic loads & stores via the MachineMemOperand.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=13a4b7e61f731bed2298fe36ebc626d7771fc4e9;p=llvm

GlobalISel: represent atomic loads & stores via the MachineMemOperand.

Also make sure the AArch64 backend doesn't try to convert them into
normal loads and stores.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294993 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 97292dc5bd7..143134758c3 100644
--- a/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -271,10 +271,6 @@ bool IRTranslator::translateIndirectBr(const User &U,
 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
   const LoadInst &LI = cast<LoadInst>(U);
 
-  if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
-    return false;
-
-  assert(!LI.isAtomic() && "only non-atomic loads are supported at the moment");
   auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                                : MachineMemOperand::MONone;
   Flags |= MachineMemOperand::MOLoad;
@@ -286,17 +282,13 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
       Res, Addr,
       *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                 Flags, DL->getTypeStoreSize(LI.getType()),
-                                getMemOpAlignment(LI)));
+                                getMemOpAlignment(LI), AAMDNodes(), nullptr,
+                                LI.getSynchScope(), LI.getOrdering()));
   return true;
 }
 
 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
   const StoreInst &SI = cast<StoreInst>(U);
-
-  if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
-    return false;
-
-  assert(!SI.isAtomic() && "only non-atomic stores supported at the moment");
   auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                                : MachineMemOperand::MONone;
   Flags |= MachineMemOperand::MOStore;
@@ -311,7 +303,8 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
       *MF->getMachineMemOperand(
           MachinePointerInfo(SI.getPointerOperand()), Flags,
           DL->getTypeStoreSize(SI.getValueOperand()->getType()),
-          getMemOpAlignment(SI)));
+          getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSynchScope(),
+          SI.getOrdering()));
   return true;
 }
 
diff --git a/lib/Target/AArch64/AArch64InstructionSelector.cpp b/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 42b4daf2318..6bced17d09d 100644
--- a/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -691,6 +691,12 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
       return false;
     }
 
+    auto &MemOp = **I.memoperands_begin();
+    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
+      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+      return false;
+    }
+
 #ifndef NDEBUG
     // Sanity-check the pointer register.
     const unsigned PtrReg = I.getOperand(1).getReg();
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index 96ec2152725..4ef3dfcd11b 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -90,3 +90,12 @@ define void @legal_default([8 x i8] %in) {
 define i128 @sequence_sizes([8 x i8] %in) {
   ret i128 undef
 }
+
+; Just to make sure we don't accidentally emit a normal load/store.
+; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
+; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
+define i64 @atomic_ops(i64* %addr) {
+  store atomic i64 0, i64* %addr unordered, align 8
+  %res = load atomic i64, i64* %addr seq_cst, align 8
+  ret i64 %res
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index acb342af9a5..418c71e3992 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -1155,3 +1155,24 @@ define void @test_lifetime_intrin() {
   call void @llvm.lifetime.end(i64 0, i8* %slot)
   ret void
 }
+
+define void @test_load_store_atomics(i8* %addr) {
+; CHECK-LABEL: name: test_load_store_atomics
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[V0:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
+; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
+; CHECK: [[V1:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
+; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
+; CHECK: [[V2:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load singlethread seq_cst 1 from %ir.addr)
+; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store singlethread monotonic 1 into %ir.addr)
+  %v0 = load atomic i8, i8* %addr unordered, align 1
+  store atomic i8 %v0, i8* %addr monotonic, align 1
+
+  %v1 = load atomic i8, i8* %addr acquire, align 1
+  store atomic i8 %v1, i8* %addr release, align 1
+
+  %v2 = load atomic i8, i8* %addr singlethread seq_cst, align 1
+  store atomic i8 %v2, i8* %addr singlethread monotonic, align 1
+
+  ret void
+}
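
Note (not part of the commit): for anyone wiring up another GlobalISel target, the
sketch below shows how the atomic information the IRTranslator now attaches can be
queried from a G_LOAD/G_STORE's MachineMemOperand during instruction selection,
mirroring the AArch64 check added above. The free function shouldFallBackForAtomic
and its standalone framing are illustrative only; it assumes the instruction carries
exactly one memory operand, which is what translateLoad/translateStore produce.

  #define DEBUG_TYPE "isel-atomic-sketch"
  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineMemOperand.h"
  #include "llvm/Support/AtomicOrdering.h"
  #include "llvm/Support/Debug.h"

  using namespace llvm;

  // Return true if this G_LOAD/G_STORE should be left to the fallback path
  // because its MachineMemOperand carries an atomic ordering.
  static bool shouldFallBackForAtomic(const MachineInstr &I) {
    // translateLoad/translateStore always attach a single MMO that records the
    // ordering and synchronization scope of the original IR load/store.
    if (!I.hasOneMemOperand())
      return true; // be conservative if the MMO is missing
    const MachineMemOperand &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return true;
    }
    return false;
  }

In the AArch64 selector above, the same check is written inline at the top of the
G_LOAD/G_STORE handling, before the opcode is rewritten to a target load or store,
so atomic accesses hit the fallback path instead of being emitted as plain LDR/STR.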