bool parseMemoryOperandFlag(MachineMemOperand::Flags &Flags);
bool parseMemoryPseudoSourceValue(const PseudoSourceValue *&PSV);
bool parseMachinePointerInfo(MachinePointerInfo &Dest);
+ bool parseOptionalAtomicOrdering(AtomicOrdering &Order);
bool parseMachineMemoryOperand(MachineMemOperand *&Dest);
private:
return false;
}
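+/// Parse an optional atomic ordering ("unordered", "monotonic", "acquire",
+/// "release", "acq_rel" or "seq_cst") into \p Order. A non-identifier token
+/// leaves \p Order as NotAtomic and is not consumed; an identifier that does
+/// not name an ordering is reported as an error.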
+bool MIParser::parseOptionalAtomicOrdering(AtomicOrdering &Order) {
+ Order = AtomicOrdering::NotAtomic;
+ if (Token.isNot(MIToken::Identifier))
+ return false;
+
+ Order = StringSwitch<AtomicOrdering>(Token.stringValue())
+ .Case("unordered", AtomicOrdering::Unordered)
+ .Case("monotonic", AtomicOrdering::Monotonic)
+ .Case("acquire", AtomicOrdering::Acquire)
+ .Case("release", AtomicOrdering::Release)
+ .Case("acq_rel", AtomicOrdering::AcquireRelease)
+ .Case("seq_cst", AtomicOrdering::SequentiallyConsistent)
+ .Default(AtomicOrdering::NotAtomic);
+
+ if (Order != AtomicOrdering::NotAtomic) {
+ lex();
+ return false;
+ }
+
+ return error("expected an atomic scope, ordering or a size integer literal");
+}
+
bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
if (expectAndConsume(MIToken::lparen))
return true;
Flags |= MachineMemOperand::MOStore;
lex();
+ // Optional "singlethread" scope.
+ SynchronizationScope Scope = SynchronizationScope::CrossThread;
+ if (Token.is(MIToken::Identifier) && Token.stringValue() == "singlethread") {
+ Scope = SynchronizationScope::SingleThread;
+ lex();
+ }
+
+ // Up to two atomic orderings (cmpxchg provides guarantees on failure).
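+ // For example, "seq_cst monotonic" in "(load seq_cst monotonic 8)" supplies
+ // both the success and failure orderings, while plain atomic loads and
+ // stores carry a single ordering.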
+ AtomicOrdering Order, FailureOrder;
+ if (parseOptionalAtomicOrdering(Order))
+ return true;
+
+ if (parseOptionalAtomicOrdering(FailureOrder))
+ return true;
+
if (Token.isNot(MIToken::IntegerLiteral))
return error("expected the size integer literal after memory operation");
uint64_t Size;
}
if (expectAndConsume(MIToken::rparen))
return true;
- Dest =
- MF.getMachineMemOperand(Ptr, Flags, Size, BaseAlignment, AAInfo, Range);
+ Dest = MF.getMachineMemOperand(Ptr, Flags, Size, BaseAlignment, AAInfo, Range,
+ Scope, Order, FailureOrder);
return false;
}
assert(Op.isStore() && "Non load machine operand must be a store");
OS << "store ";
}
+
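+ // Print the scope and orderings ahead of the size so the output round-trips
+ // through parseMachineMemoryOperand.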
+ if (Op.getSynchScope() == SynchronizationScope::SingleThread)
+ OS << "singlethread ";
+
+ if (Op.getOrdering() != AtomicOrdering::NotAtomic)
+ OS << toIRString(Op.getOrdering()) << ' ';
+ if (Op.getFailureOrdering() != AtomicOrdering::NotAtomic)
+ OS << toIRString(Op.getFailureOrdering()) << ' ';
+
OS << Op.getSize();
if (const Value *Val = Op.getValue()) {
OS << (Op.isLoad() ? " from " : " into ");
--- /dev/null
+# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass none -o - %s | FileCheck %s
+
+--- |
+
+ define void @atomic_memoperands() {
+ ret void
+ }
+
+...
+---
+# CHECK-LABEL: name: atomic_memoperands
+# CHECK: %1(s64) = G_LOAD %0(p0) :: (load unordered 8)
+# CHECK: %2(s32) = G_LOAD %0(p0) :: (load monotonic 4)
+# CHECK: %3(s16) = G_LOAD %0(p0) :: (load acquire 2)
+# CHECK: G_STORE %3(s16), %0(p0) :: (store release 2)
+# CHECK: G_STORE %2(s32), %0(p0) :: (store acq_rel 4)
+# CHECK: G_STORE %1(s64), %0(p0) :: (store singlethread seq_cst 8)
+name: atomic_memoperands
+body: |
+ bb.0:
+
+ %0:_(p0) = COPY %x0
+ %1:_(s64) = G_LOAD %0(p0) :: (load unordered 8)
+ %2:_(s32) = G_LOAD %0(p0) :: (load monotonic 4)
+ %3:_(s16) = G_LOAD %0(p0) :: (load acquire 2)
+ G_STORE %3(s16), %0(p0) :: (store release 2)
+ G_STORE %2(s32), %0(p0) :: (store acq_rel 4)
+ G_STORE %1(s64), %0(p0) :: (store singlethread seq_cst 8)
+ RET_ReallyLR
+...