/// - AddrSpaceN+1 ...
GIM_CheckMemoryAddressSpace,
+ /// Check the minimum alignment of the memory access for the given machine
+ /// memory operand.
+ /// - InsnID - Instruction ID
+ /// - MMOIdx - MMO index
+ /// - MinAlign - Minimum acceptable alignment
+ GIM_CheckMemoryAlignment,
+
/// Check the size of the memory access for the given machine memory operand
/// against the size of an operand.
/// - InsnID - Instruction ID
return false;
break;
}
+ case GIM_CheckMemoryAlignment: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t MMOIdx = MatchTable[CurrentIdx++];
+ unsigned MinAlign = MatchTable[CurrentIdx++];
+
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
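+ // Reject if the instruction does not have the referenced memory operand:
+ // without an MMO the required alignment cannot be verified.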
+ if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+
+ MachineMemOperand *MMO =
+ *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
+ << "(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
+ << ")->getAlignment() >= " << MinAlign << "\n");
+ if (MMO->getAlignment() < MinAlign && handleReject() == RejectAndGiveUp)
+ return false;
+
+ break;
+ }
case GIM_CheckMemorySizeEqualTo: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t MMOIdx = MatchTable[CurrentIdx++];
// If this is empty, accept any address space.
list<int> AddressSpaces = ?;
+ // cast<MemSDNode>(N)->getAlignment() >= <MinAlignment>
+ // If this is unset, accept any alignment.
+ int MinAlignment = ?;
+
// cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Monotonic
bit IsAtomicOrderingMonotonic = ?;
// cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Acquire
// cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::<VT>;
// cast<StoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::<VT>;
ValueType ScalarMemoryVT = ?;
-
- // TODO: Add alignment
}
// PatFrag - A version of PatFrags matching only a single fragment.
list<int> AddrSpaces = AS;
}
-class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
- return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
-}]>;
-
-class Aligned16Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
- return cast<MemSDNode>(N)->getAlignment() >= 16;
-}]>;
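+// Set the minimum required alignment, in bytes, on a memory PatFrag.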
+class Aligned<int Bytes> {
+ int MinAlignment = Bytes;
+}
class LoadFrag <SDPatternOperator op> : PatFrag<(ops node:$ptr), (op node:$ptr)>;
def truncstorei8_local_hi16 : StoreHi16<truncstorei8>, LocalAddress;
def atomic_store_local : LocalStore <atomic_store>;
-def load_align8_local : Aligned8Bytes <
- (ops node:$ptr), (load_local node:$ptr)
->;
-def load_align16_local : Aligned16Bytes <
- (ops node:$ptr), (load_local node:$ptr)
->;
+def load_align8_local : PatFrag <(ops node:$ptr), (load_local node:$ptr)> {
+ let IsLoad = 1;
+ let MinAlignment = 8;
+}
-def store_align8_local : Aligned8Bytes <
- (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
->;
+def load_align16_local : PatFrag <(ops node:$ptr), (load_local node:$ptr)> {
+ let IsLoad = 1;
+ let MinAlignment = 16;
+}
+
+def store_align8_local : PatFrag <(ops node:$val, node:$ptr),
+ (store_local node:$val, node:$ptr)>, Aligned<8> {
+ let IsStore = 1;
+}
+
+def store_align16_local : PatFrag <(ops node:$val, node:$ptr),
+ (store_local node:$val, node:$ptr)>, Aligned<16> {
+ let IsStore = 1;
+}
-def store_align16_local : Aligned16Bytes <
- (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
->;
def atomic_store_flat : FlatStore <atomic_store>;
def truncstorei8_hi16_flat : StoreHi16<truncstorei8>, FlatStoreAddress;
def unindexedload_glue : PatFrag <(ops node:$ptr), (AMDGPUld_glue node:$ptr)> {
let IsUnindexed = 1;
+ let IsLoad = 1;
}
def load_glue : PatFrag <(ops node:$ptr), (unindexedload_glue node:$ptr)> {
let IsNonExtLoad = 1;
+ let IsLoad = 1;
}
def atomic_load_32_glue : PatFrag<(ops node:$ptr),
let MemoryVT = i64;
}
-def extload_glue : PatFrag<(ops node:$ptr), (load_glue node:$ptr)> {
+def extload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
let IsLoad = 1;
let IsAnyExtLoad = 1;
}
let MemoryVT = i16;
}
-def load_glue_align8 : Aligned8Bytes <
- (ops node:$ptr), (load_glue node:$ptr)
->;
-def load_glue_align16 : Aligned16Bytes <
- (ops node:$ptr), (load_glue node:$ptr)
->;
+let IsLoad = 1, AddressSpaces = LoadAddress_local.AddrSpaces in {
+def load_local_m0 : PatFrag<(ops node:$ptr), (load_glue node:$ptr)>;
+
+let MemoryVT = i8 in {
+def extloadi8_local_m0 : PatFrag<(ops node:$ptr), (extloadi8_glue node:$ptr)>;
+def sextloadi8_local_m0 : PatFrag<(ops node:$ptr), (sextloadi8_glue node:$ptr)>;
+def zextloadi8_local_m0 : PatFrag<(ops node:$ptr), (zextloadi8_glue node:$ptr)>;
+}
+
+let MemoryVT = i16 in {
+def extloadi16_local_m0 : PatFrag<(ops node:$ptr), (extloadi16_glue node:$ptr)>;
+def sextloadi16_local_m0 : PatFrag<(ops node:$ptr), (sextloadi16_glue node:$ptr)>;
+def zextloadi16_local_m0 : PatFrag<(ops node:$ptr), (zextloadi16_glue node:$ptr)>;
+}
+
+def load_align8_local_m0 : LoadFrag <load_glue>, LocalAddress {
+ let MinAlignment = 8;
+}
+def load_align16_local_m0 : LoadFrag <load_glue>, LocalAddress {
+ let MinAlignment = 16;
+}
+
+} // End IsLoad = 1
-def load_local_m0 : LoadFrag<load_glue>, LocalAddress;
-def sextloadi8_local_m0 : LoadFrag<sextloadi8_glue>, LocalAddress;
-def sextloadi16_local_m0 : LoadFrag<sextloadi16_glue>, LocalAddress;
-def extloadi8_local_m0 : LoadFrag<extloadi8_glue>, LocalAddress;
-def zextloadi8_local_m0 : LoadFrag<zextloadi8_glue>, LocalAddress;
-def extloadi16_local_m0 : LoadFrag<extloadi16_glue>, LocalAddress;
-def zextloadi16_local_m0 : LoadFrag<zextloadi16_glue>, LocalAddress;
-def load_align8_local_m0 : LoadFrag <load_glue_align8>, LocalAddress;
-def load_align16_local_m0 : LoadFrag <load_glue_align16>, LocalAddress;
def atomic_load_32_local_m0 : LoadFrag<atomic_load_32_glue>, LocalAddress;
def atomic_load_64_local_m0 : LoadFrag<atomic_load_64_glue>, LocalAddress;
let MemoryVT = i16;
}
-def store_glue_align8 : Aligned8Bytes <
- (ops node:$value, node:$ptr), (store_glue node:$value, node:$ptr)
->;
-
-def store_glue_align16 : Aligned16Bytes <
- (ops node:$value, node:$ptr), (store_glue node:$value, node:$ptr)
->;
+let IsStore = 1, AddressSpaces = StoreAddress_local.AddrSpaces in {
+def store_glue_align8 : PatFrag<(ops node:$val, node:$ptr),
+ (store_glue node:$val, node:$ptr)>, Aligned<8>;
+def store_glue_align16 : PatFrag<(ops node:$val, node:$ptr),
+ (store_glue node:$val, node:$ptr)>, Aligned<16>;
def store_local_m0 : StoreFrag<store_glue>, LocalAddress;
def truncstorei8_local_m0 : StoreFrag<truncstorei8_glue>, LocalAddress;
def store_align8_local_m0 : StoreFrag<store_glue_align8>, LocalAddress;
def store_align16_local_m0 : StoreFrag<store_glue_align16>, LocalAddress;
+} // End IsStore = 1
def si_setcc_uniform : PatFrag <
(ops node:$lhs, node:$rhs, node:$cond),
let AddressSpaces = [ 999 ];
let IsLoad = 1; // FIXME: Can this be inferred?
let MemoryVT = i32;
+ let MinAlignment = 2;
}
// With multiple address spaces
}
// SDAG: case 2: {
-// SDAG: // Predicate_pat_frag_a
+// SDAG-NEXT: // Predicate_pat_frag_b
// SDAG-NEXT: SDNode *N = Node;
// SDAG-NEXT: (void)N;
// SDAG-NEXT: unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();
-
-// SDAG-NEXT: if (AddrSpace != 999)
+// SDAG-NEXT: if (AddrSpace != 123 && AddrSpace != 455)
// SDAG-NEXT: return false;
// SDAG-NEXT: if (cast<MemSDNode>(N)->getMemoryVT() != MVT::i32) return false;
// SDAG-NEXT: return true;
-// GISEL: GIM_Try, /*On fail goto*//*Label 0*/ 47, // Rule ID 0 //
+
+// GISEL: GIM_Try, /*On fail goto*//*Label 0*/ {{[0-9]+}}, // Rule ID 0 //
// GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
// GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
-// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/1, /*AddrSpace*/999,
+// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*/455,
// GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/4,
// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
def : Pat <
- (pat_frag_a GPR32:$src),
- (inst_a GPR32:$src)
+ (pat_frag_b GPR32:$src),
+ (inst_b GPR32:$src)
>;
+
// SDAG: case 3: {
-// SDAG-NEXT: // Predicate_pat_frag_b
+// SDAG: // Predicate_pat_frag_a
// SDAG-NEXT: SDNode *N = Node;
// SDAG-NEXT: (void)N;
// SDAG-NEXT: unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();
-// SDAG-NEXT: if (AddrSpace != 123 && AddrSpace != 455)
+
+// SDAG-NEXT: if (AddrSpace != 999)
+// SDAG-NEXT: return false;
+// SDAG-NEXT: if (cast<MemSDNode>(N)->getAlignment() < 2)
// SDAG-NEXT: return false;
// SDAG-NEXT: if (cast<MemSDNode>(N)->getMemoryVT() != MVT::i32) return false;
// SDAG-NEXT: return true;
-
-// GISEL: GIM_Try, /*On fail goto*//*Label 1*/ 95, // Rule ID 1 //
+// GISEL: GIM_Try, /*On fail goto*//*Label 1*/ {{[0-9]+}}, // Rule ID 1 //
// GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
// GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
-// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*/455,
+// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/1, /*AddrSpace*/999,
+// GISEL-NEXT: GIM_CheckMemoryAlignment, /*MI*/0, /*MMO*/0, /*MinAlign*/2,
// GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/4,
// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
def : Pat <
- (pat_frag_b GPR32:$src),
- (inst_b GPR32:$src)
+ (pat_frag_a GPR32:$src),
+ (inst_a GPR32:$src)
>;
}
// Test truncstore without a specific MemoryVT
-// GISEL: GIM_Try, /*On fail goto*//*Label 2*/ 133, // Rule ID 2 //
+// GISEL: GIM_Try, /*On fail goto*//*Label 2*/ {{[0-9]+}}, // Rule ID 2 //
// GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_STORE,
// GISEL-NEXT: GIM_CheckMemorySizeLessThanLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
>;
// Test truncstore with specific MemoryVT
-// GISEL: GIM_Try, /*On fail goto*//*Label 3*/ 181, // Rule ID 3 //
+// GISEL: GIM_Try, /*On fail goto*//*Label 3*/ {{[0-9]+}}, // Rule ID 3 //
// GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_STORE,
// GISEL-NEXT: GIM_CheckMemorySizeLessThanLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
if (isLoad()) {
if (!isUnindexed() && !isNonExtLoad() && !isAnyExtLoad() &&
!isSignExtLoad() && !isZeroExtLoad() && getMemoryVT() == nullptr &&
- getScalarMemoryVT() == nullptr)
+ getScalarMemoryVT() == nullptr && getAddressSpaces() == nullptr &&
+ getMinAlignment() < 1)
PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
"IsLoad cannot be used by itself");
} else {
if (isStore()) {
if (!isUnindexed() && !isTruncStore() && !isNonTruncStore() &&
- getMemoryVT() == nullptr && getScalarMemoryVT() == nullptr)
+ getMemoryVT() == nullptr && getScalarMemoryVT() == nullptr &&
+ getAddressSpaces() == nullptr && getMinAlignment() < 1)
PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
"IsStore cannot be used by itself");
} else {
Code += ")\nreturn false;\n";
}
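+ // Emit a runtime check that the access meets the requested minimum alignment.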
+ int64_t MinAlign = getMinAlignment();
+ if (MinAlign > 0) {
+ Code += "if (cast<MemSDNode>(N)->getAlignment() < ";
+ Code += utostr(MinAlign);
+ Code += ")\nreturn false;\n";
+ }
+
Record *MemoryVT = getMemoryVT();
if (MemoryVT)
return R->getValueAsListInit("AddressSpaces");
}
+int64_t TreePredicateFn::getMinAlignment() const {
+ Record *R = getOrigPatFragRecord()->getRecord();
+ if (R->isValueUnset("MinAlignment"))
+ return 0;
+ return R->getValueAsInt("MinAlignment");
+}
+
Record *TreePredicateFn::getScalarMemoryVT() const {
Record *R = getOrigPatFragRecord()->getRecord();
if (R->isValueUnset("ScalarMemoryVT"))
Record *getScalarMemoryVT() const;
ListInit *getAddressSpaces() const;
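+ // Minimum required alignment in bytes, or 0 if no requirement was specified.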
+ int64_t getMinAlignment() const;
// If true, indicates that GlobalISel-based C++ code was supplied.
bool hasGISelPredicateCode() const;
OS << ']';
}
+ int64_t MinAlign = P.getMinAlignment();
+ if (MinAlign > 0)
+ Explanation += " MinAlign=" + utostr(MinAlign);
+
if (P.isAtomicOrderingMonotonic())
Explanation += " monotonic";
if (P.isAtomicOrderingAcquire())
const ListInit *AddrSpaces = Predicate.getAddressSpaces();
if (AddrSpaces && !AddrSpaces->empty())
continue;
+
+ if (Predicate.getMinAlignment() > 0)
+ continue;
}
if (Predicate.isAtomic() && Predicate.getMemoryVT())
IPM_MemoryLLTSize,
IPM_MemoryVsLLTSize,
IPM_MemoryAddressSpace,
+ IPM_MemoryAlignment,
IPM_GenericPredicate,
OPM_SameOperand,
OPM_ComplexPattern,
}
};
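+/// Generates code to check that the minimum alignment of an MMO is at least a
+/// given value.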
+class MemoryAlignmentPredicateMatcher : public InstructionPredicateMatcher {
+protected:
+ unsigned MMOIdx;
+ int MinAlign;
+
+public:
+ MemoryAlignmentPredicateMatcher(unsigned InsnVarID, unsigned MMOIdx,
+ int MinAlign)
+ : InstructionPredicateMatcher(IPM_MemoryAlignment, InsnVarID),
+ MMOIdx(MMOIdx), MinAlign(MinAlign) {
+ assert(MinAlign > 0);
+ }
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_MemoryAlignment;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ if (!InstructionPredicateMatcher::isIdentical(B))
+ return false;
+ auto *Other = cast<MemoryAlignmentPredicateMatcher>(&B);
+ return MMOIdx == Other->MMOIdx && MinAlign == Other->MinAlign;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIM_CheckMemoryAlignment")
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("MMO") << MatchTable::IntValue(MMOIdx)
+ << MatchTable::Comment("MinAlign") << MatchTable::IntValue(MinAlign)
+ << MatchTable::LineBreak;
+ }
+};
+
/// Generates code to check that the size of an MMO is less-than, equal-to, or
/// greater than a given LLT.
class MemoryVsLLTSizePredicateMatcher : public InstructionPredicateMatcher {
0, ParsedAddrSpaces);
}
}
+
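+ // Add a minimum-alignment check when the predicate requires one.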
+ int64_t MinAlign = Predicate.getMinAlignment();
+ if (MinAlign > 0)
+ InsnMatcher.addPredicate<MemoryAlignmentPredicateMatcher>(0, MinAlign);
}
// G_LOAD is used for both non-extending and any-extending loads.