setMinFunctionAlignment(2);
setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
+ setMaxAtomicSizeInBitsSupported(64);
+ setMinCmpXchgSizeInBits(32);
+
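These two hooks bound what the backend claims to handle natively: atomic operations wider than 64 bits are lowered by the generic AtomicExpandPass to __atomic_* libcalls, while a cmpxchg narrower than 32 bits is implemented on its containing 32-bit word. A sketch of the per-width treatment (illustrative, not part of the patch):

; i16 is below the 32-bit cmpxchg minimum: AtomicExpandPass rewrites it
; as a masked LL/SC loop on the containing 32-bit word.  i32 and i64 fall
; in the supported range and expand to plain LL/SC loops; anything wider
; than 64 bits would become a __atomic_* libcall instead.
define void @widths(i16* %p16, i32* %p32, i64* %p64) {
entry:
  %a = cmpxchg i16* %p16, i16 0, i16 1 seq_cst seq_cst
  %b = cmpxchg i32* %p32, i32 0, i32 1 seq_cst seq_cst
  %c = cmpxchg i64* %p64, i64 0, i64 1 seq_cst seq_cst
  ret void
}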
if (EnableHexSDNodeSched)
  setSchedulingPreference(Sched::VLIW);
else
  setSchedulingPreference(Sched::Source);

bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  // Do not expand loads and stores that don't exceed 64 bits.
  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
}
+
+bool HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
+ AtomicCmpXchgInst *AI) const {
+ const DataLayout &DL = AI->getModule()->getDataLayout();
+ unsigned Size = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
+ return Size >= 4 && Size <= 8;
+}
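For the accepted sizes, AtomicExpandPass replaces the cmpxchg with a loop built from the target's emitLoadLinked and emitStoreConditional hooks, which on Hexagon emit the load-locked/store-conditional intrinsics (llvm.hexagon.L2.loadw.locked and llvm.hexagon.S2.storew.locked for words, with doubleword counterparts for 64 bits). Roughly, for the 32-bit case; this is a hand-written sketch, and the block names and success-flag encoding are assumptions, since the hook normalizes the predicate either way:

define i32 @cas_sketch(i32* %p, i32 %cmp, i32 %new) {
entry:
  br label %loop
loop:
  %old = call i32 @llvm.hexagon.L2.loadw.locked(i32* %p)
  %eq = icmp eq i32 %old, %cmp
  br i1 %eq, label %swap, label %done
swap:
  ; assume a nonzero result means the conditional store succeeded
  %ok = call i32 @llvm.hexagon.S2.storew.locked(i32* %p, i32 %new)
  %stored = icmp ne i32 %ok, 0
  br i1 %stored, label %done, label %loop
done:
  ret i32 %old
}

declare i32 @llvm.hexagon.L2.loadw.locked(i32*)
declare i32 @llvm.hexagon.S2.storew.locked(i32*, i32)

The corresponding declarations in the lowering class's header come next.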
Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
    Value *Addr, AtomicOrdering Ord) const override;
AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+ bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
+
AtomicExpansionKind
shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
  return AtomicExpansionKind::LLSC;
}
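Because shouldExpandAtomicRMWInIR unconditionally answers LLSC, every atomicrmw gets the same loop shape, with the compare step replaced by computing the new value. An atomicrmw add, for instance, would expand along these lines (again a sketch, not output taken from the patch):

define i32 @rmw_add_sketch(i32* %p, i32 %v) {
entry:
  br label %loop
loop:
  %old = call i32 @llvm.hexagon.L2.loadw.locked(i32* %p)
  %new = add i32 %old, %v
  %ok = call i32 @llvm.hexagon.S2.storew.locked(i32* %p, i32 %new)
  %stored = icmp ne i32 %ok, 0
  br i1 %stored, label %done, label %loop
done:
  ret i32 %old
}

declare i32 @llvm.hexagon.L2.loadw.locked(i32*)
declare i32 @llvm.hexagon.S2.storew.locked(i32*, i32)

The last hunk updates the accompanying IR test, appending a function that exercises both supported widths after the existing one.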
return: ; preds = %entry
  ret void
}
+
+
+define i64 @fred() nounwind {
+entry:
+ %s0 = cmpxchg i32* undef, i32 undef, i32 undef seq_cst seq_cst
+ %s1 = extractvalue { i32, i1 } %s0, 0
+ %t0 = cmpxchg i64* undef, i64 undef, i64 undef seq_cst seq_cst
+ %t1 = extractvalue { i64, i1 } %t0, 0
+ %u0 = zext i32 %s1 to i64
+ %u1 = add i64 %u0, %t1
+ ret i64 %u1
+}
+
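The hunk does not show the test's directives; a Hexagon codegen test of this vintage would typically be driven by a RUN line like the one below, with FileCheck patterns matching the load-locked/store-conditional mnemonics (illustrative guesses, not copied from the tree):

; RUN: llc -march=hexagon < %s | FileCheck %s
; CHECK: memw_locked
; CHECK: memd_locked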