bool HasLdsModifier = false;
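// HasLdsModifier will record whether an "lds" modifier was parsed; the code
// that sets and consumes it is outside this excerpt (see Lds support below).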
OptionalImmIndexMap OptionalIdx;
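// An atomic-with-return is still an atomic, so IsAtomicReturn must imply
// IsAtomic; the conditional below is the usual assert idiom for "A implies B".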
assert(IsAtomicReturn ? IsAtomic : true);
- unsigned FirstOperandIdx = 1;
- for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) {
+ for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
// Add the register arguments
if (Op.isReg()) {
Op.addRegOperands(Inst, 1);
- // Insert a tied src for atomic return dst.
- // This cannot be postponed as subsequent calls to
- // addImmOperands rely on correct number of MC operands.
- if (IsAtomicReturn && i == FirstOperandIdx)
- Op.addRegOperands(Inst, 1);
continue;
}

// Handle the case where soffset is a plain immediate.
if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
Op.addImmOperands(Inst, 1);
continue;
}

// Remaining operands are either hard-coded tokens (e.g. "offen") with no
// MC operand, or optional immediate modifiers; record where each modifier
// was parsed so it can be added in canonical order below.
if (Op.isImm())
OptionalIdx[Op.getImmTy()] = i;
}
+ // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
+ if (IsAtomicReturn) {
+ MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
+ Inst.insert(I, *I);
+ }
+
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
// For atomics the glc bit is hard-coded by the opcode (glc = 1 selects the
// RTN form), so an explicit glc modifier is only accepted for non-atomics.
if (!IsAtomic) {
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
}
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
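
// Standalone sketch of the IsAtomicReturn copy above (an illustration only:
// a plain std::vector of ints stands in for MCInst's operand list). The
// leading $vdata_in is duplicated, so the instruction gains a $vdata at
// index 0 tied to the $vdata_in that is now at index 1.
#include <cassert>
#include <vector>

int main() {
  std::vector<int> Ops = {5, 8, 3}; // stand-ins for $vdata_in, $srsrc, $soffset
  int VDataIn = Ops.front();        // copy the value first, as *I is copied
  Ops.insert(Ops.begin(), VDataIn); // insert the duplicate at the beginning
  assert(Ops.size() == 4 && Ops[0] == 5 && Ops[1] == 5);
  return 0;
}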
//===----------------------------------------------------------------------===//
// Atomics
//===----------------------------------------------------------------------===//

buffer_atomic_inc v1, v[2:3], s[8:11], 56 idxen offen offset:4 glc slc
// SICI: buffer_atomic_inc v1, v[2:3], s[8:11], 56 idxen offen offset:4 glc slc ; encoding: [0x04,0x70,0xf0,0xe0,0x02,0x01,0x42,0xb8]
// VI: buffer_atomic_inc v1, v[2:3], s[8:11], 56 idxen offen offset:4 glc slc ; encoding: [0x04,0x70,0x2e,0xe1,0x02,0x01,0x02,0xb8]

-buffer_atomic_add v5, off, s[8:11], 0.5 offset:4095 glc
-// SICI: buffer_atomic_add v5, off, s[8:11], 0.5 offset:4095 glc ; encoding: [0xff,0x4f,0xc8,0xe0,0x00,0x05,0x02,0xf0]
-// VI: buffer_atomic_add v5, off, s[8:11], 0.5 offset:4095 glc ; encoding: [0xff,0x4f,0x08,0xe1,0x00,0x05,0x02,0xf0]
-
-buffer_atomic_add v5, off, s[8:11], 0.15915494 offset:4095 glc
-// NOSICI: error: invalid operand for instruction
-// VI: buffer_atomic_add v5, off, s[8:11], 0.15915494 offset:4095 glc ; encoding: [0xff,0x4f,0x08,0xe1,0x00,0x05,0x02,0xf8]
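
// Note on the removed checks: 0.5 is the inline constant 240 (0xf0 in the
// final soffset byte) on both SICI and VI, while 0.15915494, i.e. 1/(2*pi),
// is inline constant 248 (0xf8) and only exists on VI, which is why SICI
// rejects it with "invalid operand for instruction".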
//===----------------------------------------------------------------------===//
// Lds support
//===----------------------------------------------------------------------===//
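
// Hypothetical first entry for this section (illustrative only; assumes the
// check style above and that the lds variant omits the vdata operand, since
// the loaded value goes to LDS rather than a VGPR):
// buffer_load_dword off, s[8:11], s3 lds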