case Mips::ATOMIC_SWAP_I64:
return emitAtomicBinary(MI, BB, 8, 0);
- case Mips::ATOMIC_CMP_SWAP_I8_PSEUDO:
+ case Mips::ATOMIC_CMP_SWAP_I8:
return emitAtomicCmpSwapPartword(MI, BB, 1);
- case Mips::ATOMIC_CMP_SWAP_I16_PSEUDO:
+ case Mips::ATOMIC_CMP_SWAP_I16:
return emitAtomicCmpSwapPartword(MI, BB, 2);
-
+ case Mips::ATOMIC_CMP_SWAP_I32:
+ return emitAtomicCmpSwap(MI, BB, 4);
+ case Mips::ATOMIC_CMP_SWAP_I64:
+ return emitAtomicCmpSwap(MI, BB, 8);
case Mips::PseudoSDIV:
case Mips::PseudoUDIV:
case Mips::DIV:
return exitMBB;
}
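+// Expand Mips::ATOMIC_CMP_SWAP_I32/I64 into an ll/sc retry loop spread over
+// two new basic blocks (loop1MBB, loop2MBB) followed by exitMBB: loop1 exits
+// early when the loaded value differs from the expected one; loop2 retries
+// until the sc succeeds.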
+MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
+ MachineBasicBlock *BB,
+ unsigned Size) const {
+ assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicCmpSwap.");
+
+ MachineFunction *MF = BB->getParent();
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
+ const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ const bool ArePtrs64bit = ABI.ArePtrs64bit();
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned LL, SC, ZERO, BNE, BEQ;
+
+ if (Size == 4) {
+ if (isMicroMips) {
+ LL = Mips::LL_MM;
+ SC = Mips::SC_MM;
+ } else {
+ LL = Subtarget.hasMips32r6()
+ ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
+ : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
+ SC = Subtarget.hasMips32r6()
+ ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
+ : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
+ }
+
+ ZERO = Mips::ZERO;
+ BNE = Mips::BNE;
+ BEQ = Mips::BEQ;
+ } else {
+ LL = Subtarget.hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
+ SC = Subtarget.hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
+ ZERO = Mips::ZERO_64;
+ BNE = Mips::BNE64;
+ BEQ = Mips::BEQ64;
+ }
+
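+ // The pseudo's operands, in order: (outs $dst), (ins $ptr, $cmp, $swap).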
+ unsigned Dest = MI.getOperand(0).getReg();
+ unsigned Ptr = MI.getOperand(1).getReg();
+ unsigned OldVal = MI.getOperand(2).getReg();
+ unsigned NewVal = MI.getOperand(3).getReg();
+
+ unsigned Success = RegInfo.createVirtualRegister(RC);
+
+ // insert new blocks after the current block
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ MachineFunction::iterator It = ++BB->getIterator();
+ MF->insert(It, loop1MBB);
+ MF->insert(It, loop2MBB);
+ MF->insert(It, exitMBB);
+
+ // Transfer the remainder of BB and its successor edges to exitMBB.
+ exitMBB->splice(exitMBB->begin(), BB,
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ // thisMBB:
+ // ...
+ // fallthrough --> loop1MBB
+ BB->addSuccessor(loop1MBB);
+ loop1MBB->addSuccessor(exitMBB);
+ loop1MBB->addSuccessor(loop2MBB);
+ loop2MBB->addSuccessor(loop1MBB);
+ loop2MBB->addSuccessor(exitMBB);
+
+ // loop1MBB:
+ // ll dest, 0(ptr)
+ // bne dest, oldval, exitMBB
+ BB = loop1MBB;
+ BuildMI(BB, DL, TII->get(LL), Dest).addReg(Ptr).addImm(0);
+ BuildMI(BB, DL, TII->get(BNE))
+ .addReg(Dest).addReg(OldVal).addMBB(exitMBB);
+
+ // loop2MBB:
+ // sc success, newval, 0(ptr)
+ // beq success, $0, loop1MBB
+ BB = loop2MBB;
+ BuildMI(BB, DL, TII->get(SC), Success)
+ .addReg(NewVal).addReg(Ptr).addImm(0);
+ BuildMI(BB, DL, TII->get(BEQ))
+ .addReg(Success).addReg(ZERO).addMBB(loop1MBB);
+
+ MI.eraseFromParent(); // The instruction is gone now.
+
+ return exitMBB;
+}
+
MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
assert((Size == 1 || Size == 2) &&
unsigned Mask = RegInfo.createVirtualRegister(RC);
unsigned Mask2 = RegInfo.createVirtualRegister(RC);
unsigned ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
+ unsigned OldVal = RegInfo.createVirtualRegister(RC);
+ unsigned MaskedOldVal0 = RegInfo.createVirtualRegister(RC);
unsigned ShiftedNewVal = RegInfo.createVirtualRegister(RC);
unsigned MaskLSB2 = RegInfo.createVirtualRegister(RCp);
unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
unsigned MaskedCmpVal = RegInfo.createVirtualRegister(RC);
unsigned MaskedNewVal = RegInfo.createVirtualRegister(RC);
- unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8_PSEUDO
- ? Mips::ATOMIC_CMP_SWAP_I8_FRAG
- : Mips::ATOMIC_CMP_SWAP_I16_FRAG;
+ unsigned MaskedOldVal1 = RegInfo.createVirtualRegister(RC);
+ unsigned StoreVal = RegInfo.createVirtualRegister(RC);
+ unsigned SrlRes = RegInfo.createVirtualRegister(RC);
+ unsigned Success = RegInfo.createVirtualRegister(RC);
unsigned LL, SC;
if (isMicroMips) {
// insert new blocks after the current block
const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++BB->getIterator();
+ MF->insert(It, loop1MBB);
+ MF->insert(It, loop2MBB);
+ MF->insert(It, sinkMBB);
MF->insert(It, exitMBB);
// Transfer the remainder of BB and its successor edges to exitMBB.
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
- BB->addSuccessor(exitMBB);
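+ //  thisMBB -> loop1MBB -> { loop2MBB, sinkMBB }
+ //  loop2MBB -> { loop1MBB, sinkMBB }
+ //  sinkMBB -> exitMBB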
+ BB->addSuccessor(loop1MBB);
+ loop1MBB->addSuccessor(sinkMBB);
+ loop1MBB->addSuccessor(loop2MBB);
+ loop2MBB->addSuccessor(loop1MBB);
+ loop2MBB->addSuccessor(sinkMBB);
+ sinkMBB->addSuccessor(exitMBB);
// FIXME: computation of newval2 can be moved to loop2MBB.
// thisMBB:
BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
.addReg(MaskedNewVal).addReg(ShiftAmt);
- // For correctness purpose, a new pseudo is introduced here. We need this
- // new pseudo, so that FastRegisterAllocator does not see an ll/sc sequence
- // that is spread over >1 basic blocks. A register allocator which
- // introduces (or any codegen infact) a store, can violate the expactations
- // of the hardware.
- //
- // An atomic read-modify-write sequence starts with a linked load
- // instruction and ends with a store conditional instruction. The atomic
- // read-modify-write sequence failes if any of the following conditions
- // occur between the execution of ll and sc:
- // * A coherent store is completed by another process or coherent I/O
- // module into the block of synchronizable physical memory containing
- // the word. The size and alignment of the block is
- // implementation-dependent.
- // * A coherent store is executed between an LL and SC sequence on the
- // same processor to the block of synchornizable physical memory
- // containing the word.
- //
- BuildMI(BB, DL, TII->get(AtomicOp), Dest)
- .addReg(AlignedAddr)
- .addReg(Mask)
- .addReg(ShiftedCmpVal)
- .addReg(Mask2)
- .addReg(ShiftedNewVal)
- .addReg(ShiftAmt);
+ // loop1MBB:
+ // ll oldval,0(alignedaddr)
+ // and maskedoldval0,oldval,mask
+ // bne maskedoldval0,shiftedcmpval,sinkMBB
+ BB = loop1MBB;
+ BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal0)
+ .addReg(OldVal).addReg(Mask);
+ BuildMI(BB, DL, TII->get(Mips::BNE))
+ .addReg(MaskedOldVal0).addReg(ShiftedCmpVal).addMBB(sinkMBB);
+
+ // loop2MBB:
+ // and maskedoldval1,oldval,mask2
+ // or storeval,maskedoldval1,shiftednewval
+ // sc success,storeval,0(alignedaddr)
+ // beq success,$0,loop1MBB
+ BB = loop2MBB;
+ BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal1)
+ .addReg(OldVal).addReg(Mask2);
+ BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
+ .addReg(MaskedOldVal1).addReg(ShiftedNewVal);
+ BuildMI(BB, DL, TII->get(SC), Success)
+ .addReg(StoreVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, DL, TII->get(Mips::BEQ))
+ .addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
+
+ // sinkMBB:
+ // srl srlres,maskedoldval0,shiftamt
+ // sign_extend dest,srlres
+ BB = sinkMBB;
+
+ BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes)
+ .addReg(MaskedOldVal0).addReg(ShiftAmt);
+ BB = emitSignExtendToI32InReg(MI, BB, Size, Dest, SrlRes);
MI.eraseFromParent(); // The instruction is gone now.
PseudoSE<(outs DRC:$dst), (ins PtrRC:$ptr, DRC:$cmp, DRC:$swap),
[(set DRC:$dst, (Op iPTR:$ptr, DRC:$cmp, DRC:$swap))]>;
-class AtomicCmpSwapSubword<RegisterClass RC> :
- PseudoSE<(outs RC:$dst), (ins PtrRC:$ptr, RC:$mask, RC:$ShiftCmpVal,
- RC:$mask2, RC:$ShiftNewVal, RC:$ShiftAmt), []>;
-
class LLBase<string opstr, RegisterOperand RO, DAGOperand MO = mem> :
InstSE<(outs RO:$rt), (ins MO:$addr), !strconcat(opstr, "\t$rt, $addr"),
[], II_LL, FrmI, opstr> {
def ATOMIC_SWAP_I16 : Atomic2Ops<atomic_swap_16, GPR32>;
def ATOMIC_SWAP_I32 : Atomic2Ops<atomic_swap_32, GPR32>;
- def ATOMIC_CMP_SWAP_I8_PSEUDO : AtomicCmpSwap<atomic_cmp_swap_8, GPR32>;
- def ATOMIC_CMP_SWAP_I16_PSEUDO : AtomicCmpSwap<atomic_cmp_swap_16, GPR32>;
-}
-
-let isPseudo = 1 in {
- // The expansion of ATOMIC_CMP_SWAP_I(8|16) occurs in two parts. First,
- // the *_PSEUDO is partially lowering during ISelLowering to compute the
- // aligned addresses and necessary masks, along with another pseudo which
- // represents the ll/sc loop. That pseudo is lowered after the basic
- // postRA pseudos have been lowered.
- def ATOMIC_CMP_SWAP_I8_FRAG : AtomicCmpSwapSubword<GPR32>;
- def ATOMIC_CMP_SWAP_I16_FRAG : AtomicCmpSwapSubword<GPR32>;
-
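+ // Expanded into ll/sc retry loops by emitAtomicCmpSwapPartword (I8/I16) and
+ // emitAtomicCmpSwap (I32/I64) during custom instruction insertion.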
+ def ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap<atomic_cmp_swap_8, GPR32>;
+ def ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap<atomic_cmp_swap_16, GPR32>;
def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<atomic_cmp_swap_32, GPR32>;
}
+
/// Pseudo instructions for loading and storing accumulator registers.
let isPseudo = 1, isCodeGenOnly = 1, hasNoSchedulingInfo = 1 in {
def LOAD_ACC64 : Load<"", ACC64>;