.Cases("clz.ll", "popc.ll", "h2f", true)
.Cases("max.i", "max.ll", "max.ui", "max.ull", true)
.Cases("min.i", "min.ll", "min.ui", "min.ull", true)
+ .StartsWith("atomic.load.add.f32.p", true)
+ .StartsWith("atomic.load.add.f64.p", true)
.Default(false);
if (Expand) {
NewFn = nullptr;
Value *Cmp = Builder.CreateICmpSGE(
Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
+ } else if (IsNVVM && (Name.startswith("atomic.load.add.f32.p") ||
+ Name.startswith("atomic.load.add.f64.p"))) {
+ Value *Ptr = CI->getArgOperand(0);
+ Value *Val = CI->getArgOperand(1);
+ Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val,
+ AtomicOrdering::SequentiallyConsistent);
} else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
Name == "max.ui" || Name == "max.ull")) {
Value *Arg0 = CI->getArgOperand(0);
(atomic_load_add_64 node:$a, node:$b)>;
def atomic_load_add_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
(atomic_load_add_64 node:$a, node:$b)>;
-def atomic_load_add_f32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
- (int_nvvm_atomic_load_add_f32 node:$a, node:$b)>;
-def atomic_load_add_f32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
- (int_nvvm_atomic_load_add_f32 node:$a, node:$b)>;
-def atomic_load_add_f32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
- (int_nvvm_atomic_load_add_f32 node:$a, node:$b)>;
-def atomic_load_add_f64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
- (int_nvvm_atomic_load_add_f64 node:$a, node:$b)>;
-def atomic_load_add_f64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
- (int_nvvm_atomic_load_add_f64 node:$a, node:$b)>;
-def atomic_load_add_f64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
- (int_nvvm_atomic_load_add_f64 node:$a, node:$b)>;
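+// The target-independent atomic_load_fadd node is untyped here; the f32 vs.
+// f64 flavor is pinned by the register class of each F_ATOMIC_2 defm below,
+// so a single g/s/gen set of patterns covers both widths.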
+def atomic_load_add_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_fadd node:$a, node:$b)>;
+def atomic_load_add_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_fadd node:$a, node:$b)>;
+def atomic_load_add_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_fadd node:$a, node:$b)>;
defm INT_PTX_ATOM_ADD_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32", ".add",
atomic_load_add_32_g, i32imm, imm>;
".add", atomic_load_add_64_gen, i64imm, imm>;
defm INT_PTX_ATOM_ADD_G_F32 : F_ATOMIC_2<Float32Regs, ".global", ".f32", ".add",
- atomic_load_add_f32_g, f32imm, fpimm>;
+ atomic_load_add_g, f32imm, fpimm>;
defm INT_PTX_ATOM_ADD_S_F32 : F_ATOMIC_2<Float32Regs, ".shared", ".f32", ".add",
- atomic_load_add_f32_s, f32imm, fpimm>;
+ atomic_load_add_s, f32imm, fpimm>;
defm INT_PTX_ATOM_ADD_GEN_F32 : F_ATOMIC_2<Float32Regs, "", ".f32", ".add",
- atomic_load_add_f32_gen, f32imm, fpimm>;
+ atomic_load_add_gen, f32imm, fpimm>;
defm INT_PTX_ATOM_ADD_G_F64 : F_ATOMIC_2<Float64Regs, ".global", ".f64", ".add",
- atomic_load_add_f64_g, f64imm, fpimm, [hasAtomAddF64]>;
+ atomic_load_add_g, f64imm, fpimm, [hasAtomAddF64]>;
defm INT_PTX_ATOM_ADD_S_F64 : F_ATOMIC_2<Float64Regs, ".shared", ".f64", ".add",
- atomic_load_add_f64_s, f64imm, fpimm, [hasAtomAddF64]>;
+ atomic_load_add_s, f64imm, fpimm, [hasAtomAddF64]>;
defm INT_PTX_ATOM_ADD_GEN_F64 : F_ATOMIC_2<Float64Regs, "", ".f64", ".add",
- atomic_load_add_f64_gen, f64imm, fpimm, [hasAtomAddF64]>;
+ atomic_load_add_gen, f64imm, fpimm, [hasAtomAddF64]>;
// atom_sub
ret float %ret
}
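+
+; atomicrmw fadd should lower to atom.add.f32, atom.global.add.f32, or
+; atom.shared.add.f32 depending on the pointer's address space.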
+; CHECK-LABEL: atomicrmw_add_f32_generic
+define float @atomicrmw_add_f32_generic(float* %addr, float %val) {
+; CHECK: atom.add.f32
+ %ret = atomicrmw fadd float* %addr, float %val seq_cst
+ ret float %ret
+}
+
+; CHECK-LABEL: atomicrmw_add_f32_addrspace1
+define float @atomicrmw_add_f32_addrspace1(float addrspace(1)* %addr, float %val) {
+; CHECK: atom.global.add.f32
+ %ret = atomicrmw fadd float addrspace(1)* %addr, float %val seq_cst
+ ret float %ret
+}
+
+; CHECK-LABEL: atomicrmw_add_f32_addrspace3
+define float @atomicrmw_add_f32_addrspace3(float addrspace(3)* %addr, float %val) {
+; CHECK: atom.shared.add.f32
+ %ret = atomicrmw fadd float addrspace(3)* %addr, float %val seq_cst
+ ret float %ret
+}
+
; CHECK-LABEL: atomic_cmpxchg_i32
define i32 @atomic_cmpxchg_i32(i32* %addr, i32 %cmp, i32 %new) {
; CHECK: atom.cas.b32