From: Tim Northover
Date: Wed, 9 Jul 2014 09:24:43 +0000 (+0000)
Subject: ARM: use LLVM's atomicrmw instructions when ldrex/strex are available.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=e25cedce46ef7521f6dd62c437d5798631aed617;p=clang

ARM: use LLVM's atomicrmw instructions when ldrex/strex are available.

Having some kind of weird kernel-assisted ABI for these when the
native instructions are available appears to be (and should be) the
exception; OSs have been gradually opting in for years and the code
was getting silly. So let LLVM decide whether it's possible/profitable
to inline them by default.

Patch by Phoebe Buckheister.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@212598 91177308-0d34-0410-b5e6-96231b3b80d8
---
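As a minimal illustration of the effect (not part of the patch): on an
ldrex/strex-capable target, a snippet like the one below now compiles
straight to LLVM atomic instructions instead of an __atomic_* libcall,
whatever the OS. The invocation is hypothetical but uses standard clang
flags; the triples and IR actually guaranteed are pinned down by the
new tests at the end of this patch.

    // Sketch only: with something like
    //   clang -target armv7-none-eabi -O1 -S -emit-llvm counter.c
    // the fetch-add below is now emitted as
    //   atomicrmw add i32* @counter, i32 1 seq_cst
    // rather than a call to __atomic_fetch_add_4, and LLVM inlines
    // the ldrex/strex loop when lowering that instruction.
    int counter;

    int bump(void) {
      return __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
    }
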
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index a28436cfe0..f048fa8227 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -3475,27 +3475,14 @@ class ARMTargetInfo : public TargetInfo {
   static const Builtin::Info BuiltinInfo[];
 
   static bool shouldUseInlineAtomic(const llvm::Triple &T) {
-    if (T.isOSWindows())
-      return true;
-
-    // On linux, binaries targeting old cpus call functions in libgcc to
-    // perform atomic operations. The implementation in libgcc then calls into
-    // the kernel which on armv6 and newer uses ldrex and strex. The net result
-    // is that if we assume the kernel is at least as recent as the hardware,
-    // it is safe to use atomic instructions on armv6 and newer.
-    if (!T.isOSLinux() &&
-        T.getOS() != llvm::Triple::FreeBSD &&
-        T.getOS() != llvm::Triple::NetBSD &&
-        T.getOS() != llvm::Triple::Bitrig)
-      return false;
     StringRef ArchName = T.getArchName();
     if (T.getArch() == llvm::Triple::arm ||
         T.getArch() == llvm::Triple::armeb) {
       StringRef VersionStr;
       if (ArchName.startswith("armv"))
-        VersionStr = ArchName.substr(4);
+        VersionStr = ArchName.substr(4, 1);
       else if (ArchName.startswith("armebv"))
-        VersionStr = ArchName.substr(6);
+        VersionStr = ArchName.substr(6, 1);
       else
         return false;
       unsigned Version;
@@ -3507,9 +3494,9 @@ class ARMTargetInfo : public TargetInfo {
            T.getArch() == llvm::Triple::thumbeb);
     StringRef VersionStr;
     if (ArchName.startswith("thumbv"))
-      VersionStr = ArchName.substr(6);
+      VersionStr = ArchName.substr(6, 1);
     else if (ArchName.startswith("thumbebv"))
-      VersionStr = ArchName.substr(8);
+      VersionStr = ArchName.substr(8, 1);
     else
       return false;
     unsigned Version;
@@ -3854,6 +3841,13 @@ public:
     if (!getCPUDefineSuffix(Name))
      return false;
 
+    // Cortex M does not support 8 byte atomics, while general Thumb2 does.
+    StringRef Profile = getCPUProfile(Name);
+    if (Profile == "M" && MaxAtomicInlineWidth) {
+      MaxAtomicPromoteWidth = 32;
+      MaxAtomicInlineWidth = 32;
+    }
+
     CPU = Name;
     return true;
   }
diff --git a/test/CodeGen/arm-atomics-m.c b/test/CodeGen/arm-atomics-m.c
new file mode 100644
index 0000000000..51e2d1d9eb
--- /dev/null
+++ b/test/CodeGen/arm-atomics-m.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=thumbv7m-none--eabi -target-cpu cortex-m3 | FileCheck %s
+
+int i;
+long long l;
+
+typedef enum memory_order {
+  memory_order_relaxed, memory_order_consume, memory_order_acquire,
+  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+} memory_order;
+
+void test_presence(void)
+{
+  // CHECK-LABEL: @test_presence
+  // CHECK: atomicrmw add i32* {{.*}} seq_cst
+  __atomic_fetch_add(&i, 1, memory_order_seq_cst);
+  // CHECK: atomicrmw sub i32* {{.*}} seq_cst
+  __atomic_fetch_sub(&i, 1, memory_order_seq_cst);
+  // CHECK: load atomic i32* {{.*}} seq_cst
+  int r;
+  __atomic_load(&i, &r, memory_order_seq_cst);
+  // CHECK: store atomic i32 {{.*}} seq_cst
+  r = 0;
+  __atomic_store(&i, &r, memory_order_seq_cst);
+
+  // CHECK: __atomic_fetch_add_8
+  __atomic_fetch_add(&l, 1, memory_order_seq_cst);
+  // CHECK: __atomic_fetch_sub_8
+  __atomic_fetch_sub(&l, 1, memory_order_seq_cst);
+  // CHECK: __atomic_load_8
+  long long rl;
+  __atomic_load(&l, &rl, memory_order_seq_cst);
+  // CHECK: __atomic_store_8
+  rl = 0;
+  __atomic_store(&l, &rl, memory_order_seq_cst);
+}
diff --git a/test/CodeGen/arm-atomics-m0.c b/test/CodeGen/arm-atomics-m0.c
new file mode 100644
index 0000000000..335a1d2711
--- /dev/null
+++ b/test/CodeGen/arm-atomics-m0.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=thumbv6m-none--eabi -target-cpu cortex-m0 | FileCheck %s
+
+int i;
+long long l;
+
+typedef enum memory_order {
+  memory_order_relaxed, memory_order_consume, memory_order_acquire,
+  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+} memory_order;
+
+void test_presence(void)
+{
+  // CHECK-LABEL: @test_presence
+  // CHECK: __atomic_fetch_add_4
+  __atomic_fetch_add(&i, 1, memory_order_seq_cst);
+  // CHECK: __atomic_fetch_sub_4
+  __atomic_fetch_sub(&i, 1, memory_order_seq_cst);
+  // CHECK: __atomic_load_4
+  int r;
+  __atomic_load(&i, &r, memory_order_seq_cst);
+  // CHECK: __atomic_store_4
+  r = 0;
+  __atomic_store(&i, &r, memory_order_seq_cst);
+
+  // CHECK: __atomic_fetch_add_8
+  __atomic_fetch_add(&l, 1, memory_order_seq_cst);
+  // CHECK: __atomic_fetch_sub_8
+  __atomic_fetch_sub(&l, 1, memory_order_seq_cst);
+  // CHECK: __atomic_load_8
+  long long rl;
+  __atomic_load(&l, &rl, memory_order_seq_cst);
+  // CHECK: __atomic_store_8
+  rl = 0;
+  __atomic_store(&l, &rl, memory_order_seq_cst);
+}
diff --git a/test/CodeGen/arm-atomics.c b/test/CodeGen/arm-atomics.c
new file mode 100644
index 0000000000..b54e277120
--- /dev/null
+++ b/test/CodeGen/arm-atomics.c
@@ -0,0 +1,37 @@
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=thumbv7-none--eabi | FileCheck %s
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=armv6-none--eabi | FileCheck %s
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=armv7-unknown-openbsd | FileCheck %s
+
+int i;
+long long l;
+
+typedef enum memory_order {
+  memory_order_relaxed, memory_order_consume, memory_order_acquire,
+  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+} memory_order;
+
+void test_presence(void)
+{
+  // CHECK-LABEL: @test_presence
+  // CHECK: atomicrmw add i32* {{.*}} seq_cst
+  __atomic_fetch_add(&i, 1, memory_order_seq_cst);
+  // CHECK: atomicrmw sub i32* {{.*}} seq_cst
+  __atomic_fetch_sub(&i, 1, memory_order_seq_cst);
+  // CHECK: load atomic i32* {{.*}} seq_cst
+  int r;
+  __atomic_load(&i, &r, memory_order_seq_cst);
+  // CHECK: store atomic i32 {{.*}} seq_cst
+  r = 0;
+  __atomic_store(&i, &r, memory_order_seq_cst);
+
+  // CHECK: atomicrmw add i64* {{.*}} seq_cst
+  __atomic_fetch_add(&l, 1, memory_order_seq_cst);
+  // CHECK: atomicrmw sub i64* {{.*}} seq_cst
+  __atomic_fetch_sub(&l, 1, memory_order_seq_cst);
+  // CHECK: load atomic i64* {{.*}} seq_cst
+  long long rl;
+  __atomic_load(&l, &rl, memory_order_seq_cst);
+  // CHECK: store atomic i64 {{.*}} seq_cst
+  rl = 0;
+  __atomic_store(&l, &rl, memory_order_seq_cst);
+}
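
A note on the Targets.cpp hunks above: with the OS whitelist gone, the
decision reduces to an architecture-version check, and the substr(N) ->
substr(N, 1) changes make it read only the single version digit, so
suffixed arch names such as "armv7s" or "thumbv7m" still parse. A
condensed, hypothetical sketch of the resulting logic (names
simplified; the real code also handles the big-endian "armebv" and
"thumbebv" spellings):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/Triple.h"

    // Sketch only, not the committed code: ldrex/strex exist on ARMv6
    // and newer, but in Thumb mode only with Thumb2 (v7 and newer), so
    // an architecture-version check is all that is needed.
    static bool canInlineAtomics(const llvm::Triple &T) {
      llvm::StringRef Arch = T.getArchName();
      unsigned Version = 0;
      if (Arch.startswith("armv")) {
        // substr(4, 1): just the digit after "armv", "armv7s" -> "7";
        // getAsInteger returns true on failure.
        if (Arch.substr(4, 1).getAsInteger(10, Version))
          return false;
        return Version >= 6;
      }
      if (Arch.startswith("thumbv")) {
        // substr(6, 1): "thumbv7m" -> "7".
        if (Arch.substr(6, 1).getAsInteger(10, Version))
          return false;
        return Version >= 7;
      }
      return false;
    }

Independently of that, setCPU() now clamps MaxAtomicPromoteWidth and
MaxAtomicInlineWidth to 32 on M-profile CPUs, which is why the
cortex-m3 test expects inlined 32-bit operations but __atomic_*_8
libcalls for long long, while the cortex-m0 test expects libcalls for
everything.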