For all ppc compilers, implement pg_atomic_fetch_add_ with inline asm.
author	Noah Misch <noah@leadboat.com>
Sat, 14 Sep 2019 02:34:30 +0000 (19:34 -0700)
committer	Noah Misch <noah@leadboat.com>
Sat, 14 Sep 2019 02:34:30 +0000 (19:34 -0700)
This is more like how we handle s_lock.h and arch-x86.h.  This does not
materially affect code generation for gcc 7.2.0 or xlc 13.1.3.

Reviewed by Tom Lane.

Discussion: https://postgr.es/m/20190831071157.GA3251746@rfd.leadboat.com
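
For illustration only (not part of the patch): a minimal standalone sketch of
the technique the new configure probe tests and the new fetch_add
implementations exploit.  When the compiler can prove the addend is a
compile-time constant in the signed 16-bit range, an "i" (immediate)
constraint lets the asm use "addi"; otherwise the addend travels in a
register and plain "add" is used.  The helper name add_imm_or_reg is
hypothetical, the code assumes a ppc/ppc64 target, and it is expected to
compile only where the probe passes, i.e. where the compiler accepts "i"(x)
under a __builtin_constant_p(x) guard (typically with optimization enabled).

static inline int
add_imm_or_reg(int ra, int si)
{
	int		res;

	if (__builtin_constant_p(si) && si >= -32768 && si <= 32767)
	{
		/* addend is a small compile-time constant: use the immediate form */
		__asm__ __volatile__(
			"	addi	%0,%1,%2	\n"
			: "=r"(res) : "r"(ra), "i"(si));
	}
	else
	{
		/* addend known only at run time: pass it in a register */
		__asm__ __volatile__(
			"	add	%0,%1,%2	\n"
			: "=r"(res) : "r"(ra), "r"(si));
	}
	return res;
}

int	add_three(int x)		{ return add_imm_or_reg(x, 3); }	/* addi path */
int	add_vars(int x, int y)	{ return add_imm_or_reg(x, y); }	/* add path */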

configure
configure.in
src/include/pg_config.h.in
src/include/port/atomics/arch-ppc.h
src/include/port/atomics/generic-xlc.h

index b3c92764be83917762e648c473870051ddd4acea..731d12c6acbad04e7aa770d927c942c313d8e686 100755 (executable)
--- a/configure
+++ b/configure
@@ -14593,6 +14593,46 @@ $as_echo "$pgac_cv_have_ppc_mutex_hint" >&6; }
 
 $as_echo "#define HAVE_PPC_LWARX_MUTEX_HINT 1" >>confdefs.h
 
+    fi
+    # Check if compiler accepts "i"(x) when __builtin_constant_p(x).
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __builtin_constant_p(x) implies \"i\"(x) acceptance" >&5
+$as_echo_n "checking whether __builtin_constant_p(x) implies \"i\"(x) acceptance... " >&6; }
+if ${pgac_cv_have_i_constraint__builtin_constant_p+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+static inline int
+     addi(int ra, int si)
+     {
+         int res = 0;
+         if (__builtin_constant_p(si))
+             __asm__ __volatile__(
+                 " addi %0,%1,%2\n" : "=r"(res) : "r"(ra), "i"(si));
+         return res;
+     }
+     int test_adds(int x) { return addi(3, x) + addi(x, 5); }
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  pgac_cv_have_i_constraint__builtin_constant_p=yes
+else
+  pgac_cv_have_i_constraint__builtin_constant_p=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_have_i_constraint__builtin_constant_p" >&5
+$as_echo "$pgac_cv_have_i_constraint__builtin_constant_p" >&6; }
+    if test x"$pgac_cv_have_i_constraint__builtin_constant_p" = xyes ; then
+
+$as_echo "#define HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P 1" >>confdefs.h
+
     fi
   ;;
 esac
index 0d16c1a9711e9ae1d44dec13b286eb928a992295..9d0e24f46bed1e309a818be07b624ce2191c4759 100644 (file)
--- a/configure.in
+++ b/configure.in
@@ -1539,6 +1539,26 @@ case $host_cpu in
     if test x"$pgac_cv_have_ppc_mutex_hint" = xyes ; then
        AC_DEFINE(HAVE_PPC_LWARX_MUTEX_HINT, 1, [Define to 1 if the assembler supports PPC's LWARX mutex hint bit.])
     fi
+    # Check if compiler accepts "i"(x) when __builtin_constant_p(x).
+    AC_CACHE_CHECK([whether __builtin_constant_p(x) implies "i"(x) acceptance],
+                   [pgac_cv_have_i_constraint__builtin_constant_p],
+    [AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+    [static inline int
+     addi(int ra, int si)
+     {
+         int res = 0;
+         if (__builtin_constant_p(si))
+             __asm__ __volatile__(
+                 " addi %0,%1,%2\n" : "=r"(res) : "r"(ra), "i"(si));
+         return res;
+     }
+     int test_adds(int x) { return addi(3, x) + addi(x, 5); }], [])],
+    [pgac_cv_have_i_constraint__builtin_constant_p=yes],
+    [pgac_cv_have_i_constraint__builtin_constant_p=no])])
+    if test x"$pgac_cv_have_i_constraint__builtin_constant_p" = xyes ; then
+      AC_DEFINE(HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P, 1,
+                [Define to 1 if __builtin_constant_p(x) implies "i"(x) acceptance.])
+    fi
   ;;
 esac
 
index c6014e83fa89ac11f6586af6eee217a3d67a928b..509cc92b989f585fcca1f99556455275f9b5cfad 100644 (file)
--- a/src/include/pg_config.h.in
+++ b/src/include/pg_config.h.in
 /* Define to 1 if you have isinf(). */
 #undef HAVE_ISINF
 
+/* Define to 1 if __builtin_constant_p(x) implies "i"(x) acceptance. */
+#undef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
+
 /* Define to 1 if you have the <langinfo.h> header file. */
 #undef HAVE_LANGINFO_H
 
index 344b39449bd0d00358d23b1a448e080b676eafae..35d602e618f2bfd32d172a87d6d4b496b629b871 100644 (file)
--- a/src/include/port/atomics/arch-ppc.h
+++ b/src/include/port/atomics/arch-ppc.h
 #define pg_write_barrier_impl()                __asm__ __volatile__ ("lwsync" : : : "memory")
 #endif
 
+#define PG_HAVE_ATOMIC_U32_SUPPORT
+typedef struct pg_atomic_uint32
+{
+       volatile uint32 value;
+} pg_atomic_uint32;
+
+/* 64bit atomics are only supported in 64bit mode */
+#ifdef __64BIT__
+#define PG_HAVE_ATOMIC_U64_SUPPORT
+typedef struct pg_atomic_uint64
+{
+       volatile uint64 value pg_attribute_aligned(8);
+} pg_atomic_uint64;
+
+#endif /* __64BIT__ */
+
+#define PG_HAVE_ATOMIC_FETCH_ADD_U32
+static inline uint32
+pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
+{
+       uint32 _t;
+       uint32 res;
+
+       /*
+        * xlc has a no-longer-documented __fetch_and_add() intrinsic.  In xlc
+        * 12.01.0000.0000, it emits a leading "sync" and trailing "isync".  In
+        * xlc 13.01.0003.0004, it emits neither.  Hence, using the intrinsic
+        * would add redundant syncs on xlc 12.
+        */
+#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
+       if (__builtin_constant_p(add_) &&
+               add_ <= PG_INT16_MAX && add_ >= PG_INT16_MIN)
+               __asm__ __volatile__(
+                       "       sync                            \n"
+                       "       lwarx   %1,0,%4         \n"
+                       "       addi    %0,%1,%3        \n"
+                       "       stwcx.  %0,0,%4         \n"
+                       "       bne     $-12            \n"             /* branch to lwarx */
+                       "       isync                           \n"
+:                      "=&r"(_t), "=&r"(res), "+m"(ptr->value)
+:                      "i"(add_), "r"(&ptr->value)
+:                      "memory", "cc");
+       else
+#endif
+               __asm__ __volatile__(
+                       "       sync                            \n"
+                       "       lwarx   %1,0,%4         \n"
+                       "       add     %0,%1,%3        \n"
+                       "       stwcx.  %0,0,%4         \n"
+                       "       bne     $-12            \n"             /* branch to lwarx */
+                       "       isync                           \n"
+:                      "=&r"(_t), "=&r"(res), "+m"(ptr->value)
+:                      "r"(add_), "r"(&ptr->value)
+:                      "memory", "cc");
+
+       return res;
+}
+
+#ifdef PG_HAVE_ATOMIC_U64_SUPPORT
+#define PG_HAVE_ATOMIC_FETCH_ADD_U64
+static inline uint64
+pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+       uint64 _t;
+       uint64 res;
+
+       /* Like u32, but s/lwarx/ldarx/; s/stwcx/stdcx/ */
+#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
+       if (__builtin_constant_p(add_) &&
+               add_ <= PG_INT16_MAX && add_ >= PG_INT16_MIN)
+               __asm__ __volatile__(
+                       "       sync                            \n"
+                       "       ldarx   %1,0,%4         \n"
+                       "       addi    %0,%1,%3        \n"
+                       "       stdcx.  %0,0,%4         \n"
+                       "       bne     $-12            \n"             /* branch to ldarx */
+                       "       isync                           \n"
+:                      "=&r"(_t), "=&r"(res), "+m"(ptr->value)
+:                      "i"(add_), "r"(&ptr->value)
+:                      "memory", "cc");
+       else
+#endif
+               __asm__ __volatile__(
+                       "       sync                            \n"
+                       "       ldarx   %1,0,%4         \n"
+                       "       add     %0,%1,%3        \n"
+                       "       stdcx.  %0,0,%4         \n"
+                       "       bne     $-12            \n"             /* branch to ldarx */
+                       "       isync                           \n"
+:                      "=&r"(_t), "=&r"(res), "+m"(ptr->value)
+:                      "r"(add_), "r"(&ptr->value)
+:                      "memory", "cc");
+
+       return res;
+}
+
+#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
+
 /* per architecture manual doubleword accesses have single copy atomicity */
 #define PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY
index 8b5c7329706a2a19345a763f0211d4f44fcc93e6..8330b454953f991999ad3a65d8859df915a04de4 100644 (file)
--- a/src/include/port/atomics/generic-xlc.h
+++ b/src/include/port/atomics/generic-xlc.h
 
 #if defined(HAVE_ATOMICS)
 
-#define PG_HAVE_ATOMIC_U32_SUPPORT
-typedef struct pg_atomic_uint32
-{
-       volatile uint32 value;
-} pg_atomic_uint32;
-
-
-/* 64bit atomics are only supported in 64bit mode */
-#ifdef __64BIT__
-#define PG_HAVE_ATOMIC_U64_SUPPORT
-typedef struct pg_atomic_uint64
-{
-       volatile uint64 value pg_attribute_aligned(8);
-} pg_atomic_uint64;
-
-#endif /* __64BIT__ */
-
 #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
 static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
@@ -69,33 +52,6 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
        return ret;
 }
 
-#define PG_HAVE_ATOMIC_FETCH_ADD_U32
-static inline uint32
-pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
-{
-       uint32 _t;
-       uint32 res;
-
-       /*
-        * xlc has a no-longer-documented __fetch_and_add() intrinsic.  In xlc
-        * 12.01.0000.0000, it emits a leading "sync" and trailing "isync".  In
-        * xlc 13.01.0003.0004, it emits neither.  Hence, using the intrinsic
-        * would add redundant syncs on xlc 12.
-        */
-       __asm__ __volatile__(
-               "       sync                            \n"
-               "       lwarx   %1,0,%4         \n"
-               "       add     %0,%1,%3        \n"
-               "       stwcx.  %0,0,%4         \n"
-               "       bne     $-12            \n"             /* branch to lwarx */
-               "       isync                           \n"
-:              "=&r"(_t), "=&r"(res), "+m"(ptr->value)
-:              "r"(add_), "r"(&ptr->value)
-:              "memory", "cc");
-
-       return res;
-}
-
 #ifdef PG_HAVE_ATOMIC_U64_SUPPORT
 
 #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
@@ -115,28 +71,6 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
        return ret;
 }
 
-#define PG_HAVE_ATOMIC_FETCH_ADD_U64
-static inline uint64
-pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
-{
-       uint64 _t;
-       uint64 res;
-
-       /* Like u32, but s/lwarx/ldarx/; s/stwcx/stdcx/ */
-       __asm__ __volatile__(
-               "       sync                            \n"
-               "       ldarx   %1,0,%4         \n"
-               "       add     %0,%1,%3        \n"
-               "       stdcx.  %0,0,%4         \n"
-               "       bne     $-12            \n"             /* branch to ldarx */
-               "       isync                           \n"
-:              "=&r"(_t), "=&r"(res), "+m"(ptr->value)
-:              "r"(add_), "r"(&ptr->value)
-:              "memory", "cc");
-
-       return res;
-}
-
 #endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
 
 #endif /* defined(HAVE_ATOMICS) */
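
Usage note (hypothetical caller, not part of this commit): the wrappers in
src/include/port/atomics.h route pg_atomic_fetch_add_u32() to the
pg_atomic_fetch_add_u32_impl() that arch-ppc.h now provides, so calling code
is unchanged.  A minimal sketch, with request_count and the helper names
invented for illustration:

#include "postgres.h"

#include "port/atomics.h"

static pg_atomic_uint32 request_count;

void
counter_init(void)
{
	pg_atomic_init_u32(&request_count, 0);
}

uint32
counter_bump(void)
{
	/* fetch-and-add: returns the value the counter held before the add */
	return pg_atomic_fetch_add_u32(&request_count, 1);
}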