granicus.if.org Git - postgresql/commitdiff
Further marginal hacking on generic atomic ops.
author: Tom Lane <tgl@sss.pgh.pa.us>
Thu, 7 Sep 2017 12:50:01 +0000 (08:50 -0400)
committer: Tom Lane <tgl@sss.pgh.pa.us>
Thu, 7 Sep 2017 12:50:01 +0000 (08:50 -0400)
In the generic atomic ops that rely on a loop around a CAS primitive,
there's no need to force the initial read of the "old" value to be atomic.
In the typically-rare case that we get a torn value, that simply means
that the first CAS attempt will fail; but it will update "old" to the
atomically-read value, so the next attempt has a chance of succeeding.
It was already being done that way in pg_atomic_exchange_u64_impl(),
but let's duplicate the approach in the rest.

(Given the current coding of the pg_atomic_read functions, this change
is a no-op anyway on popular platforms; it only makes a difference where
pg_atomic_read_u64_impl() is implemented as a CAS.)

In passing, also remove unnecessary take-a-pointer-and-dereference-it
coding in the pg_atomic_read functions.  That seems to have been based
on a misunderstanding of what the C standard requires.  What actually
matters is that the pointer be declared as pointing to volatile, which
it is.

I don't believe this will change the assembly code at all on x86
platforms (even ignoring the likelihood that these implementations
get overridden by others); but it may help on less-mainstream CPUs.

Discussion: https://postgr.es/m/13707.1504718238@sss.pgh.pa.us

src/include/port/atomics/generic.h

index 75ffaf6e874891518fb71edc386e1470ae4e6267..c7566919deec1a527d014eb0669aaed2d268de19 100644 (file)
@@ -45,7 +45,7 @@ typedef pg_atomic_uint32 pg_atomic_flag;
 static inline uint32
 pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
 {
-       return *(&ptr->value);
+       return ptr->value;
 }
 #endif
 
@@ -170,7 +170,7 @@ static inline uint32
 pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
 {
        uint32 old;
-       old = pg_atomic_read_u32_impl(ptr);
+       old = ptr->value;                       /* ok if read is not atomic */
        while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_))
                /* skip */;
        return old;
@@ -183,7 +183,7 @@ static inline uint32
 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 {
        uint32 old;
-       old = pg_atomic_read_u32_impl(ptr);
+       old = ptr->value;                       /* ok if read is not atomic */
        while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
                /* skip */;
        return old;
@@ -205,7 +205,7 @@ static inline uint32
 pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
 {
        uint32 old;
-       old = pg_atomic_read_u32_impl(ptr);
+       old = ptr->value;                       /* ok if read is not atomic */
        while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_))
                /* skip */;
        return old;
@@ -218,7 +218,7 @@ static inline uint32
 pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
 {
        uint32 old;
-       old = pg_atomic_read_u32_impl(ptr);
+       old = ptr->value;                       /* ok if read is not atomic */
        while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_))
                /* skip */;
        return old;
@@ -249,7 +249,7 @@ static inline uint64
 pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
 {
        uint64 old;
-       old = ptr->value;
+       old = ptr->value;                       /* ok if read is not atomic */
        while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_))
                /* skip */;
        return old;
@@ -299,12 +299,10 @@ static inline uint64
 pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
 {
        /*
-        * On this platform aligned 64bit reads are guaranteed to be atomic,
-        * except if using the fallback implementation, where can't guarantee the
-        * required alignment.
+        * On this platform aligned 64-bit reads are guaranteed to be atomic.
         */
        AssertPointerAlignment(ptr, 8);
-       return *(&ptr->value);
+       return ptr->value;
 }
 
 #else
@@ -315,10 +313,10 @@ pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
        uint64 old = 0;
 
        /*
-        * 64 bit reads aren't safe on all platforms. In the generic
+        * 64-bit reads aren't atomic on all platforms. In the generic
         * implementation implement them as a compare/exchange with 0. That'll
-        * fail or succeed, but always return the old value. Possible might store
-        * a 0, but only if the prev. value also was a 0 - i.e. harmless.
+        * fail or succeed, but always return the old value. Possibly might store
+        * a 0, but only if the previous value also was a 0 - i.e. harmless.
         */
        pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);
 
@@ -342,7 +340,7 @@ static inline uint64
 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
        uint64 old;
-       old = pg_atomic_read_u64_impl(ptr);
+       old = ptr->value;                       /* ok if read is not atomic */
        while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_))
                /* skip */;
        return old;
@@ -364,7 +362,7 @@ static inline uint64
 pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
 {
        uint64 old;
-       old = pg_atomic_read_u64_impl(ptr);
+       old = ptr->value;                       /* ok if read is not atomic */
        while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_))
                /* skip */;
        return old;
@@ -377,7 +375,7 @@ static inline uint64
 pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
 {
        uint64 old;
-       old = pg_atomic_read_u64_impl(ptr);
+       old = ptr->value;                       /* ok if read is not atomic */
        while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_))
                /* skip */;
        return old;