granicus.if.org Git - spl/commitdiff
Add atomic_swap_32() and atomic_swap_64()
author: Tim Chase <tim@chase2k.com>
Sat, 26 Jul 2014 04:45:26 +0000 (23:45 -0500)
committer: Brian Behlendorf <behlendorf1@llnl.gov>
Mon, 28 Jul 2014 21:19:24 +0000 (14:19 -0700)
The atomic_swap_32() function maps to atomic_xchg(), and
the atomic_swap_64() function maps to atomic64_xchg().

Signed-off-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #377

include/sys/atomic.h

index 31d35eb1437d9ff5c9c6d8b7fb3777b01a905dce..e034f2e2c836e55a24b16547e426266175ec85b5 100644 (file)
@@ -156,6 +156,19 @@ atomic_cas_32(volatile uint32_t *target,  uint32_t cmp,
        return rc;
 }
 
+/*
+ * atomic_swap_32: atomically replace *target with newval and return the
+ * value previously stored there (the classic "swap"/"xchg" primitive).
+ * This is the spinlock-emulated fallback branch (see the
+ * "#else ATOMIC_SPINLOCK" below, where the native atomic_xchg() mapping
+ * lives); atomicity comes from serializing on atomic32_lock.
+ */
+static __inline__ uint32_t
+atomic_swap_32(volatile uint32_t *target,  uint32_t newval)
+{
+       uint32_t rc;
+
+       /* Read the old value and store the new one under the lock so the
+        * two steps appear as a single indivisible operation. */
+       spin_lock(&atomic32_lock);
+       rc = *target;
+       *target = newval;
+       spin_unlock(&atomic32_lock);
+
+       return rc;
+}
+
 static __inline__ void
 atomic_inc_64(volatile uint64_t *target)
 {
@@ -253,6 +266,18 @@ atomic_cas_64(volatile uint64_t *target,  uint64_t cmp,
        return rc;
 }
 
+/*
+ * atomic_swap_64: 64-bit counterpart of atomic_swap_32 — atomically
+ * replace *target with newval and return the previous value.  This is
+ * the spinlock-emulated fallback branch (the native build maps this to
+ * atomic64_xchg() in the "#else ATOMIC_SPINLOCK" section below);
+ * atomicity comes from serializing on atomic64_lock.
+ */
+static __inline__ uint64_t
+atomic_swap_64(volatile uint64_t *target,  uint64_t newval)
+{
+       uint64_t rc;
+
+       /* Read-then-write under the lock makes the swap indivisible. */
+       spin_lock(&atomic64_lock);
+       rc = *target;
+       *target = newval;
+       spin_unlock(&atomic64_lock);
+
+       return rc;
+}
 
 #else /* ATOMIC_SPINLOCK */
 
@@ -265,6 +290,7 @@ atomic_cas_64(volatile uint64_t *target,  uint64_t cmp,
 #define atomic_add_32_nv(v, i) atomic_add_return((i), (atomic_t *)(v))
 #define atomic_sub_32_nv(v, i) atomic_sub_return((i), (atomic_t *)(v))
 #define atomic_cas_32(v, x, y) atomic_cmpxchg((atomic_t *)(v), x, y)
+#define atomic_swap_32(v, x)   atomic_xchg((atomic_t *)(v), x)
 #define atomic_inc_64(v)       atomic64_inc((atomic64_t *)(v))
 #define atomic_dec_64(v)       atomic64_dec((atomic64_t *)(v))
 #define atomic_add_64(v, i)    atomic64_add((i), (atomic64_t *)(v))
@@ -274,6 +300,7 @@ atomic_cas_64(volatile uint64_t *target,  uint64_t cmp,
 #define atomic_add_64_nv(v, i) atomic64_add_return((i), (atomic64_t *)(v))
 #define atomic_sub_64_nv(v, i) atomic64_sub_return((i), (atomic64_t *)(v))
 #define atomic_cas_64(v, x, y) atomic64_cmpxchg((atomic64_t *)(v), x, y)
+#define atomic_swap_64(v, x)   atomic64_xchg((atomic64_t *)(v), x)
 
 #endif /* ATOMIC_SPINLOCK */