	return rc;
}
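+/*
+ * Atomically exchange the 32-bit value at *target for newval and return
+ * the previous contents, emulated here with the shared 32-bit spinlock.
+ */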
+static __inline__ uint32_t
+atomic_swap_32(volatile uint32_t *target, uint32_t newval)
+{
+	uint32_t rc;
+
+	spin_lock(&atomic32_lock);
+	rc = *target;
+	*target = newval;
+	spin_unlock(&atomic32_lock);
+
+	return rc;
+}
+
static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
@@ ... @@
	return rc;
}
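+/*
+ * 64-bit counterpart of atomic_swap_32(): exchange *target for newval
+ * under the shared 64-bit spinlock and return the old value.
+ */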
+static __inline__ uint64_t
+atomic_swap_64(volatile uint64_t *target, uint64_t newval)
+{
+	uint64_t rc;
+
+	spin_lock(&atomic64_lock);
+	rc = *target;
+	*target = newval;
+	spin_unlock(&atomic64_lock);
+
+	return rc;
+}
#else /* ATOMIC_SPINLOCK */
#define atomic_add_32_nv(v, i) atomic_add_return((i), (atomic_t *)(v))
#define atomic_sub_32_nv(v, i) atomic_sub_return((i), (atomic_t *)(v))
#define atomic_cas_32(v, x, y) atomic_cmpxchg((atomic_t *)(v), x, y)
+#define atomic_swap_32(v, x) atomic_xchg((atomic_t *)(v), x)
#define atomic_inc_64(v) atomic64_inc((atomic64_t *)(v))
#define atomic_dec_64(v) atomic64_dec((atomic64_t *)(v))
#define atomic_add_64(v, i) atomic64_add((i), (atomic64_t *)(v))
#define atomic_add_64_nv(v, i) atomic64_add_return((i), (atomic64_t *)(v))
#define atomic_sub_64_nv(v, i) atomic64_sub_return((i), (atomic64_t *)(v))
#define atomic_cas_64(v, x, y) atomic64_cmpxchg((atomic64_t *)(v), x, y)
+#define atomic_swap_64(v, x) atomic64_xchg((atomic64_t *)(v), x)
#endif /* ATOMIC_SPINLOCK */
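
Both builds give atomic_swap_32()/atomic_swap_64() exchange semantics: the new value is stored and the previous value is returned in one atomic step. Below is a minimal usage sketch, assuming the header above is included; the dirty flag and consume_dirty() helper are hypothetical and not part of this change.

/* Hypothetical consumer: clear a dirty flag and report whether it was set. */
static volatile uint32_t dirty = 0;

static int
consume_dirty(void)
{
	/*
	 * atomic_swap_32() writes 0 and hands back the previous value, so at
	 * most one concurrent caller can observe a non-zero result.
	 */
	return (atomic_swap_32(&dirty, 0) != 0);
}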