atomic_*_*_nv() functions need to return the new value atomically.
author    Ricardo M. Correia <ricardo.correia@oracle.com>
          Fri, 17 Sep 2010 23:03:15 +0000 (16:03 -0700)
committer Brian Behlendorf <behlendorf1@llnl.gov>
          Fri, 17 Sep 2010 23:03:25 +0000 (16:03 -0700)
A local variable must be used for the return value to avoid a
potential race once the spin lock is dropped: if *target is
dereferenced again after spin_unlock(), a concurrent update can
change the value before it is returned, and the caller would not
see the value its own operation produced (see the sketch below).

Signed-off-by: Ricardo M. Correia <ricardo.correia@oracle.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
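
For illustration only, a minimal userspace sketch of the pattern being
fixed, with a pthread mutex standing in for the kernel spin lock (the
harness and the names racy_inc_nv/safe_inc_nv are hypothetical, not
part of this commit):

    /*
     * Userspace analogue of the race fixed by this commit.  Only the
     * locking pattern mirrors include/sys/atomic.h; everything else
     * is an assumption for demonstration purposes.
     */
    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Pre-fix pattern: *target is re-read after the lock is dropped,
     * so a concurrent increment can slip in between the unlock and
     * the return. */
    static uint32_t
    racy_inc_nv(volatile uint32_t *target)
    {
            pthread_mutex_lock(&lock);
            (*target)++;
            pthread_mutex_unlock(&lock);
            return *target;         /* RACE: unlocked re-read */
    }

    /* Post-fix pattern: capture the new value while still holding
     * the lock and return the local copy. */
    static uint32_t
    safe_inc_nv(volatile uint32_t *target)
    {
            uint32_t nv;

            pthread_mutex_lock(&lock);
            nv = ++(*target);
            pthread_mutex_unlock(&lock);
            return nv;
    }

The fix captures the new value while the lock is still held, so the
returned value is exactly the one this operation produced.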
include/sys/atomic.h

index 9348ab97e3fdfd20e0bdffec6ec7eb496ab5e2bb..1d11738941b41d8c8b3b6d0516aa93e710a58957 100644 (file)
@@ -93,41 +93,51 @@ atomic_sub_32(volatile uint32_t *target, int32_t delta)
 static __inline__ uint32_t
 atomic_inc_32_nv(volatile uint32_t *target)
 {
+       uint32_t nv;
+
        spin_lock(&atomic32_lock);
-       (*target)++;
+       nv = ++(*target);
        spin_unlock(&atomic32_lock);
 
-       return *target;
+       return nv;
 }
 
 static __inline__ uint32_t
 atomic_dec_32_nv(volatile uint32_t *target)
 {
+       uint32_t nv;
+
        spin_lock(&atomic32_lock);
-       (*target)--;
+       nv = --(*target);
        spin_unlock(&atomic32_lock);
 
-       return *target;
+       return nv;
 }
 
 static __inline__ uint32_t
 atomic_add_32_nv(volatile uint32_t *target, uint32_t delta)
 {
+       uint32_t nv;
+
        spin_lock(&atomic32_lock);
        *target += delta;
+       nv = *target;
        spin_unlock(&atomic32_lock);
 
-       return *target;
+       return nv;
 }
 
 static __inline__ uint32_t
 atomic_sub_32_nv(volatile uint32_t *target, uint32_t delta)
 {
+       uint32_t nv;
+
        spin_lock(&atomic32_lock);
        *target -= delta;
+       nv = *target;
        spin_unlock(&atomic32_lock);
 
-       return *target;
+       return nv;
 }
 
 static __inline__ uint32_t
@@ -181,41 +191,51 @@ atomic_sub_64(volatile uint64_t *target, uint64_t delta)
 static __inline__ uint64_t
 atomic_inc_64_nv(volatile uint64_t *target)
 {
+       uint64_t nv;
+
        spin_lock(&atomic64_lock);
-       (*target)++;
+       nv = ++(*target);
        spin_unlock(&atomic64_lock);
 
-       return *target;
+       return nv;
 }
 
 static __inline__ uint64_t
 atomic_dec_64_nv(volatile uint64_t *target)
 {
+       uint64_t nv;
+
        spin_lock(&atomic64_lock);
-       (*target)--;
+       nv = --(*target);
        spin_unlock(&atomic64_lock);
 
-       return *target;
+       return nv;
 }
 
 static __inline__ uint64_t
 atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
 {
+       uint64_t nv;
+
        spin_lock(&atomic64_lock);
        *target += delta;
+       nv = *target;
        spin_unlock(&atomic64_lock);
 
-       return *target;
+       return nv;
 }
 
 static __inline__ uint64_t
 atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
 {
+       uint64_t nv;
+
        spin_lock(&atomic64_lock);
        *target -= delta;
+       nv = *target;
        spin_unlock(&atomic64_lock);
 
-       return *target;
+       return nv;
 }
 
 static __inline__ uint64_t
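
For context, a hedged sketch of why the return value matters to
callers: a *_nv() result is commonly used as a unique token, which is
only safe if it is the value produced by the caller's own operation
(alloc_id() and next_id below are hypothetical, not from the SPL
source):

    /* Hypothetical caller: each invocation must yield a distinct ID.
     * Correct only if atomic_inc_64_nv() returns the value produced
     * by this call's increment, not a later unlocked re-read. */
    static uint64_t next_id;

    static uint64_t
    alloc_id(void)
    {
            return atomic_inc_64_nv(&next_id);
    }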