]> granicus.if.org Git - zfs/commitdiff
Add atomic_sub_* functions to libspl.
authorEtienne Dechamps <etienne.dechamps@ovh.net>
Wed, 27 Jun 2012 08:26:49 +0000 (10:26 +0200)
committerBrian Behlendorf <behlendorf1@llnl.gov>
Wed, 17 Oct 2012 15:56:37 +0000 (08:56 -0700)
Both the SPL and the ZFS libspl export most of the atomic_* functions,
except atomic_sub_* functions which are only exported by the SPL, not by
libspl. This patch remedies that by implementing atomic_sub_* functions
in libspl.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #1013

lib/libspl/asm-generic/atomic.c
lib/libspl/asm-i386/atomic.S
lib/libspl/asm-x86_64/atomic.S
lib/libspl/include/atomic.h

index de4430f9f3d1480f2478973517128333c749756c..a3223eadc21b211e32982f51fd171f309319f3af 100644 (file)
@@ -103,6 +103,31 @@ void atomic_add_ptr(volatile void *target, ssize_t bits)
 }
 
 
+/*
+ * Generic (non-asm) fallback: atomically subtract "bits" from *target.
+ * Atomicity comes from serializing every operation on the file-global
+ * atomic_lock mutex; structure mirrors ATOMIC_ADD above.
+ */
+#define ATOMIC_SUB(name, type1, type2) \
+       void atomic_sub_##name(volatile type1 *target, type2 bits)      \
+       {                                                               \
+               VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);      \
+               *target -= bits;                                        \
+               VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);    \
+       }
+
+ATOMIC_SUB(8, uint8_t, int8_t)
+ATOMIC_SUB(char, uchar_t, signed char)
+ATOMIC_SUB(16, uint16_t, int16_t)
+ATOMIC_SUB(short, ushort_t, short)
+ATOMIC_SUB(32, uint32_t, int32_t)
+ATOMIC_SUB(int, uint_t, int)
+ATOMIC_SUB(long, ulong_t, long)
+ATOMIC_SUB(64, uint64_t, int64_t)
+
+/*
+ * Pointer variant: *target is treated as a byte address (caddr_t) and
+ * moved back by "bits" bytes under the same global mutex.
+ */
+void atomic_sub_ptr(volatile void *target, ssize_t bits)
+{
+       VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
+       *(caddr_t *)target -= bits;
+       VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
+}
+
+
 #define ATOMIC_OR(name, type) \
        void atomic_or_##name(volatile type *target, type bits)         \
        {                                                               \
@@ -216,6 +241,37 @@ void *atomic_add_ptr_nv(volatile void *target, ssize_t bits)
 }
 
 
+/*
+ * "_nv" flavor: same mutex-serialized subtraction, but also returns the
+ * new (post-subtraction) value, captured while the lock is still held.
+ */
+#define ATOMIC_SUB_NV(name, type1, type2) \
+       type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits)\
+       {                                                               \
+               type1 rc;                                               \
+               VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);      \
+               rc = (*target -= bits);                                 \
+               VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);    \
+               return rc;                                              \
+       }
+
+ATOMIC_SUB_NV(8, uint8_t, int8_t)
+ATOMIC_SUB_NV(char, uchar_t, signed char)
+ATOMIC_SUB_NV(16, uint16_t, int16_t)
+ATOMIC_SUB_NV(short, ushort_t, short)
+ATOMIC_SUB_NV(32, uint32_t, int32_t)
+ATOMIC_SUB_NV(int, uint_t, int)
+ATOMIC_SUB_NV(long, ulong_t, long)
+ATOMIC_SUB_NV(64, uint64_t, int64_t)
+
+/*
+ * Pointer "_nv" variant: steps the stored byte address back by "bits"
+ * and returns the new pointer value, all under the global mutex.
+ */
+void *atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
+{
+       void *ptr;
+
+       VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
+       ptr = (*(caddr_t *)target -= bits);
+       VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
+
+       return ptr;
+}
+
+
 #define ATOMIC_OR_NV(name, type) \
        type atomic_or_##name##_nv(volatile type *target, type bits)    \
        {                                                               \
index 93c04bfb825cd8530ace2a792b3747f15edf51e9..d3d425090e1c266e097a71eb10289a3eb31388bb 100644 (file)
        SET_SIZE(atomic_add_int)
        SET_SIZE(atomic_add_32)
 
+       ENTRY(atomic_sub_8)
+       ALTENTRY(atomic_sub_char)
+       movl    4(%esp), %eax           /* %eax = target address */
+       movl    8(%esp), %ecx           /* %cl = delta */
+       lock
+       subb    %cl, (%eax)             /* atomic *target -= delta */
+       ret
+       SET_SIZE(atomic_sub_char)
+       SET_SIZE(atomic_sub_8)
+
+       ENTRY(atomic_sub_16)
+       ALTENTRY(atomic_sub_short)
+       movl    4(%esp), %eax           /* %eax = target address */
+       movl    8(%esp), %ecx           /* %cx = delta */
+       lock
+       subw    %cx, (%eax)             /* atomic *target -= delta */
+       ret
+       SET_SIZE(atomic_sub_short)
+       SET_SIZE(atomic_sub_16)
+
+       ENTRY(atomic_sub_32)
+       ALTENTRY(atomic_sub_int)
+       ALTENTRY(atomic_sub_ptr)        /* int/long/ptr are all 32-bit on i386 */
+       ALTENTRY(atomic_sub_long)
+       movl    4(%esp), %eax           /* %eax = target address */
+       movl    8(%esp), %ecx           /* %ecx = delta */
+       lock
+       subl    %ecx, (%eax)            /* atomic *target -= delta */
+       ret
+       SET_SIZE(atomic_sub_long)
+       SET_SIZE(atomic_sub_ptr)
+       SET_SIZE(atomic_sub_int)
+       SET_SIZE(atomic_sub_32)
+
        ENTRY(atomic_or_8)
        ALTENTRY(atomic_or_uchar)
        movl    4(%esp), %eax
        SET_SIZE(atomic_add_int_nv)
        SET_SIZE(atomic_add_32_nv)
 
+       ENTRY(atomic_sub_8_nv)
+       ALTENTRY(atomic_sub_char_nv)
+       movl    4(%esp), %edx           /* %edx = target address */
+       movb    (%edx), %al             /* %al = current value */
+1:
+       movl    8(%esp), %ecx           /* %ecx = delta */
+       negb    %cl                     /* %cl = -delta */
+       addb    %al, %cl                /* %cl = new value (old - delta) */
+       lock
+       cmpxchgb %cl, (%edx)            /* store iff *target still == %al */
+       jne     1b                      /* %al holds fresh value; retry */
+       movzbl  %cl, %eax               /* return new value */
+       ret
+       SET_SIZE(atomic_sub_char_nv)
+       SET_SIZE(atomic_sub_8_nv)
+
+       ENTRY(atomic_sub_16_nv)
+       ALTENTRY(atomic_sub_short_nv)
+       movl    4(%esp), %edx           /* %edx = target address */
+       movw    (%edx), %ax             /* %ax = current value */
+1:
+       movl    8(%esp), %ecx           /* %ecx = delta */
+       negw    %cx                     /* %cx = -delta */
+       addw    %ax, %cx                /* %cx = new value (old - delta) */
+       lock
+       cmpxchgw %cx, (%edx)            /* store iff *target still == %ax */
+       jne     1b                      /* %ax holds fresh value; retry */
+       movzwl  %cx, %eax               /* return new value */
+       ret
+       SET_SIZE(atomic_sub_short_nv)
+       SET_SIZE(atomic_sub_16_nv)
+
+       ENTRY(atomic_sub_32_nv)
+       ALTENTRY(atomic_sub_int_nv)
+       ALTENTRY(atomic_sub_ptr_nv)     /* int/long/ptr are all 32-bit on i386 */
+       ALTENTRY(atomic_sub_long_nv)
+       movl    4(%esp), %edx           /* %edx = target address */
+       movl    (%edx), %eax            /* %eax = current value */
+1:
+       movl    8(%esp), %ecx           /* %ecx = delta */
+       negl    %ecx                    /* %ecx = -delta */
+       addl    %eax, %ecx              /* %ecx = new value (old - delta) */
+       lock
+       cmpxchgl %ecx, (%edx)           /* store iff *target still == %eax */
+       jne     1b                      /* %eax holds fresh value; retry */
+       movl    %ecx, %eax              /* return new value */
+       ret
+       SET_SIZE(atomic_sub_long_nv)
+       SET_SIZE(atomic_sub_ptr_nv)
+       SET_SIZE(atomic_sub_int_nv)
+       SET_SIZE(atomic_sub_32_nv)
+
        /*
         * NOTE: If atomic_add_64 and atomic_add_64_nv are ever
         * separated, it is important to edit the libc i386 platform
        SET_SIZE(atomic_add_64_nv)
        SET_SIZE(atomic_add_64)
 
+       ENTRY(atomic_sub_64)
+       ALTENTRY(atomic_sub_64_nv)
+       pushl   %edi
+       pushl   %ebx
+       movl    12(%esp), %edi          /* %edi = target address */
+       movl    (%edi), %eax
+       movl    4(%edi), %edx           /* %edx:%eax = current value */
+1:
+       movl    16(%esp), %ebx
+       movl    20(%esp), %ecx          /* %ecx:%ebx = delta */
+       negl    %ebx
+       adcl    $0, %ecx
+       negl    %ecx                    /* %ecx:%ebx = -delta (64-bit negate) */
+       addl    %eax, %ebx
+       adcl    %edx, %ecx              /* %ecx:%ebx = new value (old - delta) */
+       lock
+       cmpxchg8b (%edi)                /* store iff *target still == %edx:%eax */
+       jne     1b                      /* %edx:%eax reloaded on failure; retry */
+       movl    %ebx, %eax
+       movl    %ecx, %edx              /* return new value */
+       popl    %ebx
+       popl    %edi
+       ret
+       SET_SIZE(atomic_sub_64_nv)
+       SET_SIZE(atomic_sub_64)
+
        ENTRY(atomic_or_8_nv)
        ALTENTRY(atomic_or_uchar_nv)
        movl    4(%esp), %edx
index e321bf7321702c880605f1d6f262e1f62abf0ff8..49c9b2ad153594db43b29591e1cf4daebca1814e 100644 (file)
        SET_SIZE(atomic_add_ptr)
        SET_SIZE(atomic_add_64)
 
+       ENTRY(atomic_sub_8)
+       ALTENTRY(atomic_sub_char)
+       lock
+       subb    %sil, (%rdi)            /* atomic *target(%rdi) -= delta(%sil) */
+       ret
+       SET_SIZE(atomic_sub_char)
+       SET_SIZE(atomic_sub_8)
+
+       ENTRY(atomic_sub_16)
+       ALTENTRY(atomic_sub_short)
+       lock
+       subw    %si, (%rdi)             /* atomic *target(%rdi) -= delta(%si) */
+       ret
+       SET_SIZE(atomic_sub_short)
+       SET_SIZE(atomic_sub_16)
+
+       ENTRY(atomic_sub_32)
+       ALTENTRY(atomic_sub_int)
+       lock
+       subl    %esi, (%rdi)            /* atomic *target(%rdi) -= delta(%esi) */
+       ret
+       SET_SIZE(atomic_sub_int)
+       SET_SIZE(atomic_sub_32)
+
+       ENTRY(atomic_sub_64)
+       ALTENTRY(atomic_sub_ptr)        /* long/ptr are 64-bit on x86_64 */
+       ALTENTRY(atomic_sub_long)
+       lock
+       subq    %rsi, (%rdi)            /* atomic *target(%rdi) -= delta(%rsi) */
+       ret
+       SET_SIZE(atomic_sub_long)
+       SET_SIZE(atomic_sub_ptr)
+       SET_SIZE(atomic_sub_64)
+
        ENTRY(atomic_or_8)
        ALTENTRY(atomic_or_uchar)
        lock
        SET_SIZE(atomic_add_ptr_nv)
        SET_SIZE(atomic_add_64_nv)
 
+       ENTRY(atomic_sub_8_nv)
+       ALTENTRY(atomic_sub_char_nv)
+       movb    (%rdi), %al             /* %al = current value */
+1:
+       movb    %sil, %cl               /* %cl = delta */
+       negb    %cl                     /* %cl = -delta */
+       addb    %al, %cl                /* %cl = new value (old - delta) */
+       lock
+       cmpxchgb %cl, (%rdi)            /* store iff *target still == %al */
+       jne     1b                      /* %al holds fresh value; retry */
+       movzbl  %cl, %eax               /* return new value */
+       ret
+       SET_SIZE(atomic_sub_char_nv)
+       SET_SIZE(atomic_sub_8_nv)
+
+       ENTRY(atomic_sub_16_nv)
+       ALTENTRY(atomic_sub_short_nv)
+       movw    (%rdi), %ax             /* %ax = current value */
+1:
+       movw    %si, %cx                /* %cx = delta */
+       negw    %cx                     /* %cx = -delta */
+       addw    %ax, %cx                /* %cx = new value (old - delta) */
+       lock
+       cmpxchgw %cx, (%rdi)            /* store iff *target still == %ax */
+       jne     1b                      /* %ax holds fresh value; retry */
+       movzwl  %cx, %eax               /* return new value */
+       ret
+       SET_SIZE(atomic_sub_short_nv)
+       SET_SIZE(atomic_sub_16_nv)
+
+       ENTRY(atomic_sub_32_nv)
+       ALTENTRY(atomic_sub_int_nv)
+       movl    (%rdi), %eax            /* %eax = current value */
+1:
+       movl    %esi, %ecx              /* %ecx = delta */
+       negl    %ecx                    /* %ecx = -delta */
+       addl    %eax, %ecx              /* %ecx = new value (old - delta) */
+       lock
+       cmpxchgl %ecx, (%rdi)           /* store iff *target still == %eax */
+       jne     1b                      /* %eax holds fresh value; retry */
+       movl    %ecx, %eax              /* return new value */
+       ret
+       SET_SIZE(atomic_sub_int_nv)
+       SET_SIZE(atomic_sub_32_nv)
+
+       ENTRY(atomic_sub_64_nv)
+       ALTENTRY(atomic_sub_ptr_nv)     /* long/ptr are 64-bit on x86_64 */
+       ALTENTRY(atomic_sub_long_nv)
+       movq    (%rdi), %rax            /* %rax = current value */
+1:
+       movq    %rsi, %rcx              /* %rcx = delta */
+       negq    %rcx                    /* %rcx = -delta */
+       addq    %rax, %rcx              /* %rcx = new value (old - delta) */
+       lock
+       cmpxchgq %rcx, (%rdi)           /* store iff *target still == %rax */
+       jne     1b                      /* %rax holds fresh value; retry */
+       movq    %rcx, %rax              /* return new value */
+       ret
+       SET_SIZE(atomic_sub_long_nv)
+       SET_SIZE(atomic_sub_ptr_nv)
+       SET_SIZE(atomic_sub_64_nv)
+
        ENTRY(atomic_and_8_nv)
        ALTENTRY(atomic_and_uchar_nv)
        movb    (%rdi), %al
index 5080001528f8eb3b36e63a4672e0652d8909f55d..9b0775bb9ec752134a91140e41ab73d80b714438 100644 (file)
@@ -78,6 +78,21 @@ extern void atomic_add_long(volatile ulong_t *, long);
 extern void atomic_add_64(volatile uint64_t *, int64_t);
 #endif
 
+/*
+ * Subtract delta from target
+ */
+extern void atomic_sub_8(volatile uint8_t *, int8_t);
+extern void atomic_sub_char(volatile uchar_t *, signed char);
+extern void atomic_sub_16(volatile uint16_t *, int16_t);
+extern void atomic_sub_short(volatile ushort_t *, short);
+extern void atomic_sub_32(volatile uint32_t *, int32_t);
+extern void atomic_sub_int(volatile uint_t *, int);
+extern void atomic_sub_ptr(volatile void *, ssize_t);
+extern void atomic_sub_long(volatile ulong_t *, long);
+#if defined(_INT64_TYPE)
+extern void atomic_sub_64(volatile uint64_t *, int64_t);
+#endif
+
 /*
  * logical OR bits with target
  */
@@ -157,6 +172,21 @@ extern ulong_t atomic_add_long_nv(volatile ulong_t *, long);
 extern uint64_t atomic_add_64_nv(volatile uint64_t *, int64_t);
 #endif
 
+/*
+ * Subtract delta from target and return new value.
+ */
+extern uint8_t atomic_sub_8_nv(volatile uint8_t *, int8_t);
+extern uchar_t atomic_sub_char_nv(volatile uchar_t *, signed char);
+extern uint16_t atomic_sub_16_nv(volatile uint16_t *, int16_t);
+extern ushort_t atomic_sub_short_nv(volatile ushort_t *, short);
+extern uint32_t atomic_sub_32_nv(volatile uint32_t *, int32_t);
+extern uint_t atomic_sub_int_nv(volatile uint_t *, int);
+extern void *atomic_sub_ptr_nv(volatile void *, ssize_t);
+extern ulong_t atomic_sub_long_nv(volatile ulong_t *, long);
+#if defined(_INT64_TYPE)
+extern uint64_t atomic_sub_64_nv(volatile uint64_t *, int64_t);
+#endif
+
 /*
  * logical OR bits with target and return new value.
  */