From 0ef22fb32dcf4c9d4c8ab719b8ae08e34982f6d2 Mon Sep 17 00:00:00 2001
From: Ivan Maidanski
Date: Fri, 4 Jan 2013 21:27:42 +0400
Subject: [PATCH] Generate 'loadstore' headers from templates

* src/Makefile.am (EXTRA_DIST): Add acquire_release_volatile.template,
aligned_atomic_load_store.template, atomic_load_store.template entries.
* src/Makefile.am (BUILT_SOURCES): Add acquire_release_volatile.h,
aligned_atomic_load_store.h, atomic_load_store.h,
char_acquire_release_volatile.h, char_atomic_load_store.h,
int_acquire_release_volatile.h, int_aligned_atomic_load_store.h,
int_atomic_load_store.h, short_acquire_release_volatile.h,
short_aligned_atomic_load_store.h, short_atomic_load_store.h entries.
* src/Makefile.am
(atomic_ops/sysdeps/loadstore/acquire_release_volatile.h,
atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h,
atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h,
atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h,
atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.h,
atomic_ops/sysdeps/loadstore/int_aligned_atomic_load_store.h,
atomic_ops/sysdeps/loadstore/short_aligned_atomic_load_store.h,
atomic_ops/sysdeps/loadstore/atomic_load_store.h,
atomic_ops/sysdeps/loadstore/char_atomic_load_store.h,
atomic_ops/sysdeps/loadstore/int_atomic_load_store.h,
atomic_ops/sysdeps/loadstore/short_atomic_load_store.h): New rules (to
generate each header from the corresponding template).
* src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.template:
New template header (code copied from the corresponding header of the
"loadstore" folder and parameterized with XSIZE and XCTYPE); update
header comment.
* src/atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template:
Likewise.
* src/atomic_ops/sysdeps/loadstore/atomic_load_store.template: Likewise.
* src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.template
(AO_GCC_BARRIER): Add TODO item.
* src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.h:
Regenerate.
* src/atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h:
Likewise.
* src/atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h:
Likewise.
* src/atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h:
Likewise.
* src/atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.h:
Likewise.
* src/atomic_ops/sysdeps/loadstore/int_aligned_atomic_load_store.h:
Likewise.
* src/atomic_ops/sysdeps/loadstore/short_aligned_atomic_load_store.h:
Likewise.
* src/atomic_ops/sysdeps/loadstore/atomic_load_store.h: Likewise.
* src/atomic_ops/sysdeps/loadstore/char_atomic_load_store.h: Likewise.
* src/atomic_ops/sysdeps/loadstore/int_atomic_load_store.h: Likewise.
* src/atomic_ops/sysdeps/loadstore/short_atomic_load_store.h: Likewise.
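
For illustration (an editorial sketch, not from the original log entry):
each sized header is obtained by sed-substituting the XSIZE_ and XCTYPE
placeholders of a template, as the new Makefile.am rules below do. For
the char_ variant of atomic_load_store.template, for instance, the
template code

    AO_INLINE XCTYPE
    AO_XSIZE_load(const volatile XCTYPE *addr)

becomes

    AO_INLINE unsigned/**/char
    AO_char_load(const volatile unsigned/**/char *addr)

in the generated char_atomic_load_store.h; the /**/ comment is
equivalent to whitespace to the C compiler and is presumably used so
that the two-word type name can be passed through the unquoted sed
expression without embedding a space.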
--- src/Makefile.am | 67 +++++++++++++++++-- .../loadstore/acquire_release_volatile.h | 51 +++++++------- .../acquire_release_volatile.template | 61 +++++++++++++++++ .../loadstore/aligned_atomic_load_store.h | 14 ++-- .../aligned_atomic_load_store.template | 42 ++++++++++++ .../sysdeps/loadstore/atomic_load_store.h | 10 +-- .../loadstore/atomic_load_store.template | 40 +++++++++++ .../loadstore/char_acquire_release_volatile.h | 44 +++++++----- .../loadstore/char_atomic_load_store.h | 14 ++-- .../loadstore/int_acquire_release_volatile.h | 44 +++++++----- .../loadstore/int_aligned_atomic_load_store.h | 18 ++--- .../sysdeps/loadstore/int_atomic_load_store.h | 14 ++-- .../short_acquire_release_volatile.h | 44 +++++++----- .../short_aligned_atomic_load_store.h | 20 +++--- .../loadstore/short_atomic_load_store.h | 16 ++--- 15 files changed, 360 insertions(+), 139 deletions(-) create mode 100644 src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.template create mode 100644 src/atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template create mode 100644 src/atomic_ops/sysdeps/loadstore/atomic_load_store.template diff --git a/src/Makefile.am b/src/Makefile.am index 9ff8e73..bc3d5ed 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -1,7 +1,7 @@ AM_CFLAGS=@PICFLAG@ AM_CPPFLAGS = -I$(top_builddir)/src -I$(top_srcdir)/src -include_HEADERS=atomic_ops.h atomic_ops_stack.h atomic_ops_malloc.h +include_HEADERS = atomic_ops.h atomic_ops_stack.h atomic_ops_malloc.h lib_LTLIBRARIES = libatomic_ops.la libatomic_ops_gpl.la if NEED_ASM libatomic_ops_la_SOURCES = atomic_ops.c atomic_ops_sysdeps.S @@ -13,12 +13,27 @@ libatomic_ops_la_LDFLAGS = -version-info 1:3:0 -no-undefined libatomic_ops_gpl_la_SOURCES = atomic_ops_stack.c atomic_ops_malloc.c libatomic_ops_gpl_la_LDFLAGS = -version-info 1:3:0 -no-undefined -EXTRA_DIST=Makefile.msft atomic_ops/sysdeps/README \ +EXTRA_DIST = Makefile.msft atomic_ops/sysdeps/README \ atomic_ops/generalize-arithm.template \ - atomic_ops/generalize-small.template atomic_ops/sysdeps/sunc/sparc.S + atomic_ops/generalize-small.template \ + atomic_ops/sysdeps/loadstore/acquire_release_volatile.template \ + atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template \ + atomic_ops/sysdeps/loadstore/atomic_load_store.template \ + atomic_ops/sysdeps/sunc/sparc.S BUILT_SOURCES = atomic_ops/generalize-arithm.h \ - atomic_ops/generalize-small.h + atomic_ops/generalize-small.h \ + atomic_ops/sysdeps/loadstore/acquire_release_volatile.h \ + atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.h \ + atomic_ops/sysdeps/loadstore/atomic_load_store.h \ + atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h \ + atomic_ops/sysdeps/loadstore/char_atomic_load_store.h \ + atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h \ + atomic_ops/sysdeps/loadstore/int_aligned_atomic_load_store.h \ + atomic_ops/sysdeps/loadstore/int_atomic_load_store.h \ + atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h \ + atomic_ops/sysdeps/loadstore/short_aligned_atomic_load_store.h \ + atomic_ops/sysdeps/loadstore/short_atomic_load_store.h #Private Headers privatedir=${includedir}/ @@ -95,3 +110,47 @@ atomic_ops/generalize-arithm.h: atomic_ops/generalize-arithm.template sed -e s:XSIZE_:short_:g -e s:XCTYPE:unsigned/**/short:g $? >> $@ sed -e s:XSIZE_:int_:g -e s:XCTYPE:unsigned:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? 
>> $@ + +atomic_ops/sysdeps/loadstore/acquire_release_volatile.h: \ + atomic_ops/sysdeps/loadstore/acquire_release_volatile.template + sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@ + +atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h: \ + atomic_ops/sysdeps/loadstore/acquire_release_volatile.template + sed -e s:XSIZE_:char_:g -e s:XCTYPE:unsigned/**/char:g $? > $@ + +atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h: \ + atomic_ops/sysdeps/loadstore/acquire_release_volatile.template + sed -e s:XSIZE_:int_:g -e s:XCTYPE:unsigned:g $? > $@ + +atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h: \ + atomic_ops/sysdeps/loadstore/acquire_release_volatile.template + sed -e s:XSIZE_:short_:g -e s:XCTYPE:unsigned/**/short:g $? > $@ + +atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.h: \ + atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template + sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@ + +atomic_ops/sysdeps/loadstore/int_aligned_atomic_load_store.h: \ + atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template + sed -e s:XSIZE_:int_:g -e s:XCTYPE:unsigned:g $? > $@ + +atomic_ops/sysdeps/loadstore/short_aligned_atomic_load_store.h: \ + atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template + sed -e s:XSIZE_:short_:g -e s:XCTYPE:unsigned/**/short:g $? > $@ + +atomic_ops/sysdeps/loadstore/atomic_load_store.h: \ + atomic_ops/sysdeps/loadstore/atomic_load_store.template + sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@ + +atomic_ops/sysdeps/loadstore/char_atomic_load_store.h: \ + atomic_ops/sysdeps/loadstore/atomic_load_store.template + sed -e s:XSIZE_:char_:g -e s:XCTYPE:unsigned/**/char:g $? > $@ + +atomic_ops/sysdeps/loadstore/int_atomic_load_store.h: \ + atomic_ops/sysdeps/loadstore/atomic_load_store.template + sed -e s:XSIZE_:int_:g -e s:XCTYPE:unsigned:g $? > $@ + +atomic_ops/sysdeps/loadstore/short_atomic_load_store.h: \ + atomic_ops/sysdeps/loadstore/atomic_load_store.template + sed -e s:XSIZE_:short_:g -e s:XCTYPE:unsigned/**/short:g $? > $@ diff --git a/src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.h b/src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.h index 6d54af9..51c8560 100644 --- a/src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.h +++ b/src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. + * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -20,43 +20,42 @@ * SOFTWARE. */ -/* - * This file adds definitions appropriate for environments in which an AO_t - * volatile load has acquire semantics, and an AO_t volatile store has release - * semantics. This is arguably supposed to be true with the standard Itanium - * software conventions. - */ +/* This file adds definitions appropriate for environments in which */ +/* volatile load of a given type has acquire semantics, and volatile */ +/* store of a given type has release semantics. This is arguably */ +/* supposed to be true with the standard Itanium software conventions. */ +/* Empirically gcc/ia64 does some reordering of ordinary operations */ +/* around volatiles even when we think it should not. GCC v3.3 and */ +/* earlier could reorder a volatile store with another store. 
As of */ +/* March 2005, gcc pre-4 reuses some previously computed common */ +/* subexpressions across a volatile load; hence, we now add compiler */ +/* barriers for gcc. */ -/* - * Empirically gcc/ia64 does some reordering of ordinary operations around volatiles - * even when we think it shouldn't. Gcc 3.3 and earlier could reorder a volatile store - * with another store. As of March 2005, gcc pre-4 reused previously computed - * common subexpressions across a volatile load. - * Hence we now add compiler barriers for gcc. - */ -#if !defined(AO_GCC_BARRIER) -# if defined(__GNUC__) -# define AO_GCC_BARRIER() AO_compiler_barrier() -# else -# define AO_GCC_BARRIER() -# endif +#ifndef AO_GCC_BARRIER + /* TODO: Check GCC version (if workaround not needed for modern GCC). */ +# if defined(__GNUC__) +# define AO_GCC_BARRIER() AO_compiler_barrier() +# else +# define AO_GCC_BARRIER() (void)0 +# endif #endif AO_INLINE AO_t -AO_load_acquire(const volatile AO_t *p) +AO_load_acquire(const volatile AO_t *addr) { - AO_t result = *p; - /* A normal volatile load generates an ld.acq */ + AO_t result = *addr; + + /* A normal volatile load generates an ld.acq (on IA-64). */ AO_GCC_BARRIER(); return result; } #define AO_HAVE_load_acquire AO_INLINE void -AO_store_release(volatile AO_t *p, AO_t val) +AO_store_release(volatile AO_t *addr, AO_t new_val) { AO_GCC_BARRIER(); - /* A normal volatile store generates an st.rel */ - *p = val; + /* A normal volatile store generates an st.rel (on IA-64). */ + *addr = new_val; } #define AO_HAVE_store_release diff --git a/src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.template b/src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.template new file mode 100644 index 0000000..10f45a9 --- /dev/null +++ b/src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.template @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* This file adds definitions appropriate for environments in which */ +/* volatile load of a given type has acquire semantics, and volatile */ +/* store of a given type has release semantics. This is arguably */ +/* supposed to be true with the standard Itanium software conventions. */ +/* Empirically gcc/ia64 does some reordering of ordinary operations */ +/* around volatiles even when we think it should not. GCC v3.3 and */ +/* earlier could reorder a volatile store with another store. 
As of */ +/* March 2005, gcc pre-4 reuses some previously computed common */ +/* subexpressions across a volatile load; hence, we now add compiler */ +/* barriers for gcc. */ + +#ifndef AO_GCC_BARRIER + /* TODO: Check GCC version (if workaround not needed for modern GCC). */ +# if defined(__GNUC__) +# define AO_GCC_BARRIER() AO_compiler_barrier() +# else +# define AO_GCC_BARRIER() (void)0 +# endif +#endif + +AO_INLINE XCTYPE +AO_XSIZE_load_acquire(const volatile XCTYPE *addr) +{ + XCTYPE result = *addr; + + /* A normal volatile load generates an ld.acq (on IA-64). */ + AO_GCC_BARRIER(); + return result; +} +#define AO_HAVE_XSIZE_load_acquire + +AO_INLINE void +AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE new_val) +{ + AO_GCC_BARRIER(); + /* A normal volatile store generates an st.rel (on IA-64). */ + *addr = new_val; +} +#define AO_HAVE_XSIZE_store_release diff --git a/src/atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.h index d24fe1d..509f342 100644 --- a/src/atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. + * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -20,14 +20,14 @@ * SOFTWARE. */ -/* Definitions for architectures on which loads and stores of AO_t are */ -/* atomic fo all legal alignments. */ +/* Definitions for architectures on which loads and stores of given */ +/* type are atomic for all legal alignments. */ AO_INLINE AO_t AO_load(const volatile AO_t *addr) { - assert(((size_t)addr & (sizeof(AO_t) - 1)) == 0); - /* Cast away the volatile for architectures where */ + assert(((size_t)addr & (sizeof(*addr) - 1)) == 0); + /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier semantics. */ return *(AO_t *)addr; } @@ -36,7 +36,7 @@ AO_load(const volatile AO_t *addr) AO_INLINE void AO_store(volatile AO_t *addr, AO_t new_val) { - assert(((size_t)addr & (sizeof(AO_t) - 1)) == 0); - (*(AO_t *)addr) = new_val; + assert(((size_t)addr & (sizeof(*addr) - 1)) == 0); + *(AO_t *)addr = new_val; } #define AO_HAVE_store diff --git a/src/atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template b/src/atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template new file mode 100644 index 0000000..d10f670 --- /dev/null +++ b/src/atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* Definitions for architectures on which loads and stores of given */ +/* type are atomic for all legal alignments. */ + +AO_INLINE XCTYPE +AO_XSIZE_load(const volatile XCTYPE *addr) +{ + assert(((size_t)addr & (sizeof(*addr) - 1)) == 0); + /* Cast away the volatile for architectures like IA64 where */ + /* volatile adds barrier semantics. */ + return *(XCTYPE *)addr; +} +#define AO_HAVE_XSIZE_load + +AO_INLINE void +AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val) +{ + assert(((size_t)addr & (sizeof(*addr) - 1)) == 0); + *(XCTYPE *)addr = new_val; +} +#define AO_HAVE_XSIZE_store diff --git a/src/atomic_ops/sysdeps/loadstore/atomic_load_store.h b/src/atomic_ops/sysdeps/loadstore/atomic_load_store.h index 1210891..9b83ced 100644 --- a/src/atomic_ops/sysdeps/loadstore/atomic_load_store.h +++ b/src/atomic_ops/sysdeps/loadstore/atomic_load_store.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. + * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -20,21 +20,21 @@ * SOFTWARE. */ -/* Definitions for architectures on which loads and stores of AO_t are */ -/* atomic for all legal alignments. */ +/* Definitions for architectures on which loads and stores of given */ +/* type are atomic for all legal alignments. */ AO_INLINE AO_t AO_load(const volatile AO_t *addr) { /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier semantics. */ - return (*(const AO_t *)addr); + return *(const AO_t *)addr; } #define AO_HAVE_load AO_INLINE void AO_store(volatile AO_t *addr, AO_t new_val) { - (*(AO_t *)addr) = new_val; + *(AO_t *)addr = new_val; } #define AO_HAVE_store diff --git a/src/atomic_ops/sysdeps/loadstore/atomic_load_store.template b/src/atomic_ops/sysdeps/loadstore/atomic_load_store.template new file mode 100644 index 0000000..0364dc2 --- /dev/null +++ b/src/atomic_ops/sysdeps/loadstore/atomic_load_store.template @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* Definitions for architectures on which loads and stores of given */ +/* type are atomic for all legal alignments. */ + +AO_INLINE XCTYPE +AO_XSIZE_load(const volatile XCTYPE *addr) +{ + /* Cast away the volatile for architectures like IA64 where */ + /* volatile adds barrier semantics. */ + return *(const XCTYPE *)addr; +} +#define AO_HAVE_XSIZE_load + +AO_INLINE void +AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val) +{ + *(XCTYPE *)addr = new_val; +} +#define AO_HAVE_XSIZE_store diff --git a/src/atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h b/src/atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h index c988488..6de6b8a 100644 --- a/src/atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h +++ b/src/atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h @@ -20,34 +20,42 @@ * SOFTWARE. */ -/* - * This file adds definitions appropriate for environments in which an unsigned char - * volatile load has acquire semantics, and an unsigned char volatile store has release - * semantics. This is true with the standard Itanium ABI. - */ -#if !defined(AO_GCC_BARRIER) -# if defined(__GNUC__) -# define AO_GCC_BARRIER() AO_compiler_barrier() -# else -# define AO_GCC_BARRIER() -# endif +/* This file adds definitions appropriate for environments in which */ +/* volatile load of a given type has acquire semantics, and volatile */ +/* store of a given type has release semantics. This is arguably */ +/* supposed to be true with the standard Itanium software conventions. */ +/* Empirically gcc/ia64 does some reordering of ordinary operations */ +/* around volatiles even when we think it should not. GCC v3.3 and */ +/* earlier could reorder a volatile store with another store. As of */ +/* March 2005, gcc pre-4 reuses some previously computed common */ +/* subexpressions across a volatile load; hence, we now add compiler */ +/* barriers for gcc. */ + +#ifndef AO_GCC_BARRIER + /* TODO: Check GCC version (if workaround not needed for modern GCC). */ +# if defined(__GNUC__) +# define AO_GCC_BARRIER() AO_compiler_barrier() +# else +# define AO_GCC_BARRIER() (void)0 +# endif #endif -AO_INLINE unsigned char -AO_char_load_acquire(const volatile unsigned char *p) +AO_INLINE unsigned/**/char +AO_char_load_acquire(const volatile unsigned/**/char *addr) { - unsigned char result = *p; - /* A normal volatile load generates an ld.acq */ + unsigned/**/char result = *addr; + + /* A normal volatile load generates an ld.acq (on IA-64). */ AO_GCC_BARRIER(); return result; } #define AO_HAVE_char_load_acquire AO_INLINE void -AO_char_store_release(volatile unsigned char *p, unsigned char val) +AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char new_val) { AO_GCC_BARRIER(); - /* A normal volatile store generates an st.rel */ - *p = val; + /* A normal volatile store generates an st.rel (on IA-64). */ + *addr = new_val; } #define AO_HAVE_char_store_release diff --git a/src/atomic_ops/sysdeps/loadstore/char_atomic_load_store.h b/src/atomic_ops/sysdeps/loadstore/char_atomic_load_store.h index ae7005a..1f9bef0 100644 --- a/src/atomic_ops/sysdeps/loadstore/char_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/loadstore/char_atomic_load_store.h @@ -20,21 +20,21 @@ * SOFTWARE. 
*/ -/* Definitions for architectures on which loads and stores of unsigned */ -/* char are atomic for all legal alignments. */ +/* Definitions for architectures on which loads and stores of given */ +/* type are atomic for all legal alignments. */ -AO_INLINE unsigned char -AO_char_load(const volatile unsigned char *addr) +AO_INLINE unsigned/**/char +AO_char_load(const volatile unsigned/**/char *addr) { /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier semantics. */ - return (*(const unsigned char *)addr); + return *(const unsigned/**/char *)addr; } #define AO_HAVE_char_load AO_INLINE void -AO_char_store(volatile unsigned char *addr, unsigned char new_val) +AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char new_val) { - (*(unsigned char *)addr) = new_val; + *(unsigned/**/char *)addr = new_val; } #define AO_HAVE_char_store diff --git a/src/atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h b/src/atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h index 01037a2..6b4875d 100644 --- a/src/atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h +++ b/src/atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h @@ -20,34 +20,42 @@ * SOFTWARE. */ -/* - * This file adds definitions appropriate for environments in which an unsigned - * int volatile load has acquire semantics, and an unsigned short volatile - * store has release semantics. This is true with the standard Itanium ABI. - */ -#if !defined(AO_GCC_BARRIER) -# if defined(__GNUC__) -# define AO_GCC_BARRIER() AO_compiler_barrier() -# else -# define AO_GCC_BARRIER() -# endif +/* This file adds definitions appropriate for environments in which */ +/* volatile load of a given type has acquire semantics, and volatile */ +/* store of a given type has release semantics. This is arguably */ +/* supposed to be true with the standard Itanium software conventions. */ +/* Empirically gcc/ia64 does some reordering of ordinary operations */ +/* around volatiles even when we think it should not. GCC v3.3 and */ +/* earlier could reorder a volatile store with another store. As of */ +/* March 2005, gcc pre-4 reuses some previously computed common */ +/* subexpressions across a volatile load; hence, we now add compiler */ +/* barriers for gcc. */ + +#ifndef AO_GCC_BARRIER + /* TODO: Check GCC version (if workaround not needed for modern GCC). */ +# if defined(__GNUC__) +# define AO_GCC_BARRIER() AO_compiler_barrier() +# else +# define AO_GCC_BARRIER() (void)0 +# endif #endif -AO_INLINE unsigned int -AO_int_load_acquire(const volatile unsigned int *p) +AO_INLINE unsigned +AO_int_load_acquire(const volatile unsigned *addr) { - unsigned int result = *p; - /* A normal volatile load generates an ld.acq */ + unsigned result = *addr; + + /* A normal volatile load generates an ld.acq (on IA-64). */ AO_GCC_BARRIER(); return result; } #define AO_HAVE_int_load_acquire AO_INLINE void -AO_int_store_release(volatile unsigned int *p, unsigned int val) +AO_int_store_release(volatile unsigned *addr, unsigned new_val) { AO_GCC_BARRIER(); - /* A normal volatile store generates an st.rel */ - *p = val; + /* A normal volatile store generates an st.rel (on IA-64). 
*/ + *addr = new_val; } #define AO_HAVE_int_store_release diff --git a/src/atomic_ops/sysdeps/loadstore/int_aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/loadstore/int_aligned_atomic_load_store.h index 1dcb3b2..4ef7930 100644 --- a/src/atomic_ops/sysdeps/loadstore/int_aligned_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/loadstore/int_aligned_atomic_load_store.h @@ -20,23 +20,23 @@ * SOFTWARE. */ -/* Definitions for architectures on which loads and stores of unsigned */ -/* int are atomic for all legal alignments. */ +/* Definitions for architectures on which loads and stores of given */ +/* type are atomic for all legal alignments. */ -AO_INLINE unsigned int -AO_int_load(const volatile unsigned int *addr) +AO_INLINE unsigned +AO_int_load(const volatile unsigned *addr) { - assert(((size_t)addr & (sizeof(unsigned int) - 1)) == 0); + assert(((size_t)addr & (sizeof(*addr) - 1)) == 0); /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier semantics. */ - return (*(unsigned int *)addr); + return *(unsigned *)addr; } #define AO_HAVE_int_load AO_INLINE void -AO_int_store(volatile unsigned int *addr, unsigned int new_val) +AO_int_store(volatile unsigned *addr, unsigned new_val) { - assert(((size_t)addr & (sizeof(unsigned int) - 1)) == 0); - (*(unsigned int *)addr) = new_val; + assert(((size_t)addr & (sizeof(*addr) - 1)) == 0); + *(unsigned *)addr = new_val; } #define AO_HAVE_int_store diff --git a/src/atomic_ops/sysdeps/loadstore/int_atomic_load_store.h b/src/atomic_ops/sysdeps/loadstore/int_atomic_load_store.h index 0c3777b..d7cca0f 100644 --- a/src/atomic_ops/sysdeps/loadstore/int_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/loadstore/int_atomic_load_store.h @@ -20,21 +20,21 @@ * SOFTWARE. */ -/* Definitions for architectures on which loads and stores of unsigned */ -/* int are atomic for all legal alignments. */ +/* Definitions for architectures on which loads and stores of given */ +/* type are atomic for all legal alignments. */ -AO_INLINE unsigned int -AO_int_load(const volatile unsigned int *addr) +AO_INLINE unsigned +AO_int_load(const volatile unsigned *addr) { /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier semantics. */ - return (*(const unsigned int *)addr); + return *(const unsigned *)addr; } #define AO_HAVE_int_load AO_INLINE void -AO_int_store(volatile unsigned int *addr, unsigned int new_val) +AO_int_store(volatile unsigned *addr, unsigned new_val) { - (*(unsigned int *)addr) = new_val; + *(unsigned *)addr = new_val; } #define AO_HAVE_int_store diff --git a/src/atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h b/src/atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h index dcf3c04..e753133 100644 --- a/src/atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h +++ b/src/atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h @@ -20,34 +20,42 @@ * SOFTWARE. */ -/* - * This file adds definitions appropriate for environments in which an unsigned short - * volatile load has acquire semantics, and an unsigned short volatile store has release - * semantics. This is true with the standard Itanium ABI. - */ -#if !defined(AO_GCC_BARRIER) -# if defined(__GNUC__) -# define AO_GCC_BARRIER() AO_compiler_barrier() -# else -# define AO_GCC_BARRIER() -# endif +/* This file adds definitions appropriate for environments in which */ +/* volatile load of a given type has acquire semantics, and volatile */ +/* store of a given type has release semantics. 
This is arguably */ +/* supposed to be true with the standard Itanium software conventions. */ +/* Empirically gcc/ia64 does some reordering of ordinary operations */ +/* around volatiles even when we think it should not. GCC v3.3 and */ +/* earlier could reorder a volatile store with another store. As of */ +/* March 2005, gcc pre-4 reuses some previously computed common */ +/* subexpressions across a volatile load; hence, we now add compiler */ +/* barriers for gcc. */ + +#ifndef AO_GCC_BARRIER + /* TODO: Check GCC version (if workaround not needed for modern GCC). */ +# if defined(__GNUC__) +# define AO_GCC_BARRIER() AO_compiler_barrier() +# else +# define AO_GCC_BARRIER() (void)0 +# endif #endif -AO_INLINE unsigned short -AO_short_load_acquire(const volatile unsigned short *p) +AO_INLINE unsigned/**/short +AO_short_load_acquire(const volatile unsigned/**/short *addr) { - unsigned short result = *p; - /* A normal volatile load generates an ld.acq */ + unsigned/**/short result = *addr; + + /* A normal volatile load generates an ld.acq (on IA-64). */ AO_GCC_BARRIER(); return result; } #define AO_HAVE_short_load_acquire AO_INLINE void -AO_short_store_release(volatile unsigned short *p, unsigned short val) +AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short new_val) { AO_GCC_BARRIER(); - /* A normal volatile store generates an st.rel */ - *p = val; + /* A normal volatile store generates an st.rel (on IA-64). */ + *addr = new_val; } #define AO_HAVE_short_store_release diff --git a/src/atomic_ops/sysdeps/loadstore/short_aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/loadstore/short_aligned_atomic_load_store.h index 1340934..4d85028 100644 --- a/src/atomic_ops/sysdeps/loadstore/short_aligned_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/loadstore/short_aligned_atomic_load_store.h @@ -20,25 +20,23 @@ * SOFTWARE. */ -/* - * Definitions for architectures on which loads and stores of unsigned short - * are atomic for all legal alignments. - */ +/* Definitions for architectures on which loads and stores of given */ +/* type are atomic for all legal alignments. */ -AO_INLINE unsigned short -AO_short_load(const volatile unsigned short *addr) +AO_INLINE unsigned/**/short +AO_short_load(const volatile unsigned/**/short *addr) { - assert(((size_t)addr & (sizeof(unsigned short) - 1)) == 0); + assert(((size_t)addr & (sizeof(*addr) - 1)) == 0); /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier semantics. */ - return (*(unsigned short *)addr); + return *(unsigned/**/short *)addr; } #define AO_HAVE_short_load AO_INLINE void -AO_short_store(volatile unsigned short *addr, unsigned short new_val) +AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short new_val) { - assert(((size_t)addr & (sizeof(unsigned short) - 1)) == 0); - (*(unsigned short *)addr) = new_val; + assert(((size_t)addr & (sizeof(*addr) - 1)) == 0); + *(unsigned/**/short *)addr = new_val; } #define AO_HAVE_short_store diff --git a/src/atomic_ops/sysdeps/loadstore/short_atomic_load_store.h b/src/atomic_ops/sysdeps/loadstore/short_atomic_load_store.h index 3f3794c..ee33964 100644 --- a/src/atomic_ops/sysdeps/loadstore/short_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/loadstore/short_atomic_load_store.h @@ -20,23 +20,21 @@ * SOFTWARE. */ -/* - * Definitions for architectures on which loads and stores of unsigned short - * are atomic for all legal alignments. 
- */ +/* Definitions for architectures on which loads and stores of given */ +/* type are atomic for all legal alignments. */ -AO_INLINE unsigned short -AO_short_load(const volatile unsigned short *addr) +AO_INLINE unsigned/**/short +AO_short_load(const volatile unsigned/**/short *addr) { /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier semantics. */ - return (*(const unsigned short *)addr); + return *(const unsigned/**/short *)addr; } #define AO_HAVE_short_load AO_INLINE void -AO_short_store(volatile unsigned short *addr, unsigned short new_val) +AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short new_val) { - (*(unsigned short *)addr) = new_val; + *(unsigned/**/short *)addr = new_val; } #define AO_HAVE_short_store -- 2.40.0
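
For illustration only (an editorial sketch, not part of the patch): the
generated acquire/release primitives are used like their plain AO_t
counterparts, e.g. a release store publishing data for a matching
acquire load. The sketch assumes atomic_ops.h selects an implementation
providing AO_store_release and AO_load_acquire for the target (on other
targets the generalization headers typically synthesize equivalents
from weaker primitives).

    #include <atomic_ops.h>

    static AO_t ready;   /* flag written with release semantics */
    static int payload;  /* ordinary data guarded by the flag   */

    void producer(void)
    {
      payload = 42;                /* ordinary store                   */
      AO_store_release(&ready, 1); /* st.rel on IA-64: the payload     */
                                   /* store becomes visible before the */
                                   /* flag store                       */
    }

    int consumer(void)
    {
      if (AO_load_acquire(&ready) != 0) /* ld.acq on IA-64             */
        return payload;                 /* sees 42 if the flag was set */
      return -1;
    }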