AM_CFLAGS=@PICFLAG@
AM_CPPFLAGS = -I$(top_builddir)/src -I$(top_srcdir)/src
-include_HEADERS=atomic_ops.h atomic_ops_stack.h atomic_ops_malloc.h
+include_HEADERS = atomic_ops.h atomic_ops_stack.h atomic_ops_malloc.h
lib_LTLIBRARIES = libatomic_ops.la libatomic_ops_gpl.la
if NEED_ASM
libatomic_ops_la_SOURCES = atomic_ops.c atomic_ops_sysdeps.S
libatomic_ops_gpl_la_SOURCES = atomic_ops_stack.c atomic_ops_malloc.c
libatomic_ops_gpl_la_LDFLAGS = -version-info 1:3:0 -no-undefined
-EXTRA_DIST=Makefile.msft atomic_ops/sysdeps/README \
+EXTRA_DIST = Makefile.msft atomic_ops/sysdeps/README \
atomic_ops/generalize-arithm.template \
- atomic_ops/generalize-small.template atomic_ops/sysdeps/sunc/sparc.S
+ atomic_ops/generalize-small.template \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template \
+ atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template \
+ atomic_ops/sysdeps/loadstore/atomic_load_store.template \
+ atomic_ops/sysdeps/sunc/sparc.S
BUILT_SOURCES = atomic_ops/generalize-arithm.h \
- atomic_ops/generalize-small.h
+ atomic_ops/generalize-small.h \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.h \
+ atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.h \
+ atomic_ops/sysdeps/loadstore/atomic_load_store.h \
+ atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h \
+ atomic_ops/sysdeps/loadstore/char_atomic_load_store.h \
+ atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h \
+ atomic_ops/sysdeps/loadstore/int_aligned_atomic_load_store.h \
+ atomic_ops/sysdeps/loadstore/int_atomic_load_store.h \
+ atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h \
+ atomic_ops/sysdeps/loadstore/short_aligned_atomic_load_store.h \
+ atomic_ops/sysdeps/loadstore/short_atomic_load_store.h
# Private Headers
privatedir=${includedir}/
sed -e s:XSIZE_:short_:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
sed -e s:XSIZE_:int_:g -e s:XCTYPE:unsigned:g $? >> $@
sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+
+atomic_ops/sysdeps/loadstore/acquire_release_volatile.h: \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@
+
+atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h: \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template
+ sed -e s:XSIZE_:char_:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+
+atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h: \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template
+ sed -e s:XSIZE_:int_:g -e s:XCTYPE:unsigned:g $? > $@
+
+atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h: \
+ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template
+ sed -e s:XSIZE_:short_:g -e s:XCTYPE:unsigned/**/short:g $? > $@
+
+atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.h: \
+ atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@
+
+atomic_ops/sysdeps/loadstore/int_aligned_atomic_load_store.h: \
+ atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template
+ sed -e s:XSIZE_:int_:g -e s:XCTYPE:unsigned:g $? > $@
+
+atomic_ops/sysdeps/loadstore/short_aligned_atomic_load_store.h: \
+ atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template
+ sed -e s:XSIZE_:short_:g -e s:XCTYPE:unsigned/**/short:g $? > $@
+
+atomic_ops/sysdeps/loadstore/atomic_load_store.h: \
+ atomic_ops/sysdeps/loadstore/atomic_load_store.template
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@
+
+atomic_ops/sysdeps/loadstore/char_atomic_load_store.h: \
+ atomic_ops/sysdeps/loadstore/atomic_load_store.template
+ sed -e s:XSIZE_:char_:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+
+atomic_ops/sysdeps/loadstore/int_atomic_load_store.h: \
+ atomic_ops/sysdeps/loadstore/atomic_load_store.template
+ sed -e s:XSIZE_:int_:g -e s:XCTYPE:unsigned:g $? > $@
+
+atomic_ops/sysdeps/loadstore/short_atomic_load_store.h: \
+ atomic_ops/sysdeps/loadstore/atomic_load_store.template
+ sed -e s:XSIZE_:short_:g -e s:XCTYPE:unsigned/**/short:g $? > $@
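
A note on the replacement text above: the sed expressions are unquoted shell words, so the replacement cannot contain a literal space; an empty C comment stands in for one. The preprocessor replaces every comment with a single space, so the generated spelling is equivalent to the ordinary type name. A minimal sketch (the variable name is hypothetical):

    int main(void)
    {
      unsigned/**/char c = 255;  /* tokenizes exactly like: unsigned char c = 255; */
      return c == 255 ? 0 : 1;
    }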
/*
- * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* SOFTWARE.
*/
-/*
- * This file adds definitions appropriate for environments in which an AO_t
- * volatile load has acquire semantics, and an AO_t volatile store has release
- * semantics. This is arguably supposed to be true with the standard Itanium
- * software conventions.
- */
+/* This file adds definitions appropriate for environments in which */
+/* volatile load of a given type has acquire semantics, and volatile */
+/* store of a given type has release semantics. This is arguably */
+/* supposed to be true with the standard Itanium software conventions. */
+/* Empirically gcc/ia64 does some reordering of ordinary operations */
+/* around volatiles even when we think it should not. GCC v3.3 and */
+/* earlier could reorder a volatile store with another store. As of */
+/* March 2005, gcc pre-4 reuses some previously computed common */
+/* subexpressions across a volatile load; hence, we now add compiler */
+/* barriers for gcc. */
-/*
- * Empirically gcc/ia64 does some reordering of ordinary operations around volatiles
- * even when we think it shouldn't. Gcc 3.3 and earlier could reorder a volatile store
- * with another store. As of March 2005, gcc pre-4 reused previously computed
- * common subexpressions across a volatile load.
- * Hence we now add compiler barriers for gcc.
- */
-#if !defined(AO_GCC_BARRIER)
-# if defined(__GNUC__)
-# define AO_GCC_BARRIER() AO_compiler_barrier()
-# else
-# define AO_GCC_BARRIER()
-# endif
+#ifndef AO_GCC_BARRIER
+ /* TODO: Check GCC version (if workaround not needed for modern GCC). */
+# if defined(__GNUC__)
+# define AO_GCC_BARRIER() AO_compiler_barrier()
+# else
+# define AO_GCC_BARRIER() (void)0
+# endif
#endif
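
For context, AO_compiler_barrier() is defined in atomic_ops.h, not in this patch; for GCC it is conventionally an empty asm statement with a memory clobber, which stops the compiler from caching memory values in registers or moving memory accesses across the call. A sketch of the usual definition (an assumption here, not taken from this diff):

    #define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory")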
AO_INLINE AO_t
-AO_load_acquire(const volatile AO_t *p)
+AO_load_acquire(const volatile AO_t *addr)
{
- AO_t result = *p;
- /* A normal volatile load generates an ld.acq */
+ AO_t result = *addr;
+
+ /* A normal volatile load generates an ld.acq (on IA-64). */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_load_acquire
AO_INLINE void
-AO_store_release(volatile AO_t *p, AO_t val)
+AO_store_release(volatile AO_t *addr, AO_t new_val)
{
AO_GCC_BARRIER();
- /* A normal volatile store generates an st.rel */
- *p = val;
+ /* A normal volatile store generates an st.rel (on IA-64). */
+ *addr = new_val;
}
#define AO_HAVE_store_release
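
To illustrate the intended pairing (a hypothetical sketch; the flag and payload names are illustrative, not from this patch): a release store publishes everything written before it, and a matching acquire load makes those writes visible to the reader.

    #include "atomic_ops.h"

    static AO_t ready;    /* flag: 0 until data is published (hypothetical) */
    static int payload;   /* data guarded by the flag (hypothetical)        */

    void publish(void)
    {
      payload = 42;                 /* ordinary store                    */
      AO_store_release(&ready, 1);  /* st.rel: payload cannot sink below */
    }

    int consume(void)
    {
      while (AO_load_acquire(&ready) == 0) {
        /* spin; ld.acq keeps the payload read from hoisting above */
      }
      return payload;               /* guaranteed to observe 42 */
    }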
--- /dev/null
+++ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template
+/*
+ * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file adds definitions appropriate for environments in which */
+/* volatile load of a given type has acquire semantics, and volatile */
+/* store of a given type has release semantics. This is arguably */
+/* supposed to be true with the standard Itanium software conventions. */
+/* Empirically gcc/ia64 does some reordering of ordinary operations */
+/* around volatiles even when we think it should not. GCC v3.3 and */
+/* earlier could reorder a volatile store with another store. As of */
+/* March 2005, gcc pre-4 reuses some previously computed common */
+/* subexpressions across a volatile load; hence, we now add compiler */
+/* barriers for gcc. */
+
+#ifndef AO_GCC_BARRIER
+ /* TODO: Check GCC version (if workaround not needed for modern GCC). */
+# if defined(__GNUC__)
+# define AO_GCC_BARRIER() AO_compiler_barrier()
+# else
+# define AO_GCC_BARRIER() (void)0
+# endif
+#endif
+
+AO_INLINE XCTYPE
+AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
+{
+ XCTYPE result = *addr;
+
+ /* A normal volatile load generates an ld.acq (on IA-64). */
+ AO_GCC_BARRIER();
+ return result;
+}
+#define AO_HAVE_XSIZE_load_acquire
+
+AO_INLINE void
+AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE new_val)
+{
+ AO_GCC_BARRIER();
+ /* A normal volatile store generates an st.rel (on IA-64). */
+ *addr = new_val;
+}
+#define AO_HAVE_XSIZE_store_release
/*
- * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* SOFTWARE.
*/
-/* Definitions for architectures on which loads and stores of AO_t are */
-/* atomic fo all legal alignments. */
+/* Definitions for architectures on which loads and stores of given */
+/* type are atomic for all legal alignments. */
AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
{
- assert(((size_t)addr & (sizeof(AO_t) - 1)) == 0);
- /* Cast away the volatile for architectures where */
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ /* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier semantics. */
return *(AO_t *)addr;
}
AO_INLINE void
AO_store(volatile AO_t *addr, AO_t new_val)
{
- assert(((size_t)addr & (sizeof(AO_t) - 1)) == 0);
- (*(AO_t *)addr) = new_val;
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ *(AO_t *)addr = new_val;
}
#define AO_HAVE_store
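
The assertions now use sizeof(*addr) instead of naming the type, so the identical expression survives the sed substitution in every per-size instantiation. A hypothetical sketch of what the check enforces:

    #include "atomic_ops.h"

    static AO_t cell;  /* naturally aligned by the compiler */

    void demo(void)
    {
      AO_store(&cell, 1);  /* aligned: passes the assert */
      /* AO_store((AO_t *)((char *)&cell + 1), 1);
         would fail the assert: the address is not a multiple of sizeof(AO_t) */
    }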
--- /dev/null
+++ atomic_ops/sysdeps/loadstore/aligned_atomic_load_store.template
+/*
+ * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which loads and stores of given */
+/* type are atomic for all legal alignments. */
+
+AO_INLINE XCTYPE
+AO_XSIZE_load(const volatile XCTYPE *addr)
+{
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ /* Cast away the volatile for architectures like IA64 where */
+ /* volatile adds barrier semantics. */
+ return *(XCTYPE *)addr;
+}
+#define AO_HAVE_XSIZE_load
+
+AO_INLINE void
+AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val)
+{
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ *(XCTYPE *)addr = new_val;
+}
+#define AO_HAVE_XSIZE_store
/*
- * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* SOFTWARE.
*/
-/* Definitions for architectures on which loads and stores of AO_t are */
-/* atomic for all legal alignments. */
+/* Definitions for architectures on which loads and stores of given */
+/* type are atomic for all legal alignments. */
AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
{
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier semantics. */
- return (*(const AO_t *)addr);
+ return *(const AO_t *)addr;
}
#define AO_HAVE_load
AO_INLINE void
AO_store(volatile AO_t *addr, AO_t new_val)
{
- (*(AO_t *)addr) = new_val;
+ *(AO_t *)addr = new_val;
}
#define AO_HAVE_store
--- /dev/null
+++ atomic_ops/sysdeps/loadstore/atomic_load_store.template
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Definitions for architectures on which loads and stores of given */
+/* type are atomic for all legal alignments. */
+
+AO_INLINE XCTYPE
+AO_XSIZE_load(const volatile XCTYPE *addr)
+{
+ /* Cast away the volatile for architectures like IA64 where */
+ /* volatile adds barrier semantics. */
+ return *(const XCTYPE *)addr;
+}
+#define AO_HAVE_XSIZE_load
+
+AO_INLINE void
+AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val)
+{
+ *(XCTYPE *)addr = new_val;
+}
+#define AO_HAVE_XSIZE_store
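
For orientation (a hypothetical sketch, not part of this patch): a sysdeps port whose aligned loads and stores are atomic at each size would pull in the matching generated headers, along the lines of:

    /* Hypothetical fragment of a port header under atomic_ops/sysdeps/. */
    #include "loadstore/aligned_atomic_load_store.h"        /* AO_t  */
    #include "loadstore/char_atomic_load_store.h"           /* char  */
    #include "loadstore/short_aligned_atomic_load_store.h"  /* short */
    #include "loadstore/int_aligned_atomic_load_store.h"    /* int   */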
* SOFTWARE.
*/
-/*
- * This file adds definitions appropriate for environments in which an unsigned char
- * volatile load has acquire semantics, and an unsigned char volatile store has release
- * semantics. This is true with the standard Itanium ABI.
- */
-#if !defined(AO_GCC_BARRIER)
-# if defined(__GNUC__)
-# define AO_GCC_BARRIER() AO_compiler_barrier()
-# else
-# define AO_GCC_BARRIER()
-# endif
+/* This file adds definitions appropriate for environments in which */
+/* volatile load of a given type has acquire semantics, and volatile */
+/* store of a given type has release semantics. This is arguably */
+/* supposed to be true with the standard Itanium software conventions. */
+/* Empirically gcc/ia64 does some reordering of ordinary operations */
+/* around volatiles even when we think it should not. GCC v3.3 and */
+/* earlier could reorder a volatile store with another store. As of */
+/* March 2005, gcc pre-4 reuses some previously computed common */
+/* subexpressions across a volatile load; hence, we now add compiler */
+/* barriers for gcc. */
+
+#ifndef AO_GCC_BARRIER
+ /* TODO: Check GCC version (if workaround not needed for modern GCC). */
+# if defined(__GNUC__)
+# define AO_GCC_BARRIER() AO_compiler_barrier()
+# else
+# define AO_GCC_BARRIER() (void)0
+# endif
#endif
-AO_INLINE unsigned char
-AO_char_load_acquire(const volatile unsigned char *p)
+AO_INLINE unsigned/**/char
+AO_char_load_acquire(const volatile unsigned/**/char *addr)
{
- unsigned char result = *p;
- /* A normal volatile load generates an ld.acq */
+ unsigned/**/char result = *addr;
+
+ /* A normal volatile load generates an ld.acq (on IA-64). */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_char_load_acquire
AO_INLINE void
-AO_char_store_release(volatile unsigned char *p, unsigned char val)
+AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char new_val)
{
AO_GCC_BARRIER();
- /* A normal volatile store generates an st.rel */
- *p = val;
+ /* A normal volatile store generates an st.rel (on IA-64). */
+ *addr = new_val;
}
#define AO_HAVE_char_store_release
* SOFTWARE.
*/
-/* Definitions for architectures on which loads and stores of unsigned */
-/* char are atomic for all legal alignments. */
+/* Definitions for architectures on which loads and stores of given */
+/* type are atomic for all legal alignments. */
-AO_INLINE unsigned char
-AO_char_load(const volatile unsigned char *addr)
+AO_INLINE unsigned/**/char
+AO_char_load(const volatile unsigned/**/char *addr)
{
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier semantics. */
- return (*(const unsigned char *)addr);
+ return *(const unsigned/**/char *)addr;
}
#define AO_HAVE_char_load
AO_INLINE void
-AO_char_store(volatile unsigned char *addr, unsigned char new_val)
+AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char new_val)
{
- (*(unsigned char *)addr) = new_val;
+ *(unsigned/**/char *)addr = new_val;
}
#define AO_HAVE_char_store
* SOFTWARE.
*/
-/*
- * This file adds definitions appropriate for environments in which an unsigned
- * int volatile load has acquire semantics, and an unsigned short volatile
- * store has release semantics. This is true with the standard Itanium ABI.
- */
-#if !defined(AO_GCC_BARRIER)
-# if defined(__GNUC__)
-# define AO_GCC_BARRIER() AO_compiler_barrier()
-# else
-# define AO_GCC_BARRIER()
-# endif
+/* This file adds definitions appropriate for environments in which */
+/* volatile load of a given type has acquire semantics, and volatile */
+/* store of a given type has release semantics. This is arguably */
+/* supposed to be true with the standard Itanium software conventions. */
+/* Empirically gcc/ia64 does some reordering of ordinary operations */
+/* around volatiles even when we think it should not. GCC v3.3 and */
+/* earlier could reorder a volatile store with another store. As of */
+/* March 2005, gcc pre-4 reuses some previously computed common */
+/* subexpressions across a volatile load; hence, we now add compiler */
+/* barriers for gcc. */
+
+#ifndef AO_GCC_BARRIER
+ /* TODO: Check GCC version (if workaround not needed for modern GCC). */
+# if defined(__GNUC__)
+# define AO_GCC_BARRIER() AO_compiler_barrier()
+# else
+# define AO_GCC_BARRIER() (void)0
+# endif
#endif
-AO_INLINE unsigned int
-AO_int_load_acquire(const volatile unsigned int *p)
+AO_INLINE unsigned
+AO_int_load_acquire(const volatile unsigned *addr)
{
- unsigned int result = *p;
- /* A normal volatile load generates an ld.acq */
+ unsigned result = *addr;
+
+ /* A normal volatile load generates an ld.acq (on IA-64). */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_int_load_acquire
AO_INLINE void
-AO_int_store_release(volatile unsigned int *p, unsigned int val)
+AO_int_store_release(volatile unsigned *addr, unsigned new_val)
{
AO_GCC_BARRIER();
- /* A normal volatile store generates an st.rel */
- *p = val;
+ /* A normal volatile store generates an st.rel (on IA-64). */
+ *addr = new_val;
}
#define AO_HAVE_int_store_release
* SOFTWARE.
*/
-/* Definitions for architectures on which loads and stores of unsigned */
-/* int are atomic for all legal alignments. */
+/* Definitions for architectures on which loads and stores of given */
+/* type are atomic for all legal alignments. */
-AO_INLINE unsigned int
-AO_int_load(const volatile unsigned int *addr)
+AO_INLINE unsigned
+AO_int_load(const volatile unsigned *addr)
{
- assert(((size_t)addr & (sizeof(unsigned int) - 1)) == 0);
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier semantics. */
- return (*(unsigned int *)addr);
+ return *(unsigned *)addr;
}
#define AO_HAVE_int_load
AO_INLINE void
-AO_int_store(volatile unsigned int *addr, unsigned int new_val)
+AO_int_store(volatile unsigned *addr, unsigned new_val)
{
- assert(((size_t)addr & (sizeof(unsigned int) - 1)) == 0);
- (*(unsigned int *)addr) = new_val;
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ *(unsigned *)addr = new_val;
}
#define AO_HAVE_int_store
* SOFTWARE.
*/
-/* Definitions for architectures on which loads and stores of unsigned */
-/* int are atomic for all legal alignments. */
+/* Definitions for architectures on which loads and stores of given */
+/* type are atomic for all legal alignments. */
-AO_INLINE unsigned int
-AO_int_load(const volatile unsigned int *addr)
+AO_INLINE unsigned
+AO_int_load(const volatile unsigned *addr)
{
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier semantics. */
- return (*(const unsigned int *)addr);
+ return *(const unsigned *)addr;
}
#define AO_HAVE_int_load
AO_INLINE void
-AO_int_store(volatile unsigned int *addr, unsigned int new_val)
+AO_int_store(volatile unsigned *addr, unsigned new_val)
{
- (*(unsigned int *)addr) = new_val;
+ *(unsigned *)addr = new_val;
}
#define AO_HAVE_int_store
* SOFTWARE.
*/
-/*
- * This file adds definitions appropriate for environments in which an unsigned short
- * volatile load has acquire semantics, and an unsigned short volatile store has release
- * semantics. This is true with the standard Itanium ABI.
- */
-#if !defined(AO_GCC_BARRIER)
-# if defined(__GNUC__)
-# define AO_GCC_BARRIER() AO_compiler_barrier()
-# else
-# define AO_GCC_BARRIER()
-# endif
+/* This file adds definitions appropriate for environments in which */
+/* volatile load of a given type has acquire semantics, and volatile */
+/* store of a given type has release semantics. This is arguably */
+/* supposed to be true with the standard Itanium software conventions. */
+/* Empirically gcc/ia64 does some reordering of ordinary operations */
+/* around volatiles even when we think it should not. GCC v3.3 and */
+/* earlier could reorder a volatile store with another store. As of */
+/* March 2005, gcc pre-4 reuses some previously computed common */
+/* subexpressions across a volatile load; hence, we now add compiler */
+/* barriers for gcc. */
+
+#ifndef AO_GCC_BARRIER
+ /* TODO: Check GCC version (if workaround not needed for modern GCC). */
+# if defined(__GNUC__)
+# define AO_GCC_BARRIER() AO_compiler_barrier()
+# else
+# define AO_GCC_BARRIER() (void)0
+# endif
#endif
-AO_INLINE unsigned short
-AO_short_load_acquire(const volatile unsigned short *p)
+AO_INLINE unsigned/**/short
+AO_short_load_acquire(const volatile unsigned/**/short *addr)
{
- unsigned short result = *p;
- /* A normal volatile load generates an ld.acq */
+ unsigned/**/short result = *addr;
+
+ /* A normal volatile load generates an ld.acq (on IA-64). */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_short_load_acquire
AO_INLINE void
-AO_short_store_release(volatile unsigned short *p, unsigned short val)
+AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short new_val)
{
AO_GCC_BARRIER();
- /* A normal volatile store generates an st.rel */
- *p = val;
+ /* A normal volatile store generates an st.rel (on IA-64). */
+ *addr = new_val;
}
#define AO_HAVE_short_store_release
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of unsigned short
- * are atomic for all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of given */
+/* type are atomic for all legal alignments. */
-AO_INLINE unsigned short
-AO_short_load(const volatile unsigned short *addr)
+AO_INLINE unsigned/**/short
+AO_short_load(const volatile unsigned/**/short *addr)
{
- assert(((size_t)addr & (sizeof(unsigned short) - 1)) == 0);
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier semantics. */
- return (*(unsigned short *)addr);
+ return *(unsigned/**/short *)addr;
}
#define AO_HAVE_short_load
AO_INLINE void
-AO_short_store(volatile unsigned short *addr, unsigned short new_val)
+AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short new_val)
{
- assert(((size_t)addr & (sizeof(unsigned short) - 1)) == 0);
- (*(unsigned short *)addr) = new_val;
+ assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ *(unsigned/**/short *)addr = new_val;
}
#define AO_HAVE_short_store
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of unsigned short
- * are atomic for all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of given */
+/* type are atomic for all legal alignments. */
-AO_INLINE unsigned short
-AO_short_load(const volatile unsigned short *addr)
+AO_INLINE unsigned/**/short
+AO_short_load(const volatile unsigned/**/short *addr)
{
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier semantics. */
- return (*(const unsigned short *)addr);
+ return *(const unsigned/**/short *)addr;
}
#define AO_HAVE_short_load
AO_INLINE void
-AO_short_store(volatile unsigned short *addr, unsigned short new_val)
+AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short new_val)
{
- (*(unsigned short *)addr) = new_val;
+ *(unsigned/**/short *)addr = new_val;
}
#define AO_HAVE_short_store