From 1fb52016d8a15ca3582346b3a491a05b30bf3aba Mon Sep 17 00:00:00 2001
From: hboehm
Date: Fri, 18 Jul 2008 22:42:20 +0000
Subject: [PATCH] 2008-07-18 Hans Boehm

	* doc/README.txt, src/atomic_ops/generalize.h,
	src/atomic_ops/generalize-small.template,
	src/atomic_ops/generalize-small.h,
	src/atomic_ops/sysdeps/acquire_release_volatile.h,
	src/atomic_ops/sysdeps/char_acquire_release_volatile.h,
	src/atomic_ops/sysdeps/int_acquire_release_volatile.h,
	src/atomic_ops/sysdeps/short_acquire_release_volatile.h,
	src/atomic_ops/sysdeps/aligned_atomic_load_store.h,
	src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h,
	src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h,
	src/atomic_ops/sysdeps/ao_t_is_int.h,
	src/atomic_ops/sysdeps/atomic_load_store.h,
	src/atomic_ops/sysdeps/char_atomic_load_store.h,
	src/atomic_ops/sysdeps/int_atomic_load_store.h,
	src/atomic_ops/sysdeps/short_atomic_load_store.h,
	src/atomic_ops/sysdeps/generic_pthread.h,
	src/atomic_ops/sysdeps/read_ordered.h,
	src/atomic_ops/sysdeps/armcc/arm_v6.h,
	src/atomic_ops/sysdeps/gcc/arm.h,
	src/atomic_ops/sysdeps/icc/ia64.h,
	src/atomic_ops/sysdeps/ibmc/powerpc.h:
	Add const to first parameter of load calls.
---
 ChangeLog                                     | 24 +++++++++++++++++++
 doc/README.txt                                |  2 +-
 src/atomic_ops/generalize-small.h             | 12 +++++-----
 src/atomic_ops/generalize-small.template      |  4 ++--
 src/atomic_ops/generalize.h                   |  4 ++--
 .../sysdeps/acquire_release_volatile.h        |  2 +-
 .../sysdeps/aligned_atomic_load_store.h       |  2 +-
 src/atomic_ops/sysdeps/ao_t_is_int.h          |  3 ++-
 src/atomic_ops/sysdeps/armcc/arm_v6.h         |  4 ++--
 src/atomic_ops/sysdeps/atomic_load_store.h    |  4 ++--
 .../sysdeps/char_acquire_release_volatile.h   |  2 +-
 .../sysdeps/char_atomic_load_store.h          |  4 ++--
 src/atomic_ops/sysdeps/gcc/arm.h              |  4 ++--
 src/atomic_ops/sysdeps/generic_pthread.h      |  8 +++----
 src/atomic_ops/sysdeps/ibmc/powerpc.h         |  2 +-
 src/atomic_ops/sysdeps/icc/ia64.h             |  8 +++----
 .../sysdeps/int_acquire_release_volatile.h    |  2 +-
 .../sysdeps/int_aligned_atomic_load_store.h   |  2 +-
 .../sysdeps/int_atomic_load_store.h           |  4 ++--
 src/atomic_ops/sysdeps/read_ordered.h         |  8 +++----
 .../sysdeps/short_acquire_release_volatile.h  |  2 +-
 .../sysdeps/short_aligned_atomic_load_store.h |  2 +-
 .../sysdeps/short_atomic_load_store.h         |  4 ++--
 23 files changed, 69 insertions(+), 44 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 4107d13..b2d0d1a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,27 @@
+2008-07-18 Hans Boehm
+	* doc/README.txt, src/atomic_ops/generalize.h,
+	src/atomic_ops/generalize-small.template,
+	src/atomic_ops/generalize-small.h,
+	src/atomic_ops/sysdeps/acquire_release_volatile.h,
+	src/atomic_ops/sysdeps/char_acquire_release_volatile.h,
+	src/atomic_ops/sysdeps/int_acquire_release_volatile.h,
+	src/atomic_ops/sysdeps/short_acquire_release_volatile.h,
+	src/atomic_ops/sysdeps/aligned_atomic_load_store.h,
+	src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h,
+	src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h,
+	src/atomic_ops/sysdeps/ao_t_is_int.h,
+	src/atomic_ops/sysdeps/atomic_load_store.h,
+	src/atomic_ops/sysdeps/char_atomic_load_store.h,
+	src/atomic_ops/sysdeps/int_atomic_load_store.h,
+	src/atomic_ops/sysdeps/short_atomic_load_store.h,
+	src/atomic_ops/sysdeps/generic_pthread.h,
+	src/atomic_ops/sysdeps/read_ordered.h,
+	src/atomic_ops/sysdeps/armcc/arm_v6.h,
+	src/atomic_ops/sysdeps/gcc/arm.h,
+	src/atomic_ops/sysdeps/icc/ia64.h,
+	src/atomic_ops/sysdeps/ibmc/powerpc.h:
+	Add const to first parameter of load calls.
+
 2008-07-10 Hans Boehm
 	* src/atomic_ops/sysdeps/gcc/m68k.h: Remove SMP-unsafe AO_or_full,
 	and let it be autogenerated instead.
diff --git a/doc/README.txt b/doc/README.txt
index e7c2f0d..12c5856 100644
--- a/doc/README.txt
+++ b/doc/README.txt
@@ -80,7 +80,7 @@ are also specified:
 void nop()
   No atomic operation.  The barrier may still be useful.
-AO_t load(volatile AO_t * addr)
+AO_t load(const volatile AO_t * addr)
   Atomic load of *addr.
 void store(volatile AO_t * addr, AO_t new_val)
   Atomically store new_val to *addr.
diff --git a/src/atomic_ops/generalize-small.h b/src/atomic_ops/generalize-small.h
index 4e45a0d..2b989a6 100644
--- a/src/atomic_ops/generalize-small.h
+++ b/src/atomic_ops/generalize-small.h
@@ -22,7 +22,7 @@
 #if defined(AO_HAVE_char_load) && defined(AO_HAVE_nop_full) && \
     !defined(AO_HAVE_char_load_acquire)
   AO_INLINE unsigned char
-  AO_char_load_acquire(volatile unsigned char *addr)
+  AO_char_load_acquire(const volatile unsigned char *addr)
   {
     unsigned char result = AO_char_load(addr);
     /* Acquire barrier would be useless, since the load could be delayed */
@@ -36,7 +36,7 @@
 #if defined(AO_HAVE_char_load) && defined(AO_HAVE_nop_read) && \
     !defined(AO_HAVE_char_load_read)
   AO_INLINE unsigned char
-  AO_char_load_read(volatile unsigned char *addr)
+  AO_char_load_read(const volatile unsigned char *addr)
   {
     unsigned char result = AO_char_load(addr);
     /* Acquire barrier would be useless, since the load could be delayed */
@@ -597,7 +597,7 @@
 #if defined(AO_HAVE_short_load) && defined(AO_HAVE_nop_full) && \
     !defined(AO_HAVE_short_load_acquire)
   AO_INLINE unsigned short
-  AO_short_load_acquire(volatile unsigned short *addr)
+  AO_short_load_acquire(const volatile unsigned short *addr)
   {
     unsigned short result = AO_short_load(addr);
     /* Acquire barrier would be useless, since the load could be delayed */
@@ -611,7 +611,7 @@
 #if defined(AO_HAVE_short_load) && defined(AO_HAVE_nop_read) && \
     !defined(AO_HAVE_short_load_read)
   AO_INLINE unsigned short
-  AO_short_load_read(volatile unsigned short *addr)
+  AO_short_load_read(const volatile unsigned short *addr)
   {
     unsigned short result = AO_short_load(addr);
     /* Acquire barrier would be useless, since the load could be delayed */
@@ -1172,7 +1172,7 @@
 #if defined(AO_HAVE_int_load) && defined(AO_HAVE_nop_full) && \
     !defined(AO_HAVE_int_load_acquire)
   AO_INLINE unsigned int
-  AO_int_load_acquire(volatile unsigned int *addr)
+  AO_int_load_acquire(const volatile unsigned int *addr)
   {
     unsigned int result = AO_int_load(addr);
     /* Acquire barrier would be useless, since the load could be delayed */
@@ -1186,7 +1186,7 @@
 #if defined(AO_HAVE_int_load) && defined(AO_HAVE_nop_read) && \
     !defined(AO_HAVE_int_load_read)
   AO_INLINE unsigned int
-  AO_int_load_read(volatile unsigned int *addr)
+  AO_int_load_read(const volatile unsigned int *addr)
   {
     unsigned int result = AO_int_load(addr);
     /* Acquire barrier would be useless, since the load could be delayed */
diff --git a/src/atomic_ops/generalize-small.template b/src/atomic_ops/generalize-small.template
index b7e02b0..c9373df 100644
--- a/src/atomic_ops/generalize-small.template
+++ b/src/atomic_ops/generalize-small.template
@@ -22,7 +22,7 @@
 #if defined(AO_HAVE_XSIZE_load) && defined(AO_HAVE_nop_full) && \
     !defined(AO_HAVE_XSIZE_load_acquire)
   AO_INLINE unsigned XCTYPE
-  AO_XSIZE_load_acquire(volatile unsigned XCTYPE *addr)
+  AO_XSIZE_load_acquire(const volatile unsigned XCTYPE *addr)
   {
     unsigned XCTYPE result = AO_XSIZE_load(addr);
     /* Acquire barrier would be useless, since the load could be delayed */
@@ -36,7 +36,7 @@
 #if defined(AO_HAVE_XSIZE_load) && defined(AO_HAVE_nop_read) && \
     !defined(AO_HAVE_XSIZE_load_read)
   AO_INLINE unsigned XCTYPE
-  AO_XSIZE_load_read(volatile unsigned XCTYPE *addr)
+  AO_XSIZE_load_read(const volatile unsigned XCTYPE *addr)
   {
     unsigned XCTYPE result = AO_XSIZE_load(addr);
     /* Acquire barrier would be useless, since the load could be delayed */
diff --git a/src/atomic_ops/generalize.h b/src/atomic_ops/generalize.h
index 521a41e..8d09336 100644
--- a/src/atomic_ops/generalize.h
+++ b/src/atomic_ops/generalize.h
@@ -198,7 +198,7 @@
 #if defined(AO_HAVE_load) && defined(AO_HAVE_nop_full) && \
     !defined(AO_HAVE_load_acquire)
   AO_INLINE AO_t
-  AO_load_acquire(volatile AO_t *addr)
+  AO_load_acquire(const volatile AO_t *addr)
   {
     AO_t result = AO_load(addr);
     /* Acquire barrier would be useless, since the load could be delayed */
@@ -212,7 +212,7 @@
 #if defined(AO_HAVE_load) && defined(AO_HAVE_nop_read) && \
     !defined(AO_HAVE_load_read)
   AO_INLINE AO_t
-  AO_load_read(volatile AO_t *addr)
+  AO_load_read(const volatile AO_t *addr)
   {
     AO_t result = AO_load(addr);
     /* Acquire barrier would be useless, since the load could be delayed */
diff --git a/src/atomic_ops/sysdeps/acquire_release_volatile.h b/src/atomic_ops/sysdeps/acquire_release_volatile.h
index 1a387d3..b84dc82 100644
--- a/src/atomic_ops/sysdeps/acquire_release_volatile.h
+++ b/src/atomic_ops/sysdeps/acquire_release_volatile.h
@@ -43,7 +43,7 @@
 #endif
 
 AO_INLINE AO_t
-AO_load_acquire(volatile AO_t *p)
+AO_load_acquire(const volatile AO_t *p)
 {
   AO_t result = *p;
   /* A normal volatile load generates an ld.acq */
diff --git a/src/atomic_ops/sysdeps/aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/aligned_atomic_load_store.h
index 485b7f4..342f02b 100644
--- a/src/atomic_ops/sysdeps/aligned_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/aligned_atomic_load_store.h
@@ -26,7 +26,7 @@
  */
 
 AO_INLINE AO_t
-AO_load(volatile AO_t *addr)
+AO_load(const volatile AO_t *addr)
 {
   assert(((size_t)addr & (sizeof(AO_t) - 1)) == 0);
   /* Cast away the volatile for architectures where             */
diff --git a/src/atomic_ops/sysdeps/ao_t_is_int.h b/src/atomic_ops/sysdeps/ao_t_is_int.h
index 18156c8..ab9b662 100644
--- a/src/atomic_ops/sysdeps/ao_t_is_int.h
+++ b/src/atomic_ops/sysdeps/ao_t_is_int.h
@@ -78,7 +78,8 @@
 
 #if defined(AO_HAVE_load_acquire) && \
     !defined(AO_HAVE_int_load_acquire)
-# define AO_int_load_acquire(addr) (int)AO_load_acquire((volatile AO_t *)addr)
+# define AO_int_load_acquire(addr) \
+		(int)AO_load_acquire((const volatile AO_t *)addr)
 # define AO_HAVE_int_load_acquire
 # endif
 
diff --git a/src/atomic_ops/sysdeps/armcc/arm_v6.h b/src/atomic_ops/sysdeps/armcc/arm_v6.h
index 3e2318b..326506c 100644
--- a/src/atomic_ops/sysdeps/armcc/arm_v6.h
+++ b/src/atomic_ops/sysdeps/armcc/arm_v6.h
@@ -52,10 +52,10 @@ AO_nop_full()
 #define AO_HAVE_nop_full
 
 AO_INLINE AO_t
-AO_load(volatile AO_t *addr)
+AO_load(const volatile AO_t *addr)
 {
   /* Cast away the volatile in case it adds fence semantics.  */
-  return (*(AO_t *)addr);
+  return (*(const AO_t *)addr);
 }
 #define AO_HAVE_load
 
diff --git a/src/atomic_ops/sysdeps/atomic_load_store.h b/src/atomic_ops/sysdeps/atomic_load_store.h
index 68c5179..d70d29f 100644
--- a/src/atomic_ops/sysdeps/atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/atomic_load_store.h
@@ -26,11 +26,11 @@
  */
 
 AO_INLINE AO_t
-AO_load(volatile AO_t *addr)
+AO_load(const volatile AO_t *addr)
 {
   /* Cast away the volatile for architectures like IA64 where  */
   /* volatile adds barrier semantics.                          */
-  return (*(AO_t *)addr);
+  return (*(const AO_t *)addr);
 }
 
 #define AO_HAVE_load
diff --git a/src/atomic_ops/sysdeps/char_acquire_release_volatile.h b/src/atomic_ops/sysdeps/char_acquire_release_volatile.h
index 37aa021..a227c1b 100644
--- a/src/atomic_ops/sysdeps/char_acquire_release_volatile.h
+++ b/src/atomic_ops/sysdeps/char_acquire_release_volatile.h
@@ -34,7 +34,7 @@
 #endif
 
 AO_INLINE unsigned char
-AO_char_load_acquire(volatile unsigned char *p)
+AO_char_load_acquire(const volatile unsigned char *p)
 {
   unsigned char result = *p;
   /* A normal volatile load generates an ld.acq */
diff --git a/src/atomic_ops/sysdeps/char_atomic_load_store.h b/src/atomic_ops/sysdeps/char_atomic_load_store.h
index 6eb0978..2d45e89 100644
--- a/src/atomic_ops/sysdeps/char_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/char_atomic_load_store.h
@@ -26,11 +26,11 @@
  */
 
 AO_INLINE unsigned char
-AO_char_load(volatile unsigned char *addr)
+AO_char_load(const volatile unsigned char *addr)
 {
   /* Cast away the volatile for architectures like IA64 where  */
   /* volatile adds barrier semantics.                          */
-  return (*(unsigned char *)addr);
+  return (*(const unsigned char *)addr);
 }
 
 #define AO_HAVE_char_load
diff --git a/src/atomic_ops/sysdeps/gcc/arm.h b/src/atomic_ops/sysdeps/gcc/arm.h
index c502d4d..deff617 100644
--- a/src/atomic_ops/sysdeps/gcc/arm.h
+++ b/src/atomic_ops/sysdeps/gcc/arm.h
@@ -49,11 +49,11 @@ AO_nop_full()
 
 /* NEC LE-IT: AO_t load is simple reading */
 AO_INLINE AO_t
-AO_load(volatile AO_t *addr)
+AO_load(const volatile AO_t *addr)
 {
   /* Cast away the volatile for architectures like IA64 where  */
   /* volatile adds barrier semantics.                          */
-  return (*(AO_t *)addr);
+  return (*(const AO_t *)addr);
 }
 #define AO_HAVE_load
diff --git a/src/atomic_ops/sysdeps/generic_pthread.h b/src/atomic_ops/sysdeps/generic_pthread.h
index 8d17c0a..519dbf8 100644
--- a/src/atomic_ops/sysdeps/generic_pthread.h
+++ b/src/atomic_ops/sysdeps/generic_pthread.h
@@ -47,7 +47,7 @@ AO_nop_full()
 #define AO_HAVE_nop_full
 
 AO_INLINE AO_t
-AO_load_full(volatile AO_t *addr)
+AO_load_full(const volatile AO_t *addr)
 {
   AO_t result;
   pthread_mutex_lock(&AO_pt_lock);
@@ -69,7 +69,7 @@ AO_store_full(volatile AO_t *addr, AO_t val)
 #define AO_HAVE_store_full
 
 AO_INLINE unsigned char
-AO_char_load_full(volatile unsigned char *addr)
+AO_char_load_full(const volatile unsigned char *addr)
 {
   unsigned char result;
   pthread_mutex_lock(&AO_pt_lock);
@@ -91,7 +91,7 @@ AO_char_store_full(volatile unsigned char *addr, unsigned char val)
 #define AO_HAVE_char_store_full
 
 AO_INLINE unsigned short
-AO_short_load_full(volatile unsigned short *addr)
+AO_short_load_full(const volatile unsigned short *addr)
 {
   unsigned short result;
   pthread_mutex_lock(&AO_pt_lock);
@@ -113,7 +113,7 @@ AO_short_store_full(volatile unsigned short *addr, unsigned short val)
 #define AO_HAVE_short_store_full
 
 AO_INLINE unsigned int
-AO_int_load_full(volatile unsigned int *addr)
+AO_int_load_full(const volatile unsigned int *addr)
 {
   unsigned int result;
   pthread_mutex_lock(&AO_pt_lock);
diff --git a/src/atomic_ops/sysdeps/ibmc/powerpc.h b/src/atomic_ops/sysdeps/ibmc/powerpc.h
index d1f9c50..4a1badb 100644
--- a/src/atomic_ops/sysdeps/ibmc/powerpc.h
+++ b/src/atomic_ops/sysdeps/ibmc/powerpc.h
@@ -31,7 +31,7 @@ void AO_lwsync(void);
 /* We explicitly specify load_acquire and store_release, since these  */
 /* rely on the fact that lwsync is also a LoadStore barrier.          */
 AO_INLINE AO_t
-AO_load_acquire(volatile AO_t *addr)
+AO_load_acquire(const volatile AO_t *addr)
 {
   AO_t result = *addr;
   AO_lwsync();
diff --git a/src/atomic_ops/sysdeps/icc/ia64.h b/src/atomic_ops/sysdeps/icc/ia64.h
index 358ddda..7eebe27 100644
--- a/src/atomic_ops/sysdeps/icc/ia64.h
+++ b/src/atomic_ops/sysdeps/icc/ia64.h
@@ -39,7 +39,7 @@
 #define AO_INTEL_PTR_t void *
 
 AO_INLINE AO_t
-AO_load_acquire(volatile AO_t *p)
+AO_load_acquire(const volatile AO_t *p)
 {
   return (AO_t)(__ld8_acq((AO_INTEL_PTR_t)p));
 }
@@ -53,7 +53,7 @@ AO_store_release(volatile AO_t *p, AO_t val)
 #define AO_HAVE_store_release
 
 AO_INLINE unsigned char
-AO_char_load_acquire(volatile unsigned char *p)
+AO_char_load_acquire(const volatile unsigned char *p)
 {
   /* A normal volatile load generates an ld.acq */
   return (__ld1_acq((AO_INTEL_PTR_t)p));
@@ -68,7 +68,7 @@ AO_char_store_release(volatile unsigned char *p, unsigned char val)
 #define AO_HAVE_char_store_release
 
 AO_INLINE unsigned short
-AO_short_load_acquire(volatile unsigned short *p)
+AO_short_load_acquire(const volatile unsigned short *p)
 {
   /* A normal volatile load generates an ld.acq */
   return (__ld2_acq((AO_INTEL_PTR_t)p));
@@ -83,7 +83,7 @@ AO_short_store_release(volatile unsigned short *p, unsigned short val)
 #define AO_HAVE_short_store_release
 
 AO_INLINE unsigned int
-AO_int_load_acquire(volatile unsigned int *p)
+AO_int_load_acquire(const volatile unsigned int *p)
 {
   /* A normal volatile load generates an ld.acq */
   return (__ld4_acq((AO_INTEL_PTR_t)p));
diff --git a/src/atomic_ops/sysdeps/int_acquire_release_volatile.h b/src/atomic_ops/sysdeps/int_acquire_release_volatile.h
index e0a6ed0..44d0453 100644
--- a/src/atomic_ops/sysdeps/int_acquire_release_volatile.h
+++ b/src/atomic_ops/sysdeps/int_acquire_release_volatile.h
@@ -34,7 +34,7 @@
 #endif
 
 AO_INLINE unsigned int
-AO_int_load_acquire(volatile unsigned int *p)
+AO_int_load_acquire(const volatile unsigned int *p)
 {
   unsigned int result = *p;
   /* A normal volatile load generates an ld.acq */
diff --git a/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h
index 62927d2..d9ce0b3 100644
--- a/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h
@@ -26,7 +26,7 @@
  */
 
 AO_INLINE unsigned int
-AO_int_load(volatile unsigned int *addr)
+AO_int_load(const volatile unsigned int *addr)
 {
   assert(((size_t)addr & (sizeof(unsigned int) - 1)) == 0);
   /* Cast away the volatile for architectures like IA64 where  */
diff --git a/src/atomic_ops/sysdeps/int_atomic_load_store.h b/src/atomic_ops/sysdeps/int_atomic_load_store.h
index b2a4813..7c1ca11 100644
--- a/src/atomic_ops/sysdeps/int_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/int_atomic_load_store.h
@@ -26,11 +26,11 @@
  */
 
 AO_INLINE unsigned int
-AO_int_load(volatile unsigned int *addr)
+AO_int_load(const volatile unsigned int *addr)
 {
   /* Cast away the volatile for architectures like IA64 where  */
   /* volatile adds barrier semantics.                          */
-  return (*(unsigned int *)addr);
+  return (*(const unsigned int *)addr);
 }
 
 #define AO_HAVE_int_load
diff --git a/src/atomic_ops/sysdeps/read_ordered.h b/src/atomic_ops/sysdeps/read_ordered.h
index e928881..ddf0988 100644
--- a/src/atomic_ops/sysdeps/read_ordered.h
+++ b/src/atomic_ops/sysdeps/read_ordered.h
@@ -38,7 +38,7 @@ AO_nop_read()
 #ifdef AO_HAVE_load
 
 AO_INLINE AO_t
-AO_load_read(volatile AO_t *addr)
+AO_load_read(const volatile AO_t *addr)
 {
   AO_t result = AO_load(addr);
   AO_compiler_barrier();
@@ -54,7 +54,7 @@ AO_load_read(volatile AO_t *addr)
 #ifdef AO_HAVE_char_load
 
 AO_INLINE AO_t
-AO_char_load_read(volatile unsigned char *addr)
+AO_char_load_read(const volatile unsigned char *addr)
 {
   AO_t result = AO_char_load(addr);
   AO_compiler_barrier();
@@ -70,7 +70,7 @@ AO_char_load_read(volatile unsigned char *addr)
 #ifdef AO_HAVE_short_load
 
 AO_INLINE AO_t
-AO_short_load_read(volatile unsigned short *addr)
+AO_short_load_read(const volatile unsigned short *addr)
 {
   AO_t result = AO_short_load(addr);
   AO_compiler_barrier();
@@ -86,7 +86,7 @@ AO_short_load_read(volatile unsigned short *addr)
 #ifdef AO_HAVE_int_load
 
 AO_INLINE AO_t
-AO_int_load_read(volatile unsigned int *addr)
+AO_int_load_read(const volatile unsigned int *addr)
 {
   AO_t result = AO_int_load(addr);
   AO_compiler_barrier();
diff --git a/src/atomic_ops/sysdeps/short_acquire_release_volatile.h b/src/atomic_ops/sysdeps/short_acquire_release_volatile.h
index 035ada7..56db599 100644
--- a/src/atomic_ops/sysdeps/short_acquire_release_volatile.h
+++ b/src/atomic_ops/sysdeps/short_acquire_release_volatile.h
@@ -34,7 +34,7 @@
 #endif
 
 AO_INLINE unsigned short
-AO_short_load_acquire(volatile unsigned short *p)
+AO_short_load_acquire(const volatile unsigned short *p)
 {
   unsigned short result = *p;
   /* A normal volatile load generates an ld.acq */
diff --git a/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h
index 3b285b8..a88fbf2 100644
--- a/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h
@@ -26,7 +26,7 @@
  */
 
 AO_INLINE unsigned short
-AO_short_load(volatile unsigned short *addr)
+AO_short_load(const volatile unsigned short *addr)
 {
   assert(((size_t)addr & (sizeof(unsigned short) - 1)) == 0);
   /* Cast away the volatile for architectures like IA64 where  */
diff --git a/src/atomic_ops/sysdeps/short_atomic_load_store.h b/src/atomic_ops/sysdeps/short_atomic_load_store.h
index f1b5281..dcfbbd1 100644
--- a/src/atomic_ops/sysdeps/short_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/short_atomic_load_store.h
@@ -26,11 +26,11 @@
  */
 
 AO_INLINE unsigned short
-AO_short_load(volatile unsigned short *addr)
+AO_short_load(const volatile unsigned short *addr)
 {
   /* Cast away the volatile for architectures like IA64 where  */
   /* volatile adds barrier semantics.                          */
-  return (*(unsigned short *)addr);
+  return (*(const unsigned short *)addr);
 }
 
 #define AO_HAVE_short_load
-- 
2.40.0
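
A brief illustration (not part of the patch) of what the const-qualified signatures buy callers: a read-only accessor can take a pointer to const data and pass it straight to AO_load. This is a minimal sketch; the counter struct and read_counter function are hypothetical, and it assumes a platform where AO_load is provided.

#include "atomic_ops.h"

/* Hypothetical example type; 'value' is accessed atomically. */
struct counter {
  volatile AO_t value;
};

/* The reader only needs const access.  Before this patch, &c->value  */
/* (a const volatile AO_t *) could not be passed to AO_load without   */
/* casting away the const qualifier.                                  */
AO_t
read_counter(const struct counter *c)
{
  return AO_load(&c->value);
}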