UTrie fTrie;
private:
- atomic_int32_t fRefCount;
+ u_atomic_int32_t fRefCount;
UDataMemory *fUDataMem;
UnicodeString fRuleString;
UBool fDontFreeData;
* platform independent set of mutex operations. For internal ICU use only.
*/
-#if U_PLATFORM_HAS_WIN32_API
+#if defined(U_USER_MUTEX_CPP)
+// Build time user mutex hook: #include "U_USER_MUTEX_CPP"
+#include U_MUTEX_XSTR(U_USER_MUTEX_CPP)
+
+#elif U_PLATFORM_HAS_WIN32_API
//-------------------------------------------------------------------------------------------
//
// False: the initialization failed. The next call to umtx_initOnce()
// will retry the initialization.
-U_CAPI void U_EXPORT2 umtx_initImplPostInit(UInitOnce &uio, UBool success) {
- int32_t nextState = success? 2: 0;
- umtx_storeRelease(uio.fState, nextState);
+U_CAPI void U_EXPORT2 umtx_initImplPostInit(UInitOnce &uio) {
+ umtx_storeRelease(uio.fState, 2);
}
if (state == 0) {
umtx_storeRelease(uio.fState, 1);
pthread_mutex_unlock(&initMutex);
- return true; // Caller will next call the init function.
- } else if (state == 2) {
- // Another thread already completed the initialization, in
- // a race with this thread. We can simply return FALSE, indicating no
- // further action is needed by the caller.
- pthread_mutex_unlock(&initMutex);
- return FALSE;
+ return TRUE; // Caller will next call the init function.
} else {
- // Another thread is currently running the initialization.
- // Wait until it completes.
- U_ASSERT(state == 1);
while (uio.fState == 1) {
+ // Another thread is currently running the initialization.
+ // Wait until it completes.
pthread_cond_wait(&initCondition, &initMutex);
}
- UBool returnVal = uio.fState == 0;
- if (returnVal) {
- // Initialization that was running in another thread failed.
- // We will retry it in this thread.
- // (This is only used by SimpleSingleton)
- umtx_storeRelease(uio.fState, 1);
- }
pthread_mutex_unlock(&initMutex);
- return returnVal;
+ U_ASSERT(uio.fState == 2);
+ return FALSE;
}
}
+
+
// This function is called by the thread that ran an initialization function,
// just after completing the function.
// Some threads may be waiting on the condition, requiring the broadcast wakeup.
// Some threads may be racing to test the fState variable outside of the mutex,
// requiring the use of store/release when changing its value.
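// The fState values used by this protocol (visible in the code above and below)
// are: 0 = initialization not yet started, 1 = initialization in progress on
// some thread, 2 = initialization complete. umtx_initImplPreInit() advances
// 0 -> 1; umtx_initImplPostInit() advances 1 -> 2 and wakes any waiting threads.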
-//
-// success: True: the inialization succeeded. No further calls to the init
-// function will be made.
-// False: the initializtion failed. The next call to umtx_initOnce()
-// will retry the initialization.
-void umtx_initImplPostInit(UInitOnce &uio, UBool success) {
- int32_t nextState = success? 2: 0;
+void umtx_initImplPostInit(UInitOnce &uio) {
pthread_mutex_lock(&initMutex);
- umtx_storeRelease(uio.fState, nextState);
+ umtx_storeRelease(uio.fState, 2);
pthread_cond_broadcast(&initCondition);
pthread_mutex_unlock(&initMutex);
}
-
-void umtx_initOnceReset(UInitOnce &uio) {
- // Not a thread safe function, we can use an ordinary assignment.
- uio.fState = 0;
-}
-
// End of POSIX specific umutex implementation.
#else // Platform #define chain.
static UMutex gIncDecMutex = U_MUTEX_INITIALIZER;
U_INTERNAL int32_t U_EXPORT2
-umtx_atomic_inc(int32_t *p) {
+umtx_atomic_inc(u_atomic_int32_t *p) {
int32_t retVal;
umtx_lock(&gIncDecMutex);
retVal = ++(*p);
U_INTERNAL int32_t U_EXPORT2
-umtx_atomic_dec(int32_t *p) {
+umtx_atomic_dec(u_atomic_int32_t *p) {
int32_t retVal;
umtx_lock(&gIncDecMutex);
retVal = --(*p);
}
U_INTERNAL int32_t U_EXPORT2
-umtx_loadAcquire(atomic_int32_t &var) {
+umtx_loadAcquire(u_atomic_int32_t &var) {
int32_t val = var;
umtx_lock(&gIncDecMutex);
umtx_unlock(&gIncDecMutex);
}
U_INTERNAL void U_EXPORT2
-umtx_storeRelease(atomic_int32_t &var, int32_t val) {
+umtx_storeRelease(u_atomic_int32_t &var, int32_t val) {
umtx_lock(&gIncDecMutex);
umtx_unlock(&gIncDecMutex);
var = val;
struct UMutex;
struct UInitOnce;
+// Stringify macros, to allow #include of user supplied atomic & mutex files.
+#define U_MUTEX_STR(s) #s
+#define U_MUTEX_XSTR(s) U_MUTEX_STR(s)
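// For illustration only (a sketch, not part of this change): passing a file name
// on the compiler command line, e.g. -DU_USER_ATOMICS_H=my_atomics.h, where
// "my_atomics.h" is a hypothetical user-supplied header, makes
// U_MUTEX_XSTR(U_USER_ATOMICS_H) first expand the macro argument and then
// stringify it, so
//     #include U_MUTEX_XSTR(U_USER_ATOMICS_H)
// becomes
//     #include "my_atomics.h"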
/****************************************************************************
*
* Compiler dependent. Not operating system dependent.
*
****************************************************************************/
-#if U_HAVE_STD_ATOMICS
+#if defined (U_USER_ATOMICS_H)
+#include U_MUTEX_XSTR(U_USER_ATOMICS_H)
+
+#elif U_HAVE_STD_ATOMICS
// C++11 atomics are available.
#include <atomic>
-typedef std::atomic<int32_t> atomic_int32_t;
+typedef std::atomic<int32_t> u_atomic_int32_t;
#define ATOMIC_INT32_T_INITIALIZER(val) ATOMIC_VAR_INIT(val)
-inline int32_t umtx_loadAcquire(atomic_int32_t &var) {
+inline int32_t umtx_loadAcquire(u_atomic_int32_t &var) {
return var.load(std::memory_order_acquire);
}
-inline void umtx_storeRelease(atomic_int32_t &var, int32_t val) {
+inline void umtx_storeRelease(u_atomic_int32_t &var, int32_t val) {
var.store(val, std::memory_order_release);
}
-inline int32_t umtx_atomic_inc(atomic_int32_t *var) {
+inline int32_t umtx_atomic_inc(u_atomic_int32_t *var) {
return var->fetch_add(1) + 1;
}
-inline int32_t umtx_atomic_dec(atomic_int32_t *var) {
+inline int32_t umtx_atomic_dec(u_atomic_int32_t *var) {
return var->fetch_sub(1) - 1;
}
# endif
# include <windows.h>
-typedef volatile LONG atomic_int32_t;
+typedef volatile LONG u_atomic_int32_t;
#define ATOMIC_INT32_T_INITIALIZER(val) val
-inline int32_t umtx_loadAcquire(atomic_int32_t &var) {
+inline int32_t umtx_loadAcquire(u_atomic_int32_t &var) {
return InterlockedCompareExchange(&var, 0, 0);
}
-inline void umtx_storeRelease(atomic_int32_t &var, int32_t val) {
+inline void umtx_storeRelease(u_atomic_int32_t &var, int32_t val) {
InterlockedExchange(&var, val);
}
-inline int32_t umtx_atomic_inc(atomic_int32_t *var) {
+inline int32_t umtx_atomic_inc(u_atomic_int32_t *var) {
return InterlockedIncrement(var);
}
-inline int32_t umtx_atomic_dec(atomic_int32_t *var) {
+inline int32_t umtx_atomic_dec(u_atomic_int32_t *var) {
return InterlockedDecrement(var);
}
/*
* gcc atomic ops. These are available on several other compilers as well.
*/
-typedef int32_t atomic_int32_t;
+typedef int32_t u_atomic_int32_t;
#define ATOMIC_INT32_T_INITIALIZER(val) val
-inline int32_t umtx_loadAcquire(atomic_int32_t &var) {
+inline int32_t umtx_loadAcquire(u_atomic_int32_t &var) {
int32_t val = var;
__sync_synchronize();
return val;
}
-inline void umtx_storeRelease(atomic_int32_t &var, int32_t val) {
+inline void umtx_storeRelease(u_atomic_int32_t &var, int32_t val) {
__sync_synchronize();
var = val;
}
-inline int32_t umtx_atomic_inc(atomic_int32_t *p) {
+inline int32_t umtx_atomic_inc(u_atomic_int32_t *p) {
return __sync_add_and_fetch(p, 1);
}
-inline int32_t umtx_atomic_dec(atomic_int32_t *p) {
+inline int32_t umtx_atomic_dec(u_atomic_int32_t *p) {
return __sync_sub_and_fetch(p, 1);
}
#define U_NO_PLATFORM_ATOMICS
-typedef int32_t atomic_int32_t;
+typedef int32_t u_atomic_int32_t;
#define ATOMIC_INT32_T_INITIALIZER(val) val
-U_INTERNAL int32_t U_EXPORT2 umtx_loadAcquire(atomic_int32_t &var);
+U_INTERNAL int32_t U_EXPORT2 umtx_loadAcquire(u_atomic_int32_t &var);
-U_INTERNAL void U_EXPORT2 umtx_storeRelease(atomic_int32_t &var, int32_t val);
+U_INTERNAL void U_EXPORT2 umtx_storeRelease(u_atomic_int32_t &var, int32_t val);
-U_INTERNAL int32_t U_EXPORT2 umtx_atomic_inc(atomic_int32_t *p);
+U_INTERNAL int32_t U_EXPORT2 umtx_atomic_inc(u_atomic_int32_t *p);
-U_INTERNAL int32_t U_EXPORT2 umtx_atomic_dec(atomic_int32_t *p);
+U_INTERNAL int32_t U_EXPORT2 umtx_atomic_dec(u_atomic_int32_t *p);
#endif /* Low Level Atomic Ops Platform Chain */
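// A sketch, for illustration only (not part of this change), of the interface a
// user-supplied U_USER_ATOMICS_H header is expected to provide: the same small
// set of names that each branch of the chain above defines.
//
//     typedef int32_t u_atomic_int32_t;            // or a platform atomic type
//     #define ATOMIC_INT32_T_INITIALIZER(val) val
//     inline int32_t umtx_loadAcquire(u_atomic_int32_t &var);
//     inline void    umtx_storeRelease(u_atomic_int32_t &var, int32_t val);
//     inline int32_t umtx_atomic_inc(u_atomic_int32_t *p);
//     inline int32_t umtx_atomic_dec(u_atomic_int32_t *p);
//
// The user implementations must give these operations genuine atomicity and
// acquire/release ordering for the target platform.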
*************************************************************************************************/
struct UInitOnce {
- atomic_int32_t fState;
+ u_atomic_int32_t fState;
UErrorCode fErrCode;
- void reset() {fState = 0; fState=0;};
+ void reset() {fState = 0;};
UBool isReset() {return umtx_loadAcquire(fState) == 0;};
// Note: isReset() is used by service registration code.
// Thread safety of this usage needs review.
U_CAPI UBool U_EXPORT2 umtx_initImplPreInit(UInitOnce &);
-U_CAPI void U_EXPORT2 umtx_initImplPostInit(UInitOnce &, UBool success);
+U_CAPI void U_EXPORT2 umtx_initImplPostInit(UInitOnce &);
template<class T> void umtx_initOnce(UInitOnce &uio, T *obj, void (T::*fp)()) {
if (umtx_loadAcquire(uio.fState) == 2) {
}
if (umtx_initImplPreInit(uio)) {
(obj->*fp)();
- umtx_initImplPostInit(uio, TRUE);
+ umtx_initImplPostInit(uio);
}
}
}
if (umtx_initImplPreInit(uio)) {
(*fp)();
- umtx_initImplPostInit(uio, TRUE);
+ umtx_initImplPostInit(uio);
}
}
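// Illustrative usage, a sketch only (not part of this change). The names
// SomeType, gThing, gThingInitOnce and initThing are hypothetical;
// U_INITONCE_INITIALIZER is assumed to be the zero-state initializer defined
// alongside UInitOnce.
//
//     static SomeType  *gThing = NULL;
//     static UInitOnce  gThingInitOnce = U_INITONCE_INITIALIZER;
//
//     static void initThing() {                // runs at most once
//         gThing = new SomeType();
//     }
//
//     SomeType *getThing() {
//         // The first caller runs initThing(); concurrent callers block until
//         // it completes; later callers see fState == 2 and return immediately.
//         umtx_initOnce(gThingInitOnce, &initThing);
//         return gThing;
//     }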
// We run the initialization.
(*fp)(errCode);
uio.fErrCode = errCode;
- umtx_initImplPostInit(uio, TRUE);
+ umtx_initImplPostInit(uio);
} else {
// Someone else already ran the initialization.
if (U_FAILURE(uio.fErrCode)) {
}
if (umtx_initImplPreInit(uio)) {
(*fp)(context);
- umtx_initImplPostInit(uio, TRUE);
+ umtx_initImplPostInit(uio);
}
}
// We run the initialization.
(*fp)(context, errCode);
uio.fErrCode = errCode;
- umtx_initImplPostInit(uio, TRUE);
+ umtx_initImplPostInit(uio);
} else {
// Someone else already ran the initialization.
if (U_FAILURE(uio.fErrCode)) {
*
*************************************************************************************************/
-#if U_PLATFORM_HAS_WIN32_API
+#if defined(U_USER_MUTEX_H)
+// Build time user mutex hook: #include "U_USER_MUTEX_H"
+#include U_MUTEX_XSTR(U_USER_MUTEX_H)
+
+#elif U_PLATFORM_HAS_WIN32_API
/* Windows Definitions.
* Windows comes first in the platform chain.
void
UnicodeString::addRef()
-{ umtx_atomic_inc((atomic_int32_t *)fUnion.fFields.fArray - 1);}
+{ umtx_atomic_inc((u_atomic_int32_t *)fUnion.fFields.fArray - 1);}
int32_t
UnicodeString::removeRef()
-{ return umtx_atomic_dec((atomic_int32_t *)fUnion.fFields.fArray - 1);}
+{ return umtx_atomic_dec((u_atomic_int32_t *)fUnion.fFields.fArray - 1);}
int32_t
UnicodeString::refCount() const
// release the old array
if(flags & kRefCounted) {
// the array is refCounted; decrement and release if 0
- atomic_int32_t *pRefCount = ((atomic_int32_t *)oldArray - 1);
+ u_atomic_int32_t *pRefCount = ((u_atomic_int32_t *)oldArray - 1);
if(umtx_atomic_dec(pRefCount) == 0) {
if(pBufferToDelete == 0) {
- // Note: cast to (void *) is needed with MSVC, where atomic_int32_t
+ // Note: cast to (void *) is needed with MSVC, where u_atomic_int32_t
// is defined as volatile. (Volatile has useful non-standard behavior
// with this compiler.)
uprv_free((void *)pRefCount);
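// Note on the pointer arithmetic above: for a kRefCounted UnicodeString buffer,
// the reference count lives in the int32 slot immediately preceding the
// character array, so addRef(), removeRef() and this release path all reach it
// via ((u_atomic_int32_t *)array - 1).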
~RegularExpression();
int32_t fMagic;
RegexPattern *fPat;
- atomic_int32_t *fPatRefCount;
+ u_atomic_int32_t *fPatRefCount;
UChar *fPatString;
int32_t fPatStringLen;
RegexMatcher *fMatcher;
}
RegularExpression *re = new RegularExpression;
- atomic_int32_t *refC = (atomic_int32_t *)uprv_malloc(sizeof(int32_t));
+ u_atomic_int32_t *refC = (u_atomic_int32_t *)uprv_malloc(sizeof(int32_t));
UChar *patBuf = (UChar *)uprv_malloc(sizeof(UChar)*(actualPatLen+1));
if (re == NULL || refC == NULL || patBuf == NULL) {
*status = U_MEMORY_ALLOCATION_ERROR;
UErrorCode lengthStatus = U_ZERO_ERROR;
int32_t pattern16Length = utext_extract(pattern, 0, patternNativeLength, NULL, 0, &lengthStatus);
- atomic_int32_t *refC = (atomic_int32_t *)uprv_malloc(sizeof(int32_t));
+ u_atomic_int32_t *refC = (u_atomic_int32_t *)uprv_malloc(sizeof(int32_t));
UChar *patBuf = (UChar *)uprv_malloc(sizeof(UChar)*(pattern16Length+1));
if (re == NULL || refC == NULL || patBuf == NULL) {
*status = U_MEMORY_ALLOCATION_ERROR;
// we are done.
uint32_t fMemLimit; // Limit of available raw data space
- atomic_int32_t fRefCount;
+ u_atomic_int32_t fRefCount;
// Confusable data
int32_t *fCFUKeys;