U_CAPI UBool U_EXPORT2 umtx_initImplPreInit(UInitOnce &uio) {
for (;;) {
- int32_t previousState = InterlockedCompareExchange(
+ int32_t previousState = InterlockedCompareExchange(
+#if (U_PLATFORM == U_PF_MINGW) || (U_PLATFORM == U_PF_CYGWIN)
+ (LONG volatile *) // this is the type given in the API doc for this function.
+#endif
&uio.fState, // Destination
1, // Exchange Value
0); // Compare value
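//
// Illustrative sketch, not part of this patch: how the result of the
// compare-and-swap above is typically acted on. fState is assumed to take
// three values: 0 = init not started, 1 = init in progress, 2 = init complete.
//
//     if (previousState == 0) {
//         return TRUE;    // This thread won the race and runs the init function.
//     } else if (previousState == 2) {
//         return FALSE;   // Another thread already completed the initialization.
//     } else {
//         // previousState == 1: initialization is in progress on another
//         // thread. Yield until fState becomes 2, then return FALSE.
//     }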
//
//-------------------------------------------------------------------------------------------
-# include <pthread.h>
+# include <pthread.h>
// Each UMutex consists of a pthread_mutex_t.
// All are statically initialized and ready for use.
// This function is called by the thread that ran an initialization function,
// just after completing the function.
// Some threads may be waiting on the condition, requiring the broadcast wakeup.
-// Some threads may be racing to test the fState variable outside of the mutex,
+// Some threads may be racing to test the fState variable outside of the mutex,
// requiring the use of store/release when changing its value.
//
// success: True: the initialization succeeded. No further calls to the init
// This is not a thread safe function, so an ordinary assignment suffices.
uio.fState = 0;
}
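// A minimal sketch, not part of this patch, of the post-initialization step the
// comments above describe. The mutex/condition names and the exact signature of
// umtx_initImplPostInit are assumptions for illustration:
//
//     static pthread_mutex_t initMutex = PTHREAD_MUTEX_INITIALIZER;
//     static pthread_cond_t  initCond  = PTHREAD_COND_INITIALIZER;
//
//     static void umtx_initImplPostInit(UInitOnce &uio, UBool success) {
//         pthread_mutex_lock(&initMutex);
//         // store/release: other threads may read fState outside the mutex.
//         // A failed init goes back to 0 so that a later call can retry.
//         umtx_storeRelease(uio.fState, success ? 2 : 0);
//         pthread_cond_broadcast(&initCond);   // wake all threads waiting on fState.
//         pthread_mutex_unlock(&initMutex);
//     }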
-
+
// End of POSIX specific umutex implementation.
#else // Platform #define chain.
//
//--------------------------------------------------------------------------
-U_DEPRECATED void U_EXPORT2
-u_setMutexFunctions(const void * /*context */, UMtxInitFn *, UMtxFn *,
+U_DEPRECATED void U_EXPORT2
+u_setMutexFunctions(const void * /*context */, UMtxInitFn *, UMtxFn *,
UMtxFn *, UMtxFn *, UErrorCode *status) {
if (U_SUCCESS(*status)) {
*status = U_UNSUPPORTED_ERROR;
}
return;
}
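// Caller-side view of the stub above, for illustration: any attempt to install
// custom mutex functions now reports U_UNSUPPORTED_ERROR.
//
//     UErrorCode status = U_ZERO_ERROR;
//     u_setMutexFunctions(NULL, NULL, NULL, NULL, NULL, &status);
//     // status is now U_UNSUPPORTED_ERROR; ICU's own mutexes are always used.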
-
/****************************************************************************
*
- * Low Level Atomic Operations.
+ * Low Level Atomic Operations.
* Compiler dependent. Not operating system dependent.
*
****************************************************************************/
inline int32_t umtx_loadAcquire(atomic_int32_t &var) {
return var.load(std::memory_order_acquire);
-};
+}
inline void umtx_storeRelease(atomic_int32_t &var, int32_t val) {
var.store(val, std::memory_order_release);
-};
+}
inline int32_t umtx_atomic_inc(atomic_int32_t *var) {
return var->fetch_add(1) + 1;
}
-
+
inline int32_t umtx_atomic_dec(atomic_int32_t *var) {
return var->fetch_sub(1) - 1;
}
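// Illustrative usage of the four wrappers above (names below are assumptions):
// a reference count whose inc/dec return the updated value, and an
// acquire/release pair that publishes data safely between threads.
//
//     static atomic_int32_t gRefCount;   // assumed zero-initialized.
//     static atomic_int32_t gReady;
//
//     void retain()   { umtx_atomic_inc(&gRefCount); }
//     UBool release() { return umtx_atomic_dec(&gRefCount) == 0; }  // TRUE: last ref.
//
//     // Writer: fill in shared data, then umtx_storeRelease(gReady, 1);
//     // Reader: if (umtx_loadAcquire(gReady) == 1) the shared data is visible.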
-
+
#elif U_PLATFORM_HAS_WIN32_API
inline void umtx_initOnce(UInitOnce &uio, void (*fp)(UErrorCode &), UErrorCode &errCode) {
if (U_FAILURE(errCode)) {
return;
- }
+ }
if (umtx_loadAcquire(uio.fState) != 2 && umtx_initImplPreInit(uio)) {
// We run the initialization.
(*fp)(errCode);
template<class T> void umtx_initOnce(UInitOnce &uio, void (*fp)(T, UErrorCode &), T context, UErrorCode &errCode) {
if (U_FAILURE(errCode)) {
return;
- }
+ }
if (umtx_loadAcquire(uio.fState) != 2 && umtx_initImplPreInit(uio)) {
// We run the initialization.
(*fp)(context, errCode);
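// Caller-side sketch of umtx_initOnce, for illustration only (initCache and
// useCache are invented names; U_INITONCE_INITIALIZER is ICU's static
// initializer for UInitOnce):
//
//     static UInitOnce gCacheInitOnce = U_INITONCE_INITIALIZER;
//
//     static void U_CALLCONV initCache(UErrorCode &status) {
//         // Runs at most once, on whichever thread gets here first.
//     }
//
//     void useCache(UErrorCode &status) {
//         umtx_initOnce(gCacheInitOnce, &initCache, status);
//         if (U_FAILURE(status)) { return; }
//         // Initialization is complete (or its error is in status) here.
//     }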
/* For CRITICAL_SECTION */
/*
- * Note: there is an earlier include of windows.h in this file, but it is in
+ * Note: there is an earlier include of windows.h in this file, but it is in
* different conditionals.
* This one is needed if we are using C++11 for atomic ops, but
* win32 APIs for Critical Sections.
*/
-
+
# define WIN32_LEAN_AND_MEAN
# define VC_EXTRALEAN
# define NOUSER
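// For illustration only; the include itself falls outside this hunk. The
// defines above trim <windows.h> down before it is pulled in for
// CRITICAL_SECTION:
//
//     # include <windows.h>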
#else
-/*
- * Unknow platform type.
+/*
+ * Unknown platform type.
* This is an error condition. ICU requires mutexes.
*/
#endif
-
+
/**************************************************************************************
*
 * Mutex Implementation function declarations.
* the global ICU mutex. Recursive locks are an error
* and may cause a deadlock on some platforms.
*/
-U_INTERNAL void U_EXPORT2 umtx_lock(UMutex* mutex);
+U_INTERNAL void U_EXPORT2 umtx_lock(UMutex* mutex);
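// Illustrative usage of umtx_lock()/umtx_unlock(), not part of this patch
// (gDataMutex and touchSharedState are invented names):
//
//     static UMutex gDataMutex = U_MUTEX_INITIALIZER;
//
//     void touchSharedState() {
//         umtx_lock(&gDataMutex);
//         // ... touch the protected state; never re-lock gDataMutex here,
//         // since recursive locks are an error per the comment above.
//         umtx_unlock(&gDataMutex);
//     }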
/* Unlock a mutex.
* @param mutex The given mutex to be unlocked. Pass NULL to specify