INITIALIZE_ONCE(&InitThinMutex);
#endif /* _DEBUG */
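
+/**
+ * Out-of-line slow path for Lock(): taken when the fast-path
+ * compare-and-swap fails because the mutex is locked or inflated.
+ */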
+void ThinMutex::LockSlowPath(void)
+{
+	LockSlowPath(false);
+}
+
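+/**
+ * Spins until the mutex can be acquired, falling back to the native
+ * mutex if another thread inflates it first. After acquiring the lock,
+ * the mutex is inflated if it was contended or make_native is set.
+ *
+ * @param make_native Whether to inflate the mutex into a native mutex.
+ */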
+void ThinMutex::LockSlowPath(bool make_native)
+{
+	unsigned int it = 0;
+
+#ifdef _WIN32
+# ifdef _WIN64
+	while (InterlockedCompareExchange64(&m_Data, THINLOCK_LOCKED, THINLOCK_UNLOCKED) != THINLOCK_UNLOCKED) {
+# else /* _WIN64 */
+	while (InterlockedCompareExchange(&m_Data, THINLOCK_LOCKED, THINLOCK_UNLOCKED) != THINLOCK_UNLOCKED) {
+# endif /* _WIN64 */
+#else /* _WIN32 */
+	while (!__sync_bool_compare_and_swap(&m_Data, THINLOCK_UNLOCKED, THINLOCK_LOCKED)) {
+#endif /* _WIN32 */
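+		/* Another thread has inflated the mutex; use the native lock instead. */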
+		if (m_Data > THINLOCK_LOCKED) {
+			LockNative();
+			return;
+		}
+
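+		/* The lock is contended; remember to inflate it once we own it. */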
+		make_native = true;
+
+		Spin(it);
+		it++;
+	}
+
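+	/* Inflate the mutex if the caller requested it or contention was observed. */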
+	if (make_native)
+		MakeNative();
+}
+
void ThinMutex::MakeNative(void)
{
	boost::mutex *mtx = new boost::mutex();
-	inline void Lock(bool make_native = false)
+	inline void Lock(void)
	{
-		bool contended = false;
-		unsigned int it = 0;
-
#ifdef _WIN32
# ifdef _WIN64
-		while (InterlockedCompareExchange64(&m_Data, THINLOCK_LOCKED, THINLOCK_UNLOCKED) != THINLOCK_UNLOCKED) {
+		if (InterlockedCompareExchange64(&m_Data, THINLOCK_LOCKED, THINLOCK_UNLOCKED) != THINLOCK_UNLOCKED) {
# else /* _WIN64 */
-		while (InterlockedCompareExchange(&m_Data, THINLOCK_LOCKED, THINLOCK_UNLOCKED) != THINLOCK_UNLOCKED) {
+		if (InterlockedCompareExchange(&m_Data, THINLOCK_LOCKED, THINLOCK_UNLOCKED) != THINLOCK_UNLOCKED) {
# endif /* _WIN64 */
#else /* _WIN32 */
-		while (!__sync_bool_compare_and_swap(&m_Data, THINLOCK_UNLOCKED, THINLOCK_LOCKED)) {
+		if (!__sync_bool_compare_and_swap(&m_Data, THINLOCK_UNLOCKED, THINLOCK_LOCKED)) {
#endif /* _WIN32 */
-			if (m_Data > THINLOCK_LOCKED) {
-				LockNative();
-				return;
-			}
-
-			contended = true;
-
-			Spin(it);
-			it++;
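+			/* The fast path failed; fall back to the slow path. */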
+			LockSlowPath();
		}
-
-		if (contended || make_native)
-			MakeNative();
	}
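+
+	/* Out-of-line slow path helpers used by Lock() and Inflate(). */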
+	void LockSlowPath(void);
+	void LockSlowPath(bool make_native);
+
	inline void Unlock(void)
	{
#ifdef _WIN32
	inline void Inflate(void)
	{
-		Lock(true);
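+		/* Take the slow path directly so the mutex is inflated while held. */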
+		LockSlowPath(true);
		Unlock();
	}