granicus.if.org Git - esp-idf/commitdiff
freertos: use xTaskQueueMutex to protect tick count
author: Ivan Grokhotkov <ivan@espressif.com>
Fri, 12 Oct 2018 06:18:49 +0000 (14:18 +0800)
committer: bot <bot@espressif.com>
Mon, 5 Nov 2018 03:08:26 +0000 (03:08 +0000)
Having two different spinlocks is problematic due to possibly
different order in which the locks will be taken. Changing the order
would require significant restructuring of kernel code which is
undesirable.

An additional place where taking xTickCountMutex was needed was in
the vApplicationSleep function. Not taking xTickCountMutex resulted in
other CPU sometimes possibly advancing tick count while light sleep
entry/exit was happening. Taking xTickCountMutex in addition to
xTaskQueueMutex has shown a problem that in different code paths,
these two spinlocks could be taken in different order, leading to
(unlikely, but possible) deadlocks.

components/freertos/tasks.c

index 0f42f3c474acda1a25f8bc0d9394da881a406167..21cd1b5e99d9143c679eeaf800517f0d6b1c6ddf 100644 (file)
@@ -302,10 +302,8 @@ when the scheduler is unsuspended.  The pending ready list itself can only be
 accessed from a critical section. */
 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
 
-/* For now, we use just one mux for all the critical sections. ToDo: give everything a bit more granularity;
-  that could improve performance by not needlessly spinning in spinlocks for unrelated resources. */
+/* We use just one spinlock for all the critical sections. */
 PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
-PRIVILEGED_DATA static portMUX_TYPE xTickCountMutex = portMUX_INITIALIZER_UNLOCKED;
 
 #if ( configGENERATE_RUN_TIME_STATS == 1 )
 
@@ -1346,9 +1344,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
                {
                        /* Minor optimisation.  The tick count cannot change in this
                        block. */
-//                     portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
                        const TickType_t xConstTickCount = xTickCount;
-//                     portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
 
                        /* Generate the tick time at which the task wants to wake. */
                        xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
@@ -1455,9 +1451,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
 
                                /* Calculate the time to wake - this may overflow but this is
                                not a problem. */
-//                             portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
                                xTimeToWake = xTickCount + xTicksToDelay;
-//                             portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
 
                                /* We must remove ourselves from the ready list before adding
                                ourselves to the blocked list as the same list item is used for
@@ -2200,9 +2194,7 @@ void vTaskSuspendAll( void )
                }
                else
                {
-                       portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
                        xReturn = xNextTaskUnblockTime - xTickCount;
-                       portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
                }
                taskEXIT_CRITICAL(&xTaskQueueMutex);
 
@@ -2308,31 +2300,13 @@ BaseType_t xAlreadyYielded = pdFALSE;
 
 TickType_t xTaskGetTickCount( void )
 {
-TickType_t xTicks;
-
-       /* Critical section required if running on a 16 bit processor. */
-       portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
-       {
-               xTicks = xTickCount;
-       }
-       portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
-
-       return xTicks;
+       return xTickCount;
 }
 /*-----------------------------------------------------------*/
 
 TickType_t xTaskGetTickCountFromISR( void )
 {
-TickType_t xReturn;
-
-       taskENTER_CRITICAL_ISR(&xTickCountMutex);
-       {
-               xReturn = xTickCount;
-//             vPortCPUReleaseMutex( &xTickCountMutex );
-       }
-       taskEXIT_CRITICAL_ISR(&xTickCountMutex);
-
-       return xReturn;
+       return xTickCount;
 }
 /*-----------------------------------------------------------*/
 
@@ -2467,10 +2441,10 @@ implementations require configUSE_TICKLESS_IDLE to be set to a value other than
                /* Correct the tick count value after a period during which the tick
                was suppressed.  Note this does *not* call the tick hook function for
                each stepped tick. */
-               portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
+               portENTER_CRITICAL( &xTaskQueueMutex );
                configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
                xTickCount += xTicksToJump;
-               portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
+               portEXIT_CRITICAL( &xTaskQueueMutex );
                traceINCREASE_TICK_COUNT( xTicksToJump );
        }
 
@@ -2512,14 +2486,10 @@ BaseType_t xSwitchRequired = pdFALSE;
 
        if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
        {
-               portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
+               taskENTER_CRITICAL_ISR( &xTaskQueueMutex );
                /* Increment the RTOS tick, switching the delayed and overflowed
                delayed lists if it wraps to 0. */
                ++xTickCount;
-               portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
-
-               //The other CPU may decide to mess with the task queues, so this needs a mux.
-               taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
                {
                        /* Minor optimisation.  The tick count cannot change in this
                        block. */
@@ -3288,7 +3258,7 @@ BaseType_t xReturn;
        configASSERT( pxTimeOut );
        configASSERT( pxTicksToWait );
 
-       taskENTER_CRITICAL(&xTickCountMutex);
+       taskENTER_CRITICAL(&xTaskQueueMutex);
        {
                /* Minor optimisation.  The tick count cannot change in this block. */
                const TickType_t xConstTickCount = xTickCount;
@@ -3324,7 +3294,7 @@ BaseType_t xReturn;
                        xReturn = pdTRUE;
                }
        }
-       taskEXIT_CRITICAL(&xTickCountMutex);
+       taskEXIT_CRITICAL(&xTaskQueueMutex);
 
        return xReturn;
 }
@@ -4081,7 +4051,7 @@ TCB_t *pxTCB;
        {
        TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
 
-               taskENTER_CRITICAL(&xTickCountMutex);
+               taskENTER_CRITICAL(&xTaskQueueMutex);
                /* If the mutex was given back by an interrupt while the queue was
                locked then the mutex holder might now be NULL. */
                if( pxMutexHolder != NULL )
@@ -4138,7 +4108,7 @@ TCB_t *pxTCB;
                        mtCOVERAGE_TEST_MARKER();
                }
 
-               taskEXIT_CRITICAL(&xTickCountMutex);
+               taskEXIT_CRITICAL(&xTaskQueueMutex);
 
        }
 
@@ -4151,7 +4121,7 @@ TCB_t *pxTCB;
        {
        TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
        BaseType_t xReturn = pdFALSE;
-               taskENTER_CRITICAL(&xTickCountMutex);
+               taskENTER_CRITICAL(&xTaskQueueMutex);
 
                if( pxMutexHolder != NULL )
                {
@@ -4215,7 +4185,7 @@ TCB_t *pxTCB;
                        mtCOVERAGE_TEST_MARKER();
                }
 
-               taskEXIT_CRITICAL(&xTickCountMutex);
+               taskEXIT_CRITICAL(&xTaskQueueMutex);
                return xReturn;
        }