accessed from a critical section. */
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
-/* For now, we use just one mux for all the critical sections. ToDo: give everything a bit more granularity;
- that could improve performance by not needlessly spinning in spinlocks for unrelated resources. */
+/* We use just one spinlock for all the critical sections. */
PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
-PRIVILEGED_DATA static portMUX_TYPE xTickCountMutex = portMUX_INITIALIZER_UNLOCKED;
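/* Illustrative sketch, not part of the patched file: with a single spinlock,
   every kernel critical section nests on the same portMUX_TYPE, following the
   same enter/exit pattern the diff applies to xTaskQueueMutex.  The mux,
   counter and function below are hypothetical. */
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static portMUX_TYPE xExampleMux = portMUX_INITIALIZER_UNLOCKED;
static volatile UBaseType_t uxSharedCount = 0;

static void prvBumpSharedCount( void )
{
    /* In this port the macro takes the spinlock, protecting the shared data
       against the other core as well as against interrupts on this core. */
    taskENTER_CRITICAL( &xExampleMux );
    uxSharedCount++;
    taskEXIT_CRITICAL( &xExampleMux );
}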
#if ( configGENERATE_RUN_TIME_STATS == 1 )
{
/* Minor optimisation. The tick count cannot change in this
block. */
-// portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
const TickType_t xConstTickCount = xTickCount;
-// portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
/* Generate the tick time at which the task wants to wake. */
xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
/* Calculate the time to wake - this may overflow but this is
not a problem. */
-// portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
xTimeToWake = xTickCount + xTicksToDelay;
-// portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
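/* Illustrative sketch, not part of the patched file: tick arithmetic is
   unsigned, so the wake-time additions above can wrap, which is harmless.
   The kernel spots the wrap by comparing the result with the current tick
   count and keeps such tasks on the overflow-delayed list until xTickCount
   itself wraps back to zero.  The values below are made up to show the wrap. */
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static BaseType_t prvExampleWakeTimeWrapped( void )
{
    const TickType_t xConstTickCount = ( TickType_t ) 0xFFFFFFF0UL;     /* nearly at the wrap point */
    const TickType_t xTicksToDelay   = ( TickType_t ) 0x20UL;
    const TickType_t xTimeToWake     = xConstTickCount + xTicksToDelay; /* wraps to 0x00000010 */

    /* pdTRUE means the wake time wrapped; such a task would be queued on the
       overflow-delayed list rather than the normal delayed list. */
    return ( xTimeToWake < xConstTickCount ) ? pdTRUE : pdFALSE;
}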
/* We must remove ourselves from the ready list before adding
ourselves to the blocked list as the same list item is used for both lists. */
}
else
{
- portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
xReturn = xNextTaskUnblockTime - xTickCount;
- portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
}
taskEXIT_CRITICAL(&xTaskQueueMutex);
TickType_t xTaskGetTickCount( void )
{
-TickType_t xTicks;
-
- /* Critical section required if running on a 16 bit processor. */
- portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
- {
- xTicks = xTickCount;
- }
- portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
-
- return xTicks;
+ return xTickCount;
}
/*-----------------------------------------------------------*/
TickType_t xTaskGetTickCountFromISR( void )
{
-TickType_t xReturn;
-
- taskENTER_CRITICAL_ISR(&xTickCountMutex);
- {
- xReturn = xTickCount;
-// vPortCPUReleaseMutex( &xTickCountMutex );
- }
- taskEXIT_CRITICAL_ISR(&xTickCountMutex);
-
- return xReturn;
+ return xTickCount;
}
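/* Illustrative usage sketch, not part of the patched file: on this 32-bit
   port a single aligned read of xTickCount is atomic, which is why the two
   getters above no longer need a critical section.  Elapsed-time maths also
   stays correct across a tick-count wrap because the subtraction is modular.
   vDoSomeWork() is a hypothetical placeholder. */
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

extern void vDoSomeWork( void );

void vExampleMeasure( void )
{
    const TickType_t xStart = xTaskGetTickCount();

    vDoSomeWork();

    /* Unsigned subtraction gives the right elapsed count even if xTickCount
       wrapped between the two reads. */
    const TickType_t xElapsed = xTaskGetTickCount() - xStart;
    ( void ) xElapsed;
}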
/*-----------------------------------------------------------*/
/* Correct the tick count value after a period during which the tick
was suppressed. Note this does *not* call the tick hook function for
each stepped tick. */
- portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
+ portENTER_CRITICAL( &xTaskQueueMutex );
configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
xTickCount += xTicksToJump;
- portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
+ portEXIT_CRITICAL( &xTaskQueueMutex );
traceINCREASE_TICK_COUNT( xTicksToJump );
}
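/* Illustrative sketch, not part of the patched file: vTaskStepTick() is the
   hook a tickless-idle port uses to catch the tick count up after the tick
   interrupt has been suppressed.  This is a simplified outline only; a real
   port masks interrupts around the sequence, and prvStopTickInterrupt(),
   prvSleepForAtMost() and prvStartTickInterrupt() are hypothetical
   hardware-specific helpers. */
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void prvStopTickInterrupt( void );
static void prvStartTickInterrupt( void );
static TickType_t prvSleepForAtMost( TickType_t xTicks );

void vExampleSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
{
    prvStopTickInterrupt();

    if( eTaskConfirmSleepModeStatus() != eAbortSleep )
    {
        /* Sleep, then tell the kernel how many whole ticks went by while the
           tick interrupt was stopped.  No tick hooks are called for them. */
        vTaskStepTick( prvSleepForAtMost( xExpectedIdleTime ) );
    }

    prvStartTickInterrupt();
}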
if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
{
- portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
+ taskENTER_CRITICAL_ISR( &xTaskQueueMutex );
/* Increment the RTOS tick, switching the delayed and overflowed
delayed lists if it wraps to 0. */
++xTickCount;
- portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
-
- //The other CPU may decide to mess with the task queues, so this needs a mux.
- taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
{
/* Minor optimisation. The tick count cannot change in this
block. */
configASSERT( pxTimeOut );
configASSERT( pxTicksToWait );
- taskENTER_CRITICAL(&xTickCountMutex);
+ taskENTER_CRITICAL(&xTaskQueueMutex);
{
/* Minor optimisation. The tick count cannot change in this block. */
const TickType_t xConstTickCount = xTickCount;
xReturn = pdTRUE;
}
}
- taskEXIT_CRITICAL(&xTickCountMutex);
+ taskEXIT_CRITICAL(&xTaskQueueMutex);
return xReturn;
}
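/* Illustrative usage sketch, not part of the patched file: the timeout
   helpers this fragment belongs to are normally paired like this so that a
   driver can enforce one overall timeout across several shorter waits.
   xWaitForData() and prvReadByte() are hypothetical placeholders. */
#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

extern BaseType_t xWaitForData( TickType_t xTicksToWait );
extern uint8_t prvReadByte( void );

size_t xExampleReceive( uint8_t *pucBuffer, size_t xWanted, TickType_t xTicksToWait )
{
    TimeOut_t xTimeOut;
    size_t xReceived = 0;

    /* Capture the tick count at which the timeout period starts. */
    vTaskSetTimeOutState( &xTimeOut );

    while( xReceived < xWanted )
    {
        /* Adjusts xTicksToWait for time already spent and returns pdTRUE once
           the whole period has expired. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
        {
            break;
        }

        if( xWaitForData( xTicksToWait ) == pdTRUE )
        {
            pucBuffer[ xReceived ] = prvReadByte();
            xReceived++;
        }
    }

    return xReceived;
}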
{
TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
- taskENTER_CRITICAL(&xTickCountMutex);
+ taskENTER_CRITICAL(&xTaskQueueMutex);
/* If the mutex was given back by an interrupt while the queue was
locked then the mutex holder might now be NULL. */
if( pxMutexHolder != NULL )
mtCOVERAGE_TEST_MARKER();
}
- taskEXIT_CRITICAL(&xTickCountMutex);
+ taskEXIT_CRITICAL(&xTaskQueueMutex);
}
{
TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
BaseType_t xReturn = pdFALSE;
- taskENTER_CRITICAL(&xTickCountMutex);
+ taskENTER_CRITICAL(&xTaskQueueMutex);
if( pxMutexHolder != NULL )
{
mtCOVERAGE_TEST_MARKER();
}
- taskEXIT_CRITICAL(&xTickCountMutex);
+ taskEXIT_CRITICAL(&xTaskQueueMutex);
return xReturn;
}
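/* Illustrative sketch, not part of the patched file: the inherit/disinherit
   routines above are what give FreeRTOS mutexes priority inheritance.  A
   low-priority holder is temporarily raised to the priority of the highest
   task blocked on the mutex, and lowered again when it gives the mutex back.
   xExampleMutex and the task function are hypothetical. */
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"

static SemaphoreHandle_t xExampleMutex;   /* assumed created with xSemaphoreCreateMutex() */

void vExampleLowPriorityTask( void *pvParameters )
{
    ( void ) pvParameters;

    for( ;; )
    {
        if( xSemaphoreTake( xExampleMutex, portMAX_DELAY ) == pdTRUE )
        {
            /* While this task holds the mutex, a higher-priority task blocking
               on it boosts this task (xTaskPriorityInherit); the give below
               restores the original priority (xTaskPriorityDisinherit). */
            xSemaphoreGive( xExampleMutex );
        }
    }
}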