#if ( configUSE_TICKLESS_IDLE != 0 )
+ /* Return pdTRUE if any task with a priority above tskIDLE_PRIORITY is in a
+ ready list, pdFALSE otherwise. Used on SMP builds by prvGetExpectedIdleTime(),
+ where a ready task may be waiting to run on the other CPU. Must be called
+ with the ready lists protected from concurrent modification. */
+ static BaseType_t xHaveReadyTasks( void )
+ {
+ for( UBaseType_t uxPriority = tskIDLE_PRIORITY + 1; uxPriority < configMAX_PRIORITIES; ++uxPriority )
+ {
+ if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxPriority ] ) ) > 0 )
+ {
+ return pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ return pdFALSE;
+ }
+
+
static TickType_t prvGetExpectedIdleTime( void )
{
TickType_t xReturn;
{
xReturn = 0;
}
- else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
+#if portNUM_PROCESSORS > 1
+ /* This function is called from Idle task; in single core case this
+ * means that no higher priority tasks are ready to run, and we can
+ * enter sleep. In SMP case, there might be ready tasks waiting for
+ * the other CPU, so need to check all ready lists.
+ */
+ else if( xHaveReadyTasks() )
+ {
+ xReturn = 0;
+ }
+#endif // portNUM_PROCESSORS > 1
+ else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > portNUM_PROCESSORS )
{
/* There are other idle priority tasks in the ready state. If
time slicing is used then the very next tick interrupt must be
#endif /* configUSE_IDLE_HOOK */
{
/* Call the esp-idf hook system */
- extern void esp_vApplicationIdleHook( void );
esp_vApplicationIdleHook();
}
#if ( configUSE_TICKLESS_IDLE != 0 )
{
TickType_t xExpectedIdleTime;
+ BaseType_t xEnteredSleep = pdFALSE;
/* It is not desirable to suspend then resume the scheduler on
each iteration of the idle task. Therefore, a preliminary
if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
{
-// vTaskSuspendAll();
taskENTER_CRITICAL(&xTaskQueueMutex);
{
/* Now the scheduler is suspended, the expected idle
if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
{
traceLOW_POWER_IDLE_BEGIN();
- portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
+ xEnteredSleep = portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
traceLOW_POWER_IDLE_END();
}
else
}
}
taskEXIT_CRITICAL(&xTaskQueueMutex);
-// ( void ) xTaskResumeAll();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
+ /* It might be possible to enter tickless idle again, so skip
+ * the fallback sleep hook if tickless idle was successful
+ */
+ if ( !xEnteredSleep )
+ {
+ esp_vApplicationWaitiHook();
+ }
}
+ #else
+ esp_vApplicationWaitiHook();
#endif /* configUSE_TICKLESS_IDLE */
}
}
pxTaskStatusArray[ uxTask ].eCurrentState = eState;
pxTaskStatusArray[ uxTask ].uxCurrentPriority = pxNextTCB->uxPriority;
+ #if ( configTASKLIST_INCLUDE_COREID == 1 )
+ pxTaskStatusArray[ uxTask ].xCoreID = pxNextTCB->xCoreID;
+ #endif /* configTASKLIST_INCLUDE_COREID */
+
#if ( INCLUDE_vTaskSuspend == 1 )
{
/* If the task is in the suspended list then there is a chance
static void prvDeleteTCB( TCB_t *pxTCB )
{
+ /* This call is required for any port-specific cleanup related to the task.
+ It must be made before the vPortFree() calls below. */
+ portCLEAN_UP_TCB( pxTCB );
+
/* Free up the memory allocated by the scheduler for the task. It is up
to the task to free any memory allocated at the application level. */
#if ( configUSE_NEWLIB_REENTRANT == 1 )
/* Neither the stack nor the TCB were allocated dynamically, so
nothing needs to be freed. */
configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
- portCLEAN_UP_TCB( pxTCB );
mtCOVERAGE_TEST_MARKER();
}
}
pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
/* Write the rest of the string. */
+#if configTASKLIST_INCLUDE_COREID
+ sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\t%hd\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber, ( int ) pxTaskStatusArray[ x ].xCoreID );
+#else
sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber );
+#endif
pcWriteBuffer += strlen( pcWriteBuffer );
}