the ISR will cause it to switch _away_ from it. portYIELD_FROM_ISR will probably just schedule the task again, but we still need to verify that.
*/
static void esp_crosscore_isr(void *arg) {
- volatile uint32_t myReasonVal;
+ volatile uint32_t myReasonVal;
#if 0
- //A pointer to the correct reason array item is passed to this ISR.
- volatile uint32_t *myReason=arg;
+ //A pointer to the correct reason array item is passed to this ISR.
+ volatile uint32_t *myReason=arg;
#else
- //Does not work yet; the interrupt code needs work to understand two separate interrupt and argument
- //tables...
- volatile uint32_t *myReason=&reason[xPortGetCoreID()];
+ //Does not work yet; the interrupt code needs work to understand two separate interrupt and argument
+ //tables...
+ volatile uint32_t *myReason=&reason[xPortGetCoreID()];
#endif
- //Clear the interrupt first.
- if (xPortGetCoreID()==0) {
- WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
- } else {
- WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0);
- }
- //Grab the reason and clear it.
- portENTER_CRITICAL(&reasonSpinlock);
- myReasonVal=*myReason;
- *myReason=0;
- portEXIT_CRITICAL(&reasonSpinlock);
+ //Clear the interrupt first.
+ if (xPortGetCoreID()==0) {
+ WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
+ } else {
+ WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0);
+ }
+ //Grab the reason and clear it.
+ portENTER_CRITICAL(&reasonSpinlock);
+ myReasonVal=*myReason;
+ *myReason=0;
+ portEXIT_CRITICAL(&reasonSpinlock);
- //Check what we need to do.
- if (myReasonVal&REASON_YIELD) {
- portYIELD_FROM_ISR();
- }
-
- ets_printf("recv yield\n");
+ //Check what we need to do.
+ if (myReasonVal&REASON_YIELD) {
+ portYIELD_FROM_ISR();
+ }
}
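/* For orientation, a minimal sketch of the state this ISR works with. These
   are assumptions about definitions that live elsewhere in esp_crosscore.c
   and may differ in detail:

       #define REASON_YIELD (1<<0)   // bit in the per-core reason word

       static portMUX_TYPE reasonSpinlock = portMUX_INITIALIZER_UNLOCKED;
       static volatile uint32_t reason[ portNUM_PROCESSORS ];   // one word per core

   The sender sets a bit in reason[target] under the spinlock, then pokes the
   target core's FROM_CPU interrupt; the ISR above consumes and clears it. */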
//Initialize the crosscore interrupt on this core. Call this once
//on each active core.
void esp_crosscore_int_init() {
- portENTER_CRITICAL(&reasonSpinlock);
- ets_printf("init cpu %d\n", xPortGetCoreID());
- reason[xPortGetCoreID()]=0;
- portEXIT_CRITICAL(&reasonSpinlock);
- ESP_INTR_DISABLE(ETS_FROM_CPU_INUM);
- if (xPortGetCoreID()==0) {
- intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR0_SOURCE, ETS_FROM_CPU_INUM);
- } else {
- intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR1_SOURCE, ETS_FROM_CPU_INUM);
- }
- xt_set_interrupt_handler(ETS_FROM_CPU_INUM, esp_crosscore_isr, (void*)&reason[xPortGetCoreID()]);
- ESP_INTR_ENABLE(ETS_FROM_CPU_INUM);
+ portENTER_CRITICAL(&reasonSpinlock);
+ reason[xPortGetCoreID()]=0;
+ portEXIT_CRITICAL(&reasonSpinlock);
+ ESP_INTR_DISABLE(ETS_FROM_CPU_INUM);
+ if (xPortGetCoreID()==0) {
+ intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR0_SOURCE, ETS_FROM_CPU_INUM);
+ } else {
+ intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR1_SOURCE, ETS_FROM_CPU_INUM);
+ }
+ xt_set_interrupt_handler(ETS_FROM_CPU_INUM, esp_crosscore_isr, (void*)&reason[xPortGetCoreID()]);
+ ESP_INTR_ENABLE(ETS_FROM_CPU_INUM);
}
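/* Usage sketch (illustrative, not part of this patch): each active core calls
   the init exactly once during its own startup, before anything can send it a
   cross-core yield. start_cpu() below is a hypothetical per-core startup hook:

       void start_cpu(void)
       {
           esp_crosscore_int_init();   // route the FROM_CPU source and install the ISR on this core
       }
*/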
void esp_crosscore_int_send_yield(int coreId) {
- ets_printf("send yield\n");
- assert(coreId<portNUM_PROCESSORS);
- //Mark the reason we interrupt the other CPU
- portENTER_CRITICAL(&reasonSpinlock);
- reason[coreId]|=REASON_YIELD;
- portEXIT_CRITICAL(&reasonSpinlock);
- //Poke the other CPU.
- if (coreId==0) {
- WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, DPORT_CPU_INTR_FROM_CPU_0);
- } else {
- WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, DPORT_CPU_INTR_FROM_CPU_1);
- }
+ assert(coreId<portNUM_PROCESSORS);
+ //Mark the reason we interrupt the other CPU
+ portENTER_CRITICAL(&reasonSpinlock);
+ reason[coreId]|=REASON_YIELD;
+ portEXIT_CRITICAL(&reasonSpinlock);
+ //Poke the other CPU.
+ if (coreId==0) {
+ WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, DPORT_CPU_INTR_FROM_CPU_0);
+ } else {
+ WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, DPORT_CPU_INTR_FROM_CPU_1);
+ }
}
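/* Illustrative caller: when a task pinned to the other core becomes ready,
   the scheduler can kick that core immediately instead of waiting for its
   next tick interrupt. This mirrors what taskYIELD_OTHER_CORE() in tasks.c
   is expected to do:

       if( xCoreID != xPortGetCoreID() )
       {
           esp_crosscore_int_send_yield( xCoreID );   // other core re-evaluates its ready tasks
       }
*/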
PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count). */
PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
-PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
+PRIVILEGED_DATA static List_t xPendingReadyList[ portNUM_PROCESSORS ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
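/* Background on the two delayed lists: a wake time is computed as
   xTickCount + xTicksToDelay, which can wrap around the tick counter. A
   simplified sketch of the dispatch (the real logic lives in the tasks.c
   delay path and also records the wake time in the list item):

       TickType_t xTimeToWake = xTickCount + xTicksToDelay;
       if( xTimeToWake < xTickCount )
       {
           // Wrapped past zero: the wake time belongs to the next tick epoch.
           vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
       }
       else
       {
           vListInsert( pxDelayedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
       }

   The two lists swap roles each time xTickCount itself overflows. */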
#if ( INCLUDE_vTaskDelete == 1 )
* is possible that it is inaccurate because the other CPU just did a task switch, but in that case
* at most a superfluous interrupt is generated.
*/
-static void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
+void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
{
BaseType_t i;
if (xCoreID != tskNO_AFFINITY) {
{
/* Scheduler is running. If the created task is of a higher priority than an executing task
then it should run now.
- ToDo: This only works for the current core. If a task is scheduled on another processor,
- the other processor will keep running the task it's working on, and only switch to the newer
- task on a timer interrupt. */
- //No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
- if( xCoreID != xPortGetCoreID() ) {
- taskYIELD_OTHER_CORE(xCoreID, uxPriority);
- }
- else if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < uxPriority )
+ No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
+ */
+ if( tskCAN_RUN_HERE( xCoreID ) && pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < uxPriority )
{
taskYIELD_IF_USING_PREEMPTION();
}
+ else if( xCoreID != xPortGetCoreID() )
+ {
+ taskYIELD_OTHER_CORE(xCoreID, uxPriority);
+ }
else
{
mtCOVERAGE_TEST_MARKER();
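/* tskCAN_RUN_HERE() used above is assumed to be a small affinity check
   defined elsewhere in this port of tasks.c, roughly:

       #define tskCAN_RUN_HERE( xCID )  ( ( xCID ) == xPortGetCoreID() || ( xCID ) == tskNO_AFFINITY )

   i.e. "this task's core affinity allows it to run on the calling core". */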
if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) ) != pdFALSE )
{
/* Has the task already been resumed from within an ISR? */
- if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
+ if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ xPortGetCoreID() ], &( pxTCB->xEventListItem ) ) == pdFALSE )
{
/* Is it in the suspended list because it is in the Suspended
state, or because it is blocked with no timeout? */
#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
-/* ToDo: Make this multicore-compatible. */
BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
{
BaseType_t xYieldRequired = pdFALSE;
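/* Standard usage of this function (illustrative): the return value tells the
   ISR whether to request a context switch on exit:

       if( xTaskResumeFromISR( xTaskHandle ) == pdTRUE )
       {
           portYIELD_FROM_ISR();
       }
*/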
( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
prvAddTaskToReadyList( pxTCB );
- if ( pxTCB->xCoreID == xPortGetCoreID() )
+ if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
{
- taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
+ xYieldRequired = pdTRUE;
}
- else if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+ else if ( pxTCB->xCoreID != xPortGetCoreID() )
{
- xYieldRequired = pdTRUE;
+ taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
}
else
{
/* The delayed or ready lists cannot be accessed so the task
is held in the pending ready list until the scheduler is
unsuspended. */
- vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+ vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
}
}
else
{
/* Move any readied tasks from the pending list into the
appropriate ready list. */
- while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
+ while( listLIST_IS_EMPTY( &xPendingReadyList[ xPortGetCoreID() ] ) == pdFALSE )
{
- pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) );
+ pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[ xPortGetCoreID() ] ) );
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
prvAddTaskToReadyList( pxTCB );
xYieldPending[xPortGetCoreID()] = pdTRUE;
break;
}
- else if ( pxTCB->xCoreID != xPortGetCoreID() )
- {
- taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
- }
else
{
mtCOVERAGE_TEST_MARKER();
/* The delayed and ready lists cannot be accessed, so hold this task
pending until the scheduler is resumed. */
taskENTER_CRITICAL(&xTaskQueueMutex);
- vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
+ vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxUnblockedTCB->xEventListItem ) );
taskEXIT_CRITICAL(&xTaskQueueMutex);
}
if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
{
- /* We can schedule the awoken task on this CPU. */
- xYieldPending[xPortGetCoreID()] = pdTRUE;
+ /* Return true if the task removed from the event list has a higher
+ priority than the calling task. This allows the calling task to know if
+ it should force a context switch now. */
xReturn = pdTRUE;
+
+ /* Mark that a yield is pending in case the user is not using the
+ "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
+ xYieldPending[ xPortGetCoreID() ] = pdTRUE;
}
else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
{
if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
{
- /* We can schedule the awoken task on this CPU. */
- xYieldPending[xPortGetCoreID()] = pdTRUE;
+ /* Return true if the task removed from the event list has
+ a higher priority than the calling task. This allows
+ the calling task to know if it should force a context
+ switch now. */
xReturn = pdTRUE;
+
+ /* Mark that a yield is pending in case the user is not using the
+ "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
+ xYieldPending[ xPortGetCoreID() ] = pdTRUE;
}
else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
{
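/* The pdTRUE return value / per-core xYieldPending marking above feeds the
   standard FreeRTOS ISR pattern (illustrative, not part of this patch):

       BaseType_t xHigherPriorityTaskWoken = pdFALSE;
       xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
       if( xHigherPriorityTaskWoken == pdTRUE )
       {
           portYIELD_FROM_ISR();   // switch now rather than waiting for the tick
       }
*/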
eSleepModeStatus eReturn = eStandardSleep;
taskENTER_CRITICAL(&xTaskQueueMutex);
- if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
+ if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xPortGetCoreID() ] ) != 0 )
{
/* A task was made ready while the scheduler was suspended. */
eReturn = eAbortSleep;
vListInitialise( &xDelayedTaskList1 );
vListInitialise( &xDelayedTaskList2 );
- vListInitialise( &xPendingReadyList );
+ /* This function runs only once, when the first task is created, so the
+ pending ready list of every core must be initialised here, not just the
+ calling core's. */
+ vListInitialise( &xPendingReadyList[ 0 ] );
+ #if ( portNUM_PROCESSORS > 1 )
+ vListInitialise( &xPendingReadyList[ 1 ] );
+ #endif
#if ( INCLUDE_vTaskDelete == 1 )
{
{
/* The delayed and ready lists cannot be accessed, so hold
this task pending until the scheduler is resumed. */
- vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+ vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
}
if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
{
/* The delayed and ready lists cannot be accessed, so hold
this task pending until the scheduler is resumed. */
- vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+ vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
}
if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )