2 FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.
5 VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
7 This file is part of the FreeRTOS distribution.
9 FreeRTOS is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License (version 2) as published by the
11 Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
13 ***************************************************************************
14 >>! NOTE: The modification to the GPL is included to allow you to !<<
15 >>! distribute a combined work that includes FreeRTOS without being !<<
16 >>! obliged to provide the source code for proprietary components !<<
17 >>! outside of the FreeRTOS kernel. !<<
18 ***************************************************************************
20 FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
21 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
22 FOR A PARTICULAR PURPOSE. Full license text is available on the following
23 link: http://www.freertos.org/a00114.html
25 ***************************************************************************
27 * FreeRTOS provides completely free yet professionally developed, *
28 * robust, strictly quality controlled, supported, and cross *
29 * platform software that is more than just the market leader, it *
30 * is the industry's de facto standard. *
32 * Help yourself get started quickly while simultaneously helping *
33 * to support the FreeRTOS project by purchasing a FreeRTOS *
34 * tutorial book, reference manual, or both: *
35 * http://www.FreeRTOS.org/Documentation *
37 ***************************************************************************
39 http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
40 the FAQ page "My application does not run, what could be wrong?". Have you
41 defined configASSERT()?
43 http://www.FreeRTOS.org/support - In return for receiving this top quality
44 embedded software for free we request you assist our global community by
45 participating in the support forum.
47 http://www.FreeRTOS.org/training - Investing in training allows your team to
48 be as productive as possible as early as possible. Now you can receive
49 FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
50 Ltd, and the world's leading authority on the world's leading RTOS.
52 http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
53 including FreeRTOS+Trace - an indispensable productivity tool, a DOS
54 compatible FAT file system, and our tiny thread aware UDP/IP stack.
56 http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
57 Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
59 http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
60 Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
61 licenses offer ticketed support, indemnification and commercial middleware.
63 http://www.SafeRTOS.com - High Integrity Systems also provide a safety
64 engineered and independently SIL3 certified version for use in safety and
65 mission critical applications that require provable dependability.
70 /* Standard includes. */
74 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
75 all the API functions to use the MPU wrappers. That should only be done when
76 task.h is included from an application file. */
77 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
79 #include "rom/ets_sys.h"
80 #include "esp_newlib.h"
81 #include "esp_panic.h"
83 /* FreeRTOS includes. */
87 #include "StackMacros.h"
88 #include "portmacro.h"
91 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the
92 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
93 header files above, but not in this file, in order to generate the correct
94 privileged Vs unprivileged linkage and placement. */
95 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
97 /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
98 functions but without including stdio.h here. */
99 #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
100 /* At the bottom of this file are two optional functions that can be used
101 to generate human readable text from the raw data generated by the
102 uxTaskGetSystemState() function. Note the formatting functions are provided
103 for convenience only, and are NOT considered part of the kernel. */
105 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
107 /* Sanity check the configuration. */
108 #if configUSE_TICKLESS_IDLE != 0
109 #if INCLUDE_vTaskSuspend != 1
110 #error INCLUDE_vTaskSuspend must be set to 1 if configUSE_TICKLESS_IDLE is not set to 0
111 #endif /* INCLUDE_vTaskSuspend */
112 #endif /* configUSE_TICKLESS_IDLE */
115 * Defines the size, in bytes, of the stack allocated to the idle task.
117 #define tskIDLE_STACK_SIZE configIDLE_TASK_STACK_SIZE
#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define taskYIELD_IF_USING_PREEMPTION()
#else
	/* Preemptive scheduler: request a yield from within an API function. */
	#define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/* Value that can be assigned to the eNotifyState member of the TCB. */
typedef enum
{
	eNotWaitingNotification = 0,	/* Task is not waiting for, and has not received, a notification. */
	eWaitingNotification,			/* Task is blocked waiting for a notification to arrive. */
	eNotified						/* A notification has been sent to the task but not yet consumed. */
} eNotifyValue;
138 /* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using
139 dynamically allocated RAM, in which case when any task is deleted it is known
140 that both the task's stack and TCB need to be freed. Sometimes the
141 FreeRTOSConfig.h settings only allow a task to be created using statically
142 allocated RAM, in which case when any task is deleted it is known that neither
143 the task's stack or TCB should be freed. Sometimes the FreeRTOSConfig.h
144 settings allow a task to be created using either statically or dynamically
145 allocated RAM, in which case a member of the TCB is used to record whether the
146 stack and/or TCB were allocated statically or dynamically, so when a task is
147 deleted the RAM that was allocated dynamically is freed again and no attempt is
148 made to free the RAM that was allocated statically.
149 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a
150 task to be created using either statically or dynamically allocated RAM. Note
151 that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with
152 a statically allocated stack and a dynamically allocated TCB. */
153 #define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE ( ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) || ( portUSING_MPU_WRAPPERS == 1 ) )
154 #define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
155 #define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
156 #define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
159 * Task control block. A task control block (TCB) is allocated for each task,
160 * and stores task state information, including a pointer to the task's context
161 * (the task's run time environment, including register values)
163 typedef struct tskTaskControlBlock
165 volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
167 #if ( portUSING_MPU_WRAPPERS == 1 )
168 xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
171 ListItem_t xGenericListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
172 ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
173 UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
174 StackType_t *pxStack; /*< Points to the start of the stack. */
175 char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
176 BaseType_t xCoreID; /*< Core this task is pinned to */
177 /* If this moves around (other than pcTaskName size changes), please change the define in xtensa_vectors.S as well. */
178 #if ( portSTACK_GROWTH > 0 || configENABLE_TASK_SNAPSHOT == 1 )
179 StackType_t *pxEndOfStack; /*< Points to the end of the stack on architectures where the stack grows up from low memory. */
182 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
183 UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
184 uint32_t uxOldInterruptState; /*< Interrupt state before the outer taskEnterCritical was called */
187 #if ( configUSE_TRACE_FACILITY == 1 )
188 UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
189 UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
192 #if ( configUSE_MUTEXES == 1 )
193 UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
194 UBaseType_t uxMutexesHeld;
197 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
198 TaskHookFunction_t pxTaskTag;
201 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
202 void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
203 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
204 TlsDeleteCallbackFunction_t pvThreadLocalStoragePointersDelCallback[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
208 #if ( configGENERATE_RUN_TIME_STATS == 1 )
209 uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
212 #if ( configUSE_NEWLIB_REENTRANT == 1 )
213 /* Allocate a Newlib reent structure that is specific to this task.
214 Note Newlib support has been included by popular demand, but is not
215 used by the FreeRTOS maintainers themselves. FreeRTOS is not
216 responsible for resulting newlib operation. User must be familiar with
217 newlib and must provide system-wide implementations of the necessary
218 stubs. Be warned that (at the time of writing) the current newlib design
219 implements a system-wide malloc() that must be provided with locks. */
220 struct _reent xNewLib_reent;
223 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
224 volatile uint32_t ulNotifiedValue;
225 volatile eNotifyValue eNotifyState;
228 /* See the comments above the definition of
229 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
230 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
231 uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
236 /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
237 below to enable the use of older kernel aware debuggers. */
238 typedef tskTCB TCB_t;
240 #if __GNUC_PREREQ(4, 6)
241 _Static_assert(sizeof(StaticTask_t) == sizeof(TCB_t), "StaticTask_t != TCB_t");
245 * Some kernel aware debuggers require the data the debugger needs access to to
246 * be global, rather than file scope.
248 #ifdef portREMOVE_STATIC_QUALIFIER
252 /*lint -e956 A manual analysis and inspection has been used to determine which
253 static variables must be declared volatile. */
255 PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB[ portNUM_PROCESSORS ] = { NULL };
257 /* Lists for ready and blocked tasks. --------------------*/
258 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
259 PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
260 PRIVILEGED_DATA static List_t xDelayedTaskList2;						/*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count). */
261 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
262 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
263 PRIVILEGED_DATA static List_t xPendingReadyList[ portNUM_PROCESSORS ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
265 #if ( INCLUDE_vTaskDelete == 1 )
267 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. Protected by xTaskQueueMutex.*/
268 PRIVILEGED_DATA static volatile UBaseType_t uxTasksDeleted = ( UBaseType_t ) 0U;
272 #if ( INCLUDE_vTaskSuspend == 1 )
274 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
278 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
280 PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle[portNUM_PROCESSORS] = {NULL}; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
284 /* Other file private variables. --------------------------------*/
285 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
286 PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) 0U;
287 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
288 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
289 PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
290 PRIVILEGED_DATA static volatile BaseType_t xYieldPending[portNUM_PROCESSORS] = {pdFALSE};
291 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
292 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
293 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = portMAX_DELAY;
295 /* Context switches are held pending while the scheduler is suspended. Also,
296 interrupts must not manipulate the xGenericListItem of a TCB, or any of the
297 lists the xGenericListItem can be referenced from, if the scheduler is suspended.
298 If an interrupt needs to unblock a task while the scheduler is suspended then it
299 moves the task's event list item into the xPendingReadyList, ready for the
300 kernel to move the task from the pending ready list into the real ready list
301 when the scheduler is unsuspended. The pending ready list itself can only be
302 accessed from a critical section. */
303 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
305 /* For now, we use just one mux for all the critical sections. ToDo: give everything a bit more granularity;
306 that could improve performance by not needlessly spinning in spinlocks for unrelated resources. */
307 PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
308 PRIVILEGED_DATA static portMUX_TYPE xTickCountMutex = portMUX_INITIALIZER_UNLOCKED;
310 #if ( configGENERATE_RUN_TIME_STATS == 1 )
312 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime[portNUM_PROCESSORS] = {0U}; /*< Holds the value of a timer/counter the last time a task was switched in on a particular core. */
313 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
318 // per-CPU flags indicating that we are doing context switch, it is used by apptrace and sysview modules
319 // in order to avoid calls of vPortYield from traceTASK_SWITCHED_IN/OUT when waiting
320 // for locks to be free or for host to read full trace buffer
321 PRIVILEGED_DATA static volatile BaseType_t xSwitchingContext[ portNUM_PROCESSORS ] = { pdFALSE };
325 /* Debugging and trace facilities private variables and macros. ------------*/
328 * The value used to fill the stack of a task when the task is created. This
329 * is used purely for checking the high water mark for tasks.
331 #define tskSTACK_FILL_BYTE ( 0xa5U )
334 * Macros used by vListTask to indicate which state a task is in.
336 #define tskBLOCKED_CHAR ( 'B' )
337 #define tskREADY_CHAR ( 'R' )
338 #define tskDELETED_CHAR ( 'D' )
339 #define tskSUSPENDED_CHAR ( 'S' )
341 /*-----------------------------------------------------------*/
344 #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
346 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
347 performed in a generic way that is not optimised to any particular
348 microcontroller architecture. */
350 /* uxTopReadyPriority holds the priority of the highest priority ready
352 #define taskRECORD_READY_PRIORITY( uxPriority ) \
354 if( ( uxPriority ) > uxTopReadyPriority ) \
356 uxTopReadyPriority = ( uxPriority ); \
358 } /* taskRECORD_READY_PRIORITY */
360 /*-----------------------------------------------------------*/
362 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
364 /* Find the highest priority queue that contains ready tasks. */ \
365 while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopReadyPriority ] ) ) ) \
367 configASSERT( uxTopReadyPriority ); \
368 --uxTopReadyPriority; \
371 /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
372 the same priority get an equal share of the processor time. */ \
373 listGET_OWNER_OF_NEXT_ENTRY( xTaskGetCurrentTaskHandle(), &( pxReadyTasksLists[ uxTopReadyPriority ] ) ); \
374 } /* taskSELECT_HIGHEST_PRIORITY_TASK */
376 /*-----------------------------------------------------------*/
378 /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
379 they are only required when a port optimised method of task selection is
381 #define taskRESET_READY_PRIORITY( uxPriority )
382 #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
384 #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
386 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
387 performed in a way that is tailored to the particular microcontroller
388 architecture being used. */
390 /* A port optimised version is provided. Call the port defined macros. */
391 #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )
393 /*-----------------------------------------------------------*/
395 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
397 UBaseType_t uxTopPriority; \
399 /* Find the highest priority queue that contains ready tasks. */ \
400 portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
401 configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
402 listGET_OWNER_OF_NEXT_ENTRY( xTaskGetCurrentTaskHandle(), &( pxReadyTasksLists[ uxTopPriority ] ) ); \
403 } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
405 /*-----------------------------------------------------------*/
407 /* A port optimised version is provided, call it only if the TCB being reset
408 is being referenced from a ready list. If it is referenced from a delayed
409 or suspended list then it won't be in a ready list. */
410 #define taskRESET_READY_PRIORITY( uxPriority ) \
412 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
414 portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
418 #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
420 /*-----------------------------------------------------------*/
422 /* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
424 #define taskSWITCH_DELAYED_LISTS() \
428 /* The delayed tasks list should be empty when the lists are switched. */ \
429 configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \
431 pxTemp = pxDelayedTaskList; \
432 pxDelayedTaskList = pxOverflowDelayedTaskList; \
433 pxOverflowDelayedTaskList = pxTemp; \
435 prvResetNextTaskUnblockTime(); \
438 /*-----------------------------------------------------------*/
441 * Place the task represented by pxTCB into the appropriate ready list for
442 * the task. It is inserted at the end of the list.
444 #define prvAddTaskToReadyList( pxTCB ) \
445 traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
446 taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
447 vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xGenericListItem ) )
449 * Place the task represented by pxTCB which has been in a ready list before
450 * into the appropriate ready list for the task.
451 * It is inserted at the end of the list.
453 #define prvReaddTaskToReadyList( pxTCB ) \
454 traceREADDED_TASK_TO_READY_STATE( pxTCB ); \
455 taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
456 vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xGenericListItem ) )
457 /*-----------------------------------------------------------*/
459 #define tskCAN_RUN_HERE( cpuid ) ( cpuid==xPortGetCoreID() || cpuid==tskNO_AFFINITY )
462 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
463 * where NULL is used to indicate that the handle of the currently executing
464 * task should be used in place of the parameter. This macro simply checks to
465 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
467 /* ToDo: See if this still works for multicore. */
468 #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? ( TCB_t * ) xTaskGetCurrentTaskHandle() : ( TCB_t * ) ( pxHandle ) )
470 /* The item value of the event list item is normally used to hold the priority
471 of the task to which it belongs (coded to allow it to be held in reverse
472 priority order). However, it is occasionally borrowed for other purposes. It
473 is important its value is not updated due to a task priority change while it is
474 being used for another purpose. The following bit definition is used to inform
475 the scheduler that the value should not be changed - in which case it is the
476 responsibility of whichever module is using the value to ensure it gets set back
477 to its original value when it is released. */
#if configUSE_16_BIT_TICKS == 1
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x8000U
#else
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x80000000UL
#endif
484 /* Callback function prototypes. --------------------------*/
485 #if configCHECK_FOR_STACK_OVERFLOW > 0
486 extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
489 #if configUSE_TICK_HOOK > 0
490 extern void vApplicationTickHook( void );
492 extern void esp_vApplicationTickHook( void );
494 #if portFIRST_TASK_HOOK
495 extern void vPortFirstTaskHook(TaskFunction_t taskfn);
499 /* File private functions. --------------------------------*/
502 * Utility task that simply returns pdTRUE if the task referenced by xTask is
503 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
504 * is in any other state.
506 * Caller must hold xTaskQueueMutex before calling this function.
508 #if ( INCLUDE_vTaskSuspend == 1 )
509 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
510 #endif /* INCLUDE_vTaskSuspend */
513 * Utility to ready all the lists used by the scheduler. This is called
514 * automatically upon the creation of the first task.
516 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
519 * The idle task, which as all tasks is implemented as a never ending loop.
520 * The idle task is automatically created and added to the ready lists upon
521 * creation of the first user task.
523 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
524 * language extensions. The equivalent prototype for this function is:
526 * void prvIdleTask( void *pvParameters );
529 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
532 * Utility to free all memory allocated by the scheduler to hold a TCB,
533 * including the stack pointed to by the TCB.
535 * This does not free memory allocated by the task itself (i.e. memory
536 * allocated by calls to pvPortMalloc from within the tasks application code).
538 #if ( INCLUDE_vTaskDelete == 1 )
540 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
544 //Function to call the Thread Local Storage Pointer Deletion Callbacks. Will be
545 //called during task deletion before prvDeleteTCB is called.
546 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
547 static void prvDeleteTLS( TCB_t *pxTCB );
551 * Used only by the idle task. This checks to see if anything has been placed
552 * in the list of tasks waiting to be deleted. If so the task is cleaned up
553 * and its TCB deleted.
555 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
558 * The currently executing task is entering the Blocked state. Add the task to
559 * either the current or the overflow delayed task list.
561 static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID, const TickType_t xTimeToWake ) PRIVILEGED_FUNCTION;
564 * Fills a TaskStatus_t structure with information on each task that is
565 * referenced from the pxList list (which may be a ready list, a delayed list,
566 * a suspended list, etc.).
568 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
569 * NORMAL APPLICATION CODE.
571 #if ( configUSE_TRACE_FACILITY == 1 )
573 static UBaseType_t prvListTaskWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
578 * When a task is created, the stack of the task is filled with a known value.
579 * This function determines the 'high water mark' of the task stack by
580 * determining how much of the stack remains at the original preset value.
582 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
584 static uint32_t prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
589 * Return the amount of time, in ticks, that will pass before the kernel will
590 * next move a task from the Blocked state to the Running state.
592 * This conditional compilation should use inequality to 0, not equality to 1.
593 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
594 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
595 * set to a value other than 1.
597 #if ( configUSE_TICKLESS_IDLE != 0 )
599 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
604 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
605 * will exit the Blocked state.
607 static void prvResetNextTaskUnblockTime( void );
609 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
612 * Helper function used to pad task names with spaces when printing out
613 * human readable tables of task information.
615 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName );
620 * Called after a Task_t structure has been allocated either statically or
621 * dynamically to fill in the structure's members.
623 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
624 const char * const pcName,
625 const uint32_t ulStackDepth,
626 void * const pvParameters,
627 UBaseType_t uxPriority,
628 TaskHandle_t * const pxCreatedTask,
630 const MemoryRegion_t * const xRegions, const BaseType_t xCoreID) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
633 * Called after a new task has been created and initialised to place the task
634 * under the control of the scheduler.
636 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, const BaseType_t xCoreID ) PRIVILEGED_FUNCTION;
640 /*-----------------------------------------------------------*/
643 * This routine tries to send an interrupt to another core if needed to make it execute a task
644 * of higher priority. We try to figure out if needed first by inspecting the pxTCB of the
645 * other CPU first. Specifically for Xtensa, we can do this because pxTCB is an atomic pointer. It
646 * is possible that it is inaccurate because the other CPU just did a task switch, but in that case
647 * at most a superfluous interrupt is generated.
649 void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
651 TCB_t *curTCB = pxCurrentTCB[xCoreID];
654 if (xCoreID != tskNO_AFFINITY) {
655 if ( curTCB->uxPriority < uxPriority ) {
656 vPortYieldOtherCore( xCoreID );
661 /* The task has no affinity. See if we can find a CPU to put it on.*/
662 for (i=0; i<portNUM_PROCESSORS; i++) {
663 if (i != xPortGetCoreID() && pxCurrentTCB[ i ]->uxPriority < uxPriority)
665 vPortYieldOtherCore( i );
#if( configSUPPORT_STATIC_ALLOCATION == 1 )

	/*
	 * Create a task, optionally pinned to xCoreID (or tskNO_AFFINITY), using
	 * caller supplied memory for both the TCB and the stack.
	 *
	 * Returns the handle of the created task, or NULL if either pxTaskBuffer
	 * or puxStackBuffer is NULL.
	 */
	TaskHandle_t xTaskCreateStaticPinnedToCore(	TaskFunction_t pxTaskCode,
												const char * const pcName,
												const uint32_t ulStackDepth,
												void * const pvParameters,
												UBaseType_t uxPriority,
												StackType_t * const puxStackBuffer,
												StaticTask_t * const pxTaskBuffer,
												const BaseType_t xCoreID )
	{
	TCB_t *pxNewTCB;
	TaskHandle_t xReturn;

		configASSERT( portVALID_TCB_MEM(pxTaskBuffer) );
		configASSERT( portVALID_STACK_MEM(puxStackBuffer) );
		configASSERT( (xCoreID>=0 && xCoreID<portNUM_PROCESSORS) || (xCoreID==tskNO_AFFINITY) );

		if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
		{
			/* The memory used for the task's TCB and stack are passed into this
			function - use them. */
			pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
			pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;

			#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
			{
				/* Tasks can be created statically or dynamically, so note this
				task was created statically in case the task is later deleted. */
				pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
			}
			#endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

			prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID );
			prvAddNewTaskToReadyList( pxNewTCB, pxTaskCode, xCoreID );
		}
		else
		{
			/* Without both caller supplied buffers the task cannot be created;
			make the failure explicit rather than returning an indeterminate
			handle. */
			xReturn = NULL;
		}

		return xReturn;
	}

#endif /* configSUPPORT_STATIC_ALLOCATION */
717 /*-----------------------------------------------------------*/
#if( portUSING_MPU_WRAPPERS == 1 )

	/*
	 * Create an MPU restricted task from a TaskParameters_t definition.  The
	 * stack is caller supplied (must not be NULL); the TCB is allocated
	 * dynamically here.
	 *
	 * Returns pdPASS on success, or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if
	 * the stack pointer is NULL or the TCB allocation fails.
	 */
	BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
	{
	TCB_t *pxNewTCB;
	BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

		configASSERT( pxTaskDefinition->puxStackBuffer );

		if( pxTaskDefinition->puxStackBuffer != NULL )
		{
			/* Allocate space for the TCB.  Where the memory comes from depends
			on the implementation of the port malloc function and whether or
			not static allocation is being used. */
			pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );

			if( pxNewTCB != NULL )
			{
				/* Store the stack location in the TCB. */
				pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

				/* Tasks can be created statically or dynamically, so note
				this task had a statically allocated stack in case it is
				later deleted.  The TCB was allocated dynamically. */
				pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;

				prvInitialiseNewTask(	pxTaskDefinition->pvTaskCode,
										pxTaskDefinition->pcName,
										pxTaskDefinition->usStackDepth,
										pxTaskDefinition->pvParameters,
										pxTaskDefinition->uxPriority,
										pxCreatedTask, pxNewTCB,
										pxTaskDefinition->xRegions,
										tskNO_AFFINITY );

				prvAddNewTaskToReadyList( pxNewTCB, pxTaskDefinition->pvTaskCode, tskNO_AFFINITY );

				/* Report success - without this the function would always
				return the error code even when the task was created. */
				xReturn = pdPASS;
			}
		}

		return xReturn;
	}

#endif /* portUSING_MPU_WRAPPERS */
763 /*-----------------------------------------------------------*/
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

/*
 * Create a task with dynamically allocated TCB and stack, optionally
 * pinned to a core (xCoreID, or tskNO_AFFINITY).  Both allocations must
 * succeed before the task is initialised and added to the ready list;
 * on partial failure the successful allocation is freed again and the
 * function reports errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY.
 */
BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pxTaskCode,
const char * const pcName,
const uint32_t usStackDepth,
void * const pvParameters,
UBaseType_t uxPriority,
TaskHandle_t * const pxCreatedTask,
const BaseType_t xCoreID )
/* If the stack grows down then allocate the stack then the TCB so the stack
does not grow into the TCB. Likewise if the stack grows up then allocate
the TCB then the stack. */
#if( portSTACK_GROWTH > 0 )
/* Allocate space for the TCB. Where the memory comes from depends on
the implementation of the port malloc function and whether or not static
allocation is being used. */
pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );
if( pxNewTCB != NULL )
/* Allocate space for the stack used by the task being created.
The base of the stack memory stored in the TCB so the task can
be deleted later if required. */
pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
if( pxNewTCB->pxStack == NULL )
/* Could not allocate the stack. Delete the allocated TCB. */
vPortFree( pxNewTCB );
#else /* portSTACK_GROWTH */
StackType_t *pxStack;
/* Allocate space for the stack used by the task being created. */
pxStack = ( StackType_t * ) pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
if( pxStack != NULL )
/* Allocate space for the TCB. */
pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) ); /*lint !e961 MISRA exception as the casts are only redundant for some paths. */
if( pxNewTCB != NULL )
/* Store the stack location in the TCB. */
pxNewTCB->pxStack = pxStack;
/* The stack cannot be used as the TCB was not created. Free
vPortFree( pxStack );
#endif /* portSTACK_GROWTH */
if( pxNewTCB != NULL )
#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
/* Tasks can be created statically or dynamically, so note this
task was created dynamically in case it is later deleted. */
pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
#endif /* configSUPPORT_STATIC_ALLOCATION */
prvInitialiseNewTask( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL, xCoreID );
prvAddNewTaskToReadyList( pxNewTCB, pxTaskCode, xCoreID );
/* Either the TCB or the stack could not be allocated. */
xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
/*
 * Initialise a freshly allocated TCB and its stack: optionally fill the
 * stack with a known pattern, compute the aligned top-of-stack, copy the
 * task name, clamp and store the priority, initialise the list items,
 * per-feature TCB fields (mutexes, TLS pointers, notifications, Newlib
 * reent, MPU settings), seed the stack frame via pxPortInitialiseStack(),
 * and finally pass the handle out through pxCreatedTask if non-NULL.
 * NOTE(review): pxNewTCB is a further parameter whose declaration line is
 * not visible in this extract; the loop index `x` is likewise declared on
 * a line not shown here.
 */
static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
const char * const pcName,
const uint32_t ulStackDepth,
void * const pvParameters,
UBaseType_t uxPriority,
TaskHandle_t * const pxCreatedTask,
const MemoryRegion_t * const xRegions, const BaseType_t xCoreID ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
StackType_t *pxTopOfStack;
#if( portUSING_MPU_WRAPPERS == 1 )
/* Should the task be created in privileged mode? */
BaseType_t xRunPrivileged;
if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
xRunPrivileged = pdTRUE;
xRunPrivileged = pdFALSE;
/* Strip the privilege flag so only the numeric priority remains. */
uxPriority &= ~portPRIVILEGE_BIT;
#endif /* portUSING_MPU_WRAPPERS == 1 */
/* Avoid dependency on memset() if it is not required. */
#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
/* Fill the stack with a known value to assist debugging. */
( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
#endif /* ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) ) */
/* Calculate the top of stack address. This depends on whether the stack
grows from high memory to low (as per the 80x86) or vice versa.
portSTACK_GROWTH is used to make the result positive or negative as required
#if( portSTACK_GROWTH < 0 )
pxTopOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
/* Round DOWN to the required byte alignment. */
pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. */
/* Check the alignment of the calculated top of stack is correct. */
configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
#if ( configENABLE_TASK_SNAPSHOT == 1 )
/* need stack end for core dumps */
pxNewTCB->pxEndOfStack = pxTopOfStack;
#else /* portSTACK_GROWTH */
pxTopOfStack = pxNewTCB->pxStack;
/* Check the alignment of the stack buffer is correct. */
configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
/* The other extreme of the stack space is required if stack checking is
pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
#endif /* portSTACK_GROWTH */
/* Store the task name in the TCB. */
for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
pxNewTCB->pcTaskName[ x ] = pcName[ x ];
/* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
configMAX_TASK_NAME_LEN characters just in case the memory after the
string is not accessible (extremely unlikely). */
if( pcName[ x ] == 0x00 )
mtCOVERAGE_TEST_MARKER();
/* Ensure the name string is terminated in the case that the string length
was greater or equal to configMAX_TASK_NAME_LEN. */
pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
/* This is used as an array index so must ensure it's not too large. First
remove the privilege bit if one is present. */
if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
mtCOVERAGE_TEST_MARKER();
pxNewTCB->uxPriority = uxPriority;
/* Remember the requested core affinity (SMP extension). */
pxNewTCB->xCoreID = xCoreID;
#if ( configUSE_MUTEXES == 1 )
/* Base priority is the priority before any mutex inheritance. */
pxNewTCB->uxBasePriority = uxPriority;
pxNewTCB->uxMutexesHeld = 0;
#endif /* configUSE_MUTEXES */
vListInitialiseItem( &( pxNewTCB->xGenericListItem ) );
vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
/* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
back to the containing TCB from a generic item in a list. */
listSET_LIST_ITEM_OWNER( &( pxNewTCB->xGenericListItem ), pxNewTCB );
/* Event lists are always in priority order. */
listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
#if ( portCRITICAL_NESTING_IN_TCB == 1 )
pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
#endif /* portCRITICAL_NESTING_IN_TCB */
#if ( configUSE_APPLICATION_TASK_TAG == 1 )
pxNewTCB->pxTaskTag = NULL;
#endif /* configUSE_APPLICATION_TASK_TAG */
#if ( configGENERATE_RUN_TIME_STATS == 1 )
pxNewTCB->ulRunTimeCounter = 0UL;
#endif /* configGENERATE_RUN_TIME_STATS */
#if ( portUSING_MPU_WRAPPERS == 1 )
vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
/* Avoid compiler warning about unreferenced parameter. */
#if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
/* Clear every thread-local-storage slot (and its optional
deletion callback) so stale pointers are never observed. */
for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
#if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1)
pxNewTCB->pvThreadLocalStoragePointersDelCallback[ x ] = NULL;
#if ( configUSE_TASK_NOTIFICATIONS == 1 )
pxNewTCB->ulNotifiedValue = 0;
pxNewTCB->eNotifyState = eNotWaitingNotification;
#if ( configUSE_NEWLIB_REENTRANT == 1 )
/* Initialise this task's Newlib reent structure. */
esp_reent_init(&pxNewTCB->xNewLib_reent);
#if( INCLUDE_xTaskAbortDelay == 1 )
pxNewTCB->ucDelayAborted = pdFALSE;
/* Initialize the TCB stack to look as if the task was already running,
but had been interrupted by the scheduler. The return address is set
to the start of the task function. Once the stack has been initialised
the top of stack variable is updated. */
#if( portUSING_MPU_WRAPPERS == 1 )
pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
#else /* portUSING_MPU_WRAPPERS */
pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
#endif /* portUSING_MPU_WRAPPERS */
if( ( void * ) pxCreatedTask != NULL )
/* Pass the handle out in an anonymous way. The handle can be used to
change the created task's priority, delete the created task, etc.*/
*pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
mtCOVERAGE_TEST_MARKER();
/*-----------------------------------------------------------*/
/*
 * SMP helper: choose a core for the new task (resolving tskNO_AFFINITY),
 * insert it into the ready list, and trigger a yield on whichever core
 * should now run it.  Called with the task fully initialised; takes
 * xTaskQueueMutex internally.
 * NOTE(review): the affinity-resolution code below reads pxCurrentTCB[1]
 * and therefore implicitly assumes at most two cores — consistent with
 * the two-element pxCurrentTCB usage elsewhere in this file.
 */
static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, BaseType_t xCoreID )
TCB_t *curTCB, *tcb0, *tcb1;
/* Assure that xCoreID is valid or we'll have an out-of-bounds on pxCurrentTCB
You will assert here if e.g. you only have one CPU enabled in menuconfig and
are trying to start a task on core 1. */
configASSERT( xCoreID == tskNO_AFFINITY || xCoreID < portNUM_PROCESSORS);
/* Ensure interrupts don't access the task lists while the lists are being
taskENTER_CRITICAL(&xTaskQueueMutex);
uxCurrentNumberOfTasks++;
// Determine which core this task starts on
if ( xCoreID == tskNO_AFFINITY )
if ( portNUM_PROCESSORS == 1 )
// if the task has no affinity, put it on either core if nothing is currently scheduled there. Failing that,
// put it on the core where it will preempt the lowest priority running task. If neither of these are true,
// queue it on the currently running core.
tcb0 = pxCurrentTCB[0];
tcb1 = pxCurrentTCB[1];
else if ( tcb1 == NULL )
else if ( tcb0->uxPriority < pxNewTCB->uxPriority && tcb0->uxPriority < tcb1->uxPriority )
else if ( tcb1->uxPriority < pxNewTCB->uxPriority )
xCoreID = xPortGetCoreID(); // Both CPU have higher priority tasks running on them, so this won't run yet
// If nothing is running on this core, put the new task there now
if( pxCurrentTCB[ xCoreID ] == NULL )
/* There are no other tasks, or all the other tasks are in
the suspended state - make this the current task. */
pxCurrentTCB[ xCoreID ] = pxNewTCB;
if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
#if portFIRST_TASK_HOOK
/* The first-task hook only fires from core 0. */
if ( xPortGetCoreID() == 0 ) {
vPortFirstTaskHook(pxTaskCode);
#endif /* configFIRST_TASK_HOOK */
/* This is the first task to be created so do the preliminary
initialisation required. We will not recover if this call
fails, but we will report the failure. */
prvInitialiseTaskLists();
mtCOVERAGE_TEST_MARKER();
/* If the scheduler is not already running, make this task the
current task if it is the highest priority task to be created
if( xSchedulerRunning == pdFALSE )
/* Scheduler isn't running yet. We need to determine on which CPU to run this task.
Schedule now if either nothing is scheduled yet or we can replace a task of lower prio. */
if ( pxCurrentTCB[xCoreID] == NULL || pxCurrentTCB[xCoreID]->uxPriority <= pxNewTCB->uxPriority )
pxCurrentTCB[xCoreID] = pxNewTCB;
mtCOVERAGE_TEST_MARKER();
#if ( configUSE_TRACE_FACILITY == 1 )
/* Add a counter into the TCB for tracing only. */
pxNewTCB->uxTCBNumber = uxTaskNumber;
#endif /* configUSE_TRACE_FACILITY */
traceTASK_CREATE( pxNewTCB );
prvAddTaskToReadyList( pxNewTCB );
portSETUP_TCB( pxNewTCB );
taskEXIT_CRITICAL(&xTaskQueueMutex);
if( xSchedulerRunning != pdFALSE )
taskENTER_CRITICAL(&xTaskQueueMutex);
curTCB = pxCurrentTCB[ xCoreID ];
/* Scheduler is running. If the created task is of a higher priority than an executing task
then it should run now.
if( curTCB == NULL || curTCB->uxPriority < pxNewTCB->uxPriority )
if( xCoreID == xPortGetCoreID() )
taskYIELD_IF_USING_PREEMPTION();
/* Target core differs from the calling core: ask it to yield. */
taskYIELD_OTHER_CORE(xCoreID, pxNewTCB->uxPriority);
mtCOVERAGE_TEST_MARKER();
taskEXIT_CRITICAL(&xTaskQueueMutex);
mtCOVERAGE_TEST_MARKER();
/*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskDelete == 1 )

/*
 * Delete a task (NULL deletes the calling task).  If the target is
 * currently running on either core, or is pinned to the other core, its
 * memory is handed to the Idle task via xTasksWaitingTermination;
 * otherwise TLS deletion callbacks run and the TCB/stack are freed
 * immediately, outside the critical section.
 */
void vTaskDelete( TaskHandle_t xTaskToDelete )
//The following vTaskDelete() is backported from FreeRTOS v9.0.0 and modified for SMP.
//v9.0.0 vTaskDelete() will immediately free task memory if the task being deleted is
//NOT currently running and not pinned to the other core. Otherwise, freeing of task memory
//will still be delegated to the Idle Task.
int core = xPortGetCoreID(); //Current core
UBaseType_t free_now; //Flag to indicate if task memory can be freed immediately
taskENTER_CRITICAL(&xTaskQueueMutex);
/* If null is passed in here then it is the calling task that is
pxTCB = prvGetTCBFromHandle( xTaskToDelete );
/* Remove task from the ready list. */
if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
taskRESET_READY_PRIORITY( pxTCB->uxPriority );
mtCOVERAGE_TEST_MARKER();
/* Is the task waiting on an event also? */
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
mtCOVERAGE_TEST_MARKER();
/* Increment the uxTaskNumber also so kernel aware debuggers can
detect that the task lists need re-generating. This is done before
portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
/* NOTE(review): the `!core` expressions below only distinguish cores 0
and 1, so this test — like the rest of this file — assumes at most two
processors. */
//If task to be deleted is currently running on either core or is pinned to the other core. Let Idle free memory
if( pxTCB == pxCurrentTCB[ core ] ||
(portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !core ]) ||
(portNUM_PROCESSORS > 1 && pxTCB->xCoreID == (!core)) )
/* Deleting a currently running task. This cannot complete
within the task itself, as a context switch to another task is
required. Place the task in the termination list. The idle task
will check the termination list and free up any memory allocated
by the scheduler for the TCB and stack of the deleted task. */
vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xGenericListItem ) );
/* Increment the ucTasksDeleted variable so the idle task knows
there is a task that has been deleted and that it should therefore
check the xTasksWaitingTermination list. */
/* The pre-delete hook is primarily for the Windows simulator,
in which Windows specific clean up operations are performed,
after which it is not possible to yield away from this task -
hence xYieldPending is used to latch that a context switch is
NOTE(review): this call passes &xYieldPending while the second
invocation further down passes &xYieldPending[xPortGetCoreID()] —
the two forms are inconsistent; confirm which one the port's hook
macro expects (on ESP32 ports the hook is typically a no-op). */
portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
free_now = pdFALSE; //Let Idle Task free task memory
else //Task is not currently running and not pinned to the other core
--uxCurrentNumberOfTasks;
/* Reset the next expected unblock time in case it referred to
the task that has just been deleted. */
prvResetNextTaskUnblockTime();
free_now = pdTRUE; //Set flag to free task memory immediately
traceTASK_DELETE( pxTCB );
taskEXIT_CRITICAL(&xTaskQueueMutex);
if(free_now == pdTRUE){ //Free task memory. Outside critical section due to deletion callbacks
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
prvDeleteTLS( pxTCB ); //Run deletion callbacks before deleting TCB
prvDeleteTCB( pxTCB ); //Must only be called after del cb
/* Force a reschedule if it is the currently running task that has just
if( xSchedulerRunning != pdFALSE )
//No mux; no harm done if this misfires. The deleted task won't get scheduled anyway.
if( pxTCB == pxCurrentTCB[ core ] ) //If task was currently running on this core
configASSERT( uxSchedulerSuspended[ core ] == 0 );
/* The pre-delete hook is primarily for the Windows simulator,
in which Windows specific clean up operations are performed,
after which it is not possible to yield away from this task -
hence xYieldPending is used to latch that a context switch is
NOTE(review): second invocation of the hook for the self-delete
path — see the inconsistency note above. */
portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[xPortGetCoreID()] );
portYIELD_WITHIN_API();
else if ( portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !core] ) //If task was currently running on the other core
/* if task is running on the other CPU, force a yield on that CPU to take it off */
vPortYieldOtherCore( !core );
mtCOVERAGE_TEST_MARKER();
#endif /* INCLUDE_vTaskDelete */
/*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskDelayUntil == 1 )

/* ToDo: Make this multicore-compatible. */
/*
 * Block the calling task until the absolute tick *pxPreviousWakeTime +
 * xTimeIncrement, giving a fixed-frequency periodic wake-up that is
 * immune to drift.  Tick-counter wrap-around is handled explicitly.
 * NOTE(review): xTaskResumeAll() is commented out below, so
 * xAlreadyYielded always stays pdFALSE and portYIELD_WITHIN_API() is
 * unconditionally invoked after the critical section — confirm this is
 * the intended SMP behaviour.
 */
void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
TickType_t xTimeToWake;
BaseType_t xAlreadyYielded=pdFALSE, xShouldDelay = pdFALSE;
configASSERT( pxPreviousWakeTime );
configASSERT( ( xTimeIncrement > 0U ) );
configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
taskENTER_CRITICAL(&xTaskQueueMutex);
// vTaskSuspendAll();
/* Minor optimisation. The tick count cannot change in this
// portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
const TickType_t xConstTickCount = xTickCount;
// portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
/* Generate the tick time at which the task wants to wake. */
xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
if( xConstTickCount < *pxPreviousWakeTime )
/* The tick count has overflowed since this function was
lasted called. In this case the only time we should ever
actually delay is if the wake time has also overflowed,
and the wake time is greater than the tick time. When this
is the case it is as if neither time had overflowed. */
if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
xShouldDelay = pdTRUE;
mtCOVERAGE_TEST_MARKER();
/* The tick time has not overflowed. In this case we will
delay if either the wake time has overflowed, and/or the
tick time is less than the wake time. */
if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
xShouldDelay = pdTRUE;
mtCOVERAGE_TEST_MARKER();
/* Update the wake time ready for the next call. */
*pxPreviousWakeTime = xTimeToWake;
if( xShouldDelay != pdFALSE )
traceTASK_DELAY_UNTIL();
/* Remove the task from the ready list before adding it to the
blocked list as the same list item is used for both lists. */
if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
/* The current task must be in a ready list, so there is
no need to check, and the port reset macro can be called
portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
mtCOVERAGE_TEST_MARKER();
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
mtCOVERAGE_TEST_MARKER();
// xAlreadyYielded = xTaskResumeAll();
taskEXIT_CRITICAL(&xTaskQueueMutex);
/* Force a reschedule if xTaskResumeAll has not already done so, we may
have put ourselves to sleep. */
if( xAlreadyYielded == pdFALSE )
portYIELD_WITHIN_API();
mtCOVERAGE_TEST_MARKER();
#endif /* INCLUDE_vTaskDelayUntil */
/*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskDelay == 1 )
/*
 * Block the calling task for xTicksToDelay ticks relative to now.
 * A delay of zero only requests a reschedule.
 * NOTE(review): as in vTaskDelayUntil(), xTaskResumeAll() is commented
 * out so xAlreadyYielded is always pdFALSE and the yield below always
 * executes when a delay was requested.
 */
void vTaskDelay( const TickType_t xTicksToDelay )
TickType_t xTimeToWake;
BaseType_t xAlreadyYielded = pdFALSE;
/* A delay time of zero just forces a reschedule. */
if( xTicksToDelay > ( TickType_t ) 0U )
configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
taskENTER_CRITICAL(&xTaskQueueMutex);
// vTaskSuspendAll();
/* A task that is removed from the event list while the
scheduler is suspended will not get placed in the ready
list or removed from the blocked list until the scheduler
This task cannot be in an event list as it is the currently
/* Calculate the time to wake - this may overflow but this is
// portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
xTimeToWake = xTickCount + xTicksToDelay;
// portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
/* We must remove ourselves from the ready list before adding
ourselves to the blocked list as the same list item is used for
if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
/* The current task must be in a ready list, so there is
no need to check, and the port reset macro can be called
portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
mtCOVERAGE_TEST_MARKER();
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
// xAlreadyYielded = xTaskResumeAll();
taskEXIT_CRITICAL(&xTaskQueueMutex);
mtCOVERAGE_TEST_MARKER();
/* Force a reschedule if xTaskResumeAll has not already done so, we may
have put ourselves to sleep. */
if( xAlreadyYielded == pdFALSE )
portYIELD_WITHIN_API();
mtCOVERAGE_TEST_MARKER();
#endif /* INCLUDE_vTaskDelay */
/*-----------------------------------------------------------*/
#if ( INCLUDE_eTaskGetState == 1 )
/*
 * Report the scheduler state of xTask (Running / Blocked / Suspended /
 * Deleted / Ready) by inspecting which kernel list currently contains
 * its generic list item.  SMP-aware: a task running on EITHER core is
 * considered Running.
 * NOTE(review): the eReturn assignments for the Blocked/Deleted/Ready
 * branches are on lines not visible in this extract.
 */
eTaskState eTaskGetState( TaskHandle_t xTask )
List_t *pxStateList;
const TCB_t * const pxTCB = ( TCB_t * ) xTask;
TCB_t * curTCBcurCore = xTaskGetCurrentTaskHandle();
TCB_t * curTCBothrCore = xTaskGetCurrentTaskHandleForCPU(!xPortGetCoreID()); //Returns NULL if Unicore
configASSERT( pxTCB );
if( pxTCB == curTCBcurCore || pxTCB == curTCBothrCore )
/* The task calling this function is querying its own state. */
taskENTER_CRITICAL(&xTaskQueueMutex);
pxStateList = ( List_t * ) listLIST_ITEM_CONTAINER( &( pxTCB->xGenericListItem ) );
taskEXIT_CRITICAL(&xTaskQueueMutex);
if( ( pxStateList == pxDelayedTaskList ) || ( pxStateList == pxOverflowDelayedTaskList ) )
/* The task being queried is referenced from one of the Blocked
#if ( INCLUDE_vTaskSuspend == 1 )
else if( pxStateList == &xSuspendedTaskList )
/* The task being queried is referenced from the suspended
list. Is it genuinely suspended or is it block
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
eReturn = eSuspended;
#if ( INCLUDE_vTaskDelete == 1 )
else if( pxStateList == &xTasksWaitingTermination )
/* The task being queried is referenced from the deleted
else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
/* If the task is not in any other state, it must be in the
Ready (including pending ready) state. */
} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
#endif /* INCLUDE_eTaskGetState */
/*-----------------------------------------------------------*/
#if ( INCLUDE_uxTaskPriorityGet == 1 )
/*
 * Return the current priority of xTask (NULL queries the calling task).
 * Reads uxPriority under xTaskQueueMutex so the value is consistent.
 */
UBaseType_t uxTaskPriorityGet( TaskHandle_t xTask )
UBaseType_t uxReturn;
taskENTER_CRITICAL(&xTaskQueueMutex);
/* If null is passed in here then we are changing the
priority of the calling function. */
pxTCB = prvGetTCBFromHandle( xTask );
uxReturn = pxTCB->uxPriority;
taskEXIT_CRITICAL(&xTaskQueueMutex);
#endif /* INCLUDE_uxTaskPriorityGet */
/*-----------------------------------------------------------*/
#if ( INCLUDE_uxTaskPriorityGet == 1 )
/*
 * ISR-safe variant of uxTaskPriorityGet(): same query, but uses the
 * ISR form of the critical-section macros so it may be called from an
 * interrupt handler.
 */
UBaseType_t uxTaskPriorityGetFromISR( TaskHandle_t xTask )
UBaseType_t uxReturn;
taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
/* If null is passed in here then it is the priority of the calling
task that is being queried. */
pxTCB = prvGetTCBFromHandle( xTask );
uxReturn = pxTCB->uxPriority;
taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
#endif /* INCLUDE_uxTaskPriorityGet */
/*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskPrioritySet == 1 )

/*
 * Change the priority of xTask (NULL changes the calling task).  With
 * mutexes enabled, only the base priority changes while an inherited
 * priority is active.  If the task sits in a ready list it is moved to
 * the list matching its new priority, and a yield is requested on this
 * core — or signalled to the other core — when the change could affect
 * which task should be running.
 */
void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
BaseType_t xYieldRequired = pdFALSE;
configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
/* Ensure the new priority is valid. */
if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
mtCOVERAGE_TEST_MARKER();
taskENTER_CRITICAL(&xTaskQueueMutex);
/* If null is passed in here then it is the priority of the calling
task that is being changed. */
pxTCB = prvGetTCBFromHandle( xTask );
traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
#if ( configUSE_MUTEXES == 1 )
/* Compare against the base (pre-inheritance) priority when
mutexes are in use; otherwise against the live priority. */
uxCurrentBasePriority = pxTCB->uxBasePriority;
uxCurrentBasePriority = pxTCB->uxPriority;
if( uxCurrentBasePriority != uxNewPriority )
/* The priority change may have readied a task of higher
priority than the calling task. */
if( uxNewPriority > uxCurrentBasePriority )
if( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] )
/* The priority of a task other than the currently
running task is being raised. Is the priority being
raised above that of the running task? */
if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
xYieldRequired = pdTRUE;
else if ( pxTCB->xCoreID != xPortGetCoreID() )
taskYIELD_OTHER_CORE( pxTCB->xCoreID, uxNewPriority );
mtCOVERAGE_TEST_MARKER();
/* The priority of the running task is being raised,
but the running task must already be the highest
priority task able to run so no yield is required. */
else if( pxTCB == pxCurrentTCB[ xPortGetCoreID() ] )
/* Setting the priority of the running task down means
there may now be another task of higher priority that
is ready to execute. */
xYieldRequired = pdTRUE;
/* Setting the priority of any other task down does not
require a yield as the running task must be above the
new priority of the task being modified. */
/* Remember the ready list the task might be referenced from
before its uxPriority member is changed so the
taskRESET_READY_PRIORITY() macro can function correctly. */
uxPriorityUsedOnEntry = pxTCB->uxPriority;
#if ( configUSE_MUTEXES == 1 )
/* Only change the priority being used if the task is not
currently using an inherited priority. */
if( pxTCB->uxBasePriority == pxTCB->uxPriority )
pxTCB->uxPriority = uxNewPriority;
mtCOVERAGE_TEST_MARKER();
/* The base priority gets set whatever. */
pxTCB->uxBasePriority = uxNewPriority;
pxTCB->uxPriority = uxNewPriority;
/* Only reset the event list item value if the value is not
being used for anything else. */
if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
mtCOVERAGE_TEST_MARKER();
/* If the task is in the blocked or suspended list we need do
nothing more than change it's priority variable. However, if
the task is in a ready list it needs to be removed and placed
in the list appropriate to its new priority. */
if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xGenericListItem ) ) != pdFALSE )
/* The task is currently in its ready list - remove before adding
it to it's new ready list. As we are in a critical section we
can do this even if the scheduler is suspended. */
if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
/* It is known that the task is in its ready list so
there is no need to check again and the port level
reset macro can be called directly. */
portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
mtCOVERAGE_TEST_MARKER();
prvReaddTaskToReadyList( pxTCB );
mtCOVERAGE_TEST_MARKER();
if( xYieldRequired == pdTRUE )
taskYIELD_IF_USING_PREEMPTION();
mtCOVERAGE_TEST_MARKER();
/* Remove compiler warning about unused variables when the port
optimised task selection is not being used. */
( void ) uxPriorityUsedOnEntry;
taskEXIT_CRITICAL(&xTaskQueueMutex);
#endif /* INCLUDE_vTaskPrioritySet */
/*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskSuspend == 1 )
/*
 * Suspend xTask (NULL suspends the calling task): remove it from its
 * ready/delayed list and any event list, then append it to
 * xSuspendedTaskList.  If the calling task suspended itself a context
 * switch is forced; if the scheduler is not yet running, pxCurrentTCB
 * for this core is repaired instead.
 * NOTE(review): the declarations of pxTCB and curTCB are on lines not
 * visible in this extract.
 */
void vTaskSuspend( TaskHandle_t xTaskToSuspend )
taskENTER_CRITICAL(&xTaskQueueMutex);
/* If null is passed in here then it is the running task that is
pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
traceTASK_SUSPEND( pxTCB );
/* Remove task from the ready/delayed list and place in the
if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
taskRESET_READY_PRIORITY( pxTCB->uxPriority );
mtCOVERAGE_TEST_MARKER();
/* Is the task waiting on an event also? */
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
mtCOVERAGE_TEST_MARKER();
traceMOVED_TASK_TO_SUSPENDED_LIST(pxTCB);
vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) );
/* Snapshot the running task before leaving the critical section. */
curTCB = pxCurrentTCB[ xPortGetCoreID() ];
taskEXIT_CRITICAL(&xTaskQueueMutex);
if( pxTCB == curTCB )
if( xSchedulerRunning != pdFALSE )
/* The current task has just been suspended. */
configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
portYIELD_WITHIN_API();
/* The scheduler is not running, but the task that was pointed
to by pxCurrentTCB has just been suspended and pxCurrentTCB
must be adjusted to point to a different task. */
if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks )
/* No other tasks are ready, so set pxCurrentTCB back to
NULL so when the next task is created pxCurrentTCB will
be set to point to it no matter what its relative priority
taskENTER_CRITICAL(&xTaskQueueMutex);
pxCurrentTCB[ xPortGetCoreID() ] = NULL;
taskEXIT_CRITICAL(&xTaskQueueMutex);
vTaskSwitchContext();
if( xSchedulerRunning != pdFALSE )
/* A task other than the currently running task was suspended,
reset the next expected unblock time in case it referred to the
task that is now in the Suspended state. */
taskENTER_CRITICAL(&xTaskQueueMutex);
prvResetNextTaskUnblockTime();
taskEXIT_CRITICAL(&xTaskQueueMutex);
mtCOVERAGE_TEST_MARKER();
#endif /* INCLUDE_vTaskSuspend */
/*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskSuspend == 1 )
/*
 * Return pdTRUE only if xTask is genuinely suspended: it must be in
 * xSuspendedTaskList, must NOT already be queued on this core's
 * pending-ready list (resumed from an ISR), and its event list item
 * must not reference any event list (otherwise it is merely blocked
 * with an infinite timeout).  Otherwise pdFALSE.
 */
static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
BaseType_t xReturn = pdFALSE;
const TCB_t * const pxTCB = ( TCB_t * ) xTask;
/* Accesses xPendingReadyList so must be called from a critical
section (caller is required to hold xTaskQueueMutex). */
/* It does not make sense to check if the calling task is suspended. */
configASSERT( xTask );
/* Is the task being resumed actually in the suspended list? */
if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) ) != pdFALSE )
/* Has the task already been resumed from within an ISR? */
if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ xPortGetCoreID() ], &( pxTCB->xEventListItem ) ) == pdFALSE )
/* Is it in the suspended list because it is in the Suspended
state, or because is is blocked with no timeout? */
if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE )
mtCOVERAGE_TEST_MARKER();
mtCOVERAGE_TEST_MARKER();
mtCOVERAGE_TEST_MARKER();
} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
#endif /* INCLUDE_vTaskSuspend */
/*-----------------------------------------------------------*/
1920 #if ( INCLUDE_vTaskSuspend == 1 )
/* Move a task previously suspended with vTaskSuspend() back to a ready
   list.  Runs entirely inside the xTaskQueueMutex critical section.
   If the resumed task can run on this core and has priority >= the
   current task's, a yield is requested; if it is pinned to the other
   core, that core is interrupted instead (SMP-specific behavior). */
1922 void vTaskResume( TaskHandle_t xTaskToResume )
1924 TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
1926 /* It does not make sense to resume the calling task. */
1927 configASSERT( xTaskToResume );
1929 taskENTER_CRITICAL(&xTaskQueueMutex);
1930 /* The parameter cannot be NULL as it is impossible to resume the
1931 currently executing task. */
1932 if( ( pxTCB != NULL ) && ( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] ) )
1935 if( prvTaskIsTaskSuspended( pxTCB ) == pdTRUE )
1937 traceTASK_RESUME( pxTCB );
1939 /* As we are in a critical section we can access the ready
1940 lists even if the scheduler is suspended. */
1941 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
1942 prvAddTaskToReadyList( pxTCB );
1944 /* We may have just resumed a higher priority task. */
1945 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
1947 /* This yield may not cause the task just resumed to run,
1948 but will leave the lists in the correct state for the
1950 taskYIELD_IF_USING_PREEMPTION();
1952 else if( pxTCB->xCoreID != xPortGetCoreID() )
1954 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
1958 mtCOVERAGE_TEST_MARKER();
1963 mtCOVERAGE_TEST_MARKER();
1969 mtCOVERAGE_TEST_MARKER();
1971 taskEXIT_CRITICAL(&xTaskQueueMutex);
1974 #endif /* INCLUDE_vTaskSuspend */
1976 /*-----------------------------------------------------------*/
1978 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
/* ISR-safe variant of vTaskResume().  Returns pdTRUE when the caller
   should request a context switch before exiting the ISR.  If this
   core's scheduler is suspended the task cannot be placed on a ready
   list directly, so it is parked on this core's pending ready list and
   picked up later by xTaskResumeAll(). */
1980 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
1982 BaseType_t xYieldRequired = pdFALSE;
1983 TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
1985 configASSERT( xTaskToResume );
1987 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
1990 if( prvTaskIsTaskSuspended( pxTCB ) == pdTRUE )
1992 traceTASK_RESUME_FROM_ISR( pxTCB );
1994 /* Check the ready lists can be accessed. */
1995 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
1997 /* Ready lists can be accessed so move the task from the
1998 suspended list to the ready list directly. */
1999 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
2000 prvAddTaskToReadyList( pxTCB );
2002 if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2004 xYieldRequired = pdTRUE;
2006 else if ( pxTCB->xCoreID != xPortGetCoreID() )
2008 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
2012 mtCOVERAGE_TEST_MARKER();
2017 /* The delayed or ready lists cannot be accessed so the task
2018 is held in the pending ready list until the scheduler is
2020 vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
2025 mtCOVERAGE_TEST_MARKER();
2028 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
2030 return xYieldRequired;
2033 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
2034 /*-----------------------------------------------------------*/
/* Start the RTOS kernel: create one idle task per core (pinned to that
   core), optionally create the timer service task, then disable
   interrupts, zero the tick count and hand control to the port layer via
   xPortStartScheduler().  On success this function never returns; it
   falls through only if task/timer creation failed (insufficient heap),
   which the trailing configASSERT catches.
   NOTE(review): declarations of xReturn/i and several braces fall in
   lines not visible in this extraction. */
2036 void vTaskStartScheduler( void )
2041 /* Add the per-core idle tasks at the lowest priority. */
2042 for ( i=0; i<portNUM_PROCESSORS; i++) {
2043 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2045 /* Create the idle task, storing its handle in xIdleTaskHandle so it can
2046 be returned by the xTaskGetIdleTaskHandle() function. */
2047 xReturn = xTaskCreatePinnedToCore( prvIdleTask, "IDLE", tskIDLE_STACK_SIZE, ( void * ) NULL, ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), &xIdleTaskHandle[i], i ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2051 /* Create the idle task without storing its handle. */
2052 xReturn = xTaskCreatePinnedToCore( prvIdleTask, "IDLE", tskIDLE_STACK_SIZE, ( void * ) NULL, ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), NULL, i); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2054 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2057 #if ( configUSE_TIMERS == 1 )
2059 if( xReturn == pdPASS )
2061 xReturn = xTimerCreateTimerTask();
2065 mtCOVERAGE_TEST_MARKER();
2068 #endif /* configUSE_TIMERS */
2070 if( xReturn == pdPASS )
2072 /* Interrupts are turned off here, to ensure a tick does not occur
2073 before or during the call to xPortStartScheduler(). The stacks of
2074 the created tasks contain a status word with interrupts switched on
2075 so interrupts will automatically get re-enabled when the first task
2077 portDISABLE_INTERRUPTS();
2080 xTickCount = ( TickType_t ) 0U;
2082 /* If configGENERATE_RUN_TIME_STATS is defined then the following
2083 macro must be defined to configure the timer/counter used to generate
2084 the run time counter time base. */
2085 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
2086 xSchedulerRunning = pdTRUE;
2088 /* Setting up the timer tick is hardware specific and thus in the
2089 portable interface. */
2090 if( xPortStartScheduler() != pdFALSE )
2092 /* Should not reach here as if the scheduler is running the
2093 function will not return. */
2097 /* Should only reach here if a task calls xTaskEndScheduler(). */
2102 /* This line will only be reached if the kernel could not be started,
2103 because there was not enough FreeRTOS heap to create the idle task
2104 or the timer task. */
2105 configASSERT( xReturn );
2108 /*-----------------------------------------------------------*/
/* Stop the kernel: mark the scheduler as not running and let the port
   layer tear down its tick/ISR machinery via vPortEndScheduler(). */
2110 void vTaskEndScheduler( void )
2112 /* Stop the scheduler interrupts and call the portable scheduler end
2113 routine so the original ISRs can be restored if necessary. The port
2114 layer must ensure interrupts enable bit is left in the correct state. */
2115 portDISABLE_INTERRUPTS();
2116 xSchedulerRunning = pdFALSE;
2117 vPortEndScheduler();
2119 /*----------------------------------------------------------*/
2122 #if ( configUSE_NEWLIB_REENTRANT == 1 )
2123 //Return global reent struct if FreeRTOS isn't running,
/* newlib hook: newlib calls __getreent() to find the per-context
   reentrancy structure (errno, stdio state, ...).  Before any task is
   running the shared _GLOBAL_REENT is returned; afterwards each task's
   TCB-embedded xNewLib_reent is used so libc state is per-task. */
2124 struct _reent* __getreent() {
2125 //No lock needed because if this changes, we won't be running anymore.
2126 TCB_t *currTask=xTaskGetCurrentTaskHandle();
2127 if (currTask==NULL) {
2128 //No task running. Return global struct.
2129 return _GLOBAL_REENT;
2131 //We have a task; return its reentrant struct.
2132 return &currTask->xNewLib_reent;
/* Suspend the scheduler on THIS core only, by incrementing the per-core
   nesting counter.  Unlike upstream FreeRTOS, the SMP port wraps the
   increment in an interrupts-disabled section (portENTER_CRITICAL_NESTED)
   because another core may read the counter concurrently.  Must be
   balanced by a later xTaskResumeAll(). */
2138 void vTaskSuspendAll( void )
2140 /* A critical section is not required as the variable is of type
2141 BaseType_t. Please read Richard Barry's reply in the following link to a
2142 post in the FreeRTOS support forum before reporting this as a bug! -
2143 http://goo.gl/wu4acr */
2146 state = portENTER_CRITICAL_NESTED();
2147 ++uxSchedulerSuspended[ xPortGetCoreID() ];
2148 portEXIT_CRITICAL_NESTED(state);
2150 /*----------------------------------------------------------*/
2152 #if ( configUSE_TICKLESS_IDLE != 0 )
/* Tickless-idle helper: estimate how many ticks may elapse before the
   next task needs to run, so the port can suppress tick interrupts.
   Returns 0 (presumably - the early-return lines are not visible in this
   extraction) when a task above idle priority is running or other
   idle-priority tasks would need a time slice; otherwise returns
   xNextTaskUnblockTime - xTickCount, read under the tick-count mutex. */
2154 static TickType_t prvGetExpectedIdleTime( void )
2159 taskENTER_CRITICAL(&xTaskQueueMutex);
2160 if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
2164 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
2166 /* There are other idle priority tasks in the ready state. If
2167 time slicing is used then the very next tick interrupt must be
2173 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2174 xReturn = xNextTaskUnblockTime - xTickCount;
2175 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2177 taskEXIT_CRITICAL(&xTaskQueueMutex);
2182 #endif /* configUSE_TICKLESS_IDLE */
2183 /*----------------------------------------------------------*/
/* Undo one vTaskSuspendAll() on this core.  When the nesting count
   reaches zero: (1) drain this core's pending ready list (tasks readied
   by ISRs while the scheduler was suspended) into the real ready lists,
   (2) replay any ticks that were pended (uxPendedTicks) so the tick
   count does not slip, and (3) perform the deferred yield if one became
   due.  Returns pdTRUE if a yield was performed here, so callers know
   they need not yield again. */
2185 BaseType_t xTaskResumeAll( void )
2188 BaseType_t xAlreadyYielded = pdFALSE;
2190 /* If uxSchedulerSuspended[ xPortGetCoreID() ] is zero then this function does not match a
2191 previous call to vTaskSuspendAll(). */
2192 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] );
2193 /* It is possible that an ISR caused a task to be removed from an event
2194 list while the scheduler was suspended. If this was the case then the
2195 removed task will have been added to the xPendingReadyList. Once the
2196 scheduler has been resumed it is safe to move all the pending ready
2197 tasks from this list into their appropriate ready list. */
2199 taskENTER_CRITICAL(&xTaskQueueMutex);
2201 --uxSchedulerSuspended[ xPortGetCoreID() ];
2203 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
2205 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
2207 /* Move any readied tasks from the pending list into the
2208 appropriate ready list. */
2209 while( listLIST_IS_EMPTY( &xPendingReadyList[ xPortGetCoreID() ] ) == pdFALSE )
2211 pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[ xPortGetCoreID() ] ) );
2212 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2213 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
2214 prvAddTaskToReadyList( pxTCB );
2216 /* If the moved task has a priority higher than the current
2217 task then a yield must be performed. */
2218 if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2220 /* We can schedule the awoken task on this CPU. */
2221 xYieldPending[xPortGetCoreID()] = pdTRUE;
2225 mtCOVERAGE_TEST_MARKER();
2229 /* If any ticks occurred while the scheduler was suspended then
2230 they should be processed now. This ensures the tick count does
2231 not slip, and that any delayed tasks are resumed at the correct
2233 if( uxPendedTicks > ( UBaseType_t ) 0U )
2235 while( uxPendedTicks > ( UBaseType_t ) 0U )
2237 if( xTaskIncrementTick() != pdFALSE )
2239 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
2243 mtCOVERAGE_TEST_MARKER();
2250 mtCOVERAGE_TEST_MARKER();
2253 if( xYieldPending[ xPortGetCoreID() ] == pdTRUE )
2255 #if( configUSE_PREEMPTION != 0 )
2257 xAlreadyYielded = pdTRUE;
2260 taskYIELD_IF_USING_PREEMPTION();
2264 mtCOVERAGE_TEST_MARKER();
2270 mtCOVERAGE_TEST_MARKER();
2273 taskEXIT_CRITICAL(&xTaskQueueMutex);
2275 return xAlreadyYielded;
2277 /*-----------------------------------------------------------*/
/* Return the current tick count.  The read is wrapped in the tick-count
   critical section so it is atomic even on ports where TickType_t is
   wider than a native word.  (The return statement falls outside the
   lines visible in this extraction.) */
2279 TickType_t xTaskGetTickCount( void )
2283 /* Critical section required if running on a 16 bit processor. */
2284 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2286 xTicks = xTickCount;
2288 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2292 /*-----------------------------------------------------------*/
/* ISR-safe read of the tick count, using the ISR variant of the
   tick-count critical section.  (Return statement not visible here.) */
2294 TickType_t xTaskGetTickCountFromISR( void )
2298 taskENTER_CRITICAL_ISR(&xTickCountMutex);
2300 xReturn = xTickCount;
2301 // vPortCPUReleaseMutex( &xTickCountMutex );
2303 taskEXIT_CRITICAL_ISR(&xTickCountMutex);
2307 /*-----------------------------------------------------------*/
/* Return the number of tasks the kernel currently knows about
   (all states, including those pending deletion cleanup). */
2309 UBaseType_t uxTaskGetNumberOfTasks( void )
2311 /* A critical section is not required because the variables are of type
2313 return uxCurrentNumberOfTasks;
2315 /*-----------------------------------------------------------*/
2317 #if ( INCLUDE_pcTaskGetTaskName == 1 )
/* Return a pointer to the task's name stored in its TCB.  Passing NULL
   queries the calling task (handled by prvGetTCBFromHandle).  The
   returned string lives as long as the task does. */
2318 char *pcTaskGetTaskName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2322 /* If null is passed in here then the name of the calling task is being queried. */
2323 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
2324 configASSERT( pxTCB );
2325 return &( pxTCB->pcTaskName[ 0 ] );
2328 #endif /* INCLUDE_pcTaskGetTaskName */
2329 /*-----------------------------------------------------------*/
2331 #if ( configUSE_TRACE_FACILITY == 1 )
/* Snapshot every task in the system into pxTaskStatusArray (one
   TaskStatus_t per task) provided uxArraySize is large enough; walks
   the ready lists from highest priority down, then the two delayed
   lists, the termination list and the suspended list.  Optionally
   reports the total run-time counter.  Returns the number of entries
   populated (the return falls outside the visible lines).  The whole
   walk holds xTaskQueueMutex, so it should be used for debug/trace
   only - it stalls the scheduler while running. */
2333 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
2335 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
2337 taskENTER_CRITICAL(&xTaskQueueMutex);
2339 /* Is there a space in the array for each task in the system? */
2340 if( uxArraySize >= uxCurrentNumberOfTasks )
2342 /* Fill in an TaskStatus_t structure with information on each
2343 task in the Ready state. */
2347 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
2349 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2351 /* Fill in an TaskStatus_t structure with information on each
2352 task in the Blocked state. */
2353 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
2354 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
2356 #if( INCLUDE_vTaskDelete == 1 )
2358 /* Fill in an TaskStatus_t structure with information on
2359 each task that has been deleted but not yet cleaned up. */
2360 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
2364 #if ( INCLUDE_vTaskSuspend == 1 )
2366 /* Fill in an TaskStatus_t structure with information on
2367 each task in the Suspended state. */
2368 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
2372 #if ( configGENERATE_RUN_TIME_STATS == 1)
2374 if( pulTotalRunTime != NULL )
2376 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2377 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
2379 *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
2385 if( pulTotalRunTime != NULL )
2387 *pulTotalRunTime = 0;
2394 mtCOVERAGE_TEST_MARKER();
2397 taskEXIT_CRITICAL(&xTaskQueueMutex);
2401 #endif /* configUSE_TRACE_FACILITY */
2402 /*----------------------------------------------------------*/
2404 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
/* Return the idle task handle for the calling core.  Asserts if called
   before vTaskStartScheduler() has created the idle tasks. */
2406 TaskHandle_t xTaskGetIdleTaskHandle( void )
2408 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2409 started, then xIdleTaskHandle will be NULL. */
2410 configASSERT( ( xIdleTaskHandle[ xPortGetCoreID() ] != NULL ) );
2411 return xIdleTaskHandle[ xPortGetCoreID() ];
/* Per-core variant: return the idle task handle for an explicit core id,
   or NULL when cpuid is out of range.  (Return statement falls outside
   the visible lines.) */
2414 TaskHandle_t xTaskGetIdleTaskHandleForCPU( UBaseType_t cpuid )
2416 TaskHandle_t xReturn = NULL;
2417 /* If xTaskGetIdleTaskHandleForCPU() is called before the scheduler has been
2418 started, then xIdleTaskHandle will be NULL. */
2419 if (cpuid < portNUM_PROCESSORS) {
2420 configASSERT( ( xIdleTaskHandle[ cpuid ] != NULL ) );
2421 xReturn = xIdleTaskHandle[ cpuid ];
2426 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2427 /*----------------------------------------------------------*/
2429 /* This conditional compilation should use inequality to 0, not equality to 1.
2430 This is to ensure vTaskStepTick() is available when user defined low power mode
2431 implementations require configUSE_TICKLESS_IDLE to be set to a value other than
2433 #if ( configUSE_TICKLESS_IDLE != 0 )
/* Tickless-idle support: jump the tick count forward by xTicksToJump
   after a period of suppressed ticks.  Asserts that the jump does not
   step past the next task unblock time; does NOT invoke the tick hook
   for the skipped ticks. */
2435 void vTaskStepTick( const TickType_t xTicksToJump )
2437 /* Correct the tick count value after a period during which the tick
2438 was suppressed. Note this does *not* call the tick hook function for
2439 each stepped tick. */
2440 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2441 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
2442 xTickCount += xTicksToJump;
2443 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2444 traceINCREASE_TICK_COUNT( xTicksToJump );
2447 #endif /* configUSE_TICKLESS_IDLE */
2448 /*----------------------------------------------------------*/
/* Tick handler, called by the port layer on every tick interrupt.
   SMP behavior: only core 0 advances xTickCount and unblocks timed-out
   tasks; other cores just run the tick hooks and (per the early-exit
   path below, whose return line is not visible in this extraction)
   assume a switch is needed.  On core 0, when the scheduler is not
   suspended this increments the tick (swapping delayed lists on
   overflow), moves every task whose wake time has arrived from the
   delayed list to a ready list, and decides whether a context switch
   is required (priority preemption and/or time slicing).  When the
   scheduler IS suspended, the tick is pended (uxPendedTicks handling
   falls in lines not visible here) and replayed by xTaskResumeAll().
   Returns pdTRUE when the caller should context switch. */
2450 BaseType_t xTaskIncrementTick( void )
2453 TickType_t xItemValue;
2454 BaseType_t xSwitchRequired = pdFALSE;
2456 /* Called by the portable layer each time a tick interrupt occurs.
2457 Increments the tick then checks to see if the new tick value will cause any
2458 tasks to be unblocked. */
2460 /* Only let core 0 increase the tick count, to keep accurate track of time. */
2461 /* ToDo: This doesn't really play nice with the logic below: it means when core 1 is
2462 running a low-priority task, it will keep running it until there is a context
2463 switch, even when this routine (running on core 0) unblocks a bunch of high-priority
2464 tasks... this is less than optimal -- JD. */
2465 if ( xPortGetCoreID()!=0 ) {
2466 #if ( configUSE_TICK_HOOK == 1 )
2467 vApplicationTickHook();
2468 #endif /* configUSE_TICK_HOOK */
2469 esp_vApplicationTickHook();
2472 We can't really calculate what we need, that's done on core 0... just assume we need a switch.
2473 ToDo: Make this more intelligent? -- JD
2479 traceTASK_INCREMENT_TICK( xTickCount );
2481 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
2483 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2484 /* Increment the RTOS tick, switching the delayed and overflowed
2485 delayed lists if it wraps to 0. */
2487 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2489 //The other CPU may decide to mess with the task queues, so this needs a mux.
2490 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
2492 /* Minor optimisation. The tick count cannot change in this
2494 const TickType_t xConstTickCount = xTickCount;
2496 if( xConstTickCount == ( TickType_t ) 0U )
2498 taskSWITCH_DELAYED_LISTS();
2502 mtCOVERAGE_TEST_MARKER();
2505 /* See if this tick has made a timeout expire. Tasks are stored in
2506 the queue in the order of their wake time - meaning once one task
2507 has been found whose block time has not expired there is no need to
2508 look any further down the list. */
2509 if( xConstTickCount >= xNextTaskUnblockTime )
2513 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
2515 /* The delayed list is empty. Set xNextTaskUnblockTime
2516 to the maximum possible value so it is extremely
2518 if( xTickCount >= xNextTaskUnblockTime ) test will pass
2519 next time through. */
2520 xNextTaskUnblockTime = portMAX_DELAY;
2525 /* The delayed list is not empty, get the value of the
2526 item at the head of the delayed list. This is the time
2527 at which the task at the head of the delayed list must
2528 be removed from the Blocked state. */
2529 pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
2530 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xGenericListItem ) );
2532 if( xConstTickCount < xItemValue )
2534 /* It is not time to unblock this item yet, but the
2535 item value is the time at which the task at the head
2536 of the blocked list must be removed from the Blocked
2537 state - so record the item value in
2538 xNextTaskUnblockTime. */
2539 xNextTaskUnblockTime = xItemValue;
2544 mtCOVERAGE_TEST_MARKER();
2547 /* It is time to remove the item from the Blocked state. */
2548 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
2550 /* Is the task waiting on an event also? If so remove
2551 it from the event list. */
2552 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2554 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2558 mtCOVERAGE_TEST_MARKER();
2561 /* Place the unblocked task into the appropriate ready
2563 prvAddTaskToReadyList( pxTCB );
2565 /* A task being unblocked cannot cause an immediate
2566 context switch if preemption is turned off. */
2567 #if ( configUSE_PREEMPTION == 1 )
2569 /* Preemption is on, but a context switch should
2570 only be performed if the unblocked task has a
2571 priority that is equal to or higher than the
2572 currently executing task. */
2573 if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2575 xSwitchRequired = pdTRUE;
2579 mtCOVERAGE_TEST_MARKER();
2582 #endif /* configUSE_PREEMPTION */
2588 /* Tasks of equal priority to the currently running task will share
2589 processing time (time slice) if preemption is on, and the application
2590 writer has not explicitly turned time slicing off. */
2591 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
2593 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
2595 xSwitchRequired = pdTRUE;
2599 mtCOVERAGE_TEST_MARKER();
2602 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
2605 /* Guard against the tick hook being called when the pended tick
2606 count is being unwound (when the scheduler is being unlocked). */
2607 if( uxPendedTicks == ( UBaseType_t ) 0U )
2609 #if ( configUSE_TICK_HOOK == 1 )
2610 vApplicationTickHook();
2611 #endif /* configUSE_TICK_HOOK */
2612 esp_vApplicationTickHook();
2616 mtCOVERAGE_TEST_MARKER();
2619 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
2625 /* The tick hook gets called at regular intervals, even if the
2626 scheduler is locked. */
2627 #if ( configUSE_TICK_HOOK == 1 )
2629 vApplicationTickHook();
2632 esp_vApplicationTickHook();
2635 #if ( configUSE_PREEMPTION == 1 )
2637 if( xYieldPending [ xPortGetCoreID() ] != pdFALSE )
2639 xSwitchRequired = pdTRUE;
2643 mtCOVERAGE_TEST_MARKER();
2646 #endif /* configUSE_PREEMPTION */
2648 return xSwitchRequired;
2650 /*-----------------------------------------------------------*/
2652 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Attach an application-defined hook/tag function to a task's TCB.
   xTask == NULL means "the calling task".  The store is done inside
   the task-queue critical section because the tag can also be read
   from an interrupt. */
2654 void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
2658 /* If xTask is NULL then it is the task hook of the calling task that is
2662 xTCB = ( TCB_t * ) pxCurrentTCB[ xPortGetCoreID() ];
2666 xTCB = ( TCB_t * ) xTask;
2669 /* Save the hook function in the TCB. A critical section is required as
2670 the value can be accessed from an interrupt. */
2671 taskENTER_CRITICAL(&xTaskQueueMutex);
2672 xTCB->pxTaskTag = pxHookFunction;
2673 taskEXIT_CRITICAL(&xTaskQueueMutex);
2676 #endif /* configUSE_APPLICATION_TASK_TAG */
2677 /*-----------------------------------------------------------*/
2679 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Read back the application tag previously set with
   vTaskSetApplicationTaskTag().  xTask == NULL queries the calling
   task.  Read under the critical section for the same ISR-access
   reason as the setter.  (Return statement not visible here.) */
2681 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
2684 TaskHookFunction_t xReturn;
2686 /* If xTask is NULL then we are setting our own task hook. */
2689 xTCB = ( TCB_t * ) xTaskGetCurrentTaskHandle();
2693 xTCB = ( TCB_t * ) xTask;
2696 /* Save the hook function in the TCB. A critical section is required as
2697 the value can be accessed from an interrupt. */
2698 taskENTER_CRITICAL(&xTaskQueueMutex);
2700 xReturn = xTCB->pxTaskTag;
2702 taskEXIT_CRITICAL(&xTaskQueueMutex);
2707 #endif /* configUSE_APPLICATION_TASK_TAG */
2708 /*-----------------------------------------------------------*/
2710 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Invoke a task's application tag as a callback, passing pvParameter
   through.  xTask == NULL calls the calling task's hook.  Returns the
   hook's result; the no-hook path (presumably returning pdFAIL, per
   upstream FreeRTOS) falls in lines not visible in this extraction. */
2712 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
2717 /* If xTask is NULL then we are calling our own task hook. */
2720 xTCB = ( TCB_t * ) xTaskGetCurrentTaskHandle();
2724 xTCB = ( TCB_t * ) xTask;
2727 if( xTCB->pxTaskTag != NULL )
2729 xReturn = xTCB->pxTaskTag( pvParameter );
2739 #endif /* configUSE_APPLICATION_TASK_TAG */
2740 /*-----------------------------------------------------------*/
/* Core of the SMP scheduler: pick the next task for THIS core and set
   pxCurrentTCB[core].  Steps: (1) if this core's scheduler is suspended,
   just latch xYieldPending and bail; (2) otherwise account run-time
   stats and check for stack overflow; (3) walk the ready lists from the
   highest priority downwards, skipping tasks that are currently running
   on the other core or pinned elsewhere, until a runnable task is found.
   Locking is unusual here: interrupts are disabled with
   portENTER_CRITICAL_NESTED and the queue mutex is taken with
   vPortCPUAcquireMutex directly, because taskENTER_CRITICAL_ISR would
   save interrupt state into the TCB that is being swapped out. */
2742 void vTaskSwitchContext( void )
2744 //Theoretically, this is only called from either the tick interrupt or the crosscore interrupt, so disabling
2745 //interrupts shouldn't be necessary anymore. Still, for safety we'll leave it in for now.
2746 int irqstate=portENTER_CRITICAL_NESTED();
2748 if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
2750 /* The scheduler is currently suspended - do not allow a context
2752 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
2756 xYieldPending[ xPortGetCoreID() ] = pdFALSE;
2757 xSwitchingContext[ xPortGetCoreID() ] = pdTRUE;
2758 traceTASK_SWITCHED_OUT();
2760 #if ( configGENERATE_RUN_TIME_STATS == 1 )
2762 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2763 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
2765 ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
2768 /* Add the amount of time the task has been running to the
2769 accumulated time so far. The time the task started running was
2770 stored in ulTaskSwitchedInTime. Note that there is no overflow
2771 protection here so count values are only valid until the timer
2772 overflows. The guard against negative values is to protect
2773 against suspect run time stat counter implementations - which
2774 are provided by the application, not the kernel. */
2775 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
2776 if( ulTotalRunTime > ulTaskSwitchedInTime[ xPortGetCoreID() ] )
2778 pxCurrentTCB[ xPortGetCoreID() ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xPortGetCoreID() ] );
2782 mtCOVERAGE_TEST_MARKER();
2784 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
2785 ulTaskSwitchedInTime[ xPortGetCoreID() ] = ulTotalRunTime;
2787 #endif /* configGENERATE_RUN_TIME_STATS */
2789 /* Check for stack overflow, if configured. */
2790 taskFIRST_CHECK_FOR_STACK_OVERFLOW();
2791 taskSECOND_CHECK_FOR_STACK_OVERFLOW();
2793 /* Select a new task to run */
2796 We cannot do taskENTER_CRITICAL_ISR(&xTaskQueueMutex); here because it saves the interrupt context to the task tcb, and we're
2797 swapping that out here. Instead, we're going to do the work here ourselves. Because interrupts are already disabled, we only
2798 need to acquire the mutex.
2800 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
2801 vPortCPUAcquireMutex( &xTaskQueueMutex, __FUNCTION__, __LINE__ );
2803 vPortCPUAcquireMutex( &xTaskQueueMutex );
2806 unsigned portBASE_TYPE foundNonExecutingWaiter = pdFALSE, ableToSchedule = pdFALSE, resetListHead;
2807 portBASE_TYPE uxDynamicTopReady = uxTopReadyPriority;
2808 unsigned portBASE_TYPE holdTop=pdFALSE;
2811 * ToDo: This scheduler doesn't correctly implement the round-robin scheduling as done in the single-core
2812 * FreeRTOS stack when multiple tasks have the same priority and are all ready; it just keeps grabbing the
2813 * first one. ToDo: fix this.
2814 * (Is this still true? if any, there's the issue with one core skipping over the processes for the other
2815 * core, potentially not giving the skipped-over processes any time.)
2818 while ( ableToSchedule == pdFALSE && uxDynamicTopReady >= 0 )
2820 resetListHead = pdFALSE;
2821 // Nothing to do for empty lists
2822 if (!listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxDynamicTopReady ] ) )) {
2824 ableToSchedule = pdFALSE;
2827 /* Remember the current list item so that we
2828 can detect if all items have been inspected.
2829 Once this happens, we move on to a lower
2830 priority list (assuming nothing is suitable
2831 for scheduling). Note: This can return NULL if
2832 the list index is at the listItem */
2833 pxRefTCB = pxReadyTasksLists[ uxDynamicTopReady ].pxIndex->pvOwner;
2835 if ((void*)pxReadyTasksLists[ uxDynamicTopReady ].pxIndex==(void*)&pxReadyTasksLists[ uxDynamicTopReady ].xListEnd) {
2836 //pxIndex points to the list end marker. Skip that and just get the next item.
2837 listGET_OWNER_OF_NEXT_ENTRY( pxRefTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2841 listGET_OWNER_OF_NEXT_ENTRY( pxTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2842 /* Find out if the next task in the list is
2843 already being executed by another core */
2844 foundNonExecutingWaiter = pdTRUE;
2845 portBASE_TYPE i = 0;
2846 for ( i=0; i<portNUM_PROCESSORS; i++ ) {
2847 if (i == xPortGetCoreID()) {
2849 } else if (pxCurrentTCB[i] == pxTCB) {
2850 holdTop=pdTRUE; //keep this as the top prio, for the other CPU
2851 foundNonExecutingWaiter = pdFALSE;
2856 if (foundNonExecutingWaiter == pdTRUE) {
2857 /* If the task is not being executed
2858 by another core and its affinity is
2859 compatible with the current one,
2860 prepare it to be swapped in */
2861 if (pxTCB->xCoreID == tskNO_AFFINITY) {
2862 pxCurrentTCB[xPortGetCoreID()] = pxTCB;
2863 ableToSchedule = pdTRUE;
2864 } else if (pxTCB->xCoreID == xPortGetCoreID()) {
2865 pxCurrentTCB[xPortGetCoreID()] = pxTCB;
2866 ableToSchedule = pdTRUE;
2868 ableToSchedule = pdFALSE;
2869 holdTop=pdTRUE; //keep this as the top prio, for the other CPU
2872 ableToSchedule = pdFALSE;
2875 if (ableToSchedule == pdFALSE) {
2876 resetListHead = pdTRUE;
2877 } else if ((ableToSchedule == pdTRUE) && (resetListHead == pdTRUE)) {
2878 tskTCB * pxResetTCB;
2880 listGET_OWNER_OF_NEXT_ENTRY( pxResetTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2881 } while(pxResetTCB != pxRefTCB);
2883 } while ((ableToSchedule == pdFALSE) && (pxTCB != pxRefTCB));
2885 if (!holdTop) --uxTopReadyPriority;
2887 --uxDynamicTopReady;
2890 traceTASK_SWITCHED_IN();
2891 xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;
2893 //Exit critical region manually as well: release the mux now, interrupts will be re-enabled when we
2894 //exit the function.
2895 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
2896 vPortCPUReleaseMutex( &xTaskQueueMutex, __FUNCTION__, __LINE__ );
2898 vPortCPUReleaseMutex( &xTaskQueueMutex );
2901 #if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
2902 vPortSetStackWatchpoint(pxCurrentTCB[xPortGetCoreID()]->pxStack);
2906 portEXIT_CRITICAL_NESTED(irqstate);
2908 /*-----------------------------------------------------------*/
/* Block the calling task on an event list (e.g. a queue's waiting-to-
   receive list): insert its event list item into pxEventList in
   priority order, remove it from the ready list, then place it on the
   delayed list (or the suspended list for an infinite portMAX_DELAY
   wait, when INCLUDE_vTaskSuspend is enabled).  The whole operation
   runs inside the xTaskQueueMutex critical section. */
2910 void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
2912 TickType_t xTimeToWake;
2914 configASSERT( pxEventList );
2916 taskENTER_CRITICAL(&xTaskQueueMutex);
2918 /* Place the event list item of the TCB in the appropriate event list.
2919 This is placed in the list in priority order so the highest priority task
2920 is the first to be woken by the event. The queue that contains the event
2921 list is locked, preventing simultaneous access from interrupts. */
2922 vListInsert( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
2924 /* The task must be removed from from the ready list before it is added to
2925 the blocked list as the same list item is used for both lists. Exclusive
2926 access to the ready lists guaranteed because the scheduler is locked. */
2927 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
2929 /* The current task must be in a ready list, so there is no need to
2930 check, and the port reset macro can be called directly. */
2931 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
2935 mtCOVERAGE_TEST_MARKER();
2938 #if ( INCLUDE_vTaskSuspend == 1 )
2940 if( xTicksToWait == portMAX_DELAY )
2942 /* Add the task to the suspended task list instead of a delayed task
2943 list to ensure the task is not woken by a timing event. It will
2944 block indefinitely. */
2945 traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
2946 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
2950 /* Calculate the time at which the task should be woken if the event
2951 does not occur. This may overflow but this doesn't matter, the
2952 scheduler will handle it. */
2953 xTimeToWake = xTickCount + xTicksToWait;
2954 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
2957 #else /* INCLUDE_vTaskSuspend */
2959 /* Calculate the time at which the task should be woken if the event does
2960 not occur. This may overflow but this doesn't matter, the scheduler
2962 xTimeToWake = xTickCount + xTicksToWait;
2963 prvAddCurrentTaskToDelayedList( xTimeToWake );
2965 #endif /* INCLUDE_vTaskSuspend */
2967 taskEXIT_CRITICAL(&xTaskQueueMutex);
2970 /*-----------------------------------------------------------*/
/*
 * Block the calling task on an "unordered" event list - used by the event
 * groups implementation.  Must be called with the scheduler suspended
 * (asserted below).  xItemValue is written into the task's event list item,
 * tagged with taskEVENT_LIST_ITEM_VALUE_IN_USE; xTicksToWait bounds the
 * block time (portMAX_DELAY blocks indefinitely when INCLUDE_vTaskSuspend
 * is 1).
 */
2972 void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
2974 TickType_t xTimeToWake;
2976 configASSERT( pxEventList );
2978 taskENTER_CRITICAL(&xTaskQueueMutex);
2980 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
2981 the event groups implementation. */
2982 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != 0 );
2984 /* Store the item value in the event list item. It is safe to access the
2985 event list item here as interrupts won't access the event list item of a
2986 task that is not in the Blocked state. */
2987 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
2989 /* Place the event list item of the TCB at the end of the appropriate event
2990 list. It is safe to access the event list here because it is part of an
2991 event group implementation - and interrupts don't access event groups
2992 directly (instead they access them indirectly by pending function calls to
the task level). */
2994 vListInsertEnd( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
2996 /* The task must be removed from the ready list before it is added to the
2997 blocked list. Exclusive access can be assured to the ready list as the
2998 scheduler is locked. */
2999 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
3001 /* The current task must be in a ready list, so there is no need to
3002 check, and the port reset macro can be called directly. */
3003 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
/* else: the task was not the last ready task at its priority. */
3007 mtCOVERAGE_TEST_MARKER();
3010 #if ( INCLUDE_vTaskSuspend == 1 )
3012 if( xTicksToWait == portMAX_DELAY )
3014 /* Add the task to the suspended task list instead of a delayed task
3015 list to ensure it is not woken by a timing event. It will block
indefinitely. */
3017 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
/* else: finite timeout - compute the wake time and place on a delayed list. */
3021 /* Calculate the time at which the task should be woken if the event
3022 does not occur. This may overflow but this doesn't matter, the
3023 kernel will manage it correctly. */
3024 xTimeToWake = xTickCount + xTicksToWait;
3025 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
3028 #else /* INCLUDE_vTaskSuspend */
3030 /* Calculate the time at which the task should be woken if the event does
3031 not occur. This may overflow but this doesn't matter, the kernel
3032 will manage it correctly. */
3033 xTimeToWake = xTickCount + xTicksToWait;
/* NOTE(review): single-argument form - presumably the single-core build of
the helper; confirm against the full source. */
3034 prvAddCurrentTaskToDelayedList( xTimeToWake );
3036 #endif /* INCLUDE_vTaskSuspend */
3038 taskEXIT_CRITICAL(&xTaskQueueMutex);
3040 /*-----------------------------------------------------------*/
3042 #if configUSE_TIMERS == 1
/*
 * Kernel-internal variant used by the timer service task: place the calling
 * task on pxEventList and on a delayed list.  Has special calling
 * requirements (see comment below) and always uses a finite xTicksToWait.
 */
3044 void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, const TickType_t xTicksToWait )
3046 TickType_t xTimeToWake;
3048 taskENTER_CRITICAL(&xTaskQueueMutex);
3049 configASSERT( pxEventList );
3051 /* This function should not be called by application code hence the
3052 'Restricted' in its name. It is not part of the public API. It is
3053 designed for use by kernel code, and has special calling requirements -
3054 it should be called from a critical section. */
3057 /* Place the event list item of the TCB in the appropriate event list.
3058 In this case it is assumed that this is the only task that is going to
3059 be waiting on this event list, so the faster vListInsertEnd() function
3060 can be used in place of vListInsert. */
3061 vListInsertEnd( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
3063 /* We must remove this task from the ready list before adding it to the
3064 blocked list as the same list item is used for both lists. This
3065 function is called from a critical section. */
3066 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
3068 /* The current task must be in a ready list, so there is no need to
3069 check, and the port reset macro can be called directly. */
3070 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
3074 mtCOVERAGE_TEST_MARKER();
3077 /* Calculate the time at which the task should be woken if the event does
3078 not occur. This may overflow but this doesn't matter. */
3079 xTimeToWake = xTickCount + xTicksToWait;
3081 traceTASK_DELAY_UNTIL();
3082 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
3083 taskEXIT_CRITICAL(&xTaskQueueMutex);
3087 #endif /* configUSE_TIMERS */
3088 /*-----------------------------------------------------------*/
/*
 * Unblock the highest-priority task waiting on pxEventList.  Returns pdTRUE
 * when the unblocked task should preempt the calling task (same or higher
 * priority and runnable on this core).  Must be called from a critical
 * section; may run in ISR context.
 */
3090 BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
3092 TCB_t *pxUnblockedTCB;
3094 BaseType_t xTaskCanBeReady;
3095 UBaseType_t i, uxTargetCPU;
3097 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
3098 called from a critical section within an ISR. */
3099 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
3100 /* The event list is sorted in priority order, so the first in the list can
3101 be removed as it is known to be the highest priority. Remove the TCB from
3102 the delayed list, and add it to the ready list.
3104 If an event is for a queue that is locked then this function will never
3105 get called - the lock count on the queue will get modified instead. This
3106 means exclusive access to the event list is guaranteed here.
3108 This function assumes that a check has already been made to ensure that
3109 pxEventList is not empty. */
3110 if ( ( listLIST_IS_EMPTY( pxEventList ) ) == pdFALSE ) {
3111 pxUnblockedTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
3112 configASSERT( pxUnblockedTCB );
3113 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
/* NOTE(review): this exit appears to belong to the empty-list early-return
branch - braces are elided in this listing; verify against the full source. */
3115 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
3119 /* Determine if the task can possibly be run on either CPU now, either because the scheduler
3120 the task is pinned to is running or because a scheduler is running on any CPU. */
3121 xTaskCanBeReady = pdFALSE;
3122 if ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY ) {
3123 uxTargetCPU = xPortGetCoreID();
3124 for (i = 0; i < portNUM_PROCESSORS; i++) {
3125 if ( uxSchedulerSuspended[ i ] == ( UBaseType_t ) pdFALSE ) {
3126 xTaskCanBeReady = pdTRUE;
/* else: task is pinned - only its own core's scheduler state matters. */
3131 uxTargetCPU = pxUnblockedTCB->xCoreID;
3132 xTaskCanBeReady = uxSchedulerSuspended[ uxTargetCPU ] == ( UBaseType_t ) pdFALSE;
3136 if( xTaskCanBeReady )
3138 ( void ) uxListRemove( &( pxUnblockedTCB->xGenericListItem ) );
3139 prvAddTaskToReadyList( pxUnblockedTCB );
3143 /* The delayed and ready lists cannot be accessed, so hold this task
3144 pending until the scheduler is resumed on this CPU. */
3145 vListInsertEnd( &( xPendingReadyList[ uxTargetCPU ] ), &( pxUnblockedTCB->xEventListItem ) );
3148 if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
3150 /* Return true if the task removed from the event list has a higher
3151 priority than the calling task. This allows the calling task to know if
3152 it should force a context switch now. */
3155 /* Mark that a yield is pending in case the user is not using the
3156 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3157 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
3159 else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
3161 taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
3169 #if( configUSE_TICKLESS_IDLE == 1 )
3171 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
3172 might be set to the blocked task's time out time. If the task is
3173 unblocked for a reason other than a timeout xNextTaskUnblockTime is
3174 normally left unchanged, because it is automatically get reset to a new
3175 value when the tick count equals xNextTaskUnblockTime. However if
3176 tickless idling is used it might be more important to enter sleep mode
3177 at the earliest possible time - so reset xNextTaskUnblockTime here to
3178 ensure it is updated at the earliest possible time. */
3179 prvResetNextTaskUnblockTime();
3182 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
3186 /*-----------------------------------------------------------*/
/*
 * Unblock the task that owns pxEventListItem (event groups path), writing
 * xItemValue back into the list item.  Must be called with the scheduler
 * suspended (asserted below).  Returns pdTRUE when the unblocked task
 * should preempt the caller.
 */
3188 BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
3190 TCB_t *pxUnblockedTCB;
3193 taskENTER_CRITICAL(&xTaskQueueMutex);
3194 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3195 the event flags implementation. */
3196 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != pdFALSE );
3198 /* Store the new item value in the event list. */
3199 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3201 /* Remove the event list item from the event flag. Interrupts do not access
event groups directly. */
3203 pxUnblockedTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxEventListItem );
3204 configASSERT( pxUnblockedTCB );
3205 ( void ) uxListRemove( pxEventListItem );
3207 /* Remove the task from the delayed list and add it to the ready list. The
3208 scheduler is suspended so interrupts will not be accessing the ready
lists. */
3210 ( void ) uxListRemove( &( pxUnblockedTCB->xGenericListItem ) );
3211 prvAddTaskToReadyList( pxUnblockedTCB );
3213 if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
3215 /* Return true if the task removed from the event list has
3216 a higher priority than the calling task. This allows
3217 the calling task to know if it should force a context
switch now. */
3221 /* Mark that a yield is pending in case the user is not using the
3222 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3223 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
3225 else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
3227 taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
3235 taskEXIT_CRITICAL(&xTaskQueueMutex);
3238 /*-----------------------------------------------------------*/
3240 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
3242 configASSERT( pxTimeOut );
3243 pxTimeOut->xOverflowCount = xNumOfOverflows;
3244 pxTimeOut->xTimeOnEntering = xTickCount;
3246 /*-----------------------------------------------------------*/
/*
 * Check whether the timeout captured by vTaskSetTimeOutState() has expired.
 * When it has not expired, *pxTicksToWait is reduced by the time already
 * spent waiting and the timeout state is restarted.  (The xReturn paths are
 * on lines elided from this listing.)
 */
3248 BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
3252 configASSERT( pxTimeOut );
3253 configASSERT( pxTicksToWait );
3255 taskENTER_CRITICAL(&xTickCountMutex);
3257 /* Minor optimisation. The tick count cannot change in this block. */
3258 const TickType_t xConstTickCount = xTickCount;
3260 #if ( INCLUDE_vTaskSuspend == 1 )
3261 /* If INCLUDE_vTaskSuspend is set to 1 and the block time specified is
3262 the maximum block time then the task should block indefinitely, and
3263 therefore never time out. */
3264 if( *pxTicksToWait == portMAX_DELAY )
3268 else /* We are not blocking indefinitely, perform the checks below. */
3271 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
3273 /* The tick count is greater than the time at which vTaskSetTimeout()
3274 was called, but has also overflowed since vTaskSetTimeOut() was called.
3275 It must have wrapped all the way around and gone past us again. This
3276 passed since vTaskSetTimeout() was called. */
3279 else if( ( xConstTickCount - pxTimeOut->xTimeOnEntering ) < *pxTicksToWait )
3281 /* Not a genuine timeout. Adjust parameters for time remaining. */
3282 *pxTicksToWait -= ( xConstTickCount - pxTimeOut->xTimeOnEntering );
3283 vTaskSetTimeOutState( pxTimeOut );
3291 taskEXIT_CRITICAL(&xTickCountMutex);
3295 /*-----------------------------------------------------------*/
3297 void vTaskMissedYield( void )
3299 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
3301 /*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

	/*
	 * Return the trace-facility task number previously assigned to xTask
	 * with vTaskSetTaskNumber(), or 0 when xTask is NULL.  The visible text
	 * had lost the pxTCB declaration, NULL guard and return statement;
	 * restored per upstream FreeRTOS V8.2.0.
	 */
	UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
	{
	UBaseType_t uxReturn;
	TCB_t *pxTCB;

		if( xTask != NULL )
		{
			pxTCB = ( TCB_t * ) xTask;
			uxReturn = pxTCB->uxTaskNumber;
		}
		else
		{
			uxReturn = 0U;
		}

		return uxReturn;
	}

#endif /* configUSE_TRACE_FACILITY */
3324 /*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

	/*
	 * Assign trace-facility task number uxHandle to xTask.  A NULL handle is
	 * ignored.  The visible text had lost the pxTCB declaration and NULL
	 * guard; restored per upstream FreeRTOS V8.2.0.
	 */
	void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
	{
	TCB_t *pxTCB;

		if( xTask != NULL )
		{
			pxTCB = ( TCB_t * ) xTask;
			pxTCB->uxTaskNumber = uxHandle;
		}
	}

#endif /* configUSE_TRACE_FACILITY */
3342 * -----------------------------------------------------------
3344 * ----------------------------------------------------------
3346 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3347 * language extensions. The equivalent prototype for this function is:
3349 * void prvIdleTask( void *pvParameters );
/*
 * The idle task: reclaims memory of deleted tasks, optionally yields to
 * peers sharing the idle priority, runs the application and esp-idf idle
 * hooks, and drives tickless idle when enabled.
 */
3352 static portTASK_FUNCTION( prvIdleTask, pvParameters )
3354 /* Stop warnings. */
3355 ( void ) pvParameters;
3359 /* See if any tasks have been deleted. */
3360 prvCheckTasksWaitingTermination();
3362 #if ( configUSE_PREEMPTION == 0 )
3364 /* If we are not using preemption we keep forcing a task switch to
3365 see if any other task has become available. If we are using
3366 preemption we don't need to do this as any task becoming available
3367 will automatically get the processor anyway. */
3370 #endif /* configUSE_PREEMPTION */
3372 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
3374 /* When using preemption tasks of equal priority will be
3375 timesliced. If a task that is sharing the idle priority is ready
3376 to run then the idle task should yield before the end of the
timeslice.
3379 A critical region is not required here as we are just reading from
3380 the list, and an occasional incorrect value will not matter. If
3381 the ready list at the idle priority contains more than one task
3382 then a task other than the idle task is ready to execute. */
3383 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
3389 mtCOVERAGE_TEST_MARKER();
3392 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
3394 #if ( configUSE_IDLE_HOOK == 1 )
3396 extern void vApplicationIdleHook( void );
3398 /* Call the user defined function from within the idle task. This
3399 allows the application designer to add background functionality
3400 without the overhead of a separate task.
3401 NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
3402 CALL A FUNCTION THAT MIGHT BLOCK. */
3403 vApplicationIdleHook();
3405 #endif /* configUSE_IDLE_HOOK */
3407 /* Call the esp-idf hook system */
3408 extern void esp_vApplicationIdleHook( void );
3409 esp_vApplicationIdleHook();
3413 /* This conditional compilation should use inequality to 0, not equality
3414 to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
3415 user defined low power mode implementations require
3416 configUSE_TICKLESS_IDLE to be set to a value other than 1. */
3417 #if ( configUSE_TICKLESS_IDLE != 0 )
3419 TickType_t xExpectedIdleTime;
3421 /* It is not desirable to suspend then resume the scheduler on
3422 each iteration of the idle task. Therefore, a preliminary
3423 test of the expected idle time is performed without the
3424 scheduler suspended. The result here is not necessarily
valid. */
3426 xExpectedIdleTime = prvGetExpectedIdleTime();
3428 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3430 // vTaskSuspendAll();
3431 taskENTER_CRITICAL(&xTaskQueueMutex);
3433 /* Now the scheduler is suspended, the expected idle
3434 time can be sampled again, and this time its value can
be used. */
3436 configASSERT( xNextTaskUnblockTime >= xTickCount );
3437 xExpectedIdleTime = prvGetExpectedIdleTime();
3439 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3441 traceLOW_POWER_IDLE_BEGIN();
3442 portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
3443 traceLOW_POWER_IDLE_END();
3447 mtCOVERAGE_TEST_MARKER();
3450 taskEXIT_CRITICAL(&xTaskQueueMutex);
3451 // ( void ) xTaskResumeAll();
3455 mtCOVERAGE_TEST_MARKER();
3458 #endif /* configUSE_TICKLESS_IDLE */
3461 /*-----------------------------------------------------------*/
3463 #if configUSE_TICKLESS_IDLE != 0
/*
 * Called from within portSUPPRESS_TICKS_AND_SLEEP() to confirm that it is
 * still safe to enter low power mode, and whether a deeper sleep with no
 * wakeup timer is possible (eNoTasksWaitingTimeout).
 */
3465 eSleepModeStatus eTaskConfirmSleepModeStatus( void )
3467 eSleepModeStatus eReturn = eStandardSleep;
3468 taskENTER_CRITICAL(&xTaskQueueMutex);
3470 if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xPortGetCoreID() ] ) != 0 )
3472 /* A task was made ready while the scheduler was suspended. */
3473 eReturn = eAbortSleep;
3475 else if( xYieldPending[ xPortGetCoreID() ] != pdFALSE )
3477 /* A yield was pended while the scheduler was suspended. */
3478 eReturn = eAbortSleep;
3482 #if configUSE_TIMERS == 0
3484 /* The idle task exists in addition to the application tasks. */
3485 const UBaseType_t uxNonApplicationTasks = 1;
3487 /* If timers are not being used and all the tasks are in the
3488 suspended list (which might mean they have an infinite block
3489 time rather than actually being suspended) then it is safe to
3490 turn all clocks off and just wait for external interrupts. */
3491 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
3493 eReturn = eNoTasksWaitingTimeout;
3497 mtCOVERAGE_TEST_MARKER();
3500 #endif /* configUSE_TIMERS */
3502 taskEXIT_CRITICAL(&xTaskQueueMutex);
3506 #endif /* configUSE_TICKLESS_IDLE */
3507 /*-----------------------------------------------------------*/
3509 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3511 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
/*
 * Store pvValue in slot xIndex of xTaskToSet's thread-local-storage array
 * and remember xDelCallback, which prvDeleteTLS() invokes for that slot
 * when the task is deleted.  Out-of-range indices are silently ignored.
 */
3513 void vTaskSetThreadLocalStoragePointerAndDelCallback( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue , TlsDeleteCallbackFunction_t xDelCallback)
3517 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3519 taskENTER_CRITICAL(&xTaskQueueMutex);
3520 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3521 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3522 pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
3523 taskEXIT_CRITICAL(&xTaskQueueMutex);
/* Convenience wrapper: set a TLS pointer with no deletion callback. */
3527 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
3529 vTaskSetThreadLocalStoragePointerAndDelCallback( xTaskToSet, xIndex, pvValue, (TlsDeleteCallbackFunction_t)NULL );
/* Variant compiled when deletion callbacks are disabled. */
3534 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
3538 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3540 taskENTER_CRITICAL(&xTaskQueueMutex);
3541 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3542 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3543 taskEXIT_CRITICAL(&xTaskQueueMutex);
3546 #endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS */
3548 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3549 /*-----------------------------------------------------------*/
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

	/*
	 * Return the thread-local-storage pointer stored in slot xIndex of
	 * xTaskToQuery (NULL handle = calling task).  Returns NULL for an
	 * out-of-range index.  The visible text had lost the pxTCB declaration,
	 * braces and return statement; restored per upstream FreeRTOS V8.2.0.
	 */
	void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
	{
	void *pvReturn = NULL;
	TCB_t *pxTCB;

		if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
		{
			pxTCB = prvGetTCBFromHandle( xTaskToQuery );
			pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
		}
		else
		{
			pvReturn = NULL;
		}

		return pvReturn;
	}

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
#if ( portUSING_MPU_WRAPPERS == 1 )
	/* ToDo: Check for multicore */

	/*
	 * Replace the MPU regions associated with xTaskToModify.  A NULL handle
	 * means "the calling task" (resolved by prvGetTCBFromHandle).
	 */
	void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
	{
	TCB_t *pxTCB;

		UNTESTED_FUNCTION();
		/* If NULL is passed in here then the calling task is being modified. */
		pxTCB = prvGetTCBFromHandle( xTaskToModify );

		vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
	}

#endif /* portUSING_MPU_WRAPPERS */
3588 /*-----------------------------------------------------------*/
3590 static void prvInitialiseTaskLists( void )
3592 UBaseType_t uxPriority;
3594 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
3596 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
3599 vListInitialise( &xDelayedTaskList1 );
3600 vListInitialise( &xDelayedTaskList2 );
3601 vListInitialise( &xPendingReadyList[ 0 ] );
3602 if (portNUM_PROCESSORS == 2) {
3603 vListInitialise( &xPendingReadyList[ 1 ] );
3606 #if ( INCLUDE_vTaskDelete == 1 )
3608 vListInitialise( &xTasksWaitingTermination );
3610 #endif /* INCLUDE_vTaskDelete */
3612 #if ( INCLUDE_vTaskSuspend == 1 )
3614 vListInitialise( &xSuspendedTaskList );
3616 #endif /* INCLUDE_vTaskSuspend */
3618 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
3620 pxDelayedTaskList = &xDelayedTaskList1;
3621 pxOverflowDelayedTaskList = &xDelayedTaskList2;
3623 /*-----------------------------------------------------------*/
/*
 * Idle-task helper: free the TCB/stack memory of tasks sitting in
 * xTasksWaitingTermination.  Only tasks that are not still running and are
 * pinned to this core (or unpinned) are reclaimed here, because per-core
 * cleanup such as coprocessor release must happen on the owning core.
 */
3625 static void prvCheckTasksWaitingTermination( void )
3627 #if ( INCLUDE_vTaskDelete == 1 )
3629 BaseType_t xListIsEmpty;
3630 int core = xPortGetCoreID();
3632 /* ucTasksDeleted is used to prevent vTaskSuspendAll() being called
3633 too often in the idle task. */
3634 while(uxTasksDeleted > ( UBaseType_t ) 0U )
3636 TCB_t *pxTCB = NULL;
3638 taskENTER_CRITICAL(&xTaskQueueMutex);
3640 xListIsEmpty = listLIST_IS_EMPTY( &xTasksWaitingTermination );
3641 if( xListIsEmpty == pdFALSE )
3643 /* We only want to kill tasks that ran on this core because e.g. _xt_coproc_release needs to
3644 be called on the core the process is pinned on, if any */
3645 ListItem_t *target = listGET_HEAD_ENTRY(&xTasksWaitingTermination);
3646 for( ; target != listGET_END_MARKER(&xTasksWaitingTermination); target = listGET_NEXT(target) ){ //Walk the list
3647 TCB_t *tgt_tcb = ( TCB_t * )listGET_LIST_ITEM_OWNER(target);
3648 int affinity = tgt_tcb->xCoreID;
3649 //Self deleting tasks are added to Termination List before they switch context. Ensure they aren't still currently running
3650 if( pxCurrentTCB[core] == tgt_tcb || (portNUM_PROCESSORS > 1 && pxCurrentTCB[!core] == tgt_tcb) ){
3651 continue; //Can't free memory of task that is still running
3653 if(affinity == core || affinity == tskNO_AFFINITY){ //Find first item not pinned to other core
3659 ( void ) uxListRemove( target ); //Remove list item from list
3660 --uxCurrentNumberOfTasks;
3665 taskEXIT_CRITICAL(&xTaskQueueMutex); //Need to call deletion callbacks outside critical section
3667 if (pxTCB != NULL) { //Call deletion callbacks and free TCB memory
3668 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
3669 prvDeleteTLS( pxTCB );
3671 prvDeleteTCB( pxTCB );
3675 mtCOVERAGE_TEST_MARKER();
3676 break; //No TCB found that could be freed by this core, break out of loop
3680 #endif /* vTaskDelete */
3682 /*-----------------------------------------------------------*/
3684 //This should be called with the taskqueuemutex grabbed. -JD
/*
 * Insert core xCoreID's current task into the delayed list (or, when the
 * wake time has wrapped past the current tick count, the overflow delayed
 * list), keyed by xTimeToWake.  Pulls xNextTaskUnblockTime forward when
 * this task now wakes soonest.
 */
3685 static void prvAddCurrentTaskToDelayedList( const BaseType_t xCoreID, const TickType_t xTimeToWake )
3687 /* The list item will be inserted in wake time order. */
3688 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xCoreID ]->xGenericListItem ), xTimeToWake );
3690 if( xTimeToWake < xTickCount )
3692 traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();
3693 /* Wake time has overflowed. Place this item in the overflow list. */
3694 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB[ xCoreID ]->xGenericListItem ) );
3698 traceMOVED_TASK_TO_DELAYED_LIST();
3699 /* The wake time has not overflowed, so the current block list is used. */
3700 vListInsert( pxDelayedTaskList, &( pxCurrentTCB[ xCoreID ]->xGenericListItem ) );
3702 /* If the task entering the blocked state was placed at the head of the
3703 list of blocked tasks then xNextTaskUnblockTime needs to be updated
too. */
3705 if( xTimeToWake < xNextTaskUnblockTime )
3707 xNextTaskUnblockTime = xTimeToWake;
3711 mtCOVERAGE_TEST_MARKER();
3715 /*-----------------------------------------------------------*/
3717 BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
3721 pxTCB = prvGetTCBFromHandle( xTask );
3723 return pxTCB->xCoreID;
3725 /*-----------------------------------------------------------*/
3728 #if ( configUSE_TRACE_FACILITY == 1 )
/*
 * Fill pxTaskStatusArray with one TaskStatus_t entry per task on pxList,
 * marking each with eState (suspended tasks whose event list item is still
 * linked are reported as Blocked).  Returns the number of entries written.
 */
3730 static UBaseType_t prvListTaskWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
3732 volatile TCB_t *pxNextTCB, *pxFirstTCB;
3733 UBaseType_t uxTask = 0;
3735 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
3737 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );
3739 /* Populate an TaskStatus_t structure within the
3740 pxTaskStatusArray array for each task that is referenced from
3741 pxList. See the definition of TaskStatus_t in task.h for the
3742 meaning of each TaskStatus_t structure member. */
3745 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
3747 pxTaskStatusArray[ uxTask ].xHandle = ( TaskHandle_t ) pxNextTCB;
3748 pxTaskStatusArray[ uxTask ].pcTaskName = ( const char * ) &( pxNextTCB->pcTaskName [ 0 ] );
3749 pxTaskStatusArray[ uxTask ].xTaskNumber = pxNextTCB->uxTCBNumber;
3750 pxTaskStatusArray[ uxTask ].eCurrentState = eState;
3751 pxTaskStatusArray[ uxTask ].uxCurrentPriority = pxNextTCB->uxPriority;
3753 #if ( INCLUDE_vTaskSuspend == 1 )
3755 /* If the task is in the suspended list then there is a chance
3756 it is actually just blocked indefinitely - so really it should
3757 be reported as being in the Blocked state. */
3758 if( eState == eSuspended )
3760 if( listLIST_ITEM_CONTAINER( &( pxNextTCB->xEventListItem ) ) != NULL )
3762 pxTaskStatusArray[ uxTask ].eCurrentState = eBlocked;
3766 #endif /* INCLUDE_vTaskSuspend */
3768 #if ( configUSE_MUTEXES == 1 )
3770 pxTaskStatusArray[ uxTask ].uxBasePriority = pxNextTCB->uxBasePriority;
3774 pxTaskStatusArray[ uxTask ].uxBasePriority = 0;
3778 #if ( configGENERATE_RUN_TIME_STATS == 1 )
3780 pxTaskStatusArray[ uxTask ].ulRunTimeCounter = pxNextTCB->ulRunTimeCounter;
3784 pxTaskStatusArray[ uxTask ].ulRunTimeCounter = 0;
3788 #if ( portSTACK_GROWTH > 0 )
3790 pxTaskStatusArray[ uxTask ].usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxNextTCB->pxEndOfStack );
3794 pxTaskStatusArray[ uxTask ].usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxNextTCB->pxStack );
3800 } while( pxNextTCB != pxFirstTCB );
3804 mtCOVERAGE_TEST_MARKER();
3810 #endif /* configUSE_TRACE_FACILITY */
3811 /*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )

	/*
	 * Count how many bytes at the never-used end of a task's stack still
	 * hold the fill pattern, walking from pucStackByte in the direction
	 * opposite to stack growth, and return that count in StackType_t words.
	 * The visible text never incremented ulCount (so the result would
	 * always be 0); the loop-body increment is restored per upstream
	 * FreeRTOS V8.2.0.
	 */
	static uint32_t prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
	{
	uint32_t ulCount = 0U;

		while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
		{
			pucStackByte -= portSTACK_GROWTH;
			ulCount++;
		}

		ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */

		return ( uint32_t ) ulCount;
	}

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) */
3831 /*-----------------------------------------------------------*/
#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )

	/*
	 * Return the minimum amount of stack (in words) that has ever been left
	 * unused by xTask (NULL = calling task), by scanning for the stack fill
	 * pattern from the end of the stack.  The visible text had lost the
	 * local declarations, braces and return; restored per upstream V8.2.0.
	 */
	UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
	{
	TCB_t *pxTCB;
	uint8_t *pucEndOfStack;
	UBaseType_t uxReturn;

		pxTCB = prvGetTCBFromHandle( xTask );

		#if portSTACK_GROWTH < 0
		{
			pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
		}
		#else
		{
			pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
		}
		#endif

		uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );

		return uxReturn;
	}

#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
3859 /*-----------------------------------------------------------*/
#if (INCLUDE_pxTaskGetStackStart == 1)

	/*
	 * Return the lowest address of the stack buffer allocated to xTask
	 * (NULL = calling task).  The visible text had lost the local
	 * declarations and return statement; restored.
	 */
	uint8_t* pxTaskGetStackStart( TaskHandle_t xTask)
	{
	TCB_t *pxTCB;
	uint8_t *uxReturn;

		pxTCB = prvGetTCBFromHandle( xTask );
		uxReturn = (uint8_t*)pxTCB->pxStack;

		return uxReturn;
	}

#endif /* INCLUDE_pxTaskGetStackStart */
3875 /*-----------------------------------------------------------*/
3877 #if ( INCLUDE_vTaskDelete == 1 )
/*
 * Free the memory held by a TCB, honouring how the stack/TCB were
 * allocated (dynamic only, static stack with dynamic TCB, or fully static)
 * and releasing newlib reentrancy state and MPU settings where those
 * features are enabled.
 */
3879 static void prvDeleteTCB( TCB_t *pxTCB )
3881 /* Free up the memory allocated by the scheduler for the task. It is up
3882 to the task to free any memory allocated at the application level. */
3883 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3885 _reclaim_reent( &( pxTCB->xNewLib_reent ) );
3887 #endif /* configUSE_NEWLIB_REENTRANT */
3889 #if ( portUSING_MPU_WRAPPERS == 1 )
3890 vPortReleaseTaskMPUSettings( &( pxTCB->xMPUSettings) );
3893 #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
3895 /* The task can only have been allocated dynamically - free both
3896 the stack and TCB. */
3897 vPortFreeAligned( pxTCB->pxStack );
3900 #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
3902 /* The task could have been allocated statically or dynamically, so
3903 check what was statically allocated before trying to free the
memory. */
3905 if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
3907 /* Both the stack and TCB were allocated dynamically, so both
must be freed. */
3909 vPortFreeAligned( pxTCB->pxStack );
3912 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
3914 /* Only the stack was statically allocated, so the TCB is the
3915 only memory that must be freed. */
3920 /* Neither the stack nor the TCB were allocated dynamically, so
3921 nothing needs to be freed. */
3922 configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
3923 portCLEAN_UP_TCB( pxTCB );
3924 mtCOVERAGE_TEST_MARKER();
3927 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
3930 #endif /* INCLUDE_vTaskDelete */
3931 /*-----------------------------------------------------------*/
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )

	/*
	 * Invoke each registered thread-local-storage deletion callback for the
	 * task being deleted, passing the slot index and stored pointer.  The
	 * original loop compared a signed int against a (UBaseType_t) cast,
	 * triggering a signed/unsigned comparison; the cast is dropped so both
	 * operands stay signed (identical iteration count).
	 */
	static void prvDeleteTLS( TCB_t *pxTCB )
	{
		configASSERT( pxTCB );
		for( int x = 0; x < configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
		{
			if (pxTCB->pvThreadLocalStoragePointersDelCallback[ x ] != NULL)	/* If a deletion callback was registered for this slot */
			{
				pxTCB->pvThreadLocalStoragePointersDelCallback[ x ](x, pxTCB->pvThreadLocalStoragePointers[ x ]);	/* Invoke it */
			}
		}
	}

#endif /* ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) */
3948 /*-----------------------------------------------------------*/
3950 static void prvResetNextTaskUnblockTime( void )
3954 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
3956 /* The new current delayed list is empty. Set
3957 xNextTaskUnblockTime to the maximum possible value so it is
3958 extremely unlikely that the
3959 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
3960 there is an item in the delayed list. */
3961 xNextTaskUnblockTime = portMAX_DELAY;
3965 /* The new current delayed list is not empty, get the value of
3966 the item at the head of the delayed list. This is the time at
3967 which the task at the head of the delayed list should be removed
3968 from the Blocked state. */
3969 ( pxTCB ) = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
3970 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xGenericListItem ) );
3973 /*-----------------------------------------------------------*/
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )

	/*
	 * Return the handle of the task running on the calling core.  The read
	 * of pxCurrentTCB is wrapped in a nested critical section so the core
	 * id and the TCB read cannot be separated by a context switch.
	 */
	TaskHandle_t xTaskGetCurrentTaskHandle( void )
	{
	TaskHandle_t xReturn;
	unsigned state;

		state = portENTER_CRITICAL_NESTED();
		xReturn = pxCurrentTCB[ xPortGetCoreID() ];
		portEXIT_CRITICAL_NESTED(state);

		return xReturn;
	}

	/*
	 * Return the handle of the task currently running on CPU cpuid, or NULL
	 * when cpuid is out of range.  Xtensa-specific: the pxCurrentTCB
	 * pointer is atomic so no lock is needed.  A guard against negative
	 * cpuid values is added so an invalid id cannot index out of bounds.
	 */
	TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t cpuid )
	{
	TaskHandle_t xReturn=NULL;

		if (cpuid >= 0 && cpuid < portNUM_PROCESSORS) {
			xReturn = pxCurrentTCB[ cpuid ];
		}

		return xReturn;
	}

#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
4003 /*-----------------------------------------------------------*/
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )

	/*
	 * Report the scheduler state for the calling core:
	 * taskSCHEDULER_NOT_STARTED, taskSCHEDULER_RUNNING or
	 * taskSCHEDULER_SUSPENDED.  The visible text had lost the local
	 * declarations, braces and return; restored.
	 */
	BaseType_t xTaskGetSchedulerState( void )
	{
	BaseType_t xReturn;
	unsigned state;

		state = portENTER_CRITICAL_NESTED();
		if( xSchedulerRunning == pdFALSE )
		{
			xReturn = taskSCHEDULER_NOT_STARTED;
		}
		else
		{
			if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
			{
				xReturn = taskSCHEDULER_RUNNING;
			}
			else
			{
				xReturn = taskSCHEDULER_SUSPENDED;
			}
		}
		portEXIT_CRITICAL_NESTED(state);

		return xReturn;
	}

#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
4034 /*-----------------------------------------------------------*/
4036 #if ( configUSE_MUTEXES == 1 )
/* Raise the priority of the task holding a mutex (pxMutexHolder) to that of
the calling task, implementing priority inheritance.  Called from the queue
code when the current task is about to block on a mutex held by a
lower-priority task.  If the holder is on a ready list it is removed and
re-added so it lands on the list for its new (inherited) priority.
NOTE(review): nesting xTaskQueueMutex inside xTickCountMutex here - confirm
this lock order matches the rest of the file to avoid deadlock. */
4038 void vTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
4040 TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
4042 taskENTER_CRITICAL(&xTickCountMutex);
4043 /* If the mutex was given back by an interrupt while the queue was
4044 locked then the mutex holder might now be NULL. */
4045 if( pxMutexHolder != NULL )
4047 if( pxTCB->uxPriority < pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
4049 taskENTER_CRITICAL(&xTaskQueueMutex);
4050 /* Adjust the mutex holder state to account for its new
4051 priority. Only reset the event list item value if the value is
4052 not being used for anything else. */
4053 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
4055 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4059 mtCOVERAGE_TEST_MARKER();
4062 /* If the task being modified is in the ready state it will need to
4063 be moved into a new list. */
4064 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxTCB->uxPriority ] ), &( pxTCB->xGenericListItem ) ) != pdFALSE )
4066 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
4068 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4072 mtCOVERAGE_TEST_MARKER();
4075 /* Inherit the priority before being moved into the new list. */
4076 pxTCB->uxPriority = pxCurrentTCB[ xPortGetCoreID() ]->uxPriority;
4077 prvReaddTaskToReadyList( pxTCB );
4081 /* Just inherit the priority. */
4082 pxTCB->uxPriority = pxCurrentTCB[ xPortGetCoreID() ]->uxPriority;
4085 taskEXIT_CRITICAL(&xTaskQueueMutex);
4087 traceTASK_PRIORITY_INHERIT( pxTCB, pxCurrentTCB[ xPortGetCoreID() ]->uxPriority );
4091 mtCOVERAGE_TEST_MARKER();
4096 mtCOVERAGE_TEST_MARKER();
4099 taskEXIT_CRITICAL(&xTickCountMutex);
4103 #endif /* configUSE_MUTEXES */
4104 /*-----------------------------------------------------------*/
4106 #if ( configUSE_MUTEXES == 1 )
/* Undo priority inheritance when the holder (pxMutexHolder) gives a mutex
back.  Decrements the holder's mutex count, and only restores the base
priority once no mutexes remain held.  Returns a BaseType_t indicating
whether the caller should perform a context switch (xReturn is initialised
to pdFALSE; the pdTRUE assignment is presumably in the elided disinherit
path - confirm against the full source). */
4108 BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
4110 TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
4111 BaseType_t xReturn = pdFALSE;
4112 taskENTER_CRITICAL(&xTickCountMutex);
4114 if( pxMutexHolder != NULL )
4116 configASSERT( pxTCB->uxMutexesHeld );
4117 ( pxTCB->uxMutexesHeld )--;
4119 if( pxTCB->uxPriority != pxTCB->uxBasePriority )
4121 /* Only disinherit if no other mutexes are held. */
4122 if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
4124 taskENTER_CRITICAL(&xTaskQueueMutex);
4125 /* A task can only have an inhertied priority if it holds
4126 the mutex. If the mutex is held by a task then it cannot be
4127 given from an interrupt, and if a mutex is given by the
4128 holding task then it must be the running state task. Remove
4129 the holding task from the ready list. */
4130 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
4132 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4136 mtCOVERAGE_TEST_MARKER();
4139 /* Disinherit the priority before adding the task into the
4141 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4142 pxTCB->uxPriority = pxTCB->uxBasePriority;
4144 /* Reset the event list item value. It cannot be in use for
4145 any other purpose if this task is running, and it must be
4146 running to give back the mutex. */
4147 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4148 prvReaddTaskToReadyList( pxTCB );
4150 /* Return true to indicate that a context switch is required.
4151 This is only actually required in the corner case whereby
4152 multiple mutexes were held and the mutexes were given back
4153 in an order different to that in which they were taken.
4154 If a context switch did not occur when the first mutex was
4155 returned, even if a task was waiting on it, then a context
4156 switch should occur when the last mutex is returned whether
4157 a task is waiting on it or not. */
4159 taskEXIT_CRITICAL(&xTaskQueueMutex);
4163 mtCOVERAGE_TEST_MARKER();
4168 mtCOVERAGE_TEST_MARKER();
4173 mtCOVERAGE_TEST_MARKER();
4176 taskEXIT_CRITICAL(&xTickCountMutex);
4180 #endif /* configUSE_MUTEXES */
4181 /*-----------------------------------------------------------*/
4183 /* For multicore, this assumes the vPortCPUAquireMutex is recursive, that is, it can be called multiple
4184 times and the release call will have to be called as many times for the mux to unlock. */
4186 /* Gotcha (which seems to be deliberate in FreeRTOS, according to
4187 http://www.freertos.org/FreeRTOS_Support_Forum_Archive/December_2012/freertos_PIC32_Bug_-_vTaskEnterCritical_6400806.html
4188 ) is that calling vTaskEnterCritical followed by vTaskExitCritical will leave the interrupts DISABLED when the scheduler
4189 is not running. Re-enabling the scheduler will re-enable the interrupts instead.
4191 For ESP32 FreeRTOS, vTaskEnterCritical implements both portENTER_CRITICAL and portENTER_CRITICAL_ISR.
4194 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
4196 #include "portmux_impl.h"
4198 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
/* ESP32 SMP critical-section entry: disables interrupts on this core, takes
the given spinlock (mux), and tracks nesting in the current TCB.  The
original interrupt level is captured BEFORE acquiring the mux and saved in
the TCB only on the first (outermost) nesting level, so the matching
vTaskExitCritical() can restore it.  Implements both portENTER_CRITICAL and
portENTER_CRITICAL_ISR; the debug build variant additionally records the
calling function/line for lock debugging. */
4199 void vTaskEnterCritical( portMUX_TYPE *mux, const char *function, int line )
4201 void vTaskEnterCritical( portMUX_TYPE *mux )
4204 BaseType_t oldInterruptLevel=0;
4205 BaseType_t schedulerRunning = xSchedulerRunning;
4206 if( schedulerRunning != pdFALSE )
4208 //Interrupts may already be disabled (because we're doing this recursively) but we can't get the interrupt level after
4209 //vPortCPUAquireMutex, because it also may mess with interrupts. Get it here first, then later figure out if we're nesting
4210 //and save for real there.
4211 oldInterruptLevel=portENTER_CRITICAL_NESTED();
4213 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
4214 vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT, function, line );
4216 vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT );
4219 if( schedulerRunning != pdFALSE )
4221 TCB_t *tcb = pxCurrentTCB[xPortGetCoreID()];
4222 BaseType_t newNesting = tcb->uxCriticalNesting + 1;
4223 tcb->uxCriticalNesting = newNesting;
4224 if( newNesting == 1 )
4226 //This is the first time we get called. Save original interrupt level.
4227 tcb->uxOldInterruptState = oldInterruptLevel;
4230 /* Original FreeRTOS comment, saved for reference:
4231 This is not the interrupt safe version of the enter critical
4232 function so assert() if it is being called from an interrupt
4233 context. Only API functions that end in "FromISR" can be used in an
4234 interrupt. Only assert if the critical nesting count is 1 to
4235 protect against recursive calls if the assert function also uses a
4236 critical section. */
4238 /* DISABLED in the esp32 port - because of SMP, For ESP32
4239 FreeRTOS, vTaskEnterCritical implements both
4240 portENTER_CRITICAL and portENTER_CRITICAL_ISR. vTaskEnterCritical
4241 has to be used in way more places than before, and some are called
4242 both from ISR as well as non-ISR code, thus we re-organized
4243 vTaskEnterCritical to also work in ISRs. */
4245 if( newNesting == 1 )
4247 portASSERT_IF_IN_ISR();
4254 mtCOVERAGE_TEST_MARKER();
4258 #endif /* portCRITICAL_NESTING_IN_TCB */
4259 /*-----------------------------------------------------------*/
4263 For ESP32 FreeRTOS, vTaskExitCritical implements both portEXIT_CRITICAL and portEXIT_CRITICAL_ISR.
4265 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
4267 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
/* ESP32 SMP critical-section exit: releases the spinlock (mux), decrements
the per-TCB nesting count, and restores the interrupt level that was saved
by the outermost vTaskEnterCritical() once nesting returns to zero.
Implements both portEXIT_CRITICAL and portEXIT_CRITICAL_ISR.  NOTE(review):
the decrement of 'nesting' falls in elided lines; confirm against the full
source. */
4268 void vTaskExitCritical( portMUX_TYPE *mux, const char *function, int line )
4270 void vTaskExitCritical( portMUX_TYPE *mux )
4273 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
4274 vPortCPUReleaseMutexIntsDisabled( mux, function, line );
4276 vPortCPUReleaseMutexIntsDisabled( mux );
4278 if( xSchedulerRunning != pdFALSE )
4280 TCB_t *tcb = pxCurrentTCB[xPortGetCoreID()];
4281 BaseType_t nesting = tcb->uxCriticalNesting;
4285 tcb->uxCriticalNesting = nesting;
4289 portEXIT_CRITICAL_NESTED(tcb->uxOldInterruptState);
4293 mtCOVERAGE_TEST_MARKER();
4298 mtCOVERAGE_TEST_MARKER();
4303 mtCOVERAGE_TEST_MARKER();
4307 #endif /* portCRITICAL_NESTING_IN_TCB */
4308 /*-----------------------------------------------------------*/
4310 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
/* Copy pcTaskName into pcBuffer, pad with spaces up to
configMAX_TASK_NAME_LEN - 1 characters so table columns align, terminate,
and return a pointer to the new end of string (the terminator position).
NOTE: pcBuffer must be large enough for configMAX_TASK_NAME_LEN bytes; the
unbounded strcpy relies on task names being capped at creation time. */
4312 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
4316 /* Start by copying the entire string. */
4317 strcpy( pcBuffer, pcTaskName );
4319 /* Pad the end of the string with spaces to ensure columns line up when
4321 for( x = strlen( pcBuffer ); x < ( configMAX_TASK_NAME_LEN - 1 ); x++ )
4323 pcBuffer[ x ] = ' ';
4327 pcBuffer[ x ] = 0x00;
4329 /* Return the new end of string. */
4330 return &( pcBuffer[ x ] );
4333 #endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
4334 /*-----------------------------------------------------------*/
4336 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
/* Write a human-readable table of all tasks (name, state, priority, stack
high-water mark, task number) into pcWriteBuffer.  Allocates a temporary
TaskStatus_t array sized to the current task count, fills it via
uxTaskGetSystemState(), then formats each entry with sprintf.  The caller
must supply a buffer large enough for the full table - there is no bounds
checking on pcWriteBuffer. */
4338 void vTaskList( char * pcWriteBuffer )
4340 TaskStatus_t *pxTaskStatusArray;
4341 volatile UBaseType_t uxArraySize, x;
4347 * This function is provided for convenience only, and is used by many
4348 * of the demo applications. Do not consider it to be part of the
4351 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
4352 * uxTaskGetSystemState() output into a human readable table that
4353 * displays task names, states and stack usage.
4355 * vTaskList() has a dependency on the sprintf() C library function that
4356 * might bloat the code size, use a lot of stack, and provide different
4357 * results on different platforms. An alternative, tiny, third party,
4358 * and limited functionality implementation of sprintf() is provided in
4359 * many of the FreeRTOS/Demo sub-directories in a file called
4360 * printf-stdarg.c (note printf-stdarg.c does not provide a full
4361 * snprintf() implementation!).
4363 * It is recommended that production systems call uxTaskGetSystemState()
4364 * directly to get access to raw stats data, rather than indirectly
4365 * through a call to vTaskList().
4369 /* Make sure the write buffer does not contain a string. */
4370 *pcWriteBuffer = 0x00;
4372 /* Take a snapshot of the number of tasks in case it changes while this
4373 function is executing. */
4374 uxArraySize = uxCurrentNumberOfTasks;
4376 /* Allocate an array index for each task. NOTE! if
4377 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4379 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );
4381 if( pxTaskStatusArray != NULL )
4383 /* Generate the (binary) data. */
4384 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );
4386 /* Create a human readable table from the binary data. */
4387 for( x = 0; x < uxArraySize; x++ )
4389 switch( pxTaskStatusArray[ x ].eCurrentState )
4391 case eReady: cStatus = tskREADY_CHAR;
4394 case eBlocked: cStatus = tskBLOCKED_CHAR;
4397 case eSuspended: cStatus = tskSUSPENDED_CHAR;
4400 case eDeleted: cStatus = tskDELETED_CHAR;
4403 default: /* Should not get here, but it is included
4404 to prevent static checking errors. */
4409 /* Write the task name to the string, padding with spaces so it
4410 can be printed in tabular form more easily. */
4411 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4413 /* Write the rest of the string. */
4414 sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber );
4415 pcWriteBuffer += strlen( pcWriteBuffer );
4418 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4419 is 0 then vPortFree() will be #defined to nothing. */
4420 vPortFree( pxTaskStatusArray );
4424 mtCOVERAGE_TEST_MARKER();
4428 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
4429 /*----------------------------------------------------------*/
4431 #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
/* Write a human-readable table of per-task run-time statistics (absolute
run-time counter and percentage of total) into pcWriteBuffer.  Like
vTaskList() this snapshots the task list into a heap-allocated
TaskStatus_t array, then formats with sprintf; the caller's buffer is not
bounds-checked.  Note the per-task counter is divided by
portNUM_PROCESSORS so percentages are relative to a single core's total
run time on this SMP port. */
4433 void vTaskGetRunTimeStats( char *pcWriteBuffer )
4435 TaskStatus_t *pxTaskStatusArray;
4436 volatile UBaseType_t uxArraySize, x;
4437 uint32_t ulTotalTime, ulStatsAsPercentage;
4439 #if( configUSE_TRACE_FACILITY != 1 )
4441 #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
4448 * This function is provided for convenience only, and is used by many
4449 * of the demo applications. Do not consider it to be part of the
4452 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
4453 * of the uxTaskGetSystemState() output into a human readable table that
4454 * displays the amount of time each task has spent in the Running state
4455 * in both absolute and percentage terms.
4457 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
4458 * function that might bloat the code size, use a lot of stack, and
4459 * provide different results on different platforms. An alternative,
4460 * tiny, third party, and limited functionality implementation of
4461 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
4462 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
4463 * a full snprintf() implementation!).
4465 * It is recommended that production systems call uxTaskGetSystemState()
4466 * directly to get access to raw stats data, rather than indirectly
4467 * through a call to vTaskGetRunTimeStats().
4470 /* Make sure the write buffer does not contain a string. */
4471 *pcWriteBuffer = 0x00;
4473 /* Take a snapshot of the number of tasks in case it changes while this
4474 function is executing. */
4475 uxArraySize = uxCurrentNumberOfTasks;
4477 /* Allocate an array index for each task. NOTE! If
4478 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4480 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );
4482 if( pxTaskStatusArray != NULL )
4484 /* Generate the (binary) data. */
4485 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
4487 /* For percentage calculations. */
4488 ulTotalTime /= 100UL;
4490 /* Avoid divide by zero errors. */
4491 if( ulTotalTime > 0 )
4493 /* Create a human readable table from the binary data. */
4494 for( x = 0; x < uxArraySize; x++ )
4496 /* What percentage of the total run time has the task used?
4497 This will always be rounded down to the nearest integer.
4498 ulTotalRunTimeDiv100 has already been divided by 100. */
4499 /* Also need to consider total run time of all */
4500 ulStatsAsPercentage = (pxTaskStatusArray[ x ].ulRunTimeCounter/portNUM_PROCESSORS)/ ulTotalTime;
4502 /* Write the task name to the string, padding with
4503 spaces so it can be printed in tabular form more
4505 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4507 if( ulStatsAsPercentage > 0UL )
4509 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4511 sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
4515 /* sizeof( int ) == sizeof( long ) so a smaller
4516 printf() library can be used. */
4517 sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage );
4523 /* If the percentage is zero here then the task has
4524 consumed less than 1% of the total run time. */
4525 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4527 sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
4531 /* sizeof( int ) == sizeof( long ) so a smaller
4532 printf() library can be used. */
4533 sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter );
4538 pcWriteBuffer += strlen( pcWriteBuffer );
4543 mtCOVERAGE_TEST_MARKER();
4546 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4547 is 0 then vPortFree() will be #defined to nothing. */
4548 vPortFree( pxTaskStatusArray );
4552 mtCOVERAGE_TEST_MARKER();
4556 #endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
4557 /*-----------------------------------------------------------*/
/* Return the current task's event-list-item value and reset that item to
its normal priority-based value (configMAX_PRIORITIES - priority) so the
item can be reused with queues and semaphores.  Done inside a critical
section because the event list item is shared scheduler state. */
4559 TickType_t uxTaskResetEventItemValue( void )
4561 TickType_t uxReturn;
4562 taskENTER_CRITICAL(&xTaskQueueMutex);
4563 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
4565 /* Reset the event list item to its normal value - so it can be used with
4566 queues and semaphores. */
4567 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4568 taskEXIT_CRITICAL(&xTaskQueueMutex);
4572 /*-----------------------------------------------------------*/
4574 #if ( configUSE_MUTEXES == 1 )
/* Increment the held-mutex count of the task running on this core and
return its TCB (as the new mutex holder).  Returns NULL when called before
any task exists, e.g. if xSemaphoreCreateMutex() runs before the scheduler
has a current task. */
4576 void *pvTaskIncrementMutexHeldCount( void )
4580 /* If xSemaphoreCreateMutex() is called before any tasks have been created
4581 then pxCurrentTCB will be NULL. */
4582 taskENTER_CRITICAL(&xTaskQueueMutex);
4583 if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
4585 ( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
4587 curTCB = pxCurrentTCB[ xPortGetCoreID() ];
4588 taskEXIT_CRITICAL(&xTaskQueueMutex);
4593 #endif /* configUSE_MUTEXES */
4594 /*-----------------------------------------------------------*/
4596 #if( configUSE_TASK_NOTIFICATIONS == 1 )
/* Wait for the calling task's notification value to become non-zero, using
the task notification as a lightweight counting semaphore.  Blocks for up
to xTicksToWait ticks if the value is zero (indefinitely for portMAX_DELAY
when INCLUDE_vTaskSuspend is enabled).  On exit the notification value is
either zeroed (xClearCountOnExit != pdFALSE, binary-semaphore style) or
decremented (counting-semaphore style); the pre-decrement value is
returned - zero means the wait timed out. */
4598 uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
4600 TickType_t xTimeToWake;
4603 taskENTER_CRITICAL(&xTaskQueueMutex);
4605 /* Only block if the notification count is not already non-zero. */
4606 if( pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue == 0UL )
4608 /* Mark this task as waiting for a notification. */
4609 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eWaitingNotification;
4611 if( xTicksToWait > ( TickType_t ) 0 )
4613 /* The task is going to block. First it must be removed
4614 from the ready list. */
4615 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
4617 /* The current task must be in a ready list, so there is
4618 no need to check, and the port reset macro can be called
4620 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
4624 mtCOVERAGE_TEST_MARKER();
4627 #if ( INCLUDE_vTaskSuspend == 1 )
4629 if( xTicksToWait == portMAX_DELAY )
4631 /* Add the task to the suspended task list instead
4632 of a delayed task list to ensure the task is not
4633 woken by a timing event. It will block
4635 traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
4636 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
4640 /* Calculate the time at which the task should be
4641 woken if no notification events occur. This may
4642 overflow but this doesn't matter, the scheduler will
4644 xTimeToWake = xTickCount + xTicksToWait;
4645 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
4648 #else /* INCLUDE_vTaskSuspend */
4650 /* Calculate the time at which the task should be
4651 woken if the event does not occur. This may
4652 overflow but this doesn't matter, the scheduler will
4654 xTimeToWake = xTickCount + xTicksToWait;
4655 prvAddCurrentTaskToDelayedList( xTimeToWake );
4657 #endif /* INCLUDE_vTaskSuspend */
4659 /* All ports are written to allow a yield in a critical
4660 section (some will yield immediately, others wait until the
4661 critical section exits) - but it is not something that
4662 application code should ever do. */
4663 portYIELD_WITHIN_API();
4667 mtCOVERAGE_TEST_MARKER();
4672 mtCOVERAGE_TEST_MARKER();
4675 taskEXIT_CRITICAL(&xTaskQueueMutex);
4677 taskENTER_CRITICAL(&xTaskQueueMutex);
4679 ulReturn = pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue;
4681 if( ulReturn != 0UL )
4683 if( xClearCountOnExit != pdFALSE )
4685 pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue = 0UL;
4689 ( pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue )--;
4694 mtCOVERAGE_TEST_MARKER();
4697 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eNotWaitingNotification;
4699 taskEXIT_CRITICAL(&xTaskQueueMutex);
4704 #endif /* configUSE_TASK_NOTIFICATIONS */
4705 /*-----------------------------------------------------------*/
4707 #if( configUSE_TASK_NOTIFICATIONS == 1 )
/* Full-featured wait for a task notification.  Clears ulBitsToClearOnEntry
in the calling task's notification value before waiting, blocks for up to
xTicksToWait ticks if no notification is pending, then optionally reports
the notification value via pulNotificationValue and clears
ulBitsToClearOnExit (only if a notification was actually received).
Returns whether a notification arrived (pdFALSE path visible at the
"notification was not received" branch; the pdTRUE assignments fall in
elided lines - confirm against the full source). */
4709 BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
4711 TickType_t xTimeToWake;
4714 taskENTER_CRITICAL(&xTaskQueueMutex);
4716 /* Only block if a notification is not already pending. */
4717 if( pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState != eNotified )
4719 /* Clear bits in the task's notification value as bits may get
4720 set by the notifying task or interrupt. This can be used to
4721 clear the value to zero. */
4722 pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue &= ~ulBitsToClearOnEntry;
4724 /* Mark this task as waiting for a notification. */
4725 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eWaitingNotification;
4727 if( xTicksToWait > ( TickType_t ) 0 )
4729 /* The task is going to block. First it must be removed
4730 from the ready list. */
4731 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
4733 /* The current task must be in a ready list, so there is
4734 no need to check, and the port reset macro can be called
4736 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
4740 mtCOVERAGE_TEST_MARKER();
4743 #if ( INCLUDE_vTaskSuspend == 1 )
4745 if( xTicksToWait == portMAX_DELAY )
4747 /* Add the task to the suspended task list instead
4748 of a delayed task list to ensure the task is not
4749 woken by a timing event. It will block
4751 traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
4752 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
4756 /* Calculate the time at which the task should be
4757 woken if no notification events occur. This may
4758 overflow but this doesn't matter, the scheduler will
4760 xTimeToWake = xTickCount + xTicksToWait;
4761 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
4764 #else /* INCLUDE_vTaskSuspend */
4766 /* Calculate the time at which the task should be
4767 woken if the event does not occur. This may
4768 overflow but this doesn't matter, the scheduler will
4770 xTimeToWake = xTickCount + xTicksToWait;
4771 prvAddCurrentTaskToDelayedList( xTimeToWake );
4773 #endif /* INCLUDE_vTaskSuspend */
4775 /* All ports are written to allow a yield in a critical
4776 section (some will yield immediately, others wait until the
4777 critical section exits) - but it is not something that
4778 application code should ever do. */
4779 portYIELD_WITHIN_API();
4783 mtCOVERAGE_TEST_MARKER();
4788 mtCOVERAGE_TEST_MARKER();
4791 taskEXIT_CRITICAL(&xTaskQueueMutex);
4793 taskENTER_CRITICAL(&xTaskQueueMutex);
4795 if( pulNotificationValue != NULL )
4797 /* Output the current notification value, which may or may not
4799 *pulNotificationValue = pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue;
4802 /* If eNotifyValue is set then either the task never entered the
4803 blocked state (because a notification was already pending) or the
4804 task unblocked because of a notification. Otherwise the task
4805 unblocked because of a timeout. */
4806 if( pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState == eWaitingNotification )
4808 /* A notification was not received. */
4813 /* A notification was already pending or a notification was
4814 received while the task was waiting. */
4815 pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue &= ~ulBitsToClearOnExit;
4819 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eNotWaitingNotification;
4821 taskEXIT_CRITICAL(&xTaskQueueMutex);
4826 #endif /* configUSE_TASK_NOTIFICATIONS */
4827 /*-----------------------------------------------------------*/
4829 #if( configUSE_TASK_NOTIFICATIONS == 1 )
/* Send a notification to xTaskToNotify, updating its notification value
according to eAction (set bits, increment, overwrite, or write-if-empty).
If the target task was blocked waiting for a notification it is moved to
the ready list, and a yield is requested on whichever core should run it
(this core via portYIELD_WITHIN_API, or a cross-core yield via
taskYIELD_OTHER_CORE).  Returns pdPASS except when
eSetValueWithoutOverwrite finds a notification already pending (the
pdFAIL assignment is presumably in the elided "value could not be
written" branch - confirm against the full source). */
4831 BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction )
4834 eNotifyValue eOriginalNotifyState;
4835 BaseType_t xReturn = pdPASS;
4837 configASSERT( xTaskToNotify );
4838 pxTCB = ( TCB_t * ) xTaskToNotify;
4840 taskENTER_CRITICAL(&xTaskQueueMutex);
4842 eOriginalNotifyState = pxTCB->eNotifyState;
4844 pxTCB->eNotifyState = eNotified;
4849 pxTCB->ulNotifiedValue |= ulValue;
4853 ( pxTCB->ulNotifiedValue )++;
4856 case eSetValueWithOverwrite :
4857 pxTCB->ulNotifiedValue = ulValue;
4860 case eSetValueWithoutOverwrite :
4861 if( eOriginalNotifyState != eNotified )
4863 pxTCB->ulNotifiedValue = ulValue;
4867 /* The value could not be written to the task. */
4873 /* The task is being notified without its notify value being
4879 /* If the task is in the blocked state specifically to wait for a
4880 notification then unblock it now. */
4881 if( eOriginalNotifyState == eWaitingNotification )
4883 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
4884 prvAddTaskToReadyList( pxTCB );
4886 /* The task should not have been on an event list. */
4887 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
4889 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
4891 /* The notified task has a priority above the currently
4892 executing task so a yield is required. */
4893 portYIELD_WITHIN_API();
4895 else if ( pxTCB->xCoreID != xPortGetCoreID() )
4897 taskYIELD_OTHER_CORE(pxTCB->xCoreID, pxTCB->uxPriority);
4901 mtCOVERAGE_TEST_MARKER();
4906 mtCOVERAGE_TEST_MARKER();
4909 taskEXIT_CRITICAL(&xTaskQueueMutex);
4914 #endif /* configUSE_TASK_NOTIFICATIONS */
4915 /*-----------------------------------------------------------*/
4917 #if( configUSE_TASK_NOTIFICATIONS == 1 )
/* ISR-safe variant of xTaskNotify().  Updates the target task's
notification value per eAction, and if the task was blocked waiting for a
notification either readies it immediately or, when this core's scheduler
is suspended, parks it on this core's xPendingReadyList to be readied at
resume time.  Instead of yielding directly, *pxHigherPriorityTaskWoken is
set pdTRUE (if provided) when the woken task outranks the task running on
this core; a cross-core yield is still requested directly for the other
core. */
4919 BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken )
4922 eNotifyValue eOriginalNotifyState;
4923 BaseType_t xReturn = pdPASS;
4925 configASSERT( xTaskToNotify );
4927 pxTCB = ( TCB_t * ) xTaskToNotify;
4929 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
4932 eOriginalNotifyState = pxTCB->eNotifyState;
4934 pxTCB->eNotifyState = eNotified;
4939 pxTCB->ulNotifiedValue |= ulValue;
4943 ( pxTCB->ulNotifiedValue )++;
4946 case eSetValueWithOverwrite :
4947 pxTCB->ulNotifiedValue = ulValue;
4950 case eSetValueWithoutOverwrite :
4951 if( eOriginalNotifyState != eNotified )
4953 pxTCB->ulNotifiedValue = ulValue;
4957 /* The value could not be written to the task. */
4963 /* The task is being notified without its notify value being
4969 /* If the task is in the blocked state specifically to wait for a
4970 notification then unblock it now. */
4971 if( eOriginalNotifyState == eWaitingNotification )
4973 /* The task should not have been on an event list. */
4974 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
4976 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
4978 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
4979 prvAddTaskToReadyList( pxTCB );
4983 /* The delayed and ready lists cannot be accessed, so hold
4984 this task pending until the scheduler is resumed. */
4985 vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
4988 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
4990 /* The notified task has a priority above the currently
4991 executing task so a yield is required. */
4992 if( pxHigherPriorityTaskWoken != NULL )
4994 *pxHigherPriorityTaskWoken = pdTRUE;
4997 else if ( pxTCB->xCoreID != xPortGetCoreID() )
4999 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
5003 mtCOVERAGE_TEST_MARKER();
5007 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
5012 #endif /* configUSE_TASK_NOTIFICATIONS */
5013 /*-----------------------------------------------------------*/
5015 #if( configUSE_TASK_NOTIFICATIONS == 1 )
/* ISR-safe "give": increments the target task's notification value as if
giving a counting semaphore (the lightweight counterpart of
ulTaskNotifyTake()).  Wake-up and yield handling mirror
xTaskNotifyFromISR(): a blocked waiter is readied (or queued on this
core's pending-ready list while the scheduler is suspended), and
*pxHigherPriorityTaskWoken / a cross-core yield request signals the
required context switch. */
5017 void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
5020 eNotifyValue eOriginalNotifyState;
5022 configASSERT( xTaskToNotify );
5025 pxTCB = ( TCB_t * ) xTaskToNotify;
5027 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
5029 eOriginalNotifyState = pxTCB->eNotifyState;
5030 pxTCB->eNotifyState = eNotified;
5032 /* 'Giving' is equivalent to incrementing a count in a counting
5034 ( pxTCB->ulNotifiedValue )++;
5036 /* If the task is in the blocked state specifically to wait for a
5037 notification then unblock it now. */
5038 if( eOriginalNotifyState == eWaitingNotification )
5040 /* The task should not have been on an event list. */
5041 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
5043 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
5045 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
5046 prvAddTaskToReadyList( pxTCB );
5050 /* The delayed and ready lists cannot be accessed, so hold
5051 this task pending until the scheduler is resumed. */
5052 vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
5055 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
5057 /* The notified task has a priority above the currently
5058 executing task so a yield is required. */
5059 if( pxHigherPriorityTaskWoken != NULL )
5061 *pxHigherPriorityTaskWoken = pdTRUE;
5064 else if ( pxTCB->xCoreID != xPortGetCoreID() )
5066 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
5070 mtCOVERAGE_TEST_MARKER();
5074 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
5077 #endif /* configUSE_TASK_NOTIFICATIONS */
5079 #if ( configENABLE_TASK_SNAPSHOT == 1 )
/* Record one task's snapshot (TCB pointer, top-of-stack, end-of-stack) into
pxTaskSnapshotArray at index *uxTask.  A NULL pxTCB is skipped.  Which
field supplies the stack end depends on the port's stack growth
direction.  NOTE(review): the increment of *uxTask falls in elided lines -
confirm against the full source. */
5080 static void prvTaskGetSnapshot( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, TCB_t *pxTCB )
5082 if (pxTCB == NULL) {
5085 pxTaskSnapshotArray[ *uxTask ].pxTCB = pxTCB;
5086 pxTaskSnapshotArray[ *uxTask ].pxTopOfStack = (StackType_t *)pxTCB->pxTopOfStack;
5087 #if( portSTACK_GROWTH < 0 )
5089 pxTaskSnapshotArray[ *uxTask ].pxEndOfStack = pxTCB->pxEndOfStack;
5093 pxTaskSnapshotArray[ *uxTask ].pxEndOfStack = pxTCB->pxStack;
/* Walk every TCB on pxList and append a snapshot for each into
pxTaskSnapshotArray via prvTaskGetSnapshot(), stopping early when *uxTask
reaches uxArraySize.  Uses the first-owner marker to detect when the
circular list walk has wrapped around. */
5099 static void prvTaskGetSnapshotsFromList( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, const UBaseType_t uxArraySize, List_t *pxList )
5101 TCB_t *pxNextTCB, *pxFirstTCB;
5103 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
5105 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );
5108 if( *uxTask >= uxArraySize )
5111 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
5112 prvTaskGetSnapshot( pxTaskSnapshotArray, uxTask, pxNextTCB );
5113 } while( pxNextTCB != pxFirstTCB );
5117 mtCOVERAGE_TEST_MARKER();
/* Fill pxTaskSnapshotArray (capacity uxArraySize) with snapshots of every
task in the system: all ready lists (highest priority first), both delayed
lists, each core's pending-ready list, and - when configured - the
waiting-termination and suspended lists.  *pxTcbSz receives sizeof(TCB_t)
so an external debugger/core-dump tool can interpret the raw TCBs.
Intended for post-mortem/panic handling; it reads scheduler lists without
locking, so it presumes the scheduler is halted - confirm at call sites. */
5121 UBaseType_t uxTaskGetSnapshotAll( TaskSnapshot_t * const pxTaskSnapshotArray, const UBaseType_t uxArraySize, UBaseType_t * const pxTcbSz )
5123 UBaseType_t uxTask = 0, i = 0;
5126 *pxTcbSz = sizeof(TCB_t);
5127 /* Fill in an TaskStatus_t structure with information on each
5128 task in the Ready state. */
5129 i = configMAX_PRIORITIES;
5133 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &( pxReadyTasksLists[ i ] ) );
5134 } while( i > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
5136 /* Fill in an TaskStatus_t structure with information on each
5137 task in the Blocked state. */
5138 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, ( List_t * ) pxDelayedTaskList );
5139 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, ( List_t * ) pxOverflowDelayedTaskList );
5140 for (i = 0; i < portNUM_PROCESSORS; i++) {
5141 if( uxTask >= uxArraySize )
5143 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &( xPendingReadyList[ i ] ) );
5146 #if( INCLUDE_vTaskDelete == 1 )
5148 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &xTasksWaitingTermination );
5152 #if ( INCLUDE_vTaskSuspend == 1 )
5154 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &xSuspendedTaskList );
5162 #ifdef FREERTOS_MODULE_TEST
5163 #include "tasks_test_access_functions.h"