2 FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.
5 VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
7 This file is part of the FreeRTOS distribution.
9 FreeRTOS is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License (version 2) as published by the
11 Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
13 ***************************************************************************
14 >>! NOTE: The modification to the GPL is included to allow you to !<<
15 >>! distribute a combined work that includes FreeRTOS without being !<<
16 >>! obliged to provide the source code for proprietary components !<<
17 >>! outside of the FreeRTOS kernel. !<<
18 ***************************************************************************
20 FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
21 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
22 FOR A PARTICULAR PURPOSE. Full license text is available on the following
23 link: http://www.freertos.org/a00114.html
25 ***************************************************************************
27 * FreeRTOS provides completely free yet professionally developed, *
28 * robust, strictly quality controlled, supported, and cross *
29 * platform software that is more than just the market leader, it *
30 * is the industry's de facto standard. *
32 * Help yourself get started quickly while simultaneously helping *
33 * to support the FreeRTOS project by purchasing a FreeRTOS *
34 * tutorial book, reference manual, or both: *
35 * http://www.FreeRTOS.org/Documentation *
37 ***************************************************************************
39 http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
40 the FAQ page "My application does not run, what could be wrong?". Have you
41 defined configASSERT()?
43 http://www.FreeRTOS.org/support - In return for receiving this top quality
44 embedded software for free we request you assist our global community by
45 participating in the support forum.
47 http://www.FreeRTOS.org/training - Investing in training allows your team to
48 be as productive as possible as early as possible. Now you can receive
49 FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
50 Ltd, and the world's leading authority on the world's leading RTOS.
52 http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
53 including FreeRTOS+Trace - an indispensable productivity tool, a DOS
54 compatible FAT file system, and our tiny thread aware UDP/IP stack.
56 http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
57 Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
59 http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
60 Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
61 licenses offer ticketed support, indemnification and commercial middleware.
63 http://www.SafeRTOS.com - High Integrity Systems also provide a safety
64 engineered and independently SIL3 certified version for use in safety and
65 mission critical applications that require provable dependability.
70 /* Standard includes. */
74 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
75 all the API functions to use the MPU wrappers. That should only be done when
76 task.h is included from an application file. */
77 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
79 #include "rom/ets_sys.h"
80 #include "esp_newlib.h"
81 #include "esp_panic.h"
83 /* FreeRTOS includes. */
87 #include "StackMacros.h"
88 #include "portmacro.h"
91 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the
92 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
93 header files above, but not in this file, in order to generate the correct
94 privileged Vs unprivileged linkage and placement. */
95 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
97 /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
98 functions but without including stdio.h here. */
99 #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
100 /* At the bottom of this file are two optional functions that can be used
101 to generate human readable text from the raw data generated by the
102 uxTaskGetSystemState() function. Note the formatting functions are provided
103 for convenience only, and are NOT considered part of the kernel. */
105 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
107 /* Sanity check the configuration. */
108 #if configUSE_TICKLESS_IDLE != 0
109 #if INCLUDE_vTaskSuspend != 1
110 #error INCLUDE_vTaskSuspend must be set to 1 if configUSE_TICKLESS_IDLE is not set to 0
111 #endif /* INCLUDE_vTaskSuspend */
112 #endif /* configUSE_TICKLESS_IDLE */
/*
 * Defines the size, in bytes, of the stack allocated to the idle task.
 */
#define tskIDLE_STACK_SIZE	configIDLE_TASK_STACK_SIZE
#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define taskYIELD_IF_USING_PREEMPTION()
#else
	#define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/* Value that can be assigned to the eNotifyState member of the TCB. */
typedef enum
{
	eNotWaitingNotification = 0,	/* Task is not waiting on a notification. */
	eWaitingNotification,			/* Task is blocked waiting for a notification. */
	eNotified						/* A notification has arrived and not yet been consumed. */
} eNotifyValue;
138 /* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using
139 dynamically allocated RAM, in which case when any task is deleted it is known
140 that both the task's stack and TCB need to be freed. Sometimes the
141 FreeRTOSConfig.h settings only allow a task to be created using statically
142 allocated RAM, in which case when any task is deleted it is known that neither
143 the task's stack or TCB should be freed. Sometimes the FreeRTOSConfig.h
144 settings allow a task to be created using either statically or dynamically
145 allocated RAM, in which case a member of the TCB is used to record whether the
146 stack and/or TCB were allocated statically or dynamically, so when a task is
147 deleted the RAM that was allocated dynamically is freed again and no attempt is
148 made to free the RAM that was allocated statically.
149 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a
150 task to be created using either statically or dynamically allocated RAM. Note
151 that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with
152 a statically allocated stack and a dynamically allocated TCB. */
153 #define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE ( ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) || ( portUSING_MPU_WRAPPERS == 1 ) )
154 #define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
155 #define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
156 #define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
159 * Task control block. A task control block (TCB) is allocated for each task,
160 * and stores task state information, including a pointer to the task's context
161 * (the task's run time environment, including register values)
163 typedef struct tskTaskControlBlock
165 volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
167 #if ( portUSING_MPU_WRAPPERS == 1 )
168 xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
171 ListItem_t xGenericListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
172 ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
173 UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
174 StackType_t *pxStack; /*< Points to the start of the stack. */
175 char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
176 BaseType_t xCoreID; /*< Core this task is pinned to */
177 /* If this moves around (other than pcTaskName size changes), please change the define in xtensa_vectors.S as well. */
178 #if ( portSTACK_GROWTH > 0 || configENABLE_TASK_SNAPSHOT == 1 )
179 StackType_t *pxEndOfStack; /*< Points to the end of the stack on architectures where the stack grows up from low memory. */
182 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
183 UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
184 uint32_t uxOldInterruptState; /*< Interrupt state before the outer taskEnterCritical was called */
187 #if ( configUSE_TRACE_FACILITY == 1 )
188 UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
189 UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
192 #if ( configUSE_MUTEXES == 1 )
193 UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
194 UBaseType_t uxMutexesHeld;
197 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
198 TaskHookFunction_t pxTaskTag;
201 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
202 void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
203 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
204 TlsDeleteCallbackFunction_t pvThreadLocalStoragePointersDelCallback[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
208 #if ( configGENERATE_RUN_TIME_STATS == 1 )
209 uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
212 #if ( configUSE_NEWLIB_REENTRANT == 1 )
213 /* Allocate a Newlib reent structure that is specific to this task.
214 Note Newlib support has been included by popular demand, but is not
215 used by the FreeRTOS maintainers themselves. FreeRTOS is not
216 responsible for resulting newlib operation. User must be familiar with
217 newlib and must provide system-wide implementations of the necessary
218 stubs. Be warned that (at the time of writing) the current newlib design
219 implements a system-wide malloc() that must be provided with locks. */
220 struct _reent xNewLib_reent;
223 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
224 volatile uint32_t ulNotifiedValue;
225 volatile eNotifyValue eNotifyState;
228 /* See the comments above the definition of
229 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
230 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
231 uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
236 /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
237 below to enable the use of older kernel aware debuggers. */
238 typedef tskTCB TCB_t;
240 #if __GNUC_PREREQ(4, 6)
241 _Static_assert(sizeof(StaticTask_t) == sizeof(TCB_t), "StaticTask_t != TCB_t");
245 * Some kernel aware debuggers require the data the debugger needs access to to
246 * be global, rather than file scope.
248 #ifdef portREMOVE_STATIC_QUALIFIER
252 /*lint -e956 A manual analysis and inspection has been used to determine which
253 static variables must be declared volatile. */
255 PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB[ portNUM_PROCESSORS ] = { NULL };
257 /* Lists for ready and blocked tasks. --------------------*/
258 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
259 PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
260 PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
261 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
262 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
263 PRIVILEGED_DATA static List_t xPendingReadyList[ portNUM_PROCESSORS ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
265 #if ( INCLUDE_vTaskDelete == 1 )
267 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. Protected by xTaskQueueMutex.*/
268 PRIVILEGED_DATA static volatile UBaseType_t uxTasksDeleted = ( UBaseType_t ) 0U;
272 #if ( INCLUDE_vTaskSuspend == 1 )
274 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
278 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
280 PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle[portNUM_PROCESSORS] = {NULL}; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
284 /* Other file private variables. --------------------------------*/
285 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
286 PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) 0U;
287 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
288 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
289 PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
290 PRIVILEGED_DATA static volatile BaseType_t xYieldPending[portNUM_PROCESSORS] = {pdFALSE};
291 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
292 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
293 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = portMAX_DELAY;
295 /* Context switches are held pending while the scheduler is suspended. Also,
296 interrupts must not manipulate the xGenericListItem of a TCB, or any of the
297 lists the xGenericListItem can be referenced from, if the scheduler is suspended.
298 If an interrupt needs to unblock a task while the scheduler is suspended then it
299 moves the task's event list item into the xPendingReadyList, ready for the
300 kernel to move the task from the pending ready list into the real ready list
301 when the scheduler is unsuspended. The pending ready list itself can only be
302 accessed from a critical section. */
303 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
305 /* For now, we use just one mux for all the critical sections. ToDo: give everything a bit more granularity;
306 that could improve performance by not needlessly spinning in spinlocks for unrelated resources. */
307 PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
308 PRIVILEGED_DATA static portMUX_TYPE xTickCountMutex = portMUX_INITIALIZER_UNLOCKED;
310 #if ( configGENERATE_RUN_TIME_STATS == 1 )
312 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime[portNUM_PROCESSORS] = {0U}; /*< Holds the value of a timer/counter the last time a task was switched in on a particular core. */
313 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
318 // per-CPU flags indicating that we are doing context switch, it is used by apptrace and sysview modules
319 // in order to avoid calls of vPortYield from traceTASK_SWITCHED_IN/OUT when waiting
320 // for locks to be free or for host to read full trace buffer
321 PRIVILEGED_DATA static volatile BaseType_t xSwitchingContext[ portNUM_PROCESSORS ] = { pdFALSE };
325 /* Debugging and trace facilities private variables and macros. ------------*/
/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE	( 0xa5U )
/*
 * Macros used by vListTask to indicate which state a task is in.
 */
#define tskBLOCKED_CHAR		( 'B' )
#define tskREADY_CHAR		( 'R' )
#define tskDELETED_CHAR		( 'D' )
#define tskSUSPENDED_CHAR	( 'S' )
341 /*-----------------------------------------------------------*/
344 #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
346 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
347 performed in a generic way that is not optimised to any particular
348 microcontroller architecture. */
/* uxTopReadyPriority holds the priority of the highest priority ready
state task.  It is only ever ratcheted upwards here; it is walked back
down by taskSELECT_HIGHEST_PRIORITY_TASK(). */
#define taskRECORD_READY_PRIORITY( uxPriority ) \
{ \
	if( ( uxPriority ) > uxTopReadyPriority ) \
	{ \
		uxTopReadyPriority = ( uxPriority ); \
	} \
} /* taskRECORD_READY_PRIORITY */
360 /*-----------------------------------------------------------*/
/* Walk down from uxTopReadyPriority to the first non-empty ready list, then
rotate through that list so equal-priority tasks share the CPU round-robin. */
#define taskSELECT_HIGHEST_PRIORITY_TASK() \
{ \
	/* Find the highest priority queue that contains ready tasks. */ \
	while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopReadyPriority ] ) ) ) \
	{ \
		configASSERT( uxTopReadyPriority ); \
		--uxTopReadyPriority; \
	} \
 \
	/* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
	the same priority get an equal share of the processor time. */ \
	listGET_OWNER_OF_NEXT_ENTRY( xTaskGetCurrentTaskHandle(), &( pxReadyTasksLists[ uxTopReadyPriority ] ) ); \
} /* taskSELECT_HIGHEST_PRIORITY_TASK */
376 /*-----------------------------------------------------------*/
378 /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
379 they are only required when a port optimised method of task selection is
381 #define taskRESET_READY_PRIORITY( uxPriority )
382 #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
384 #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
386 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
387 performed in a way that is tailored to the particular microcontroller
388 architecture being used. */
390 /* A port optimised version is provided. Call the port defined macros. */
391 #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )
393 /*-----------------------------------------------------------*/
/* Port optimised selection: the port supplies portGET_HIGHEST_PRIORITY()
(typically a count-leading-zeros instruction) to find the highest ready
priority in one step. */
#define taskSELECT_HIGHEST_PRIORITY_TASK() \
{ \
UBaseType_t uxTopPriority; \
 \
	/* Find the highest priority queue that contains ready tasks. */ \
	portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
	configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
	listGET_OWNER_OF_NEXT_ENTRY( xTaskGetCurrentTaskHandle(), &( pxReadyTasksLists[ uxTopPriority ] ) ); \
} /* taskSELECT_HIGHEST_PRIORITY_TASK() */
405 /*-----------------------------------------------------------*/
407 /* A port optimised version is provided, call it only if the TCB being reset
408 is being referenced from a ready list. If it is referenced from a delayed
409 or suspended list then it won't be in a ready list. */
#define taskRESET_READY_PRIORITY( uxPriority ) \
{ \
	if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
	{ \
		portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
	} \
}
418 #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
420 /*-----------------------------------------------------------*/
/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
count overflows. */
#define taskSWITCH_DELAYED_LISTS() \
{ \
	List_t *pxTemp; \
 \
	/* The delayed tasks list should be empty when the lists are switched. */ \
	configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \
 \
	pxTemp = pxDelayedTaskList; \
	pxDelayedTaskList = pxOverflowDelayedTaskList; \
	pxOverflowDelayedTaskList = pxTemp; \
	xNumOfOverflows++; \
	prvResetNextTaskUnblockTime(); \
}
438 /*-----------------------------------------------------------*/
/*
 * Place the task represented by pxTCB into the appropriate ready list for
 * the task.  It is inserted at the end of the list.
 * Wrapped in do/while( 0 ) so the multi-statement macro behaves as a single
 * statement in all contexts (e.g. an unbraced if/else).
 */
#define prvAddTaskToReadyList( pxTCB ) \
	do { \
		traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
		taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
		vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xGenericListItem ) ); \
	} while( 0 )
/*
 * Place the task represented by pxTCB which has been in a ready list before
 * into the appropriate ready list for the task.
 * It is inserted at the end of the list.
 * Wrapped in do/while( 0 ) so the multi-statement macro behaves as a single
 * statement in all contexts (e.g. an unbraced if/else).
 */
#define prvReaddTaskToReadyList( pxTCB ) \
	do { \
		traceREADDED_TASK_TO_READY_STATE( pxTCB ); \
		taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
		vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xGenericListItem ) ); \
	} while( 0 )
457 /*-----------------------------------------------------------*/
459 #define tskCAN_RUN_HERE( cpuid ) ( cpuid==xPortGetCoreID() || cpuid==tskNO_AFFINITY )
/*
 * Several functions take an TaskHandle_t parameter that can optionally be NULL,
 * where NULL is used to indicate that the handle of the currently executing
 * task should be used in place of the parameter.  This macro simply checks to
 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
 */
/* ToDo: See if this still works for multicore. */
#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? ( TCB_t * ) xTaskGetCurrentTaskHandle() : ( TCB_t * ) ( pxHandle ) )
470 /* The item value of the event list item is normally used to hold the priority
471 of the task to which it belongs (coded to allow it to be held in reverse
472 priority order). However, it is occasionally borrowed for other purposes. It
473 is important its value is not updated due to a task priority change while it is
474 being used for another purpose. The following bit definition is used to inform
475 the scheduler that the value should not be changed - in which case it is the
476 responsibility of whichever module is using the value to ensure it gets set back
477 to its original value when it is released. */
#if configUSE_16_BIT_TICKS == 1
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x8000U
#else
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x80000000UL
#endif
484 /* Callback function prototypes. --------------------------*/
485 #if configCHECK_FOR_STACK_OVERFLOW > 0
486 extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
489 #if configUSE_TICK_HOOK > 0
490 extern void vApplicationTickHook( void );
493 #if portFIRST_TASK_HOOK
494 extern void vPortFirstTaskHook(TaskFunction_t taskfn);
498 /* File private functions. --------------------------------*/
501 * Utility task that simply returns pdTRUE if the task referenced by xTask is
502 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
503 * is in any other state.
505 * Caller must hold xTaskQueueMutex before calling this function.
507 #if ( INCLUDE_vTaskSuspend == 1 )
508 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
509 #endif /* INCLUDE_vTaskSuspend */
512 * Utility to ready all the lists used by the scheduler. This is called
513 * automatically upon the creation of the first task.
515 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
518 * The idle task, which as all tasks is implemented as a never ending loop.
519 * The idle task is automatically created and added to the ready lists upon
520 * creation of the first user task.
522 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
523 * language extensions. The equivalent prototype for this function is:
525 * void prvIdleTask( void *pvParameters );
528 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
531 * Utility to free all memory allocated by the scheduler to hold a TCB,
532 * including the stack pointed to by the TCB.
534 * This does not free memory allocated by the task itself (i.e. memory
535 * allocated by calls to pvPortMalloc from within the tasks application code).
537 #if ( INCLUDE_vTaskDelete == 1 )
539 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
543 //Function to call the Thread Local Storage Pointer Deletion Callbacks. Will be
544 //called during task deletion before prvDeleteTCB is called.
545 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
546 static void prvDeleteTLS( TCB_t *pxTCB );
550 * Used only by the idle task. This checks to see if anything has been placed
551 * in the list of tasks waiting to be deleted. If so the task is cleaned up
552 * and its TCB deleted.
554 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
557 * The currently executing task is entering the Blocked state. Add the task to
558 * either the current or the overflow delayed task list.
560 static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID, const TickType_t xTimeToWake ) PRIVILEGED_FUNCTION;
563 * Fills an TaskStatus_t structure with information on each task that is
564 * referenced from the pxList list (which may be a ready list, a delayed list,
565 * a suspended list, etc.).
567 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
568 * NORMAL APPLICATION CODE.
570 #if ( configUSE_TRACE_FACILITY == 1 )
572 static UBaseType_t prvListTaskWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
577 * When a task is created, the stack of the task is filled with a known value.
578 * This function determines the 'high water mark' of the task stack by
579 * determining how much of the stack remains at the original preset value.
581 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
583 static uint32_t prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
588 * Return the amount of time, in ticks, that will pass before the kernel will
589 * next move a task from the Blocked state to the Running state.
591 * This conditional compilation should use inequality to 0, not equality to 1.
592 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
593 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
594 * set to a value other than 1.
596 #if ( configUSE_TICKLESS_IDLE != 0 )
598 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
603 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
604 * will exit the Blocked state.
606 static void prvResetNextTaskUnblockTime( void );
608 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
611 * Helper function used to pad task names with spaces when printing out
612 * human readable tables of task information.
614 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName );
619 * Called after a Task_t structure has been allocated either statically or
620 * dynamically to fill in the structure's members.
622 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
623 const char * const pcName,
624 const uint32_t ulStackDepth,
625 void * const pvParameters,
626 UBaseType_t uxPriority,
627 TaskHandle_t * const pxCreatedTask,
629 const MemoryRegion_t * const xRegions, const BaseType_t xCoreID) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
632 * Called after a new task has been created and initialised to place the task
633 * under the control of the scheduler.
635 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, const BaseType_t xCoreID ) PRIVILEGED_FUNCTION;
639 /*-----------------------------------------------------------*/
642 * This routine tries to send an interrupt to another core if needed to make it execute a task
643 * of higher priority. We try to figure out if needed first by inspecting the pxTCB of the
644 * other CPU first. Specifically for Xtensa, we can do this because pxTCB is an atomic pointer. It
645 * is possible that it is inaccurate because the other CPU just did a task switch, but in that case
646 * at most a superfluous interrupt is generated.
648 void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
650 TCB_t *curTCB = pxCurrentTCB[xCoreID];
653 if (xCoreID != tskNO_AFFINITY) {
654 if ( curTCB->uxPriority < uxPriority ) {
655 vPortYieldOtherCore( xCoreID );
660 /* The task has no affinity. See if we can find a CPU to put it on.*/
661 for (i=0; i<portNUM_PROCESSORS; i++) {
662 if (i != xPortGetCoreID() && pxCurrentTCB[ i ]->uxPriority < uxPriority)
664 vPortYieldOtherCore( i );
671 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
673 TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pxTaskCode,
674 const char * const pcName,
675 const uint32_t ulStackDepth,
676 void * const pvParameters,
677 UBaseType_t uxPriority,
678 StackType_t * const puxStackBuffer,
679 StaticTask_t * const pxTaskBuffer,
680 const BaseType_t xCoreID )
683 TaskHandle_t xReturn;
685 configASSERT( portVALID_TCB_MEM(pxTaskBuffer) );
686 configASSERT( portVALID_STACK_MEM(puxStackBuffer) );
687 configASSERT( (xCoreID>=0 && xCoreID<portNUM_PROCESSORS) || (xCoreID==tskNO_AFFINITY) );
689 if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
691 /* The memory used for the task's TCB and stack are passed into this
692 function - use them. */
693 pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
694 pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
696 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
698 /* Tasks can be created statically or dynamically, so note this
699 task was created statically in case the task is later deleted. */
700 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
702 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
704 prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID );
705 prvAddNewTaskToReadyList( pxNewTCB, pxTaskCode, xCoreID );
715 #endif /* SUPPORT_STATIC_ALLOCATION */
716 /*-----------------------------------------------------------*/
718 #if( portUSING_MPU_WRAPPERS == 1 )
720 BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
723 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
725 configASSERT( pxTaskDefinition->puxStackBuffer );
727 if( pxTaskDefinition->puxStackBuffer != NULL )
729 /* Allocate space for the TCB. Where the memory comes from depends
730 on the implementation of the port malloc function and whether or
731 not static allocation is being used. */
732 pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );
734 if( pxNewTCB != NULL )
736 /* Store the stack location in the TCB. */
737 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
739 /* Tasks can be created statically or dynamically, so note
740 this task had a statically allocated stack in case it is
741 later deleted. The TCB was allocated dynamically. */
742 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
744 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
745 pxTaskDefinition->pcName,
746 pxTaskDefinition->usStackDepth,
747 pxTaskDefinition->pvParameters,
748 pxTaskDefinition->uxPriority,
749 pxCreatedTask, pxNewTCB,
750 pxTaskDefinition->xRegions,
753 prvAddNewTaskToReadyList( pxNewTCB, pxTaskDefinition->pvTaskCode, tskNO_AFFINITY );
761 #endif /* portUSING_MPU_WRAPPERS */
762 /*-----------------------------------------------------------*/
764 #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
/*
 * Create a task with dynamically allocated TCB and stack, pinned to the
 * core given by xCoreID (or tskNO_AFFINITY).  Allocation order depends on
 * portSTACK_GROWTH so that stack overflow runs away from the TCB rather
 * than into it.  On any allocation failure the partially allocated memory
 * is freed and errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY is returned.
 * NOTE(review): some closing braces / return statements are elided in this
 * view; the success return value is produced inside prvInitialiseNewTask's
 * caller path and is not visible here.
 */
766 BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pxTaskCode,
767 const char * const pcName,
768 const uint32_t usStackDepth,
769 void * const pvParameters,
770 UBaseType_t uxPriority,
771 TaskHandle_t * const pxCreatedTask,
772 const BaseType_t xCoreID )
777 /* If the stack grows down then allocate the stack then the TCB so the stack
778 does not grow into the TCB. Likewise if the stack grows up then allocate
779 the TCB then the stack. */
780 #if( portSTACK_GROWTH > 0 )
782 /* Allocate space for the TCB. Where the memory comes from depends on
783 the implementation of the port malloc function and whether or not static
784 allocation is being used. */
785 pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );
787 if( pxNewTCB != NULL )
789 /* Allocate space for the stack used by the task being created.
790 The base of the stack memory stored in the TCB so the task can
791 be deleted later if required. */
792 pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
794 if( pxNewTCB->pxStack == NULL )
796 /* Could not allocate the stack. Delete the allocated TCB. */
797 vPortFree( pxNewTCB );
802 #else /* portSTACK_GROWTH */
804 StackType_t *pxStack;
806 /* Allocate space for the stack used by the task being created. */
807 pxStack = ( StackType_t * ) pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
809 if( pxStack != NULL )
811 /* Allocate space for the TCB. */
812 pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) ); /*lint !e961 MISRA exception as the casts are only redundant for some paths. */
814 if( pxNewTCB != NULL )
816 /* Store the stack location in the TCB. */
817 pxNewTCB->pxStack = pxStack;
821 /* The stack cannot be used as the TCB was not created. Free
823 vPortFree( pxStack );
831 #endif /* portSTACK_GROWTH */
833 if( pxNewTCB != NULL )
835 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
837 /* Tasks can be created statically or dynamically, so note this
838 task was created dynamically in case it is later deleted. */
839 pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
841 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
843 prvInitialiseNewTask( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL, xCoreID );
844 prvAddNewTaskToReadyList( pxNewTCB, pxTaskCode, xCoreID );
849 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
855 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
856 /*-----------------------------------------------------------*/
/*
 * Common initialisation for a newly created task, shared by every
 * xTaskCreate* variant: fills the stack with the debug pattern, computes
 * the aligned top-of-stack, copies the (truncated) name, clamps and stores
 * the priority, records the core affinity (xCoreID), initialises the list
 * items, TLS pointers, notification state and Newlib reent, then calls
 * pxPortInitialiseStack() to fake an interrupted-context stack frame and
 * finally hands the TCB out through pxCreatedTask.
 * Note pxNewTCB is not a parameter visible here - it is referenced from
 * scope not shown in this view (presumably a preceding declaration/
 * parameter elided from this excerpt).
 */
858 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
859 const char * const pcName,
860 const uint32_t ulStackDepth,
861 void * const pvParameters,
862 UBaseType_t uxPriority,
863 TaskHandle_t * const pxCreatedTask,
865 const MemoryRegion_t * const xRegions, const BaseType_t xCoreID ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
867 StackType_t *pxTopOfStack;
870 #if( portUSING_MPU_WRAPPERS == 1 )
871 /* Should the task be created in privileged mode? */
872 BaseType_t xRunPrivileged;
873 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
875 xRunPrivileged = pdTRUE;
879 xRunPrivileged = pdFALSE;
/* Strip the privilege flag so uxPriority is a plain priority again. */
881 uxPriority &= ~portPRIVILEGE_BIT;
882 #endif /* portUSING_MPU_WRAPPERS == 1 */
884 /* Avoid dependency on memset() if it is not required. */
885 #if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
887 /* Fill the stack with a known value to assist debugging. */
888 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
890 #endif /* ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) ) */
892 /* Calculate the top of stack address. This depends on whether the stack
893 grows from high memory to low (as per the 80x86) or vice versa.
894 portSTACK_GROWTH is used to make the result positive or negative as required
896 #if( portSTACK_GROWTH < 0 )
/* Descending stack: top is the last element, rounded DOWN to the port's
alignment so pushes stay inside the allocated buffer. */
898 pxTopOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
899 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. */
901 /* Check the alignment of the calculated top of stack is correct. */
902 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
903 #if ( configENABLE_TASK_SNAPSHOT == 1 )
905 /* need stack end for core dumps */
906 pxNewTCB->pxEndOfStack = pxTopOfStack;
910 #else /* portSTACK_GROWTH */
912 pxTopOfStack = pxNewTCB->pxStack;
914 /* Check the alignment of the stack buffer is correct. */
915 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
917 /* The other extreme of the stack space is required if stack checking is
919 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
921 #endif /* portSTACK_GROWTH */
923 /* Store the task name in the TCB. */
924 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
926 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
928 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
929 configMAX_TASK_NAME_LEN characters just in case the memory after the
930 string is not accessible (extremely unlikely). */
931 if( pcName[ x ] == 0x00 )
937 mtCOVERAGE_TEST_MARKER();
941 /* Ensure the name string is terminated in the case that the string length
942 was greater or equal to configMAX_TASK_NAME_LEN. */
943 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
945 /* This is used as an array index so must ensure it's not too large. First
946 remove the privilege bit if one is present. */
947 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
949 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
953 mtCOVERAGE_TEST_MARKER();
956 pxNewTCB->uxPriority = uxPriority;
/* SMP extension: remember which core (or tskNO_AFFINITY) this task runs on. */
957 pxNewTCB->xCoreID = xCoreID;
958 #if ( configUSE_MUTEXES == 1 )
960 pxNewTCB->uxBasePriority = uxPriority;
961 pxNewTCB->uxMutexesHeld = 0;
963 #endif /* configUSE_MUTEXES */
965 vListInitialiseItem( &( pxNewTCB->xGenericListItem ) );
966 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
968 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
969 back to the containing TCB from a generic item in a list. */
970 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xGenericListItem ), pxNewTCB );
972 /* Event lists are always in priority order. */
973 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
974 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
976 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
978 pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
980 #endif /* portCRITICAL_NESTING_IN_TCB */
982 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
984 pxNewTCB->pxTaskTag = NULL;
986 #endif /* configUSE_APPLICATION_TASK_TAG */
988 #if ( configGENERATE_RUN_TIME_STATS == 1 )
990 pxNewTCB->ulRunTimeCounter = 0UL;
992 #endif /* configGENERATE_RUN_TIME_STATS */
994 #if ( portUSING_MPU_WRAPPERS == 1 )
996 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
1000 /* Avoid compiler warning about unreferenced parameter. */
1005 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
/* Clear all TLS slots (and their deletion callbacks, if enabled). */
1007 for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
1009 pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
1010 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1)
1011 pxNewTCB->pvThreadLocalStoragePointersDelCallback[ x ] = NULL;
1017 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
1019 pxNewTCB->ulNotifiedValue = 0;
1020 pxNewTCB->eNotifyState = eNotWaitingNotification;
1024 #if ( configUSE_NEWLIB_REENTRANT == 1 )
1026 /* Initialise this task's Newlib reent structure. */
1027 esp_reent_init(&pxNewTCB->xNewLib_reent);
1031 #if( INCLUDE_xTaskAbortDelay == 1 )
1033 pxNewTCB->ucDelayAborted = pdFALSE;
1037 /* Initialize the TCB stack to look as if the task was already running,
1038 but had been interrupted by the scheduler. The return address is set
1039 to the start of the task function. Once the stack has been initialised
1040 the top of stack variable is updated. */
1041 #if( portUSING_MPU_WRAPPERS == 1 )
1043 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1045 #else /* portUSING_MPU_WRAPPERS */
1047 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
1049 #endif /* portUSING_MPU_WRAPPERS */
1051 if( ( void * ) pxCreatedTask != NULL )
1053 /* Pass the handle out in an anonymous way. The handle can be used to
1054 change the created task's priority, delete the created task, etc.*/
1055 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
1059 mtCOVERAGE_TEST_MARKER();
1062 /*-----------------------------------------------------------*/
/*
 * Insert a freshly initialised TCB into the ready lists and, if required,
 * make it the running task.  SMP-aware: an unpinned task (tskNO_AFFINITY)
 * is placed on an idle core if one exists, otherwise on the core running
 * the lowest-priority task it can preempt, otherwise on the current core.
 * Performed inside the xTaskQueueMutex critical section; after releasing
 * it, a yield is requested on whichever core should now reschedule.
 */
1064 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, BaseType_t xCoreID )
1066 TCB_t *curTCB, *tcb0, *tcb1;
1068 /* Assure that xCoreID is valid or we'll have an out-of-bounds on pxCurrentTCB
1069 You will assert here if e.g. you only have one CPU enabled in menuconfig and
1070 are trying to start a task on core 1. */
1071 configASSERT( xCoreID == tskNO_AFFINITY || xCoreID < portNUM_PROCESSORS);
1073 /* Ensure interrupts don't access the task lists while the lists are being
1075 taskENTER_CRITICAL(&xTaskQueueMutex);
1077 uxCurrentNumberOfTasks++;
1079 // Determine which core this task starts on
1080 if ( xCoreID == tskNO_AFFINITY )
1082 if ( portNUM_PROCESSORS == 1 )
1088 // if the task has no affinity, put it on either core if nothing is currently scheduled there. Failing that,
1089 // put it on the core where it will preempt the lowest priority running task. If neither of these are true,
1090 // queue it on the currently running core.
1091 tcb0 = pxCurrentTCB[0];
1092 tcb1 = pxCurrentTCB[1];
1097 else if ( tcb1 == NULL )
1101 else if ( tcb0->uxPriority < pxNewTCB->uxPriority && tcb0->uxPriority < tcb1->uxPriority )
1105 else if ( tcb1->uxPriority < pxNewTCB->uxPriority )
1111 xCoreID = xPortGetCoreID(); // Both CPU have higher priority tasks running on them, so this won't run yet
1116 // If nothing is running on this core, put the new task there now
1117 if( pxCurrentTCB[ xCoreID ] == NULL )
1119 /* There are no other tasks, or all the other tasks are in
1120 the suspended state - make this the current task. */
1121 pxCurrentTCB[ xCoreID ] = pxNewTCB;
1123 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
1125 #if portFIRST_TASK_HOOK
/* ESP-IDF extension: notify the port of the very first task, core 0 only. */
1126 if ( xPortGetCoreID() == 0 ) {
1127 vPortFirstTaskHook(pxTaskCode);
1129 #endif /* portFIRST_TASK_HOOK */
1130 /* This is the first task to be created so do the preliminary
1131 initialisation required. We will not recover if this call
1132 fails, but we will report the failure. */
1133 prvInitialiseTaskLists();
1137 mtCOVERAGE_TEST_MARKER();
1142 /* If the scheduler is not already running, make this task the
1143 current task if it is the highest priority task to be created
1145 if( xSchedulerRunning == pdFALSE )
1147 /* Scheduler isn't running yet. We need to determine on which CPU to run this task.
1148 Schedule now if either nothing is scheduled yet or we can replace a task of lower prio. */
1149 if ( pxCurrentTCB[xCoreID] == NULL || pxCurrentTCB[xCoreID]->uxPriority <= pxNewTCB->uxPriority )
1151 pxCurrentTCB[xCoreID] = pxNewTCB;
1156 mtCOVERAGE_TEST_MARKER();
1162 #if ( configUSE_TRACE_FACILITY == 1 )
1164 /* Add a counter into the TCB for tracing only. */
1165 pxNewTCB->uxTCBNumber = uxTaskNumber;
1167 #endif /* configUSE_TRACE_FACILITY */
1168 traceTASK_CREATE( pxNewTCB );
1170 prvAddTaskToReadyList( pxNewTCB );
1172 portSETUP_TCB( pxNewTCB );
1175 taskEXIT_CRITICAL(&xTaskQueueMutex);
1177 if( xSchedulerRunning != pdFALSE )
1179 taskENTER_CRITICAL(&xTaskQueueMutex);
1181 curTCB = pxCurrentTCB[ xCoreID ];
1182 /* Scheduler is running. If the created task is of a higher priority than an executing task
1183 then it should run now.
1185 if( curTCB == NULL || curTCB->uxPriority < pxNewTCB->uxPriority )
/* Yield locally if the target core is this one, otherwise request a
cross-core yield so the other CPU picks up the new task. */
1187 if( xCoreID == xPortGetCoreID() )
1189 taskYIELD_IF_USING_PREEMPTION();
1192 taskYIELD_OTHER_CORE(xCoreID, pxNewTCB->uxPriority);
1197 mtCOVERAGE_TEST_MARKER();
1199 taskEXIT_CRITICAL(&xTaskQueueMutex);
1203 mtCOVERAGE_TEST_MARKER();
1206 /*-----------------------------------------------------------*/
1208 #if ( INCLUDE_vTaskDelete == 1 )
/*
 * Delete a task (NULL = the calling task).  SMP behaviour: if the target
 * is currently running on either core, or is pinned to the other core,
 * its memory is deferred to the Idle task via xTasksWaitingTermination;
 * otherwise TLS delete-callbacks run and the TCB/stack are freed
 * immediately, outside the critical section.  Finally a yield is forced
 * on whichever core was running the deleted task.
 */
1210 void vTaskDelete( TaskHandle_t xTaskToDelete )
1212 //The following vTaskDelete() is backported from FreeRTOS v9.0.0 and modified for SMP.
1213 //v9.0.0 vTaskDelete() will immediately free task memory if the task being deleted is
1214 //NOT currently running and not pinned to the other core. Otherwise, freeing of task memory
1215 //will still be delegated to the Idle Task.
1218 int core = xPortGetCoreID(); //Current core
1219 UBaseType_t free_now; //Flag to indicate if task memory can be freed immediately
1221 taskENTER_CRITICAL(&xTaskQueueMutex);
1223 /* If null is passed in here then it is the calling task that is
1225 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
1227 /* Remove task from the ready list. */
1228 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1230 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1234 mtCOVERAGE_TEST_MARKER();
1237 /* Is the task waiting on an event also? */
1238 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1240 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1244 mtCOVERAGE_TEST_MARKER();
1247 /* Increment the uxTaskNumber also so kernel aware debuggers can
1248 detect that the task lists need re-generating. This is done before
1249 portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
1253 //If task to be deleted is currently running on either core or is pinned to the other core. Let Idle free memory
1254 if( pxTCB == pxCurrentTCB[ core ] ||
1255 (portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !core ]) ||
1256 (portNUM_PROCESSORS > 1 && pxTCB->xCoreID == (!core)) )
1258 /* Deleting a currently running task. This cannot complete
1259 within the task itself, as a context switch to another task is
1260 required. Place the task in the termination list. The idle task
1261 will check the termination list and free up any memory allocated
1262 by the scheduler for the TCB and stack of the deleted task. */
1263 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xGenericListItem ) );
1265 /* Increment the ucTasksDeleted variable so the idle task knows
1266 there is a task that has been deleted and that it should therefore
1267 check the xTasksWaitingTermination list. */
1270 /* The pre-delete hook is primarily for the Windows simulator,
1271 in which Windows specific clean up operations are performed,
1272 after which it is not possible to yield away from this task -
1273 hence xYieldPending is used to latch that a context switch is
1275 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
1277 free_now = pdFALSE; //Let Idle Task free task memory
1279 else //Task is not currently running and not pinned to the other core
1281 --uxCurrentNumberOfTasks;
1283 /* Reset the next expected unblock time in case it referred to
1284 the task that has just been deleted. */
1285 prvResetNextTaskUnblockTime();
1286 free_now = pdTRUE; //Set flag to free task memory immediately
1289 traceTASK_DELETE( pxTCB );
1291 taskEXIT_CRITICAL(&xTaskQueueMutex);
1293 if(free_now == pdTRUE){ //Free task memory. Outside critical section due to deletion callbacks
1294 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
1295 prvDeleteTLS( pxTCB ); //Run deletion callbacks before deleting TCB
1297 prvDeleteTCB( pxTCB ); //Must only be called after del cb
1300 /* Force a reschedule if it is the currently running task that has just
1302 if( xSchedulerRunning != pdFALSE )
1304 //No mux; no harm done if this misfires. The deleted task won't get scheduled anyway.
1305 if( pxTCB == pxCurrentTCB[ core ] ) //If task was currently running on this core
1307 configASSERT( uxSchedulerSuspended[ core ] == 0 );
1309 /* The pre-delete hook is primarily for the Windows simulator,
1310 in which Windows specific clean up operations are performed,
1311 after which it is not possible to yield away from this task -
1312 hence xYieldPending is used to latch that a context switch is
1314 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[xPortGetCoreID()] );
1315 portYIELD_WITHIN_API();
1317 else if ( portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !core] ) //If task was currently running on the other core
1319 /* if task is running on the other CPU, force a yield on that CPU to take it off */
1320 vPortYieldOtherCore( !core );
1324 mtCOVERAGE_TEST_MARKER();
1329 #endif /* INCLUDE_vTaskDelete */
1330 /*-----------------------------------------------------------*/
1332 #if ( INCLUDE_vTaskDelayUntil == 1 )
1334 /* ToDo: Make this multicore-compatible. */
/*
 * Block the calling task until the absolute tick *pxPreviousWakeTime +
 * xTimeIncrement, giving a fixed-frequency periodic wakeup that does not
 * accumulate drift (unlike vTaskDelay).  Tick-counter overflow is handled
 * explicitly below.  *pxPreviousWakeTime is updated for the next call.
 * In this SMP port the original vTaskSuspendAll()/xTaskResumeAll() pair
 * has been replaced by the xTaskQueueMutex critical section (see the
 * commented-out calls), so xAlreadyYielded stays pdFALSE and the final
 * portYIELD_WITHIN_API() always runs when a delay was queued.
 */
1335 void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
1337 TickType_t xTimeToWake;
1338 BaseType_t xAlreadyYielded=pdFALSE, xShouldDelay = pdFALSE;
1340 configASSERT( pxPreviousWakeTime );
1341 configASSERT( ( xTimeIncrement > 0U ) );
1342 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
1344 taskENTER_CRITICAL(&xTaskQueueMutex);
1345 // vTaskSuspendAll();
1347 /* Minor optimisation. The tick count cannot change in this
1349 // portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
1350 const TickType_t xConstTickCount = xTickCount;
1351 // portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
1353 /* Generate the tick time at which the task wants to wake. */
1354 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
1356 if( xConstTickCount < *pxPreviousWakeTime )
1358 /* The tick count has overflowed since this function was
1359 lasted called. In this case the only time we should ever
1360 actually delay is if the wake time has also overflowed,
1361 and the wake time is greater than the tick time. When this
1362 is the case it is as if neither time had overflowed. */
1363 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
1365 xShouldDelay = pdTRUE;
1369 mtCOVERAGE_TEST_MARKER();
1374 /* The tick time has not overflowed. In this case we will
1375 delay if either the wake time has overflowed, and/or the
1376 tick time is less than the wake time. */
1377 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
1379 xShouldDelay = pdTRUE;
1383 mtCOVERAGE_TEST_MARKER();
1387 /* Update the wake time ready for the next call. */
1388 *pxPreviousWakeTime = xTimeToWake;
1390 if( xShouldDelay != pdFALSE )
1392 traceTASK_DELAY_UNTIL();
1394 /* Remove the task from the ready list before adding it to the
1395 blocked list as the same list item is used for both lists. */
1396 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1398 /* The current task must be in a ready list, so there is
1399 no need to check, and the port reset macro can be called
1401 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
1405 mtCOVERAGE_TEST_MARKER();
1408 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
1412 mtCOVERAGE_TEST_MARKER();
1415 // xAlreadyYielded = xTaskResumeAll();
1416 taskEXIT_CRITICAL(&xTaskQueueMutex);
1418 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1419 have put ourselves to sleep. */
1420 if( xAlreadyYielded == pdFALSE )
1422 portYIELD_WITHIN_API();
1426 mtCOVERAGE_TEST_MARKER();
1430 #endif /* INCLUDE_vTaskDelayUntil */
1431 /*-----------------------------------------------------------*/
1433 #if ( INCLUDE_vTaskDelay == 1 )
/*
 * Block the calling task for xTicksToDelay ticks relative to the current
 * tick count (xTickCount + xTicksToDelay; overflow is benign here as the
 * delayed lists handle wraparound).  A delay of zero only forces a
 * reschedule.  As in vTaskDelayUntil, the suspend-all/resume-all pair has
 * been replaced by the xTaskQueueMutex critical section in this SMP port,
 * so xAlreadyYielded remains pdFALSE and portYIELD_WITHIN_API() runs.
 */
1434 void vTaskDelay( const TickType_t xTicksToDelay )
1436 TickType_t xTimeToWake;
1437 BaseType_t xAlreadyYielded = pdFALSE;
1439 /* A delay time of zero just forces a reschedule. */
1440 if( xTicksToDelay > ( TickType_t ) 0U )
1442 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
1443 taskENTER_CRITICAL(&xTaskQueueMutex);
1444 // vTaskSuspendAll();
1448 /* A task that is removed from the event list while the
1449 scheduler is suspended will not get placed in the ready
1450 list or removed from the blocked list until the scheduler
1453 This task cannot be in an event list as it is the currently
1456 /* Calculate the time to wake - this may overflow but this is
1458 // portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
1459 xTimeToWake = xTickCount + xTicksToDelay;
1460 // portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
1462 /* We must remove ourselves from the ready list before adding
1463 ourselves to the blocked list as the same list item is used for
1465 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1467 /* The current task must be in a ready list, so there is
1468 no need to check, and the port reset macro can be called
1470 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
1474 mtCOVERAGE_TEST_MARKER();
1476 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
1478 // xAlreadyYielded = xTaskResumeAll();
1479 taskEXIT_CRITICAL(&xTaskQueueMutex);
1483 mtCOVERAGE_TEST_MARKER();
1486 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1487 have put ourselves to sleep. */
1488 if( xAlreadyYielded == pdFALSE )
1490 portYIELD_WITHIN_API();
1494 mtCOVERAGE_TEST_MARKER();
1498 #endif /* INCLUDE_vTaskDelay */
1499 /*-----------------------------------------------------------*/
1501 #if ( INCLUDE_eTaskGetState == 1 )
/*
 * Return the scheduler state of xTask (eRunning / eBlocked / eSuspended /
 * eDeleted / eReady) by inspecting which kernel list currently contains
 * its generic list item.  SMP-aware: a task is eRunning if it is the
 * current task on EITHER core.  A task in the suspended list is only
 * genuinely eSuspended if it is not also waiting on an event list
 * (otherwise it is blocked with an infinite timeout).
 */
1502 eTaskState eTaskGetState( TaskHandle_t xTask )
1505 List_t *pxStateList;
1506 const TCB_t * const pxTCB = ( TCB_t * ) xTask;
1507 TCB_t * curTCBcurCore = xTaskGetCurrentTaskHandle();
1508 TCB_t * curTCBothrCore = xTaskGetCurrentTaskHandleForCPU(!xPortGetCoreID()); //Returns NULL if Unicore
1510 configASSERT( pxTCB );
1512 if( pxTCB == curTCBcurCore || pxTCB == curTCBothrCore )
1514 /* The task calling this function is querying its own state. */
1519 taskENTER_CRITICAL(&xTaskQueueMutex);
1521 pxStateList = ( List_t * ) listLIST_ITEM_CONTAINER( &( pxTCB->xGenericListItem ) );
1523 taskEXIT_CRITICAL(&xTaskQueueMutex);
1525 if( ( pxStateList == pxDelayedTaskList ) || ( pxStateList == pxOverflowDelayedTaskList ) )
1527 /* The task being queried is referenced from one of the Blocked
1532 #if ( INCLUDE_vTaskSuspend == 1 )
1533 else if( pxStateList == &xSuspendedTaskList )
1535 /* The task being queried is referenced from the suspended
1536 list. Is it genuinely suspended or is it block
1538 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
1540 eReturn = eSuspended;
1549 #if ( INCLUDE_vTaskDelete == 1 )
1550 else if( pxStateList == &xTasksWaitingTermination )
1552 /* The task being queried is referenced from the deleted
1558 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
1560 /* If the task is not in any other state, it must be in the
1561 Ready (including pending ready) state. */
1567 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1569 #endif /* INCLUDE_eTaskGetState */
1570 /*-----------------------------------------------------------*/
1572 #if ( INCLUDE_uxTaskPriorityGet == 1 )
/*
 * Return the current (possibly inherited) priority of xTask, or of the
 * calling task when xTask is NULL.  The read is performed inside the
 * xTaskQueueMutex critical section so the TCB cannot be deleted or its
 * priority changed mid-read.
 */
1573 UBaseType_t uxTaskPriorityGet( TaskHandle_t xTask )
1576 UBaseType_t uxReturn;
1578 taskENTER_CRITICAL(&xTaskQueueMutex);
1580 /* If null is passed in here then we are changing the
1581 priority of the calling function. */
1582 pxTCB = prvGetTCBFromHandle( xTask );
1583 uxReturn = pxTCB->uxPriority;
1585 taskEXIT_CRITICAL(&xTaskQueueMutex);
1590 #endif /* INCLUDE_uxTaskPriorityGet */
1591 /*-----------------------------------------------------------*/
1593 #if ( INCLUDE_uxTaskPriorityGet == 1 )
/*
 * ISR-safe variant of uxTaskPriorityGet(): identical lookup but entered
 * through the ISR form of the critical section so it may be called from
 * an interrupt handler.  NULL queries the priority of the interrupted
 * (calling) task.
 */
1594 UBaseType_t uxTaskPriorityGetFromISR( TaskHandle_t xTask )
1597 UBaseType_t uxReturn;
1599 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
1601 /* If null is passed in here then it is the priority of the calling
1602 task that is being queried. */
1603 pxTCB = prvGetTCBFromHandle( xTask );
1604 uxReturn = pxTCB->uxPriority;
1606 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
1611 #endif /* INCLUDE_uxTaskPriorityGet */
1612 /*-----------------------------------------------------------*/
1614 #if ( INCLUDE_vTaskPrioritySet == 1 )
/*
 * Change the priority of xTask (NULL = calling task) to uxNewPriority,
 * clamped to configMAX_PRIORITIES - 1.  With mutexes enabled only the
 * base priority changes while an inherited priority is in effect.  If the
 * change could make a different task the highest-priority runnable task,
 * a yield is requested - on this core or, SMP-specifically, on the core
 * the target task is pinned to (taskYIELD_OTHER_CORE).  If the task sits
 * in a ready list it is moved to the list for its new priority.
 */
1616 void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
1619 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
1620 BaseType_t xYieldRequired = pdFALSE;
1622 configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
1624 /* Ensure the new priority is valid. */
1625 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
1627 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
1631 mtCOVERAGE_TEST_MARKER();
1634 taskENTER_CRITICAL(&xTaskQueueMutex);
1636 /* If null is passed in here then it is the priority of the calling
1637 task that is being changed. */
1638 pxTCB = prvGetTCBFromHandle( xTask );
1640 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
1642 #if ( configUSE_MUTEXES == 1 )
/* Compare against the base priority so priority inheritance is not
disturbed by this call. */
1644 uxCurrentBasePriority = pxTCB->uxBasePriority;
1648 uxCurrentBasePriority = pxTCB->uxPriority;
1652 if( uxCurrentBasePriority != uxNewPriority )
1654 /* The priority change may have readied a task of higher
1655 priority than the calling task. */
1656 if( uxNewPriority > uxCurrentBasePriority )
1658 if( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] )
1660 /* The priority of a task other than the currently
1661 running task is being raised. Is the priority being
1662 raised above that of the running task? */
1663 if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
1665 xYieldRequired = pdTRUE;
1667 else if ( pxTCB->xCoreID != xPortGetCoreID() )
/* Task is pinned to the other core - let that core re-evaluate. */
1669 taskYIELD_OTHER_CORE( pxTCB->xCoreID, uxNewPriority );
1673 mtCOVERAGE_TEST_MARKER();
1678 /* The priority of the running task is being raised,
1679 but the running task must already be the highest
1680 priority task able to run so no yield is required. */
1683 else if( pxTCB == pxCurrentTCB[ xPortGetCoreID() ] )
1685 /* Setting the priority of the running task down means
1686 there may now be another task of higher priority that
1687 is ready to execute. */
1688 xYieldRequired = pdTRUE;
1692 /* Setting the priority of any other task down does not
1693 require a yield as the running task must be above the
1694 new priority of the task being modified. */
1697 /* Remember the ready list the task might be referenced from
1698 before its uxPriority member is changed so the
1699 taskRESET_READY_PRIORITY() macro can function correctly. */
1700 uxPriorityUsedOnEntry = pxTCB->uxPriority;
1702 #if ( configUSE_MUTEXES == 1 )
1704 /* Only change the priority being used if the task is not
1705 currently using an inherited priority. */
1706 if( pxTCB->uxBasePriority == pxTCB->uxPriority )
1708 pxTCB->uxPriority = uxNewPriority;
1712 mtCOVERAGE_TEST_MARKER();
1715 /* The base priority gets set whatever. */
1716 pxTCB->uxBasePriority = uxNewPriority;
1720 pxTCB->uxPriority = uxNewPriority;
1724 /* Only reset the event list item value if the value is not
1725 being used for anything else. */
1726 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
1728 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1732 mtCOVERAGE_TEST_MARKER();
1735 /* If the task is in the blocked or suspended list we need do
1736 nothing more than change it's priority variable. However, if
1737 the task is in a ready list it needs to be removed and placed
1738 in the list appropriate to its new priority. */
1739 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xGenericListItem ) ) != pdFALSE )
1741 /* The task is currently in its ready list - remove before adding
1742 it to it's new ready list. As we are in a critical section we
1743 can do this even if the scheduler is suspended. */
1744 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1746 /* It is known that the task is in its ready list so
1747 there is no need to check again and the port level
1748 reset macro can be called directly. */
1749 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
1753 mtCOVERAGE_TEST_MARKER();
1755 prvReaddTaskToReadyList( pxTCB );
1759 mtCOVERAGE_TEST_MARKER();
1762 if( xYieldRequired == pdTRUE )
1764 taskYIELD_IF_USING_PREEMPTION();
1768 mtCOVERAGE_TEST_MARKER();
1771 /* Remove compiler warning about unused variables when the port
1772 optimised task selection is not being used. */
1773 ( void ) uxPriorityUsedOnEntry;
1776 taskEXIT_CRITICAL(&xTaskQueueMutex);
1779 #endif /* INCLUDE_vTaskPrioritySet */
1780 /*-----------------------------------------------------------*/
1782 #if ( INCLUDE_vTaskSuspend == 1 )
/*
 * Move xTask (NULL = calling task) from whatever list it is on into
 * xSuspendedTaskList, removing it from any event list as well.  If the
 * task suspended itself and the scheduler is running, a yield follows; if
 * the scheduler is not running and every task is now suspended,
 * pxCurrentTCB for this core is reset to NULL (or a context switch is
 * made).  Suspending a different task also refreshes the next expected
 * unblock time in case it referred to the suspended task.
 */
1783 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
1788 taskENTER_CRITICAL(&xTaskQueueMutex);
1790 /* If null is passed in here then it is the running task that is
1792 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
1794 traceTASK_SUSPEND( pxTCB );
1796 /* Remove task from the ready/delayed list and place in the
1798 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1800 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1804 mtCOVERAGE_TEST_MARKER();
1807 /* Is the task waiting on an event also? */
1808 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1810 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1814 mtCOVERAGE_TEST_MARKER();
1816 traceMOVED_TASK_TO_SUSPENDED_LIST(pxTCB);
1817 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) );
/* Snapshot the running task before leaving the critical section so the
self-suspension check below is race-free. */
1818 curTCB = pxCurrentTCB[ xPortGetCoreID() ];
1820 taskEXIT_CRITICAL(&xTaskQueueMutex);
1822 if( pxTCB == curTCB )
1824 if( xSchedulerRunning != pdFALSE )
1826 /* The current task has just been suspended. */
1827 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
1828 portYIELD_WITHIN_API();
1832 /* The scheduler is not running, but the task that was pointed
1833 to by pxCurrentTCB has just been suspended and pxCurrentTCB
1834 must be adjusted to point to a different task. */
1835 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks )
1837 /* No other tasks are ready, so set pxCurrentTCB back to
1838 NULL so when the next task is created pxCurrentTCB will
1839 be set to point to it no matter what its relative priority
1841 taskENTER_CRITICAL(&xTaskQueueMutex);
1842 pxCurrentTCB[ xPortGetCoreID() ] = NULL;
1843 taskEXIT_CRITICAL(&xTaskQueueMutex);
1847 vTaskSwitchContext();
1853 if( xSchedulerRunning != pdFALSE )
1855 /* A task other than the currently running task was suspended,
1856 reset the next expected unblock time in case it referred to the
1857 task that is now in the Suspended state. */
1858 taskENTER_CRITICAL(&xTaskQueueMutex);
1860 prvResetNextTaskUnblockTime();
1862 taskEXIT_CRITICAL(&xTaskQueueMutex);
1866 mtCOVERAGE_TEST_MARKER();
1871 #endif /* INCLUDE_vTaskSuspend */
1872 /*-----------------------------------------------------------*/
1874 #if ( INCLUDE_vTaskSuspend == 1 )
/*
 * Return pdTRUE only if xTask is genuinely in the Suspended state: it
 * must be on xSuspendedTaskList, must NOT already be in this core's
 * pending-ready list (resumed from an ISR), and must not be attached to
 * any event list (which would mean it is merely blocked with an infinite
 * timeout).  Returns pdFALSE otherwise.
 */
1875 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
1877 BaseType_t xReturn = pdFALSE;
1878 const TCB_t * const pxTCB = ( TCB_t * ) xTask;
1880 /* Accesses xPendingReadyList so must be called from a critical
1881 section (caller is required to hold xTaskQueueMutex). */
1883 /* It does not make sense to check if the calling task is suspended. */
1884 configASSERT( xTask );
1886 /* Is the task being resumed actually in the suspended list? */
1887 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) ) != pdFALSE )
1889 /* Has the task already been resumed from within an ISR? */
1890 if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ xPortGetCoreID() ], &( pxTCB->xEventListItem ) ) == pdFALSE )
1892 /* Is it in the suspended list because it is in the Suspended
1893 state, or because it is blocked with no timeout? */
1894 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE )
1900 mtCOVERAGE_TEST_MARKER();
1905 mtCOVERAGE_TEST_MARKER();
1910 mtCOVERAGE_TEST_MARKER();
1914 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1916 #endif /* INCLUDE_vTaskSuspend */
1917 /*-----------------------------------------------------------*/
1919 #if ( INCLUDE_vTaskSuspend == 1 )
/* Moves a previously suspended task back onto a ready list. If the resumed
task can run on this core and has priority >= the current task's, a yield is
requested here; if it is pinned to the other core, that core is interrupted
via taskYIELD_OTHER_CORE(). Entire body runs under xTaskQueueMutex. */
1921 void vTaskResume( TaskHandle_t xTaskToResume )
1923 TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
1925 /* It does not make sense to resume the calling task. */
1926 configASSERT( xTaskToResume );
1928 taskENTER_CRITICAL(&xTaskQueueMutex);
1929 /* The parameter cannot be NULL as it is impossible to resume the
1930 currently executing task. */
1931 if( ( pxTCB != NULL ) && ( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] ) )
1934 if( prvTaskIsTaskSuspended( pxTCB ) == pdTRUE )
1936 traceTASK_RESUME( pxTCB );
1938 /* As we are in a critical section we can access the ready
1939 lists even if the scheduler is suspended. */
1940 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
1941 prvAddTaskToReadyList( pxTCB );
1943 /* We may have just resumed a higher priority task. */
1944 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
1946 /* This yield may not cause the task just resumed to run,
1947 but will leave the lists in the correct state for the
1949 taskYIELD_IF_USING_PREEMPTION();
1951 else if( pxTCB->xCoreID != xPortGetCoreID() )
/* Resumed task is pinned to another core — trigger a cross-core yield there. */
1953 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
1957 mtCOVERAGE_TEST_MARKER();
1962 mtCOVERAGE_TEST_MARKER();
1968 mtCOVERAGE_TEST_MARKER();
1970 taskEXIT_CRITICAL(&xTaskQueueMutex);
1973 #endif /* INCLUDE_vTaskSuspend */
1975 /*-----------------------------------------------------------*/
1977 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
/* ISR-safe variant of vTaskResume(). Returns pdTRUE when the caller should
request a context switch before exiting the ISR. If this core's scheduler is
suspended the task cannot be placed on a ready list directly, so it is parked
on this core's xPendingReadyList and drained later by xTaskResumeAll(). */
1979 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
1981 BaseType_t xYieldRequired = pdFALSE;
1982 TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
1984 configASSERT( xTaskToResume );
1986 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
1989 if( prvTaskIsTaskSuspended( pxTCB ) == pdTRUE )
1991 traceTASK_RESUME_FROM_ISR( pxTCB );
1993 /* Check the ready lists can be accessed. */
1994 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
1996 /* Ready lists can be accessed so move the task from the
1997 suspended list to the ready list directly. */
1998 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
1999 prvAddTaskToReadyList( pxTCB );
2001 if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2003 xYieldRequired = pdTRUE;
2005 else if ( pxTCB->xCoreID != xPortGetCoreID() )
2007 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
2011 mtCOVERAGE_TEST_MARKER();
2016 /* The delayed or ready lists cannot be accessed so the task
2017 is held in the pending ready list until the scheduler is
2019 vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
2024 mtCOVERAGE_TEST_MARKER();
2027 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
2029 return xYieldRequired;
2032 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
2033 /*-----------------------------------------------------------*/
/* Creates one pinned idle task per core (and the timer service task when
configUSE_TIMERS is enabled), then disables interrupts, zeroes the tick count
and hands control to the port layer via xPortStartScheduler(). On success
this function never returns. NOTE(review): declarations of xReturn and i are
not visible in this listing — confirm against the full source. */
2035 void vTaskStartScheduler( void )
2040 /* Add the per-core idle tasks at the lowest priority. */
2041 for ( i=0; i<portNUM_PROCESSORS; i++) {
2042 //Generate idle task name
2043 char cIdleName[configMAX_TASK_NAME_LEN];
2044 snprintf(cIdleName, configMAX_TASK_NAME_LEN, "IDLE%d", i);
2045 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2047 /* Create the idle task, storing its handle in xIdleTaskHandle so it can
2048 be returned by the xTaskGetIdleTaskHandle() function. */
2049 xReturn = xTaskCreatePinnedToCore( prvIdleTask, cIdleName, tskIDLE_STACK_SIZE, ( void * ) NULL, ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), &xIdleTaskHandle[i], i ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2053 /* Create the idle task without storing its handle. */
2054 xReturn = xTaskCreatePinnedToCore( prvIdleTask, cIdleName, tskIDLE_STACK_SIZE, ( void * ) NULL, ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), NULL, i); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2056 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2059 #if ( configUSE_TIMERS == 1 )
2061 if( xReturn == pdPASS )
2063 xReturn = xTimerCreateTimerTask();
2067 mtCOVERAGE_TEST_MARKER();
2070 #endif /* configUSE_TIMERS */
2072 if( xReturn == pdPASS )
2074 /* Interrupts are turned off here, to ensure a tick does not occur
2075 before or during the call to xPortStartScheduler(). The stacks of
2076 the created tasks contain a status word with interrupts switched on
2077 so interrupts will automatically get re-enabled when the first task
2079 portDISABLE_INTERRUPTS();
2082 xTickCount = ( TickType_t ) 0U;
2084 /* If configGENERATE_RUN_TIME_STATS is defined then the following
2085 macro must be defined to configure the timer/counter used to generate
2086 the run time counter time base. */
2087 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
2088 xSchedulerRunning = pdTRUE;
2090 /* Setting up the timer tick is hardware specific and thus in the
2091 portable interface. */
2092 if( xPortStartScheduler() != pdFALSE )
2094 /* Should not reach here as if the scheduler is running the
2095 function will not return. */
2099 /* Should only reach here if a task calls xTaskEndScheduler(). */
2104 /* This line will only be reached if the kernel could not be started,
2105 because there was not enough FreeRTOS heap to create the idle task
2106 or the timer task. */
2107 configASSERT( xReturn );
2110 /*-----------------------------------------------------------*/
/* Stops the kernel: disables interrupts, clears the scheduler-running flag
and asks the port layer to undo whatever xPortStartScheduler() set up. */
2112 void vTaskEndScheduler( void )
2114 /* Stop the scheduler interrupts and call the portable scheduler end
2115 routine so the original ISRs can be restored if necessary. The port
2116 layer must ensure interrupts enable bit is left in the correct state. */
2117 portDISABLE_INTERRUPTS();
2118 xSchedulerRunning = pdFALSE;
2119 vPortEndScheduler();
2121 /*----------------------------------------------------------*/
2124 #if ( configUSE_NEWLIB_REENTRANT == 1 )
2125 //Return global reent struct if FreeRTOS isn't running,
/* newlib hook: every libc call that needs per-thread state (errno, stdio,
etc.) goes through __getreent(). Before the scheduler runs there is no
current task, so the single global reent structure is returned instead. */
2126 struct _reent* __getreent() {
2127 //No lock needed because if this changes, we won't be running anymore.
2128 TCB_t *currTask=xTaskGetCurrentTaskHandle();
2129 if (currTask==NULL) {
2130 //No task running. Return global struct.
2131 return _GLOBAL_REENT;
2133 //We have a task; return its reentrant struct.
2134 return &currTask->xNewLib_reent;
/* Suspends the scheduler on THIS core only (per-core nesting counter).
Must be balanced by a later call to xTaskResumeAll(). The increment is done
with interrupts disabled so it is atomic w.r.t. ISRs on this core.
NOTE(review): the declaration of 'state' is not visible in this listing —
confirm against the full source. */
2140 void vTaskSuspendAll( void )
2142 /* A critical section is not required as the variable is of type
2143 BaseType_t. Please read Richard Barry's reply in the following link to a
2144 post in the FreeRTOS support forum before reporting this as a bug! -
2145 http://goo.gl/wu4acr */
2148 state = portENTER_CRITICAL_NESTED();
2149 ++uxSchedulerSuspended[ xPortGetCoreID() ];
2150 portEXIT_CRITICAL_NESTED(state);
2152 /*----------------------------------------------------------*/
2154 #if ( configUSE_TICKLESS_IDLE != 0 )
/* Scans all ready lists above idle priority; used by prvGetExpectedIdleTime()
to decide whether tickless sleep is safe in the SMP case. NOTE(review): the
return statements are not visible in this listing — presumably returns pdTRUE
when any such list is non-empty, pdFALSE otherwise; confirm against source. */
2156 static BaseType_t xHaveReadyTasks()
2158 for (int i = tskIDLE_PRIORITY + 1; i < configMAX_PRIORITIES; ++i)
2160 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ i ] ) ) > 0 )
2166 mtCOVERAGE_TEST_MARKER();
/* Computes how many ticks may safely be suppressed during tickless idle:
zero if any task could run now (higher-priority ready task, ready tasks for
the other core, or more idle-priority tasks than cores), otherwise the time
until the next scheduled unblock. NOTE(review): the declaration of xReturn,
the zero assignments and the final return are not visible in this listing. */
2173 static TickType_t prvGetExpectedIdleTime( void )
2178 taskENTER_CRITICAL(&xTaskQueueMutex);
2179 if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
2183 #if portNUM_PROCESSORS > 1
2184 /* This function is called from Idle task; in single core case this
2185 * means that no higher priority tasks are ready to run, and we can
2186 * enter sleep. In SMP case, there might be ready tasks waiting for
2187 * the other CPU, so need to check all ready lists.
2189 else if( xHaveReadyTasks() )
2193 #endif // portNUM_PROCESSORS > 1
2194 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > portNUM_PROCESSORS )
2196 /* There are other idle priority tasks in the ready state. If
2197 time slicing is used then the very next tick interrupt must be
2203 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2204 xReturn = xNextTaskUnblockTime - xTickCount;
2205 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2207 taskEXIT_CRITICAL(&xTaskQueueMutex);
2212 #endif /* configUSE_TICKLESS_IDLE */
2213 /*----------------------------------------------------------*/
/* Undoes one vTaskSuspendAll(). When the nesting count for this core reaches
zero: drains this core's xPendingReadyList into the ready lists, unwinds any
ticks that were pended while the scheduler was suspended, and yields if a
readied task warrants it. Returns pdTRUE when a yield occurred inside this
function, so callers can avoid yielding a second time. NOTE(review): the
declaration of pxTCB and the uxPendedTicks decrement are not visible in this
listing — confirm against the full source. */
2215 BaseType_t xTaskResumeAll( void )
2218 BaseType_t xAlreadyYielded = pdFALSE;
2220 /* If uxSchedulerSuspended[ xPortGetCoreID() ] is zero then this function does not match a
2221 previous call to vTaskSuspendAll(). */
2222 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] );
2223 /* It is possible that an ISR caused a task to be removed from an event
2224 list while the scheduler was suspended. If this was the case then the
2225 removed task will have been added to the xPendingReadyList. Once the
2226 scheduler has been resumed it is safe to move all the pending ready
2227 tasks from this list into their appropriate ready list. */
2229 taskENTER_CRITICAL(&xTaskQueueMutex);
2231 --uxSchedulerSuspended[ xPortGetCoreID() ];
2233 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
2235 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
2237 /* Move any readied tasks from the pending list into the
2238 appropriate ready list. */
2239 while( listLIST_IS_EMPTY( &xPendingReadyList[ xPortGetCoreID() ] ) == pdFALSE )
2241 pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[ xPortGetCoreID() ] ) );
2242 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2243 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
2244 prvAddTaskToReadyList( pxTCB );
2246 /* If the moved task has a priority higher than the current
2247 task then a yield must be performed. */
2248 if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2250 /* We can schedule the awoken task on this CPU. */
2251 xYieldPending[xPortGetCoreID()] = pdTRUE;
2255 mtCOVERAGE_TEST_MARKER();
2259 /* If any ticks occurred while the scheduler was suspended then
2260 they should be processed now. This ensures the tick count does
2261 not slip, and that any delayed tasks are resumed at the correct
2263 if( uxPendedTicks > ( UBaseType_t ) 0U )
2265 while( uxPendedTicks > ( UBaseType_t ) 0U )
2267 if( xTaskIncrementTick() != pdFALSE )
2269 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
2273 mtCOVERAGE_TEST_MARKER();
2280 mtCOVERAGE_TEST_MARKER();
2283 if( xYieldPending[ xPortGetCoreID() ] == pdTRUE )
2285 #if( configUSE_PREEMPTION != 0 )
2287 xAlreadyYielded = pdTRUE;
2290 taskYIELD_IF_USING_PREEMPTION();
2294 mtCOVERAGE_TEST_MARKER();
2300 mtCOVERAGE_TEST_MARKER();
2303 taskEXIT_CRITICAL(&xTaskQueueMutex);
2305 return xAlreadyYielded;
2307 /*-----------------------------------------------------------*/
/* Returns the current tick count, read under xTickCountMutex so the read is
atomic even where TickType_t is wider than the native word. NOTE(review): the
declaration of xTicks and the return statement are not visible in this
listing — confirm against the full source. */
2309 TickType_t xTaskGetTickCount( void )
2313 /* Critical section required if running on a 16 bit processor. */
2314 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2316 xTicks = xTickCount;
2318 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2322 /*-----------------------------------------------------------*/
/* ISR-safe read of the tick count, using the ISR variants of the tick-count
critical section. NOTE(review): the declaration of xReturn and the return
statement are not visible in this listing. The commented-out release call
below appears superseded by taskEXIT_CRITICAL_ISR and could be deleted. */
2324 TickType_t xTaskGetTickCountFromISR( void )
2328 taskENTER_CRITICAL_ISR(&xTickCountMutex);
2330 xReturn = xTickCount;
2331 // vPortCPUReleaseMutex( &xTickCountMutex );
2333 taskEXIT_CRITICAL_ISR(&xTickCountMutex);
2337 /*-----------------------------------------------------------*/
/* Returns the number of tasks the kernel currently knows about (all states,
including tasks awaiting deletion cleanup). */
2339 UBaseType_t uxTaskGetNumberOfTasks( void )
2341 /* A critical section is not required because the variables are of type
2343 return uxCurrentNumberOfTasks;
2345 /*-----------------------------------------------------------*/
2347 #if ( INCLUDE_pcTaskGetTaskName == 1 )
/* Returns a pointer to the task's name stored in its TCB. Passing NULL
queries the calling task (via prvGetTCBFromHandle). The returned string is
owned by the TCB and remains valid only while the task exists. */
2348 char *pcTaskGetTaskName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2352 /* If null is passed in here then the name of the calling task is being queried. */
2353 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
2354 configASSERT( pxTCB );
2355 return &( pxTCB->pcTaskName[ 0 ] );
2358 #endif /* INCLUDE_pcTaskGetTaskName */
2359 /*-----------------------------------------------------------*/
2361 #if ( configUSE_TRACE_FACILITY == 1 )
/* Fills pxTaskStatusArray with one TaskStatus_t per task (ready, blocked,
deleted-pending-cleanup, suspended) and optionally reports the total run time.
Returns the number of entries populated, or 0 if uxArraySize is too small.
Runs entirely under xTaskQueueMutex, so it should not be called from
time-critical code. NOTE(review): the do/while opening and final return are
not visible in this listing — confirm against the full source. */
2363 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
2365 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
2367 taskENTER_CRITICAL(&xTaskQueueMutex);
2369 /* Is there a space in the array for each task in the system? */
2370 if( uxArraySize >= uxCurrentNumberOfTasks )
2372 /* Fill in an TaskStatus_t structure with information on each
2373 task in the Ready state. */
2377 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
2379 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2381 /* Fill in an TaskStatus_t structure with information on each
2382 task in the Blocked state. */
2383 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
2384 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
2386 #if( INCLUDE_vTaskDelete == 1 )
2388 /* Fill in an TaskStatus_t structure with information on
2389 each task that has been deleted but not yet cleaned up. */
2390 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
2394 #if ( INCLUDE_vTaskSuspend == 1 )
2396 /* Fill in an TaskStatus_t structure with information on
2397 each task in the Suspended state. */
2398 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
2402 #if ( configGENERATE_RUN_TIME_STATS == 1)
2404 if( pulTotalRunTime != NULL )
2406 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2407 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
2409 *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
2415 if( pulTotalRunTime != NULL )
2417 *pulTotalRunTime = 0;
2424 mtCOVERAGE_TEST_MARKER();
2427 taskEXIT_CRITICAL(&xTaskQueueMutex);
2431 #endif /* configUSE_TRACE_FACILITY */
2432 /*----------------------------------------------------------*/
2434 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
/* Returns the handle of the idle task pinned to the calling core. Asserts
(rather than returning NULL) if called before the scheduler has started. */
2436 TaskHandle_t xTaskGetIdleTaskHandle( void )
2438 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2439 started, then xIdleTaskHandle will be NULL. */
2440 configASSERT( ( xIdleTaskHandle[ xPortGetCoreID() ] != NULL ) );
2441 return xIdleTaskHandle[ xPortGetCoreID() ];
/* Returns the idle task handle for the given core, or NULL when cpuid is out
of range (>= portNUM_PROCESSORS). Asserts if the requested core's idle task
has not been created yet. */
2444 TaskHandle_t xTaskGetIdleTaskHandleForCPU( UBaseType_t cpuid )
2446 TaskHandle_t xReturn = NULL;
2447 /* If xTaskGetIdleTaskHandleForCPU() is called before the scheduler has been
2448 started, then xIdleTaskHandle will be NULL. */
2449 if (cpuid < portNUM_PROCESSORS) {
2450 configASSERT( ( xIdleTaskHandle[ cpuid ] != NULL ) );
2451 xReturn = xIdleTaskHandle[ cpuid ];
2456 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2457 /*----------------------------------------------------------*/
2459 /* This conditional compilation should use inequality to 0, not equality to 1.
2460 This is to ensure vTaskStepTick() is available when user defined low power mode
2461 implementations require configUSE_TICKLESS_IDLE to be set to a value other than
2463 #if ( configUSE_TICKLESS_IDLE != 0 )
/* Advances the tick count by xTicksToJump in one step after a tickless-idle
sleep. The assert guards against jumping past the next scheduled unblock
time, which would make a delayed task wake late. The tick hook is NOT called
for the skipped ticks. */
2465 void vTaskStepTick( const TickType_t xTicksToJump )
2467 /* Correct the tick count value after a period during which the tick
2468 was suppressed. Note this does *not* call the tick hook function for
2469 each stepped tick. */
2470 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2471 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
2472 xTickCount += xTicksToJump;
2473 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2474 traceINCREASE_TICK_COUNT( xTicksToJump );
2477 #endif /* configUSE_TICKLESS_IDLE */
2478 /*----------------------------------------------------------*/
/* Tick-interrupt handler body. On core 0 it increments the tick count,
switches the delayed lists on wrap, unblocks every task whose wake time has
arrived, applies time slicing, and calls the tick hook. On any other core it
only runs the hooks and (per the ToDo below) pessimistically requests a
switch. Returns pdTRUE when the caller should perform a context switch.
NOTE(review): several lines (pxTCB declaration, the actual xTickCount
increment, the early return on non-zero cores, the uxPendedTicks increment
when the scheduler is suspended) are not visible in this listing — confirm
against the full source. */
2480 BaseType_t xTaskIncrementTick( void )
2483 TickType_t xItemValue;
2484 BaseType_t xSwitchRequired = pdFALSE;
2486 /* Called by the portable layer each time a tick interrupt occurs.
2487 Increments the tick then checks to see if the new tick value will cause any
2488 tasks to be unblocked. */
2490 /* Only let core 0 increase the tick count, to keep accurate track of time. */
2491 /* ToDo: This doesn't really play nice with the logic below: it means when core 1 is
2492 running a low-priority task, it will keep running it until there is a context
2493 switch, even when this routine (running on core 0) unblocks a bunch of high-priority
2494 tasks... this is less than optimal -- JD. */
2495 if ( xPortGetCoreID()!=0 ) {
2496 #if ( configUSE_TICK_HOOK == 1 )
2497 vApplicationTickHook();
2498 #endif /* configUSE_TICK_HOOK */
2499 #if ( CONFIG_FREERTOS_LEGACY_HOOKS == 1 )
2500 esp_vApplicationTickHook();
2501 #endif /* CONFIG_FREERTOS_LEGACY_HOOKS */
2504 We can't really calculate what we need, that's done on core 0... just assume we need a switch.
2505 ToDo: Make this more intelligent? -- JD
2511 traceTASK_INCREMENT_TICK( xTickCount );
2513 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
2515 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2516 /* Increment the RTOS tick, switching the delayed and overflowed
2517 delayed lists if it wraps to 0. */
2519 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2521 //The other CPU may decide to mess with the task queues, so this needs a mux.
2522 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
2524 /* Minor optimisation. The tick count cannot change in this
2526 const TickType_t xConstTickCount = xTickCount;
2528 if( xConstTickCount == ( TickType_t ) 0U )
/* Tick count wrapped: swap delayed and overflow-delayed lists. */
2530 taskSWITCH_DELAYED_LISTS();
2534 mtCOVERAGE_TEST_MARKER();
2537 /* See if this tick has made a timeout expire. Tasks are stored in
2538 the queue in the order of their wake time - meaning once one task
2539 has been found whose block time has not expired there is no need to
2540 look any further down the list. */
2541 if( xConstTickCount >= xNextTaskUnblockTime )
2545 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
2547 /* The delayed list is empty. Set xNextTaskUnblockTime
2548 to the maximum possible value so it is extremely
2550 if( xTickCount >= xNextTaskUnblockTime ) test will pass
2551 next time through. */
2552 xNextTaskUnblockTime = portMAX_DELAY;
2557 /* The delayed list is not empty, get the value of the
2558 item at the head of the delayed list. This is the time
2559 at which the task at the head of the delayed list must
2560 be removed from the Blocked state. */
2561 pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
2562 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xGenericListItem ) );
2564 if( xConstTickCount < xItemValue )
2566 /* It is not time to unblock this item yet, but the
2567 item value is the time at which the task at the head
2568 of the blocked list must be removed from the Blocked
2569 state - so record the item value in
2570 xNextTaskUnblockTime. */
2571 xNextTaskUnblockTime = xItemValue;
2576 mtCOVERAGE_TEST_MARKER();
2579 /* It is time to remove the item from the Blocked state. */
2580 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
2582 /* Is the task waiting on an event also? If so remove
2583 it from the event list. */
2584 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2586 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2590 mtCOVERAGE_TEST_MARKER();
2593 /* Place the unblocked task into the appropriate ready
2595 prvAddTaskToReadyList( pxTCB );
2597 /* A task being unblocked cannot cause an immediate
2598 context switch if preemption is turned off. */
2599 #if ( configUSE_PREEMPTION == 1 )
2601 /* Preemption is on, but a context switch should
2602 only be performed if the unblocked task has a
2603 priority that is equal to or higher than the
2604 currently executing task. */
2605 if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2607 xSwitchRequired = pdTRUE;
2611 mtCOVERAGE_TEST_MARKER();
2614 #endif /* configUSE_PREEMPTION */
2620 /* Tasks of equal priority to the currently running task will share
2621 processing time (time slice) if preemption is on, and the application
2622 writer has not explicitly turned time slicing off. */
2623 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
2625 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
2627 xSwitchRequired = pdTRUE;
2631 mtCOVERAGE_TEST_MARKER();
2634 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
2637 /* Guard against the tick hook being called when the pended tick
2638 count is being unwound (when the scheduler is being unlocked). */
2639 if( uxPendedTicks == ( UBaseType_t ) 0U )
2641 #if ( configUSE_TICK_HOOK == 1 )
2642 vApplicationTickHook();
2643 #endif /* configUSE_TICK_HOOK */
2644 #if ( CONFIG_FREERTOS_LEGACY_HOOKS == 1 )
2645 esp_vApplicationTickHook();
2646 #endif /* CONFIG_FREERTOS_LEGACY_HOOKS */
2650 mtCOVERAGE_TEST_MARKER();
2653 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
2659 /* The tick hook gets called at regular intervals, even if the
2660 scheduler is locked. */
2661 #if ( configUSE_TICK_HOOK == 1 )
2663 vApplicationTickHook();
2666 #if ( CONFIG_FREERTOS_LEGACY_HOOKS == 1 )
2667 esp_vApplicationTickHook();
2668 #endif /* CONFIG_FREERTOS_LEGACY_HOOKS */
2671 #if ( configUSE_PREEMPTION == 1 )
2673 if( xYieldPending [ xPortGetCoreID() ] != pdFALSE )
2675 xSwitchRequired = pdTRUE;
2679 mtCOVERAGE_TEST_MARKER();
2682 #endif /* configUSE_PREEMPTION */
2684 return xSwitchRequired;
2686 /*-----------------------------------------------------------*/
2688 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Stores an application-defined hook/tag function in the given task's TCB
(NULL xTask means the calling task). Written under xTaskQueueMutex because
the tag can also be read from interrupt context. */
2690 void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
2694 /* If xTask is NULL then it is the task hook of the calling task that is
2698 xTCB = ( TCB_t * ) pxCurrentTCB[ xPortGetCoreID() ];
2702 xTCB = ( TCB_t * ) xTask;
2705 /* Save the hook function in the TCB. A critical section is required as
2706 the value can be accessed from an interrupt. */
2707 taskENTER_CRITICAL(&xTaskQueueMutex);
2708 xTCB->pxTaskTag = pxHookFunction;
2709 taskEXIT_CRITICAL(&xTaskQueueMutex);
2712 #endif /* configUSE_APPLICATION_TASK_TAG */
2713 /*-----------------------------------------------------------*/
2715 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Returns the tag previously stored by vTaskSetApplicationTaskTag() for the
given task (NULL xTask means the calling task). Read under xTaskQueueMutex
for the same reason the write is. */
2717 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
2720 TaskHookFunction_t xReturn;
2722 /* If xTask is NULL then we are setting our own task hook. */
2725 xTCB = ( TCB_t * ) xTaskGetCurrentTaskHandle();
2729 xTCB = ( TCB_t * ) xTask;
2732 /* Read the hook function from the TCB. A critical section is required as
2733 the value can be accessed from an interrupt. */
2734 taskENTER_CRITICAL(&xTaskQueueMutex);
2736 xReturn = xTCB->pxTaskTag;
2738 taskEXIT_CRITICAL(&xTaskQueueMutex);
2743 #endif /* configUSE_APPLICATION_TASK_TAG */
2744 /*-----------------------------------------------------------*/
2746 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Invokes the task's tag as a hook function, passing pvParameter through,
and returns the hook's result. NOTE(review): the declaration of xReturn, its
default value when no tag is set, and the final return are not visible in
this listing — confirm against the full source. */
2748 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
2753 /* If xTask is NULL then we are calling our own task hook. */
2756 xTCB = ( TCB_t * ) xTaskGetCurrentTaskHandle();
2760 xTCB = ( TCB_t * ) xTask;
2763 if( xTCB->pxTaskTag != NULL )
2765 xReturn = xTCB->pxTaskTag( pvParameter );
2775 #endif /* configUSE_APPLICATION_TASK_TAG */
2776 /*-----------------------------------------------------------*/
/* Core of the SMP scheduler: picks the next task for the calling core.
If this core's scheduler is suspended it only records a pending yield.
Otherwise it accounts run-time stats, checks for stack overflow, then walks
the ready lists from uxTopReadyPriority downward looking for a task that is
not currently executing on the other core and whose affinity (tskNO_AFFINITY
or this core) allows it to run here. xTaskQueueMutex is taken manually with
vPortCPUAcquireMutex because taskENTER_CRITICAL_ISR would save interrupt
state into the TCB being switched out. NOTE(review): several lines (the
ulTotalRunTime declaration, pxTCB/pxRefTCB declarations, loop braces) are not
visible in this listing — confirm against the full source. */
2778 void vTaskSwitchContext( void )
2780 //Theoretically, this is only called from either the tick interrupt or the crosscore interrupt, so disabling
2781 //interrupts shouldn't be necessary anymore. Still, for safety we'll leave it in for now.
2782 int irqstate=portENTER_CRITICAL_NESTED();
2784 if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
2786 /* The scheduler is currently suspended - do not allow a context
2788 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
2792 xYieldPending[ xPortGetCoreID() ] = pdFALSE;
2793 xSwitchingContext[ xPortGetCoreID() ] = pdTRUE;
2794 traceTASK_SWITCHED_OUT();
2796 #if ( configGENERATE_RUN_TIME_STATS == 1 )
2798 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2799 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
2801 ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
2804 /* Add the amount of time the task has been running to the
2805 accumulated time so far. The time the task started running was
2806 stored in ulTaskSwitchedInTime. Note that there is no overflow
2807 protection here so count values are only valid until the timer
2808 overflows. The guard against negative values is to protect
2809 against suspect run time stat counter implementations - which
2810 are provided by the application, not the kernel. */
2811 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
2812 if( ulTotalRunTime > ulTaskSwitchedInTime[ xPortGetCoreID() ] )
2814 pxCurrentTCB[ xPortGetCoreID() ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xPortGetCoreID() ] );
2818 mtCOVERAGE_TEST_MARKER();
2820 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
2821 ulTaskSwitchedInTime[ xPortGetCoreID() ] = ulTotalRunTime;
2823 #endif /* configGENERATE_RUN_TIME_STATS */
2825 /* Check for stack overflow, if configured. */
2826 taskFIRST_CHECK_FOR_STACK_OVERFLOW();
2827 taskSECOND_CHECK_FOR_STACK_OVERFLOW();
2829 /* Select a new task to run */
2832 We cannot do taskENTER_CRITICAL_ISR(&xTaskQueueMutex); here because it saves the interrupt context to the task tcb, and we're
2833 swapping that out here. Instead, we're going to do the work here ourselves. Because interrupts are already disabled, we only
2834 need to acquire the mutex.
2836 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
2837 vPortCPUAcquireMutex( &xTaskQueueMutex, __FUNCTION__, __LINE__ );
2839 vPortCPUAcquireMutex( &xTaskQueueMutex );
2842 unsigned portBASE_TYPE foundNonExecutingWaiter = pdFALSE, ableToSchedule = pdFALSE, resetListHead;
2843 portBASE_TYPE uxDynamicTopReady = uxTopReadyPriority;
2844 unsigned portBASE_TYPE holdTop=pdFALSE;
2847 * ToDo: This scheduler doesn't correctly implement the round-robin scheduling as done in the single-core
2848 * FreeRTOS stack when multiple tasks have the same priority and are all ready; it just keeps grabbing the
2849 * first one. ToDo: fix this.
2850 * (Is this still true? if any, there's the issue with one core skipping over the processes for the other
2851 * core, potentially not giving the skipped-over processes any time.)
2854 while ( ableToSchedule == pdFALSE && uxDynamicTopReady >= 0 )
2856 resetListHead = pdFALSE;
2857 // Nothing to do for empty lists
2858 if (!listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxDynamicTopReady ] ) )) {
2860 ableToSchedule = pdFALSE;
2863 /* Remember the current list item so that we
2864 can detect if all items have been inspected.
2865 Once this happens, we move on to a lower
2866 priority list (assuming nothing is suitable
2867 for scheduling). Note: This can return NULL if
2868 the list index is at the listItem */
2869 pxRefTCB = pxReadyTasksLists[ uxDynamicTopReady ].pxIndex->pvOwner;
2871 if ((void*)pxReadyTasksLists[ uxDynamicTopReady ].pxIndex==(void*)&pxReadyTasksLists[ uxDynamicTopReady ].xListEnd) {
2872 //pxIndex points to the list end marker. Skip that and just get the next item.
2873 listGET_OWNER_OF_NEXT_ENTRY( pxRefTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2877 listGET_OWNER_OF_NEXT_ENTRY( pxTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2878 /* Find out if the next task in the list is
2879 already being executed by another core */
2880 foundNonExecutingWaiter = pdTRUE;
2881 portBASE_TYPE i = 0;
2882 for ( i=0; i<portNUM_PROCESSORS; i++ ) {
2883 if (i == xPortGetCoreID()) {
2885 } else if (pxCurrentTCB[i] == pxTCB) {
2886 holdTop=pdTRUE; //keep this as the top prio, for the other CPU
2887 foundNonExecutingWaiter = pdFALSE;
2892 if (foundNonExecutingWaiter == pdTRUE) {
2893 /* If the task is not being executed
2894 by another core and its affinity is
2895 compatible with the current one,
2896 prepare it to be swapped in */
2897 if (pxTCB->xCoreID == tskNO_AFFINITY) {
2898 pxCurrentTCB[xPortGetCoreID()] = pxTCB;
2899 ableToSchedule = pdTRUE;
2900 } else if (pxTCB->xCoreID == xPortGetCoreID()) {
2901 pxCurrentTCB[xPortGetCoreID()] = pxTCB;
2902 ableToSchedule = pdTRUE;
2904 ableToSchedule = pdFALSE;
2905 holdTop=pdTRUE; //keep this as the top prio, for the other CPU
2908 ableToSchedule = pdFALSE;
2911 if (ableToSchedule == pdFALSE) {
2912 resetListHead = pdTRUE;
2913 } else if ((ableToSchedule == pdTRUE) && (resetListHead == pdTRUE)) {
/* We skipped over tasks to find this one; rewind the list index so the
skipped tasks are not starved of their round-robin turn. */
2914 tskTCB * pxResetTCB;
2916 listGET_OWNER_OF_NEXT_ENTRY( pxResetTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2917 } while(pxResetTCB != pxRefTCB);
2919 } while ((ableToSchedule == pdFALSE) && (pxTCB != pxRefTCB));
/* Only lower the cached top priority when no task in this list was left
behind for the other core to pick up. */
2921 if (!holdTop) --uxTopReadyPriority;
2923 --uxDynamicTopReady;
2926 traceTASK_SWITCHED_IN();
2927 xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;
2929 //Exit critical region manually as well: release the mux now, interrupts will be re-enabled when we
2930 //exit the function.
2931 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
2932 vPortCPUReleaseMutex( &xTaskQueueMutex, __FUNCTION__, __LINE__ );
2934 vPortCPUReleaseMutex( &xTaskQueueMutex );
2937 #if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
2938 vPortSetStackWatchpoint(pxCurrentTCB[xPortGetCoreID()]->pxStack);
2942 portEXIT_CRITICAL_NESTED(irqstate);
2944 /*-----------------------------------------------------------*/
/* Blocks the current task on pxEventList: inserts its event list item in
priority order, removes it from the ready list, and places it on the delayed
list for xTicksToWait ticks (or on the suspended list for portMAX_DELAY when
INCLUDE_vTaskSuspend is enabled, so it is never woken by a timing event). */
2946 void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
2948 TickType_t xTimeToWake;
2950 configASSERT( pxEventList );
2952 taskENTER_CRITICAL(&xTaskQueueMutex);
2954 /* Place the event list item of the TCB in the appropriate event list.
2955 This is placed in the list in priority order so the highest priority task
2956 is the first to be woken by the event. The queue that contains the event
2957 list is locked, preventing simultaneous access from interrupts. */
2958 vListInsert( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
2960 /* The task must be removed from from the ready list before it is added to
2961 the blocked list as the same list item is used for both lists. Exclusive
2962 access to the ready lists guaranteed because the scheduler is locked. */
2963 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
2965 /* The current task must be in a ready list, so there is no need to
2966 check, and the port reset macro can be called directly. */
2967 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
2971 mtCOVERAGE_TEST_MARKER();
2974 #if ( INCLUDE_vTaskSuspend == 1 )
2976 if( xTicksToWait == portMAX_DELAY )
2978 /* Add the task to the suspended task list instead of a delayed task
2979 list to ensure the task is not woken by a timing event. It will
2980 block indefinitely. */
2981 traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
2982 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
2986 /* Calculate the time at which the task should be woken if the event
2987 does not occur. This may overflow but this doesn't matter, the
2988 scheduler will handle it. */
2989 xTimeToWake = xTickCount + xTicksToWait;
2990 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
2993 #else /* INCLUDE_vTaskSuspend */
2995 /* Calculate the time at which the task should be woken if the event does
2996 not occur. This may overflow but this doesn't matter, the scheduler
2998 xTimeToWake = xTickCount + xTicksToWait;
2999 prvAddCurrentTaskToDelayedList( xTimeToWake );
3001 #endif /* INCLUDE_vTaskSuspend */
3003 taskEXIT_CRITICAL(&xTaskQueueMutex);
3006 /*-----------------------------------------------------------*/
/* Variant of vTaskPlaceOnEventList used by the event-groups code: the
   event list item is inserted at the END of pxEventList (not priority
   ordered) and xItemValue is stored in the list item (tagged with
   taskEVENT_LIST_ITEM_VALUE_IN_USE). Must be called with the scheduler
   suspended on this core (asserted below).
   NOTE(review): braces / else arms are elided from this listing. */
3008 void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
3010 TickType_t xTimeToWake;
3012 configASSERT( pxEventList );
3014 taskENTER_CRITICAL(&xTaskQueueMutex);
3016 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3017 the event groups implementation. */
3018 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != 0 );
3020 /* Store the item value in the event list item. It is safe to access the
3021 event list item here as interrupts won't access the event list item of a
3022 task that is not in the Blocked state. */
3023 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3025 /* Place the event list item of the TCB at the end of the appropriate event
3026 list. It is safe to access the event list here because it is part of an
3027 event group implementation - and interrupts don't access event groups
3028 directly (instead they access them indirectly by pending function calls to
3030 vListInsertEnd( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
3032 /* The task must be removed from the ready list before it is added to the
3033 blocked list. Exclusive access can be assured to the ready list as the
3034 scheduler is locked. */
3035 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
3037 /* The current task must be in a ready list, so there is no need to
3038 check, and the port reset macro can be called directly. */
3039 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
3043 mtCOVERAGE_TEST_MARKER();
3046 #if ( INCLUDE_vTaskSuspend == 1 )
3048 if( xTicksToWait == portMAX_DELAY )
3050 /* Add the task to the suspended task list instead of a delayed task
3051 list to ensure it is not woken by a timing event. It will block
3053 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
3057 /* Calculate the time at which the task should be woken if the event
3058 does not occur. This may overflow but this doesn't matter, the
3059 kernel will manage it correctly. */
3060 xTimeToWake = xTickCount + xTicksToWait;
3061 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
3064 #else /* INCLUDE_vTaskSuspend */
3066 /* Calculate the time at which the task should be woken if the event does
3067 not occur. This may overflow but this doesn't matter, the kernel
3068 will manage it correctly. */
3069 xTimeToWake = xTickCount + xTicksToWait;
3070 prvAddCurrentTaskToDelayedList( xTimeToWake );
3072 #endif /* INCLUDE_vTaskSuspend */
3074 taskEXIT_CRITICAL(&xTaskQueueMutex);
3076 /*-----------------------------------------------------------*/
3078 #if configUSE_TIMERS == 1
/* Kernel-internal (timer task) variant: assumes the caller is the only
   task that will wait on pxEventList, so the faster vListInsertEnd() is
   used instead of the priority-ordered vListInsert(). Not part of the
   public API.
   NOTE(review): braces / else arms are elided from this listing. */
3080 void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, const TickType_t xTicksToWait )
3082 TickType_t xTimeToWake;
3084 taskENTER_CRITICAL(&xTaskQueueMutex);
3085 configASSERT( pxEventList );
3087 /* This function should not be called by application code hence the
3088 'Restricted' in its name. It is not part of the public API. It is
3089 designed for use by kernel code, and has special calling requirements -
3090 it should be called from a critical section. */
3093 /* Place the event list item of the TCB in the appropriate event list.
3094 In this case it is assume that this is the only task that is going to
3095 be waiting on this event list, so the faster vListInsertEnd() function
3096 can be used in place of vListInsert. */
3097 vListInsertEnd( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
3099 /* We must remove this task from the ready list before adding it to the
3100 blocked list as the same list item is used for both lists. This
3101 function is called form a critical section. */
3102 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
3104 /* The current task must be in a ready list, so there is no need to
3105 check, and the port reset macro can be called directly. */
3106 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
3110 mtCOVERAGE_TEST_MARKER();
3113 /* Calculate the time at which the task should be woken if the event does
3114 not occur. This may overflow but this doesn't matter. */
3115 xTimeToWake = xTickCount + xTicksToWait;
3117 traceTASK_DELAY_UNTIL();
3118 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
3119 taskEXIT_CRITICAL(&xTaskQueueMutex);
3123 #endif /* configUSE_TIMERS */
3124 /*-----------------------------------------------------------*/
/* Unblock the highest-priority task waiting on pxEventList and make it
   ready (or pend it on xPendingReadyList[uxTargetCPU] when no scheduler
   that could run it is active). Dual-core aware: an unpinned task can be
   readied if any core's scheduler is running; a pinned task only if its
   own core's scheduler is running.
   Returns pdTRUE when the unblocked task should preempt the caller.
   NOTE(review): the xReturn assignments, else arms, closing braces and
   the final return statement are elided from this listing. */
3126 BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
3128 TCB_t *pxUnblockedTCB;
3130 BaseType_t xTaskCanBeReady;
3131 UBaseType_t i, uxTargetCPU;
3133 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
3134 called from a critical section within an ISR. */
3135 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
3136 /* The event list is sorted in priority order, so the first in the list can
3137 be removed as it is known to be the highest priority. Remove the TCB from
3138 the delayed list, and add it to the ready list.
3140 If an event is for a queue that is locked then this function will never
3141 get called - the lock count on the queue will get modified instead. This
3142 means exclusive access to the event list is guaranteed here.
3144 This function assumes that a check has already been made to ensure that
3145 pxEventList is not empty. */
3146 if ( ( listLIST_IS_EMPTY( pxEventList ) ) == pdFALSE ) {
3147 pxUnblockedTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
3148 configASSERT( pxUnblockedTCB );
3149 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
3151 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
3155 /* Determine if the task can possibly be run on either CPU now, either because the scheduler
3156 the task is pinned to is running or because a scheduler is running on any CPU. */
3157 xTaskCanBeReady = pdFALSE;
3158 if ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY ) {
3159 uxTargetCPU = xPortGetCoreID();
3160 for (i = 0; i < portNUM_PROCESSORS; i++) {
3161 if ( uxSchedulerSuspended[ i ] == ( UBaseType_t ) pdFALSE ) {
3162 xTaskCanBeReady = pdTRUE;
/* Task is pinned: only its own core's scheduler state matters. */
3167 uxTargetCPU = pxUnblockedTCB->xCoreID;
3168 xTaskCanBeReady = uxSchedulerSuspended[ uxTargetCPU ] == ( UBaseType_t ) pdFALSE;
3172 if( xTaskCanBeReady )
3174 ( void ) uxListRemove( &( pxUnblockedTCB->xGenericListItem ) );
3175 prvAddTaskToReadyList( pxUnblockedTCB );
3179 /* The delayed and ready lists cannot be accessed, so hold this task
3180 pending until the scheduler is resumed on this CPU. */
3181 vListInsertEnd( &( xPendingReadyList[ uxTargetCPU ] ), &( pxUnblockedTCB->xEventListItem ) );
3184 if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
3186 /* Return true if the task removed from the event list has a higher
3187 priority than the calling task. This allows the calling task to know if
3188 it should force a context switch now. */
3191 /* Mark that a yield is pending in case the user is not using the
3192 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3193 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
/* Task belongs on (or may run on) the other core: kick that core. */
3195 else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
3197 taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
3205 #if( configUSE_TICKLESS_IDLE == 1 )
3207 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
3208 might be set to the blocked task's time out time. If the task is
3209 unblocked for a reason other than a timeout xNextTaskUnblockTime is
3210 normally left unchanged, because it is automatically get reset to a new
3211 value when the tick count equals xNextTaskUnblockTime. However if
3212 tickless idling is used it might be more important to enter sleep mode
3213 at the earliest possible time - so reset xNextTaskUnblockTime here to
3214 ensure it is updated at the earliest possible time. */
3215 prvResetNextTaskUnblockTime();
3218 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
3222 /*-----------------------------------------------------------*/
/* Unblock the task that owns pxEventListItem (used by the event-flags
   implementation), storing xItemValue into the list item first. Must be
   called with the scheduler suspended on this core (asserted below).
   Returns pdTRUE when the unblocked task should preempt the caller.
   NOTE(review): the xReturn assignments, braces and final return are
   elided from this listing. */
3224 BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
3226 TCB_t *pxUnblockedTCB;
3229 taskENTER_CRITICAL(&xTaskQueueMutex);
3230 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3231 the event flags implementation. */
3232 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != pdFALSE );
3234 /* Store the new item value in the event list. */
3235 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3237 /* Remove the event list form the event flag. Interrupts do not access
3239 pxUnblockedTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxEventListItem );
3240 configASSERT( pxUnblockedTCB );
3241 ( void ) uxListRemove( pxEventListItem );
3243 /* Remove the task from the delayed list and add it to the ready list. The
3244 scheduler is suspended so interrupts will not be accessing the ready
3246 ( void ) uxListRemove( &( pxUnblockedTCB->xGenericListItem ) );
3247 prvAddTaskToReadyList( pxUnblockedTCB );
3249 if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
3251 /* Return true if the task removed from the event list has
3252 a higher priority than the calling task. This allows
3253 the calling task to know if it should force a context
3257 /* Mark that a yield is pending in case the user is not using the
3258 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3259 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
/* Otherwise, if the task is pinned to the other core, kick that core. */
3261 else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
3263 taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
3271 taskEXIT_CRITICAL(&xTaskQueueMutex);
3274 /*-----------------------------------------------------------*/
/* Snapshot the current tick count and tick-overflow count into pxTimeOut
   so that a later xTaskCheckForTimeOut() can measure elapsed time even
   across a tick-counter wrap. */
3276 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
3278 configASSERT( pxTimeOut );
3279 pxTimeOut->xOverflowCount = xNumOfOverflows;
3280 pxTimeOut->xTimeOnEntering = xTickCount;
3282 /*-----------------------------------------------------------*/
/* Decide whether the timeout captured in pxTimeOut (by
   vTaskSetTimeOutState) has expired after *pxTicksToWait ticks.
   If not expired, *pxTicksToWait is reduced by the time already elapsed
   and the timeout state is refreshed. portMAX_DELAY means "never time
   out" when INCLUDE_vTaskSuspend is enabled.
   NOTE(review): the xReturn assignments, braces and final return are
   elided from this listing. */
3284 BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
3288 configASSERT( pxTimeOut );
3289 configASSERT( pxTicksToWait );
3291 taskENTER_CRITICAL(&xTickCountMutex);
3293 /* Minor optimisation. The tick count cannot change in this block. */
3294 const TickType_t xConstTickCount = xTickCount;
3296 #if ( INCLUDE_vTaskSuspend == 1 )
3297 /* If INCLUDE_vTaskSuspend is set to 1 and the block time specified is
3298 the maximum block time then the task should block indefinitely, and
3299 therefore never time out. */
3300 if( *pxTicksToWait == portMAX_DELAY )
3304 else /* We are not blocking indefinitely, perform the checks below. */
3307 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
3309 /* The tick count is greater than the time at which vTaskSetTimeout()
3310 was called, but has also overflowed since vTaskSetTimeOut() was called.
3311 It must have wrapped all the way around and gone past us again. This
3312 passed since vTaskSetTimeout() was called. */
3315 else if( ( xConstTickCount - pxTimeOut->xTimeOnEntering ) < *pxTicksToWait )
3317 /* Not a genuine timeout. Adjust parameters for time remaining. */
3318 *pxTicksToWait -= ( xConstTickCount - pxTimeOut->xTimeOnEntering );
3319 vTaskSetTimeOutState( pxTimeOut );
3327 taskEXIT_CRITICAL(&xTickCountMutex);
3331 /*-----------------------------------------------------------*/
/* Record that a yield was requested but deferred; the scheduler will
   perform the context switch at the next opportunity on this core. */
3333 void vTaskMissedYield( void )
3335 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
3337 /*-----------------------------------------------------------*/
3339 #if ( configUSE_TRACE_FACILITY == 1 )
/* Trace-facility accessor: return the uxTaskNumber stored in xTask's TCB.
   NOTE(review): NULL-handle behavior is elided from this listing —
   confirm against the full source. */
3341 UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
3343 UBaseType_t uxReturn;
3348 pxTCB = ( TCB_t * ) xTask;
3349 uxReturn = pxTCB->uxTaskNumber;
3359 #endif /* configUSE_TRACE_FACILITY */
3360 /*-----------------------------------------------------------*/
3362 #if ( configUSE_TRACE_FACILITY == 1 )
/* Trace-facility mutator: store uxHandle as xTask's task number. */
3364 void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
3370 pxTCB = ( TCB_t * ) xTask;
3371 pxTCB->uxTaskNumber = uxHandle;
3375 #endif /* configUSE_TRACE_FACILITY */
3378 * -----------------------------------------------------------
3380 * ----------------------------------------------------------
3382 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3383 * language extensions. The equivalent prototype for this function is:
3385 * void prvIdleTask( void *pvParameters );
/* The idle task body. Runs at the lowest priority on each core and:
   reclaims memory of deleted tasks, optionally yields to peers at the
   idle priority, calls the application/ESP-IDF idle hooks, and (when
   tickless idle is enabled) drives portSUPPRESS_TICKS_AND_SLEEP().
   NOTE(review): the enclosing infinite loop, taskYIELD() calls and
   closing braces are elided from this listing. */
3388 static portTASK_FUNCTION( prvIdleTask, pvParameters )
3390 /* Stop warnings. */
3391 ( void ) pvParameters;
3395 /* See if any tasks have been deleted. */
3396 prvCheckTasksWaitingTermination();
3398 #if ( configUSE_PREEMPTION == 0 )
3400 /* If we are not using preemption we keep forcing a task switch to
3401 see if any other task has become available. If we are using
3402 preemption we don't need to do this as any task becoming available
3403 will automatically get the processor anyway. */
3406 #endif /* configUSE_PREEMPTION */
3408 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
3410 /* When using preemption tasks of equal priority will be
3411 timesliced. If a task that is sharing the idle priority is ready
3412 to run then the idle task should yield before the end of the
3415 A critical region is not required here as we are just reading from
3416 the list, and an occasional incorrect value will not matter. If
3417 the ready list at the idle priority contains more than one task
3418 then a task other than the idle task is ready to execute. */
3419 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
3425 mtCOVERAGE_TEST_MARKER();
3428 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
3430 #if ( configUSE_IDLE_HOOK == 1 )
3432 extern void vApplicationIdleHook( void );
3434 /* Call the user defined function from within the idle task. This
3435 allows the application designer to add background functionality
3436 without the overhead of a separate task.
3437 NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
3438 CALL A FUNCTION THAT MIGHT BLOCK. */
3439 vApplicationIdleHook();
3441 #endif /* configUSE_IDLE_HOOK */
3442 #if ( CONFIG_FREERTOS_LEGACY_HOOKS == 1 )
3444 /* Call the esp-idf hook system */
3445 esp_vApplicationIdleHook();
3447 #endif /* CONFIG_FREERTOS_LEGACY_HOOKS */
3450 /* This conditional compilation should use inequality to 0, not equality
3451 to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
3452 user defined low power mode implementations require
3453 configUSE_TICKLESS_IDLE to be set to a value other than 1. */
3454 #if ( configUSE_TICKLESS_IDLE != 0 )
3456 TickType_t xExpectedIdleTime;
3458 /* It is not desirable to suspend then resume the scheduler on
3459 each iteration of the idle task. Therefore, a preliminary
3460 test of the expected idle time is performed without the
3461 scheduler suspended. The result here is not necessarily
3463 xExpectedIdleTime = prvGetExpectedIdleTime();
3465 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
/* This port uses a critical section rather than scheduler suspension
   to sample the idle time consistently. */
3467 taskENTER_CRITICAL(&xTaskQueueMutex);
3469 /* Now the scheduler is suspended, the expected idle
3470 time can be sampled again, and this time its value can
3472 configASSERT( xNextTaskUnblockTime >= xTickCount );
3473 xExpectedIdleTime = prvGetExpectedIdleTime();
3475 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3477 traceLOW_POWER_IDLE_BEGIN();
3478 portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
3479 traceLOW_POWER_IDLE_END();
3483 mtCOVERAGE_TEST_MARKER();
3486 taskEXIT_CRITICAL(&xTaskQueueMutex);
3490 mtCOVERAGE_TEST_MARKER();
3493 #endif /* configUSE_TICKLESS_IDLE */
3496 /*-----------------------------------------------------------*/
3498 #if configUSE_TICKLESS_IDLE != 0
/* Called by the port layer just before entering tickless sleep to
   confirm it is still safe to do so. Returns eAbortSleep if a task was
   readied or a yield was pended on this core while preparing to sleep,
   eNoTasksWaitingTimeout if (timers disabled) every application task is
   blocked indefinitely, otherwise eStandardSleep.
   NOTE(review): braces and the final return are elided from this
   listing. */
3500 eSleepModeStatus eTaskConfirmSleepModeStatus( void )
3502 eSleepModeStatus eReturn = eStandardSleep;
3503 taskENTER_CRITICAL(&xTaskQueueMutex);
3505 if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xPortGetCoreID() ] ) != 0 )
3507 /* A task was made ready while the scheduler was suspended. */
3508 eReturn = eAbortSleep;
3510 else if( xYieldPending[ xPortGetCoreID() ] != pdFALSE )
3512 /* A yield was pended while the scheduler was suspended. */
3513 eReturn = eAbortSleep;
3517 #if configUSE_TIMERS == 0
3519 /* The idle task exists in addition to the application tasks. */
3520 const UBaseType_t uxNonApplicationTasks = 1;
3522 /* If timers are not being used and all the tasks are in the
3523 suspended list (which might mean they have an infinite block
3524 time rather than actually being suspended) then it is safe to
3525 turn all clocks off and just wait for external interrupts. */
3526 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
3528 eReturn = eNoTasksWaitingTimeout;
3532 mtCOVERAGE_TEST_MARKER();
3535 #endif /* configUSE_TIMERS */
3537 taskEXIT_CRITICAL(&xTaskQueueMutex);
3541 #endif /* configUSE_TICKLESS_IDLE */
3542 /*-----------------------------------------------------------*/
3544 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3546 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
/* Store pvValue at TLS slot xIndex of xTaskToSet's TCB, also recording a
   deletion callback invoked when the task is deleted (ESP-IDF extension).
   Out-of-range xIndex is silently ignored. */
3548 void vTaskSetThreadLocalStoragePointerAndDelCallback( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue , TlsDeleteCallbackFunction_t xDelCallback)
3552 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3554 taskENTER_CRITICAL(&xTaskQueueMutex);
3555 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3556 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3557 pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
3558 taskEXIT_CRITICAL(&xTaskQueueMutex);
/* Standard API: same as above with no deletion callback. */
3562 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
3564 vTaskSetThreadLocalStoragePointerAndDelCallback( xTaskToSet, xIndex, pvValue, (TlsDeleteCallbackFunction_t)NULL );
/* Variant compiled when deletion callbacks are disabled: stores the
   pointer only. */
3569 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
3573 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3575 taskENTER_CRITICAL(&xTaskQueueMutex);
3576 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3577 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3578 taskEXIT_CRITICAL(&xTaskQueueMutex);
3581 #endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS */
3583 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3584 /*-----------------------------------------------------------*/
3586 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
/* Return the pointer stored at TLS slot xIndex of xTaskToQuery's TCB,
   or NULL when xIndex is out of range.
   NOTE(review): the return statement and braces are elided from this
   listing. */
3588 void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
3590 void *pvReturn = NULL;
3593 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3595 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
3596 pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
3606 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3609 #if ( portUSING_MPU_WRAPPERS == 1 )
3610 /* ToDo: Check for multicore */
/* Replace the MPU region settings of xTaskToModify (NULL = calling task)
   with xRegions via the port layer. Marked UNTESTED in this fork. */
3611 void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
3615 UNTESTED_FUNCTION();
3616 /* If null is passed in here then we are deleting ourselves. */
3617 pxTCB = prvGetTCBFromHandle( xTaskToModify );
3619 vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
3622 #endif /* portUSING_MPU_WRAPPERS */
3623 /*-----------------------------------------------------------*/
/* One-time initialisation of every scheduler list: the per-priority
   ready lists, both delayed lists, the per-core pending-ready lists, and
   (when enabled) the termination and suspended lists. Also points
   pxDelayedTaskList / pxOverflowDelayedTaskList at their initial
   storage. */
3625 static void prvInitialiseTaskLists( void )
3627 UBaseType_t uxPriority;
3629 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
3631 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
3634 vListInitialise( &xDelayedTaskList1 );
3635 vListInitialise( &xDelayedTaskList2 );
3636 vListInitialise( &xPendingReadyList[ 0 ] );
/* Second pending-ready list only exists on dual-core targets. */
3637 if (portNUM_PROCESSORS == 2) {
3638 vListInitialise( &xPendingReadyList[ 1 ] );
3641 #if ( INCLUDE_vTaskDelete == 1 )
3643 vListInitialise( &xTasksWaitingTermination );
3645 #endif /* INCLUDE_vTaskDelete */
3647 #if ( INCLUDE_vTaskSuspend == 1 )
3649 vListInitialise( &xSuspendedTaskList );
3651 #endif /* INCLUDE_vTaskSuspend */
3653 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
3655 pxDelayedTaskList = &xDelayedTaskList1;
3656 pxOverflowDelayedTaskList = &xDelayedTaskList2;
3658 /*-----------------------------------------------------------*/
/* Idle-task helper: reclaim the TCB/stack of deleted tasks. Walks
   xTasksWaitingTermination and frees the first entry that (a) is not
   still running on either core and (b) is pinned to this core or has no
   affinity — per-core cleanup is required because e.g.
   _xt_coproc_release must run on the task's own core. Deletion
   callbacks and prvDeleteTCB run outside the critical section.
   NOTE(review): the loop-exit break and some braces are elided from
   this listing. */
3660 static void prvCheckTasksWaitingTermination( void )
3662 #if ( INCLUDE_vTaskDelete == 1 )
3664 BaseType_t xListIsEmpty;
3665 int core = xPortGetCoreID();
3667 /* ucTasksDeleted is used to prevent vTaskSuspendAll() being called
3668 too often in the idle task. */
3669 while(uxTasksDeleted > ( UBaseType_t ) 0U )
3671 TCB_t *pxTCB = NULL;
3673 taskENTER_CRITICAL(&xTaskQueueMutex);
3675 xListIsEmpty = listLIST_IS_EMPTY( &xTasksWaitingTermination );
3676 if( xListIsEmpty == pdFALSE )
3678 /* We only want to kill tasks that ran on this core because e.g. _xt_coproc_release needs to
3679 be called on the core the process is pinned on, if any */
3680 ListItem_t *target = listGET_HEAD_ENTRY(&xTasksWaitingTermination);
3681 for( ; target != listGET_END_MARKER(&xTasksWaitingTermination); target = listGET_NEXT(target) ){ //Walk the list
3682 TCB_t *tgt_tcb = ( TCB_t * )listGET_LIST_ITEM_OWNER(target);
3683 int affinity = tgt_tcb->xCoreID;
3684 //Self deleting tasks are added to Termination List before they switch context. Ensure they aren't still currently running
3685 if( pxCurrentTCB[core] == tgt_tcb || (portNUM_PROCESSORS > 1 && pxCurrentTCB[!core] == tgt_tcb) ){
3686 continue; //Can't free memory of task that is still running
3688 if(affinity == core || affinity == tskNO_AFFINITY){ //Find first item not pinned to other core
3694 ( void ) uxListRemove( target ); //Remove list item from list
3695 --uxCurrentNumberOfTasks;
3700 taskEXIT_CRITICAL(&xTaskQueueMutex); //Need to call deletion callbacks outside critical section
3702 if (pxTCB != NULL) { //Call deletion callbacks and free TCB memory
3703 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
3704 prvDeleteTLS( pxTCB );
3706 prvDeleteTCB( pxTCB );
3710 mtCOVERAGE_TEST_MARKER();
3711 break; //No TCB found that could be freed by this core, break out of loop
3715 #endif /* vTaskDelete */
3717 /*-----------------------------------------------------------*/
3719 //This should be called with the taskqueuemutex grabbed. -JD
/* Move the current task on xCoreID into the correct delayed list, keyed
   by its absolute wake time. A wake time numerically below the current
   tick count means the addition overflowed, so the task goes on the
   overflow list; otherwise it goes on the current delayed list and
   xNextTaskUnblockTime is lowered if this task wakes soonest. */
3720 static void prvAddCurrentTaskToDelayedList( const BaseType_t xCoreID, const TickType_t xTimeToWake )
3722 /* The list item will be inserted in wake time order. */
3723 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xCoreID ]->xGenericListItem ), xTimeToWake );
3725 if( xTimeToWake < xTickCount )
3727 traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();
3728 /* Wake time has overflowed. Place this item in the overflow list. */
3729 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB[ xCoreID ]->xGenericListItem ) );
3733 traceMOVED_TASK_TO_DELAYED_LIST();
3734 /* The wake time has not overflowed, so the current block list is used. */
3735 vListInsert( pxDelayedTaskList, &( pxCurrentTCB[ xCoreID ]->xGenericListItem ) );
3737 /* If the task entering the blocked state was placed at the head of the
3738 list of blocked tasks then xNextTaskUnblockTime needs to be updated
3740 if( xTimeToWake < xNextTaskUnblockTime )
3742 xNextTaskUnblockTime = xTimeToWake;
3746 mtCOVERAGE_TEST_MARKER();
/* Return the core affinity (xCoreID, possibly tskNO_AFFINITY) of xTask;
   a NULL handle resolves to the calling task via prvGetTCBFromHandle. */
3752 BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
3756 pxTCB = prvGetTCBFromHandle( xTask );
3758 return pxTCB->xCoreID;
3760 /*-----------------------------------------------------------*/
3763 #if ( configUSE_TRACE_FACILITY == 1 )
/* Fill pxTaskStatusArray with one TaskStatus_t per task in pxList,
   labelling each with eState (suspended tasks that are actually blocked
   on an event are re-labelled eBlocked). Returns the number of entries
   written (uxTask).
   NOTE(review): the do { ... } opener, uxTask++ and the final return
   are elided from this listing. */
3765 static UBaseType_t prvListTaskWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
3767 volatile TCB_t *pxNextTCB, *pxFirstTCB;
3768 UBaseType_t uxTask = 0;
3770 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
3772 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );
3774 /* Populate an TaskStatus_t structure within the
3775 pxTaskStatusArray array for each task that is referenced from
3776 pxList. See the definition of TaskStatus_t in task.h for the
3777 meaning of each TaskStatus_t structure member. */
3780 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
3782 pxTaskStatusArray[ uxTask ].xHandle = ( TaskHandle_t ) pxNextTCB;
3783 pxTaskStatusArray[ uxTask ].pcTaskName = ( const char * ) &( pxNextTCB->pcTaskName [ 0 ] );
3784 pxTaskStatusArray[ uxTask ].xTaskNumber = pxNextTCB->uxTCBNumber;
3785 pxTaskStatusArray[ uxTask ].eCurrentState = eState;
3786 pxTaskStatusArray[ uxTask ].uxCurrentPriority = pxNextTCB->uxPriority;
3788 #if ( configTASKLIST_INCLUDE_COREID == 1 )
3789 pxTaskStatusArray[ uxTask ].xCoreID = pxNextTCB->xCoreID;
3790 #endif /* configTASKLIST_INCLUDE_COREID */
3792 #if ( INCLUDE_vTaskSuspend == 1 )
3794 /* If the task is in the suspended list then there is a chance
3795 it is actually just blocked indefinitely - so really it should
3796 be reported as being in the Blocked state. */
3797 if( eState == eSuspended )
3799 if( listLIST_ITEM_CONTAINER( &( pxNextTCB->xEventListItem ) ) != NULL )
3801 pxTaskStatusArray[ uxTask ].eCurrentState = eBlocked;
3805 #endif /* INCLUDE_vTaskSuspend */
3807 #if ( configUSE_MUTEXES == 1 )
3809 pxTaskStatusArray[ uxTask ].uxBasePriority = pxNextTCB->uxBasePriority;
3813 pxTaskStatusArray[ uxTask ].uxBasePriority = 0;
3817 #if ( configGENERATE_RUN_TIME_STATS == 1 )
3819 pxTaskStatusArray[ uxTask ].ulRunTimeCounter = pxNextTCB->ulRunTimeCounter;
3823 pxTaskStatusArray[ uxTask ].ulRunTimeCounter = 0;
3827 #if ( portSTACK_GROWTH > 0 )
3829 pxTaskStatusArray[ uxTask ].usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxNextTCB->pxEndOfStack );
3833 pxTaskStatusArray[ uxTask ].usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxNextTCB->pxStack );
3839 } while( pxNextTCB != pxFirstTCB );
3843 mtCOVERAGE_TEST_MARKER();
3849 #endif /* configUSE_TRACE_FACILITY */
3850 /*-----------------------------------------------------------*/
3852 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
/* Count how many StackType_t words at the far end of a task stack still
   hold the tskSTACK_FILL_BYTE pattern, i.e. were never written — the
   stack "high water mark".
   NOTE(review): the ulCount++ inside the loop is elided from this
   listing (upstream FreeRTOS increments ulCount each iteration) —
   confirm against the full source. */
3854 static uint32_t prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
3856 uint32_t ulCount = 0U;
3858 while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
3860 pucStackByte -= portSTACK_GROWTH;
3864 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
3866 return ( uint32_t ) ulCount;
3869 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) */
3870 /*-----------------------------------------------------------*/
3872 #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
/* Return the minimum amount of stack (in StackType_t words) that has
   remained unused since xTask started. The unused end of the stack is
   pxStack on descending-stack ports, pxEndOfStack on ascending ones.
   NOTE(review): the final return statement is elided from this
   listing. */
3874 UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
3877 uint8_t *pucEndOfStack;
3878 UBaseType_t uxReturn;
3880 pxTCB = prvGetTCBFromHandle( xTask );
3882 #if portSTACK_GROWTH < 0
3884 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3888 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3892 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
3897 #endif /* INCLUDE_uxTaskGetStackHighWaterMark */
3898 /*-----------------------------------------------------------*/
3900 #if (INCLUDE_pxTaskGetStackStart == 1)
/* Return the base (lowest address) of xTask's stack allocation
   (ESP-IDF extension).
   NOTE(review): the return statement is elided from this listing. */
3902 uint8_t* pxTaskGetStackStart( TaskHandle_t xTask)
3907 pxTCB = prvGetTCBFromHandle( xTask );
3908 uxReturn = (uint8_t*)pxTCB->pxStack;
3913 #endif /* INCLUDE_pxTaskGetStackStart */
3914 /*-----------------------------------------------------------*/
3916 #if ( INCLUDE_vTaskDelete == 1 )
/* Free all kernel-owned memory of a deleted task: port cleanup first,
   then the newlib reentrancy struct, MPU settings, and finally the
   stack/TCB — but only the parts that were dynamically allocated, as
   recorded in ucStaticallyAllocated when static allocation is possible. */
3918 static void prvDeleteTCB( TCB_t *pxTCB )
3920 /* This call is required for any port specific cleanup related to task.
3921 It must be above the vPortFree() calls. */
3922 portCLEAN_UP_TCB( pxTCB );
3924 /* Free up the memory allocated by the scheduler for the task. It is up
3925 to the task to free any memory allocated at the application level. */
3926 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3928 _reclaim_reent( &( pxTCB->xNewLib_reent ) );
3930 #endif /* configUSE_NEWLIB_REENTRANT */
3932 #if ( portUSING_MPU_WRAPPERS == 1 )
3933 vPortReleaseTaskMPUSettings( &( pxTCB->xMPUSettings) );
3936 #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
3938 /* The task can only have been allocated dynamically - free both
3939 the stack and TCB. */
3940 vPortFreeAligned( pxTCB->pxStack );
3943 #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
3945 /* The task could have been allocated statically or dynamically, so
3946 check what was statically allocated before trying to free the
3948 if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
3950 /* Both the stack and TCB were allocated dynamically, so both
3952 vPortFreeAligned( pxTCB->pxStack );
3955 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
3957 /* Only the stack was statically allocated, so the TCB is the
3958 only memory that must be freed. */
3963 /* Neither the stack nor the TCB were allocated dynamically, so
3964 nothing needs to be freed. */
3965 configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
3966 mtCOVERAGE_TEST_MARKER();
3969 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
3972 #endif /* INCLUDE_vTaskDelete */
3973 /*-----------------------------------------------------------*/
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )

	/* Invoke the registered deletion callback (if any) for each of the
	task's thread-local-storage slots. Called during task deletion so the
	application can free per-task TLS resources; each callback receives
	the slot index and the pointer stored in that slot. */
	static void prvDeleteTLS( TCB_t *pxTCB )
	{
	BaseType_t x;

		configASSERT( pxTCB );

		/* Fix: the original declared a signed 'int' loop index but compared
		it against the bound cast to unsigned UBaseType_t, causing a
		signed/unsigned comparison. Use BaseType_t with a signed bound. */
		for( x = 0; x < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
		{
			if( pxTCB->pvThreadLocalStoragePointersDelCallback[ x ] != NULL )
			{
				/* Slot has a callback registered: call it with index and value. */
				pxTCB->pvThreadLocalStoragePointersDelCallback[ x ]( x, pxTCB->pvThreadLocalStoragePointers[ x ] );
			}
		}
	}

#endif /* ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) */
3990 /*-----------------------------------------------------------*/
/* Recompute xNextTaskUnblockTime from the head of the current delayed
   list: portMAX_DELAY when the list is empty, otherwise the wake time
   (list item value) of the soonest-to-wake blocked task. */
3992 static void prvResetNextTaskUnblockTime( void )
3996 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
3998 /* The new current delayed list is empty. Set
3999 xNextTaskUnblockTime to the maximum possible value so it is
4000 extremely unlikely that the
4001 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
4002 there is an item in the delayed list. */
4003 xNextTaskUnblockTime = portMAX_DELAY;
4007 /* The new current delayed list is not empty, get the value of
4008 the item at the head of the delayed list. This is the time at
4009 which the task at the head of the delayed list should be removed
4010 from the Blocked state. */
4011 ( pxTCB ) = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
4012 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xGenericListItem ) );
4015 /*-----------------------------------------------------------*/
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )

	/* Return the handle of the task running on the calling core. */
	TaskHandle_t xTaskGetCurrentTaskHandle( void )
	{
	TaskHandle_t xReturn;
	unsigned state;

		/* Briefly mask interrupts so reading this core's pxCurrentTCB entry
		cannot race a context switch on the same core. */
		state = portENTER_CRITICAL_NESTED();
		xReturn = pxCurrentTCB[ xPortGetCoreID() ];
		portEXIT_CRITICAL_NESTED(state);

		return xReturn;
	}

	/* Return the handle of the task running on the given core, or NULL when
	cpuid is out of range. */
	TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t cpuid )
	{
	TaskHandle_t xReturn = NULL;

		//Xtensa-specific: the pxCurrentTCB pointer is atomic so we shouldn't need a lock.
		if( cpuid < portNUM_PROCESSORS )
		{
			xReturn = pxCurrentTCB[ cpuid ];
		}

		return xReturn;
	}

#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
4045 /*-----------------------------------------------------------*/
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )

	/* Report whether the scheduler has not yet started, is running, or is
	suspended (vTaskSuspendAll()) on the calling core. */
	BaseType_t xTaskGetSchedulerState( void )
	{
	BaseType_t xReturn;
	unsigned state;

		state = portENTER_CRITICAL_NESTED();
		if( xSchedulerRunning == pdFALSE )
		{
			/* vTaskStartScheduler() has not been called. */
			xReturn = taskSCHEDULER_NOT_STARTED;
		}
		else if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
		{
			xReturn = taskSCHEDULER_RUNNING;
		}
		else
		{
			/* This core is inside a vTaskSuspendAll()/xTaskResumeAll() pair. */
			xReturn = taskSCHEDULER_SUSPENDED;
		}
		portEXIT_CRITICAL_NESTED(state);

		return xReturn;
	}

#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
4076 /*-----------------------------------------------------------*/
4078 #if ( configUSE_MUTEXES == 1 )

	/* Priority inheritance: called (with the queue locked) when the current
	task attempts to take a mutex already held by pxMutexHolder.  If the
	holder's priority is below the current task's, the holder temporarily
	inherits the current task's priority so it cannot be starved while it
	owns the mutex.  The holder is re-inserted into the ready list for its
	new priority if it was ready. */
4080 void vTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
4082 TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
4084 taskENTER_CRITICAL(&xTickCountMutex);
4085 /* If the mutex was given back by an interrupt while the queue was
4086 locked then the mutex holder might now be NULL. */
4087 if( pxMutexHolder != NULL )
	/* Only act when the holder's priority is below the current task's. */
4089 if( pxTCB->uxPriority < pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
4091 taskENTER_CRITICAL(&xTaskQueueMutex);
4092 /* Adjust the mutex holder state to account for its new
4093 priority. Only reset the event list item value if the value is
4094 not being used for anything else. */
4095 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
4097 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4101 mtCOVERAGE_TEST_MARKER();
4104 /* If the task being modified is in the ready state it will need to
4105 be moved into a new list. */
4106 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxTCB->uxPriority ] ), &( pxTCB->xGenericListItem ) ) != pdFALSE )
4108 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
	/* The holder was the last ready task at its old priority. */
4110 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4114 mtCOVERAGE_TEST_MARKER();
4117 /* Inherit the priority before being moved into the new list. */
4118 pxTCB->uxPriority = pxCurrentTCB[ xPortGetCoreID() ]->uxPriority;
4119 prvReaddTaskToReadyList( pxTCB );
	/* Holder was not in a ready list (e.g. blocked). */
4123 /* Just inherit the priority. */
4124 pxTCB->uxPriority = pxCurrentTCB[ xPortGetCoreID() ]->uxPriority;
4127 taskEXIT_CRITICAL(&xTaskQueueMutex);
4129 traceTASK_PRIORITY_INHERIT( pxTCB, pxCurrentTCB[ xPortGetCoreID() ]->uxPriority );
4133 mtCOVERAGE_TEST_MARKER();
4138 mtCOVERAGE_TEST_MARKER();
4141 taskEXIT_CRITICAL(&xTickCountMutex);
4145 #endif /* configUSE_MUTEXES */
4146 /*-----------------------------------------------------------*/
4148 #if ( configUSE_MUTEXES == 1 )

	/* Undo priority inheritance when pxMutexHolder releases a mutex.
	Decrements the holder's mutex count; only when the count reaches zero and
	a priority had been inherited is the task's priority restored to its base
	priority.  Returns pdTRUE when the caller should request a context switch
	(see the comment near the end of the inner branch). */
4150 BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
4152 TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
4153 BaseType_t xReturn = pdFALSE;
4154 taskENTER_CRITICAL(&xTickCountMutex);
4156 if( pxMutexHolder != NULL )
	/* The holder must own at least one mutex for this call to be valid. */
4158 configASSERT( pxTCB->uxMutexesHeld );
4159 ( pxTCB->uxMutexesHeld )--;
	/* A priority was inherited only if current != base priority. */
4161 if( pxTCB->uxPriority != pxTCB->uxBasePriority )
4163 /* Only disinherit if no other mutexes are held. */
4164 if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
4166 taskENTER_CRITICAL(&xTaskQueueMutex);
4167 /* A task can only have an inherited priority if it holds
4168 the mutex. If the mutex is held by a task then it cannot be
4169 given from an interrupt, and if a mutex is given by the
4170 holding task then it must be the running state task. Remove
4171 the holding task from the ready list. */
4172 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
4174 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4178 mtCOVERAGE_TEST_MARKER();
4181 /* Disinherit the priority before adding the task into the
4183 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4184 pxTCB->uxPriority = pxTCB->uxBasePriority;
4186 /* Reset the event list item value. It cannot be in use for
4187 any other purpose if this task is running, and it must be
4188 running to give back the mutex. */
4189 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4190 prvReaddTaskToReadyList( pxTCB );
4192 /* Return true to indicate that a context switch is required.
4193 This is only actually required in the corner case whereby
4194 multiple mutexes were held and the mutexes were given back
4195 in an order different to that in which they were taken.
4196 If a context switch did not occur when the first mutex was
4197 returned, even if a task was waiting on it, then a context
4198 switch should occur when the last mutex is returned whether
4199 a task is waiting on it or not. */
	/* NOTE(review): the assignment xReturn = pdTRUE belongs in this
	branch (immediately after the comment above) - verify against the
	upstream source. */
4201 taskEXIT_CRITICAL(&xTaskQueueMutex);
4205 mtCOVERAGE_TEST_MARKER();
4210 mtCOVERAGE_TEST_MARKER();
4215 mtCOVERAGE_TEST_MARKER();
4218 taskEXIT_CRITICAL(&xTickCountMutex);
4222 #endif /* configUSE_MUTEXES */
4223 /*-----------------------------------------------------------*/
4225 /* For multicore, this assumes the vPortCPUAcquireMutex is recursive, that is, it can be called multiple
4226 times and the release call will have to be called as many times for the mux to unlock. */
4228 /* Gotcha (which seems to be deliberate in FreeRTOS, according to
4229 http://www.freertos.org/FreeRTOS_Support_Forum_Archive/December_2012/freertos_PIC32_Bug_-_vTaskEnterCritical_6400806.html
4230 ) is that calling vTaskEnterCritical followed by vTaskExitCritical will leave the interrupts DISABLED when the scheduler
4231 is not running. Re-enabling the scheduler will re-enable the interrupts instead.
4233 For ESP32 FreeRTOS, vTaskEnterCritical implements both portENTER_CRITICAL and portENTER_CRITICAL_ISR.
4236 #if ( portCRITICAL_NESTING_IN_TCB == 1 )

4238 #include "portmux_impl.h"

	/* ESP32 critical-section entry: disables interrupts on this core, takes
	the given spinlock (recursively), and tracks the nesting depth in the
	current task's TCB.  The interrupt state captured on the first (outermost)
	entry is saved so vTaskExitCritical can restore it.  Implements both
	portENTER_CRITICAL and portENTER_CRITICAL_ISR.  The debug build variant
	additionally records the caller's function/line for lock diagnostics. */
4240 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
4241 void vTaskEnterCritical( portMUX_TYPE *mux, const char *function, int line )
4243 void vTaskEnterCritical( portMUX_TYPE *mux )
4246 BaseType_t oldInterruptLevel=0;
4247 BaseType_t schedulerRunning = xSchedulerRunning;
4248 if( schedulerRunning != pdFALSE )
4250 //Interrupts may already be disabled (because we're doing this recursively) but we can't get the interrupt level after
4251 //vPortCPUAquireMutex, because it also may mess with interrupts. Get it here first, then later figure out if we're nesting
4252 //and save for real there.
4253 oldInterruptLevel=portENTER_CRITICAL_NESTED();
4255 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
4256 vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT, function, line );
4258 vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT );
4261 if( schedulerRunning != pdFALSE )
4263 TCB_t *tcb = pxCurrentTCB[xPortGetCoreID()];
4264 BaseType_t newNesting = tcb->uxCriticalNesting + 1;
4265 tcb->uxCriticalNesting = newNesting;
4266 if( newNesting == 1 )
4268 //This is the first time we get called. Save original interrupt level.
4269 tcb->uxOldInterruptState = oldInterruptLevel;
4272 /* Original FreeRTOS comment, saved for reference:
4273 This is not the interrupt safe version of the enter critical
4274 function so assert() if it is being called from an interrupt
4275 context. Only API functions that end in "FromISR" can be used in an
4276 interrupt. Only assert if the critical nesting count is 1 to
4277 protect against recursive calls if the assert function also uses a
4278 critical section. */
4280 /* DISABLED in the esp32 port - because of SMP, For ESP32
4281 FreeRTOS, vTaskEnterCritical implements both
4282 portENTER_CRITICAL and portENTER_CRITICAL_ISR. vTaskEnterCritical
4283 has to be used in way more places than before, and some are called
4284 both from ISR as well as non-ISR code, thus we re-organized
4285 vTaskEnterCritical to also work in ISRs. */
4287 if( newNesting == 1 )
4289 portASSERT_IF_IN_ISR();
4296 mtCOVERAGE_TEST_MARKER();

4300 #endif /* portCRITICAL_NESTING_IN_TCB */
4301 /*-----------------------------------------------------------*/
4305 For ESP32 FreeRTOS, vTaskExitCritical implements both portEXIT_CRITICAL and portEXIT_CRITICAL_ISR.
4307 #if ( portCRITICAL_NESTING_IN_TCB == 1 )

	/* ESP32 critical-section exit: releases the spinlock, decrements the
	TCB-held nesting count, and restores the interrupt state saved by the
	outermost vTaskEnterCritical once nesting returns to zero.  Implements
	both portEXIT_CRITICAL and portEXIT_CRITICAL_ISR. */
4309 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
4310 void vTaskExitCritical( portMUX_TYPE *mux, const char *function, int line )
4312 void vTaskExitCritical( portMUX_TYPE *mux )
4315 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
4316 vPortCPUReleaseMutexIntsDisabled( mux, function, line );
4318 vPortCPUReleaseMutexIntsDisabled( mux );
4320 if( xSchedulerRunning != pdFALSE )
4322 TCB_t *tcb = pxCurrentTCB[xPortGetCoreID()];
4323 BaseType_t nesting = tcb->uxCriticalNesting;
	/* NOTE(review): the guard "if( nesting > 0 )" and the decrement
	"nesting--" sit between these two statements in the upstream source -
	verify when merging. */
4327 tcb->uxCriticalNesting = nesting;
	/* Nesting returned to zero: restore the interrupt level captured by
	the outermost vTaskEnterCritical. */
4331 portEXIT_CRITICAL_NESTED(tcb->uxOldInterruptState);
4335 mtCOVERAGE_TEST_MARKER();
4340 mtCOVERAGE_TEST_MARKER();
4345 mtCOVERAGE_TEST_MARKER();

4349 #endif /* portCRITICAL_NESTING_IN_TCB */
4350 /*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

	/* Copy pcTaskName into pcBuffer and pad the tail with spaces out to
	configMAX_TASK_NAME_LEN - 1 characters so the vTaskList()/run-time-stats
	tables line up in columns.  Returns a pointer to the terminating NUL so
	the caller can continue appending at the end of the string. */
	static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
	{
	size_t uxChar;

		/* Start by copying the entire string. */
		strcpy( pcBuffer, pcTaskName );

		/* Pad the end of the string with spaces to ensure columns line up when
		printed out. */
		for( uxChar = strlen( pcBuffer ); uxChar < ( configMAX_TASK_NAME_LEN - 1 ); uxChar++ )
		{
			pcBuffer[ uxChar ] = ' ';
		}

		/* Terminate. */
		pcBuffer[ uxChar ] = 0x00;

		/* Return the new end of string. */
		return &( pcBuffer[ uxChar ] );
	}

#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
4376 /*-----------------------------------------------------------*/
4378 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

	/* Format a human-readable table of all tasks (name, state, priority,
	stack high water mark, task number, and - when enabled - core affinity)
	into pcWriteBuffer.  NOTE(review): the writes via sprintf() are
	unbounded; the caller must supply a buffer large enough for every task. */
4380 void vTaskList( char * pcWriteBuffer )
4382 TaskStatus_t *pxTaskStatusArray;
4383 volatile UBaseType_t uxArraySize, x;
4389 * This function is provided for convenience only, and is used by many
4390 * of the demo applications. Do not consider it to be part of the
4393 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
4394 * uxTaskGetSystemState() output into a human readable table that
4395 * displays task names, states and stack usage.
4397 * vTaskList() has a dependency on the sprintf() C library function that
4398 * might bloat the code size, use a lot of stack, and provide different
4399 * results on different platforms. An alternative, tiny, third party,
4400 * and limited functionality implementation of sprintf() is provided in
4401 * many of the FreeRTOS/Demo sub-directories in a file called
4402 * printf-stdarg.c (note printf-stdarg.c does not provide a full
4403 * snprintf() implementation!).
4405 * It is recommended that production systems call uxTaskGetSystemState()
4406 * directly to get access to raw stats data, rather than indirectly
4407 * through a call to vTaskList().
4411 /* Make sure the write buffer does not contain a string. */
4412 *pcWriteBuffer = 0x00;
4414 /* Take a snapshot of the number of tasks in case it changes while this
4415 function is executing. */
4416 uxArraySize = uxCurrentNumberOfTasks;
4418 /* Allocate an array index for each task. NOTE! if
4419 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4421 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );
4423 if( pxTaskStatusArray != NULL )
4425 /* Generate the (binary) data. */
4426 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );
4428 /* Create a human readable table from the binary data. */
4429 for( x = 0; x < uxArraySize; x++ )
	/* Map the task state enum to its single-character table code. */
4431 switch( pxTaskStatusArray[ x ].eCurrentState )
4433 case eReady: cStatus = tskREADY_CHAR;
4436 case eBlocked: cStatus = tskBLOCKED_CHAR;
4439 case eSuspended: cStatus = tskSUSPENDED_CHAR;
4442 case eDeleted: cStatus = tskDELETED_CHAR;
4445 default: /* Should not get here, but it is included
4446 to prevent static checking errors. */
4451 /* Write the task name to the string, padding with spaces so it
4452 can be printed in tabular form more easily. */
4453 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4455 /* Write the rest of the string. */
4456 #if configTASKLIST_INCLUDE_COREID
4457 sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\t%hd\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber, ( int ) pxTaskStatusArray[ x ].xCoreID );
4459 sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber );
4461 pcWriteBuffer += strlen( pcWriteBuffer );
4464 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4465 is 0 then vPortFree() will be #defined to nothing. */
4466 vPortFree( pxTaskStatusArray );
4470 mtCOVERAGE_TEST_MARKER();

4474 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
4475 /*----------------------------------------------------------*/
4477 #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

	/* Format a human-readable table of each task's accumulated run time, in
	absolute counter units and as a percentage of total run time, into
	pcWriteBuffer.  Percentages are rounded down; tasks below 1% are shown
	as "<1%".  NOTE(review): sprintf() writes are unbounded - the caller
	must size the buffer for the worst case. */
4479 void vTaskGetRunTimeStats( char *pcWriteBuffer )
4481 TaskStatus_t *pxTaskStatusArray;
4482 volatile UBaseType_t uxArraySize, x;
4483 uint32_t ulTotalTime, ulStatsAsPercentage;
4485 #if( configUSE_TRACE_FACILITY != 1 )
4487 #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
4494 * This function is provided for convenience only, and is used by many
4495 * of the demo applications. Do not consider it to be part of the
4498 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
4499 * of the uxTaskGetSystemState() output into a human readable table that
4500 * displays the amount of time each task has spent in the Running state
4501 * in both absolute and percentage terms.
4503 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
4504 * function that might bloat the code size, use a lot of stack, and
4505 * provide different results on different platforms. An alternative,
4506 * tiny, third party, and limited functionality implementation of
4507 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
4508 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
4509 * a full snprintf() implementation!).
4511 * It is recommended that production systems call uxTaskGetSystemState()
4512 * directly to get access to raw stats data, rather than indirectly
4513 * through a call to vTaskGetRunTimeStats().
4516 /* Make sure the write buffer does not contain a string. */
4517 *pcWriteBuffer = 0x00;
4519 /* Take a snapshot of the number of tasks in case it changes while this
4520 function is executing. */
4521 uxArraySize = uxCurrentNumberOfTasks;
4523 /* Allocate an array index for each task. NOTE! If
4524 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4526 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );
4528 if( pxTaskStatusArray != NULL )
4530 /* Generate the (binary) data. */
4531 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
4533 /* For percentage calculations. */
4534 ulTotalTime /= 100UL;
4536 /* Avoid divide by zero errors. */
4537 if( ulTotalTime > 0 )
4539 /* Create a human readable table from the binary data. */
4540 for( x = 0; x < uxArraySize; x++ )
4542 /* What percentage of the total run time has the task used?
4543 This will always be rounded down to the nearest integer.
4544 ulTotalRunTimeDiv100 has already been divided by 100. */
4545 /* Also need to consider total run time of all */
4546 ulStatsAsPercentage = (pxTaskStatusArray[ x ].ulRunTimeCounter/portNUM_PROCESSORS)/ ulTotalTime;
4548 /* Write the task name to the string, padding with
4549 spaces so it can be printed in tabular form more
4551 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4553 if( ulStatsAsPercentage > 0UL )
4555 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4557 sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
4561 /* sizeof( int ) == sizeof( long ) so a smaller
4562 printf() library can be used. */
4563 sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage );
4569 /* If the percentage is zero here then the task has
4570 consumed less than 1% of the total run time. */
4571 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4573 sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
4577 /* sizeof( int ) == sizeof( long ) so a smaller
4578 printf() library can be used. */
4579 sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter );
4584 pcWriteBuffer += strlen( pcWriteBuffer );
4589 mtCOVERAGE_TEST_MARKER();
4592 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4593 is 0 then vPortFree() will be #defined to nothing. */
4594 vPortFree( pxTaskStatusArray );
4598 mtCOVERAGE_TEST_MARKER();

4602 #endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
4603 /*-----------------------------------------------------------*/
4605 TickType_t uxTaskResetEventItemValue( void )
4607 TickType_t uxReturn;
4608 taskENTER_CRITICAL(&xTaskQueueMutex);
4609 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
4611 /* Reset the event list item to its normal value - so it can be used with
4612 queues and semaphores. */
4613 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4614 taskEXIT_CRITICAL(&xTaskQueueMutex);
4618 /*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

	/* Increment the running task's held-mutex count and return its TCB so
	the mutex can record its holder.  Returns NULL when called before any
	task exists. */
	void *pvTaskIncrementMutexHeldCount( void )
	{
	TCB_t *curTCB;

		/* If xSemaphoreCreateMutex() is called before any tasks have been created
		then pxCurrentTCB will be NULL. */
		taskENTER_CRITICAL(&xTaskQueueMutex);
		if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
		{
			( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
		}
		curTCB = pxCurrentTCB[ xPortGetCoreID() ];
		taskEXIT_CRITICAL(&xTaskQueueMutex);

		return curTCB;
	}

#endif /* configUSE_MUTEXES */
4640 /*-----------------------------------------------------------*/
4642 #if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Lightweight counting-semaphore take using the task notification value.
	Blocks (up to xTicksToWait) while the calling task's notification value
	is zero; on exit either clears the value (xClearCountOnExit != pdFALSE)
	or decrements it, and returns the value as it was before that
	clear/decrement.  Returns 0 on timeout. */
4644 uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
4646 TickType_t xTimeToWake;
4649 taskENTER_CRITICAL(&xTaskQueueMutex);
4651 /* Only block if the notification count is not already non-zero. */
4652 if( pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue == 0UL )
4654 /* Mark this task as waiting for a notification. */
4655 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eWaitingNotification;
4657 if( xTicksToWait > ( TickType_t ) 0 )
4659 /* The task is going to block. First it must be removed
4660 from the ready list. */
4661 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
4663 /* The current task must be in a ready list, so there is
4664 no need to check, and the port reset macro can be called
4666 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
4670 mtCOVERAGE_TEST_MARKER();
4673 #if ( INCLUDE_vTaskSuspend == 1 )
4675 if( xTicksToWait == portMAX_DELAY )
4677 /* Add the task to the suspended task list instead
4678 of a delayed task list to ensure the task is not
4679 woken by a timing event. It will block
4681 traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
4682 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
4686 /* Calculate the time at which the task should be
4687 woken if no notification events occur. This may
4688 overflow but this doesn't matter, the scheduler will
4690 xTimeToWake = xTickCount + xTicksToWait;
4691 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
4694 #else /* INCLUDE_vTaskSuspend */
4696 /* Calculate the time at which the task should be
4697 woken if the event does not occur. This may
4698 overflow but this doesn't matter, the scheduler will
4700 xTimeToWake = xTickCount + xTicksToWait;
4701 prvAddCurrentTaskToDelayedList( xTimeToWake );
4703 #endif /* INCLUDE_vTaskSuspend */
4705 /* All ports are written to allow a yield in a critical
4706 section (some will yield immediately, others wait until the
4707 critical section exits) - but it is not something that
4708 application code should ever do. */
4709 portYIELD_WITHIN_API();
4713 mtCOVERAGE_TEST_MARKER();
4718 mtCOVERAGE_TEST_MARKER();
4721 taskEXIT_CRITICAL(&xTaskQueueMutex);

	/* Re-entered after unblocking (or immediately when no block was
	needed): consume the notification value. */
4723 taskENTER_CRITICAL(&xTaskQueueMutex);
4725 ulReturn = pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue;
4727 if( ulReturn != 0UL )
4729 if( xClearCountOnExit != pdFALSE )
4731 pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue = 0UL;
4735 ( pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue )--;
4740 mtCOVERAGE_TEST_MARKER();
4743 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eNotWaitingNotification;
4745 taskEXIT_CRITICAL(&xTaskQueueMutex);

4750 #endif /* configUSE_TASK_NOTIFICATIONS */
4751 /*-----------------------------------------------------------*/
4753 #if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Wait (up to xTicksToWait) for a notification.  On entry clears the
	bits in ulBitsToClearOnEntry from the task's notification value; on a
	successful wait reports the value via pulNotificationValue (if non-NULL)
	and clears ulBitsToClearOnExit.  Returns pdTRUE when a notification was
	received/pending, pdFALSE on timeout. */
4755 BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
4757 TickType_t xTimeToWake;
4760 taskENTER_CRITICAL(&xTaskQueueMutex);
4762 /* Only block if a notification is not already pending. */
4763 if( pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState != eNotified )
4765 /* Clear bits in the task's notification value as bits may get
4766 set by the notifying task or interrupt. This can be used to
4767 clear the value to zero. */
4768 pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue &= ~ulBitsToClearOnEntry;
4770 /* Mark this task as waiting for a notification. */
4771 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eWaitingNotification;
4773 if( xTicksToWait > ( TickType_t ) 0 )
4775 /* The task is going to block. First it must be removed
4776 from the ready list. */
4777 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
4779 /* The current task must be in a ready list, so there is
4780 no need to check, and the port reset macro can be called
4782 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
4786 mtCOVERAGE_TEST_MARKER();
4789 #if ( INCLUDE_vTaskSuspend == 1 )
4791 if( xTicksToWait == portMAX_DELAY )
4793 /* Add the task to the suspended task list instead
4794 of a delayed task list to ensure the task is not
4795 woken by a timing event. It will block
4797 traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
4798 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
4802 /* Calculate the time at which the task should be
4803 woken if no notification events occur. This may
4804 overflow but this doesn't matter, the scheduler will
4806 xTimeToWake = xTickCount + xTicksToWait;
4807 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
4810 #else /* INCLUDE_vTaskSuspend */
4812 /* Calculate the time at which the task should be
4813 woken if the event does not occur. This may
4814 overflow but this doesn't matter, the scheduler will
4816 xTimeToWake = xTickCount + xTicksToWait;
4817 prvAddCurrentTaskToDelayedList( xTimeToWake );
4819 #endif /* INCLUDE_vTaskSuspend */
4821 /* All ports are written to allow a yield in a critical
4822 section (some will yield immediately, others wait until the
4823 critical section exits) - but it is not something that
4824 application code should ever do. */
4825 portYIELD_WITHIN_API();
4829 mtCOVERAGE_TEST_MARKER();
4834 mtCOVERAGE_TEST_MARKER();
4837 taskEXIT_CRITICAL(&xTaskQueueMutex);

	/* Re-entered after unblocking (or immediately when a notification was
	already pending): report the outcome. */
4839 taskENTER_CRITICAL(&xTaskQueueMutex);
4841 if( pulNotificationValue != NULL )
4843 /* Output the current notification value, which may or may not
4845 *pulNotificationValue = pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue;
4848 /* If eNotifyValue is set then either the task never entered the
4849 blocked state (because a notification was already pending) or the
4850 task unblocked because of a notification. Otherwise the task
4851 unblocked because of a timeout. */
4852 if( pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState == eWaitingNotification )
4854 /* A notification was not received. */
4859 /* A notification was already pending or a notification was
4860 received while the task was waiting. */
4861 pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue &= ~ulBitsToClearOnExit;
4865 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eNotWaitingNotification;
4867 taskEXIT_CRITICAL(&xTaskQueueMutex);

4872 #endif /* configUSE_TASK_NOTIFICATIONS */
4873 /*-----------------------------------------------------------*/
4875 #if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Send a notification to xTaskToNotify, updating its notification value
	according to eAction (set bits, increment, overwrite, conditional
	overwrite, or no update).  If the target was blocked waiting for a
	notification it is made ready, and a yield is requested on this core or
	the target's core as appropriate.  Returns pdFAIL only for
	eSetValueWithoutOverwrite when a notification was already pending;
	otherwise pdPASS. */
4877 BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction )
4880 eNotifyValue eOriginalNotifyState;
4881 BaseType_t xReturn = pdPASS;
4883 configASSERT( xTaskToNotify );
4884 pxTCB = ( TCB_t * ) xTaskToNotify;
4886 taskENTER_CRITICAL(&xTaskQueueMutex);
4888 eOriginalNotifyState = pxTCB->eNotifyState;
4890 pxTCB->eNotifyState = eNotified;
	/* Apply eAction to the target's notification value (switch cases:
	eSetBits, eIncrement, then the two set-value variants below). */
4895 pxTCB->ulNotifiedValue |= ulValue;
4899 ( pxTCB->ulNotifiedValue )++;
4902 case eSetValueWithOverwrite :
4903 pxTCB->ulNotifiedValue = ulValue;
4906 case eSetValueWithoutOverwrite :
4907 if( eOriginalNotifyState != eNotified )
4909 pxTCB->ulNotifiedValue = ulValue;
4913 /* The value could not be written to the task. */
4919 /* The task is being notified without its notify value being
4925 /* If the task is in the blocked state specifically to wait for a
4926 notification then unblock it now. */
4927 if( eOriginalNotifyState == eWaitingNotification )
4929 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
4930 prvAddTaskToReadyList( pxTCB );
4932 /* The task should not have been on an event list. */
4933 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
4935 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
4937 /* The notified task has a priority above the currently
4938 executing task so a yield is required. */
4939 portYIELD_WITHIN_API();
4941 else if ( pxTCB->xCoreID != xPortGetCoreID() )
	/* Target is pinned to the other core - ask that core to yield. */
4943 taskYIELD_OTHER_CORE(pxTCB->xCoreID, pxTCB->uxPriority);
4947 mtCOVERAGE_TEST_MARKER();
4952 mtCOVERAGE_TEST_MARKER();
4955 taskEXIT_CRITICAL(&xTaskQueueMutex);

4960 #endif /* configUSE_TASK_NOTIFICATIONS */
4961 /*-----------------------------------------------------------*/
4963 #if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* ISR-safe variant of xTaskNotify().  Same value-update semantics, but
	instead of yielding directly it sets *pxHigherPriorityTaskWoken (if
	provided) so the ISR can request a context switch on exit; when the
	scheduler is suspended the woken task is parked on this core's pending
	ready list. */
4965 BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken )
4968 eNotifyValue eOriginalNotifyState;
4969 BaseType_t xReturn = pdPASS;
4971 configASSERT( xTaskToNotify );
4973 pxTCB = ( TCB_t * ) xTaskToNotify;
4975 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
4978 eOriginalNotifyState = pxTCB->eNotifyState;
4980 pxTCB->eNotifyState = eNotified;
	/* Apply eAction to the target's notification value. */
4985 pxTCB->ulNotifiedValue |= ulValue;
4989 ( pxTCB->ulNotifiedValue )++;
4992 case eSetValueWithOverwrite :
4993 pxTCB->ulNotifiedValue = ulValue;
4996 case eSetValueWithoutOverwrite :
4997 if( eOriginalNotifyState != eNotified )
4999 pxTCB->ulNotifiedValue = ulValue;
5003 /* The value could not be written to the task. */
5009 /* The task is being notified without its notify value being
5015 /* If the task is in the blocked state specifically to wait for a
5016 notification then unblock it now. */
5017 if( eOriginalNotifyState == eWaitingNotification )
5019 /* The task should not have been on an event list. */
5020 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
5022 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
5024 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
5025 prvAddTaskToReadyList( pxTCB );
5029 /* The delayed and ready lists cannot be accessed, so hold
5030 this task pending until the scheduler is resumed. */
5031 vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
5034 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
5036 /* The notified task has a priority above the currently
5037 executing task so a yield is required. */
5038 if( pxHigherPriorityTaskWoken != NULL )
5040 *pxHigherPriorityTaskWoken = pdTRUE;
5043 else if ( pxTCB->xCoreID != xPortGetCoreID() )
	/* Target runs on the other core - cross-core yield request. */
5045 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
5049 mtCOVERAGE_TEST_MARKER();
5053 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);

5058 #endif /* configUSE_TASK_NOTIFICATIONS */
5059 /*-----------------------------------------------------------*/
5061 #if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* ISR-safe "give": increments the target task's notification value (the
	counting-semaphore pattern paired with ulTaskNotifyTake()), unblocks the
	task if it was waiting, and signals the need for a context switch via
	*pxHigherPriorityTaskWoken or a cross-core yield request. */
5063 void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
5066 eNotifyValue eOriginalNotifyState;
5068 configASSERT( xTaskToNotify );
5071 pxTCB = ( TCB_t * ) xTaskToNotify;
5073 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
5075 eOriginalNotifyState = pxTCB->eNotifyState;
5076 pxTCB->eNotifyState = eNotified;
5078 /* 'Giving' is equivalent to incrementing a count in a counting
5080 ( pxTCB->ulNotifiedValue )++;
5082 /* If the task is in the blocked state specifically to wait for a
5083 notification then unblock it now. */
5084 if( eOriginalNotifyState == eWaitingNotification )
5086 /* The task should not have been on an event list. */
5087 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
5089 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
5091 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
5092 prvAddTaskToReadyList( pxTCB );
5096 /* The delayed and ready lists cannot be accessed, so hold
5097 this task pending until the scheduler is resumed. */
5098 vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
5101 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
5103 /* The notified task has a priority above the currently
5104 executing task so a yield is required. */
5105 if( pxHigherPriorityTaskWoken != NULL )
5107 *pxHigherPriorityTaskWoken = pdTRUE;
5110 else if ( pxTCB->xCoreID != xPortGetCoreID() )
	/* Target runs on the other core - cross-core yield request. */
5112 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
5116 mtCOVERAGE_TEST_MARKER();
5120 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);

5123 #endif /* configUSE_TASK_NOTIFICATIONS */
5125 #if ( configENABLE_TASK_SNAPSHOT == 1 )
	/* Copy one task's snapshot (its TCB pointer, saved stack top, and
	logical stack end) into pxTaskSnapshotArray[ *uxTask ].  A NULL pxTCB
	is skipped.  NOTE(review): the increment of *uxTask is presumably on
	a line missing from this extraction (original line numbers jump) —
	confirm against the full source. */
5126 static void prvTaskGetSnapshot( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, TCB_t *pxTCB )
5128 if (pxTCB == NULL) {
5131 pxTaskSnapshotArray[ *uxTask ].pxTCB = pxTCB;
5132 pxTaskSnapshotArray[ *uxTask ].pxTopOfStack = (StackType_t *)pxTCB->pxTopOfStack;
5133 #if( portSTACK_GROWTH < 0 )
	/* Downward-growing stack: the logical end is the high address. */
5135 pxTaskSnapshotArray[ *uxTask ].pxEndOfStack = pxTCB->pxEndOfStack;
	/* Upward-growing stack: the logical end is the allocation base. */
5139 pxTaskSnapshotArray[ *uxTask ].pxEndOfStack = pxTCB->pxStack;
	/* Append a snapshot for every task on pxList into
	pxTaskSnapshotArray, using *uxTask as the write cursor and stopping
	once uxArraySize entries have been written. */
5145 static void prvTaskGetSnapshotsFromList( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, const UBaseType_t uxArraySize, List_t *pxList )
5147 TCB_t *pxNextTCB, *pxFirstTCB;
5149 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
	/* Remember the first entry so the do/while below can detect when it
	has walked all the way around the circular list. */
5151 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );
	/* Stop early once the caller-supplied array is full. */
5154 if( *uxTask >= uxArraySize )
5157 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
5158 prvTaskGetSnapshot( pxTaskSnapshotArray, uxTask, pxNextTCB );
5159 } while( pxNextTCB != pxFirstTCB );
5163 mtCOVERAGE_TEST_MARKER();
	/* Fill pxTaskSnapshotArray with a snapshot of every task in the
	system: ready (all priorities), delayed (both delayed lists),
	pending-ready (one list per core in this SMP port), and — when the
	corresponding features are enabled — tasks awaiting deletion and
	suspended tasks.  *pxTcbSz receives sizeof(TCB_t) so an external tool
	can parse raw TCBs.  NOTE(review): no locking is visible in this
	chunk — presumably intended for panic/debug use with the scheduler
	halted; confirm against the full source. */
5167 UBaseType_t uxTaskGetSnapshotAll( TaskSnapshot_t * const pxTaskSnapshotArray, const UBaseType_t uxArraySize, UBaseType_t * const pxTcbSz )
5169 UBaseType_t uxTask = 0, i = 0;
	/* Report the TCB size first so the caller can interpret pxTCB. */
5172 *pxTcbSz = sizeof(TCB_t);
5173 /* Fill in an TaskStatus_t structure with information on each
5174 task in the Ready state. */
	/* Walk the ready lists from highest priority down to idle (the loop
	decrement lives on a line missing from this extraction). */
5175 i = configMAX_PRIORITIES;
5179 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &( pxReadyTasksLists[ i ] ) );
5180 } while( i > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
5182 /* Fill in an TaskStatus_t structure with information on each
5183 task in the Blocked state. */
5184 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, ( List_t * ) pxDelayedTaskList );
5185 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, ( List_t * ) pxOverflowDelayedTaskList );
	/* Tasks readied while the scheduler was suspended: one pending-ready
	list per core in this SMP port. */
5186 for (i = 0; i < portNUM_PROCESSORS; i++) {
	/* Bail out if the output array is already full. */
5187 if( uxTask >= uxArraySize )
5189 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &( xPendingReadyList[ i ] ) );
5192 #if( INCLUDE_vTaskDelete == 1 )
5194 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &xTasksWaitingTermination );
5198 #if ( INCLUDE_vTaskSuspend == 1 )
5200 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &xSuspendedTaskList );
5208 #ifdef FREERTOS_MODULE_TEST
5209 #include "tasks_test_access_functions.h"