2 FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.
5 VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
7 This file is part of the FreeRTOS distribution.
9 FreeRTOS is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License (version 2) as published by the
11 Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
13 ***************************************************************************
14 >>! NOTE: The modification to the GPL is included to allow you to !<<
15 >>! distribute a combined work that includes FreeRTOS without being !<<
16 >>! obliged to provide the source code for proprietary components !<<
17 >>! outside of the FreeRTOS kernel. !<<
18 ***************************************************************************
20 FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
21 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
22 FOR A PARTICULAR PURPOSE. Full license text is available on the following
23 link: http://www.freertos.org/a00114.html
25 ***************************************************************************
27 * FreeRTOS provides completely free yet professionally developed, *
28 * robust, strictly quality controlled, supported, and cross *
29 * platform software that is more than just the market leader, it *
30 * is the industry's de facto standard. *
32 * Help yourself get started quickly while simultaneously helping *
33 * to support the FreeRTOS project by purchasing a FreeRTOS *
34 * tutorial book, reference manual, or both: *
35 * http://www.FreeRTOS.org/Documentation *
37 ***************************************************************************
39 http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
40 the FAQ page "My application does not run, what could be wrong?". Have you
41 defined configASSERT()?
43 http://www.FreeRTOS.org/support - In return for receiving this top quality
44 embedded software for free we request you assist our global community by
45 participating in the support forum.
47 http://www.FreeRTOS.org/training - Investing in training allows your team to
48 be as productive as possible as early as possible. Now you can receive
49 FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
50 Ltd, and the world's leading authority on the world's leading RTOS.
52 http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
53 including FreeRTOS+Trace - an indispensable productivity tool, a DOS
54 compatible FAT file system, and our tiny thread aware UDP/IP stack.
56 http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
57 Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
59 http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
60 Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
61 licenses offer ticketed support, indemnification and commercial middleware.
63 http://www.SafeRTOS.com - High Integrity Systems also provide a safety
64 engineered and independently SIL3 certified version for use in safety and
65 mission critical applications that require provable dependability.
70 /* Standard includes. */
74 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
75 all the API functions to use the MPU wrappers. That should only be done when
76 task.h is included from an application file. */
77 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
79 #include "rom/ets_sys.h"
80 #include "esp_newlib.h"
81 #include "esp_panic.h"
83 /* FreeRTOS includes. */
87 #include "StackMacros.h"
88 #include "portmacro.h"
91 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the
92 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
93 header files above, but not in this file, in order to generate the correct
94 privileged Vs unprivileged linkage and placement. */
95 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
functions but without including stdio.h here. */
#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
	/* At the bottom of this file are two optional functions that can be used
	to generate human readable text from the raw data generated by the
	uxTaskGetSystemState() function. Note the formatting functions are provided
	for convenience only, and are NOT considered part of the kernel. */
#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */

/* Sanity check the configuration.  Tickless idle blocks tasks indefinitely,
which requires the task suspension machinery to be compiled in. */
#if configUSE_TICKLESS_IDLE != 0
	#if INCLUDE_vTaskSuspend != 1
		#error INCLUDE_vTaskSuspend must be set to 1 if configUSE_TICKLESS_IDLE is not set to 0
	#endif /* INCLUDE_vTaskSuspend */
#endif /* configUSE_TICKLESS_IDLE */
/*
 * Defines the size, in bytes, of the stack allocated to the idle task.
 */
#define tskIDLE_STACK_SIZE	configIDLE_TASK_STACK_SIZE

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define taskYIELD_IF_USING_PREEMPTION()
#else
	/* Preemptive scheduling: yield from within the API whenever a higher
	priority task has been made ready. */
	#define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/* Values that can be assigned to the eNotifyState member of the TCB. */
typedef enum
{
	eNotWaitingNotification = 0,	/* Task is not waiting for a notification. */
	eWaitingNotification,			/* Task is blocked waiting for a notification to arrive. */
	eNotified						/* A notification has arrived and has not yet been consumed. */
} eNotifyValue;
/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using
dynamically allocated RAM, in which case when any task is deleted it is known
that both the task's stack and TCB need to be freed.  Sometimes the
FreeRTOSConfig.h settings only allow a task to be created using statically
allocated RAM, in which case when any task is deleted it is known that neither
the task's stack or TCB should be freed.  Sometimes the FreeRTOSConfig.h
settings allow a task to be created using either statically or dynamically
allocated RAM, in which case a member of the TCB is used to record whether the
stack and/or TCB were allocated statically or dynamically, so when a task is
deleted the RAM that was allocated dynamically is freed again and no attempt is
made to free the RAM that was allocated statically.
tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a
task to be created using either statically or dynamically allocated RAM.  Note
that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with
a statically allocated stack and a dynamically allocated TCB. */
#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE ( ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) || ( portUSING_MPU_WRAPPERS == 1 ) )
#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )	/*< Stack and TCB both came from the heap - free both on delete. */
#define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )		/*< Caller-supplied stack, heap-allocated TCB - free only the TCB. */
#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )		/*< Stack and TCB both caller supplied - free neither. */
159 * Task control block. A task control block (TCB) is allocated for each task,
160 * and stores task state information, including a pointer to the task's context
161 * (the task's run time environment, including register values)
163 typedef struct tskTaskControlBlock
165 volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
167 #if ( portUSING_MPU_WRAPPERS == 1 )
168 xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
171 ListItem_t xGenericListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
172 ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
173 UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
174 StackType_t *pxStack; /*< Points to the start of the stack. */
175 char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
176 BaseType_t xCoreID; /*< Core this task is pinned to */
177 /* If this moves around (other than pcTaskName size changes), please change the define in xtensa_vectors.S as well. */
178 #if ( portSTACK_GROWTH > 0 || configENABLE_TASK_SNAPSHOT == 1 )
179 StackType_t *pxEndOfStack; /*< Points to the end of the stack on architectures where the stack grows up from low memory. */
182 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
183 UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
184 uint32_t uxOldInterruptState; /*< Interrupt state before the outer taskEnterCritical was called */
187 #if ( configUSE_TRACE_FACILITY == 1 )
188 UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
189 UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
192 #if ( configUSE_MUTEXES == 1 )
193 UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
194 UBaseType_t uxMutexesHeld;
197 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
198 TaskHookFunction_t pxTaskTag;
201 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
202 void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
203 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
204 TlsDeleteCallbackFunction_t pvThreadLocalStoragePointersDelCallback[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
208 #if ( configGENERATE_RUN_TIME_STATS == 1 )
209 uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
212 #if ( configUSE_NEWLIB_REENTRANT == 1 )
213 /* Allocate a Newlib reent structure that is specific to this task.
214 Note Newlib support has been included by popular demand, but is not
215 used by the FreeRTOS maintainers themselves. FreeRTOS is not
216 responsible for resulting newlib operation. User must be familiar with
217 newlib and must provide system-wide implementations of the necessary
218 stubs. Be warned that (at the time of writing) the current newlib design
219 implements a system-wide malloc() that must be provided with locks. */
220 struct _reent xNewLib_reent;
223 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
224 volatile uint32_t ulNotifiedValue;
225 volatile eNotifyValue eNotifyState;
228 /* See the comments above the definition of
229 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
230 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
231 uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
236 /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
237 below to enable the use of older kernel aware debuggers. */
238 typedef tskTCB TCB_t;
240 #if __GNUC_PREREQ(4, 6)
241 _Static_assert(sizeof(StaticTask_t) == sizeof(TCB_t), "StaticTask_t != TCB_t");
/*
 * Some kernel aware debuggers require the data the debugger needs access to to
 * be global, rather than file scope.
 */
#ifdef portREMOVE_STATIC_QUALIFIER
	/* Strip the 'static' qualifier so the kernel's file-scope data becomes
	globally visible to such debuggers. */
	#define static
#endif

/*lint -e956 A manual analysis and inspection has been used to determine which
static variables must be declared volatile. */
255 PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB[ portNUM_PROCESSORS ] = { NULL };
257 /* Lists for ready and blocked tasks. --------------------*/
258 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
259 PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
260 PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
261 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
262 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
263 PRIVILEGED_DATA static List_t xPendingReadyList[ portNUM_PROCESSORS ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
265 #if ( INCLUDE_vTaskDelete == 1 )
267 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. Protected by xTaskQueueMutex.*/
268 PRIVILEGED_DATA static volatile UBaseType_t uxTasksDeleted = ( UBaseType_t ) 0U;
272 #if ( INCLUDE_vTaskSuspend == 1 )
274 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
278 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
280 PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle[portNUM_PROCESSORS] = {NULL}; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
284 /* Other file private variables. --------------------------------*/
285 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
286 PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) 0U;
287 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
288 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
289 PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
290 PRIVILEGED_DATA static volatile BaseType_t xYieldPending[portNUM_PROCESSORS] = {pdFALSE};
291 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
292 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
293 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = portMAX_DELAY;
295 /* Context switches are held pending while the scheduler is suspended. Also,
296 interrupts must not manipulate the xGenericListItem of a TCB, or any of the
297 lists the xGenericListItem can be referenced from, if the scheduler is suspended.
298 If an interrupt needs to unblock a task while the scheduler is suspended then it
299 moves the task's event list item into the xPendingReadyList, ready for the
300 kernel to move the task from the pending ready list into the real ready list
301 when the scheduler is unsuspended. The pending ready list itself can only be
302 accessed from a critical section. */
303 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
305 /* For now, we use just one mux for all the critical sections. ToDo: give everything a bit more granularity;
306 that could improve performance by not needlessly spinning in spinlocks for unrelated resources. */
307 PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
308 PRIVILEGED_DATA static portMUX_TYPE xTickCountMutex = portMUX_INITIALIZER_UNLOCKED;
310 #if ( configGENERATE_RUN_TIME_STATS == 1 )
312 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime[portNUM_PROCESSORS] = {0U}; /*< Holds the value of a timer/counter the last time a task was switched in on a particular core. */
313 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
318 // per-CPU flags indicating that we are doing context switch, it is used by apptrace and sysview modules
319 // in order to avoid calls of vPortYield from traceTASK_SWITCHED_IN/OUT when waiting
320 // for locks to be free or for host to read full trace buffer
321 PRIVILEGED_DATA static volatile BaseType_t xSwitchingContext[ portNUM_PROCESSORS ] = { pdFALSE };
/* Debugging and trace facilities private variables and macros. ------------*/

/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE ( 0xa5U )

/*
 * Macros used by vListTask to indicate which state a task is in.
 */
#define tskBLOCKED_CHAR		( 'B' )
#define tskREADY_CHAR		( 'R' )
#define tskDELETED_CHAR		( 'D' )
#define tskSUSPENDED_CHAR	( 'S' )
/*-----------------------------------------------------------*/

#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )

	/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
	performed in a generic way that is not optimised to any particular
	microcontroller architecture. */

	/* uxTopReadyPriority holds the priority of the highest priority ready
	state task. */
	#define taskRECORD_READY_PRIORITY( uxPriority )														\
	{																									\
		if( ( uxPriority ) > uxTopReadyPriority )														\
		{																								\
			uxTopReadyPriority = ( uxPriority );														\
		}																								\
	} /* taskRECORD_READY_PRIORITY */

	/*-----------------------------------------------------------*/

	#define taskSELECT_HIGHEST_PRIORITY_TASK()															\
	{																									\
		/* Find the highest priority queue that contains ready tasks. */								\
		while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopReadyPriority ] ) ) )						\
		{																								\
			configASSERT( uxTopReadyPriority );															\
			--uxTopReadyPriority;																		\
		}																								\
																										\
		/* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of						\
		the same priority get an equal share of the processor time. */									\
		listGET_OWNER_OF_NEXT_ENTRY( xTaskGetCurrentTaskHandle(), &( pxReadyTasksLists[ uxTopReadyPriority ] ) );		\
	} /* taskSELECT_HIGHEST_PRIORITY_TASK */

	/*-----------------------------------------------------------*/

	/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
	they are only required when a port optimised method of task selection is
	being used. */
	#define taskRESET_READY_PRIORITY( uxPriority )
	#define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )

#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

	/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
	performed in a way that is tailored to the particular microcontroller
	architecture being used. */

	/* A port optimised version is provided.  Call the port defined macros. */
	#define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )

	/*-----------------------------------------------------------*/

	#define taskSELECT_HIGHEST_PRIORITY_TASK()														\
	{																								\
	UBaseType_t uxTopPriority;																		\
																									\
		/* Find the highest priority queue that contains ready tasks. */							\
		portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );								\
		configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 );		\
		listGET_OWNER_OF_NEXT_ENTRY( xTaskGetCurrentTaskHandle(), &( pxReadyTasksLists[ uxTopPriority ] ) );		\
	} /* taskSELECT_HIGHEST_PRIORITY_TASK() */

	/*-----------------------------------------------------------*/

	/* A port optimised version is provided, call it only if the TCB being reset
	is being referenced from a ready list.  If it is referenced from a delayed
	or suspended list then it won't be in a ready list. */
	#define taskRESET_READY_PRIORITY( uxPriority )														\
	{																									\
		if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 )	\
		{																								\
			portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) );							\
		}																								\
	}

#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
/*-----------------------------------------------------------*/

/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
count overflows. */
#define taskSWITCH_DELAYED_LISTS()																	\
{																									\
	List_t *pxTemp;																					\
																									\
	/* The delayed tasks list should be empty when the lists are switched. */						\
	configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) );										\
																									\
	pxTemp = pxDelayedTaskList;																		\
	pxDelayedTaskList = pxOverflowDelayedTaskList;													\
	pxOverflowDelayedTaskList = pxTemp;																\
	xNumOfOverflows++;																				\
	prvResetNextTaskUnblockTime();																	\
}

/*-----------------------------------------------------------*/
/*
 * Place the task represented by pxTCB into the appropriate ready list for
 * the task.  It is inserted at the end of the list.
 */
#define prvAddTaskToReadyList( pxTCB )																\
	traceMOVED_TASK_TO_READY_STATE( pxTCB );														\
	taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority );												\
	vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xGenericListItem ) )

/*
 * Place the task represented by pxTCB which has been in a ready list before
 * into the appropriate ready list for the task.
 * It is inserted at the end of the list.
 */
#define prvReaddTaskToReadyList( pxTCB )															\
	traceREADDED_TASK_TO_READY_STATE( pxTCB );														\
	taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority );												\
	vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xGenericListItem ) )
/*-----------------------------------------------------------*/
459 #define tskCAN_RUN_HERE( cpuid ) ( cpuid==xPortGetCoreID() || cpuid==tskNO_AFFINITY )
/*
 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
 * where NULL is used to indicate that the handle of the currently executing
 * task should be used in place of the parameter.  This macro simply checks to
 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
 */
/* ToDo: See if this still works for multicore. */
#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? ( TCB_t * ) xTaskGetCurrentTaskHandle() : ( TCB_t * ) ( pxHandle ) )
/* The item value of the event list item is normally used to hold the priority
of the task to which it belongs (coded to allow it to be held in reverse
priority order).  However, it is occasionally borrowed for other purposes.  It
is important its value is not updated due to a task priority change while it is
being used for another purpose.  The following bit definition is used to inform
the scheduler that the value should not be changed - in which case it is the
responsibility of whichever module is using the value to ensure it gets set back
to its original value when it is released. */
#if configUSE_16_BIT_TICKS == 1
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x8000U
#else
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x80000000UL
#endif
/* Callback function prototypes. --------------------------*/
#if configCHECK_FOR_STACK_OVERFLOW > 0
	/* Application-defined hook, called when a stack overflow is detected. */
	extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
#endif

#if configUSE_TICK_HOOK > 0
	/* Application-defined hook, called from the tick interrupt. */
	extern void vApplicationTickHook( void );
#endif

/* ESP-IDF tick hook, declared unconditionally. */
extern void esp_vApplicationTickHook( void );

#if portFIRST_TASK_HOOK
	/* Port-layer hook invoked with the entry function of a task. */
	extern void vPortFirstTaskHook(TaskFunction_t taskfn);
#endif
499 /* File private functions. --------------------------------*/
502 * Utility task that simply returns pdTRUE if the task referenced by xTask is
503 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
504 * is in any other state.
506 * Caller must hold xTaskQueueMutex before calling this function.
508 #if ( INCLUDE_vTaskSuspend == 1 )
509 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
510 #endif /* INCLUDE_vTaskSuspend */
513 * Utility to ready all the lists used by the scheduler. This is called
514 * automatically upon the creation of the first task.
516 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
519 * The idle task, which as all tasks is implemented as a never ending loop.
520 * The idle task is automatically created and added to the ready lists upon
521 * creation of the first user task.
523 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
524 * language extensions. The equivalent prototype for this function is:
526 * void prvIdleTask( void *pvParameters );
529 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
532 * Utility to free all memory allocated by the scheduler to hold a TCB,
533 * including the stack pointed to by the TCB.
535 * This does not free memory allocated by the task itself (i.e. memory
536 * allocated by calls to pvPortMalloc from within the tasks application code).
538 #if ( INCLUDE_vTaskDelete == 1 )
540 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
544 //Function to call the Thread Local Storage Pointer Deletion Callbacks. Will be
545 //called during task deletion before prvDeleteTCB is called.
546 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
547 static void prvDeleteTLS( TCB_t *pxTCB );
551 * Used only by the idle task. This checks to see if anything has been placed
552 * in the list of tasks waiting to be deleted. If so the task is cleaned up
553 * and its TCB deleted.
555 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
558 * The currently executing task is entering the Blocked state. Add the task to
559 * either the current or the overflow delayed task list.
561 static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID, const TickType_t xTimeToWake ) PRIVILEGED_FUNCTION;
564 * Fills an TaskStatus_t structure with information on each task that is
565 * referenced from the pxList list (which may be a ready list, a delayed list,
566 * a suspended list, etc.).
568 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
569 * NORMAL APPLICATION CODE.
571 #if ( configUSE_TRACE_FACILITY == 1 )
573 static UBaseType_t prvListTaskWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
578 * When a task is created, the stack of the task is filled with a known value.
579 * This function determines the 'high water mark' of the task stack by
580 * determining how much of the stack remains at the original preset value.
582 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
584 static uint32_t prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
589 * Return the amount of time, in ticks, that will pass before the kernel will
590 * next move a task from the Blocked state to the Running state.
592 * This conditional compilation should use inequality to 0, not equality to 1.
593 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
594 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
595 * set to a value other than 1.
597 #if ( configUSE_TICKLESS_IDLE != 0 )
599 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
604 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
605 * will exit the Blocked state.
607 static void prvResetNextTaskUnblockTime( void );
609 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
612 * Helper function used to pad task names with spaces when printing out
613 * human readable tables of task information.
615 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName );
620 * Called after a Task_t structure has been allocated either statically or
621 * dynamically to fill in the structure's members.
623 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
624 const char * const pcName,
625 const uint32_t ulStackDepth,
626 void * const pvParameters,
627 UBaseType_t uxPriority,
628 TaskHandle_t * const pxCreatedTask,
630 const MemoryRegion_t * const xRegions, const BaseType_t xCoreID) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
633 * Called after a new task has been created and initialised to place the task
634 * under the control of the scheduler.
636 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, const BaseType_t xCoreID ) PRIVILEGED_FUNCTION;
640 /*-----------------------------------------------------------*/
643 * This routine tries to send an interrupt to another core if needed to make it execute a task
644 * of higher priority. We try to figure out if needed first by inspecting the pxTCB of the
645 * other CPU first. Specifically for Xtensa, we can do this because pxTCB is an atomic pointer. It
646 * is possible that it is inaccurate because the other CPU just did a task switch, but in that case
647 * at most a superfluous interrupt is generated.
649 void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
651 TCB_t *curTCB = pxCurrentTCB[xCoreID];
654 if (xCoreID != tskNO_AFFINITY) {
655 if ( curTCB->uxPriority < uxPriority ) {
656 vPortYieldOtherCore( xCoreID );
661 /* The task has no affinity. See if we can find a CPU to put it on.*/
662 for (i=0; i<portNUM_PROCESSORS; i++) {
663 if (i != xPortGetCoreID() && pxCurrentTCB[ i ]->uxPriority < uxPriority)
665 vPortYieldOtherCore( i );
#if( configSUPPORT_STATIC_ALLOCATION == 1 )

	/*
	 * Create a task from caller-provided TCB and stack storage, optionally
	 * pinned to a core.  Returns the new task's handle, or NULL if either
	 * buffer pointer is NULL.
	 */
	TaskHandle_t xTaskCreateStaticPinnedToCore(	TaskFunction_t pxTaskCode,
												const char * const pcName,
												const uint32_t ulStackDepth,
												void * const pvParameters,
												UBaseType_t uxPriority,
												StackType_t * const puxStackBuffer,
												StaticTask_t * const pxTaskBuffer,
												const BaseType_t xCoreID )
	{
	TCB_t *pxNewTCB;
	TaskHandle_t xReturn;

		configASSERT( portVALID_TCB_MEM(pxTaskBuffer) );
		configASSERT( portVALID_STACK_MEM(puxStackBuffer) );
		configASSERT( (xCoreID>=0 && xCoreID<portNUM_PROCESSORS) || (xCoreID==tskNO_AFFINITY) );

		if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
		{
			/* The memory used for the task's TCB and stack are passed into this
			function - use them. */
			pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
			pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;

			#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
			{
				/* Tasks can be created statically or dynamically, so note this
				task was created statically in case the task is later deleted. */
				pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
			}
			#endif /* configSUPPORT_DYNAMIC_ALLOCATION */

			prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID );
			prvAddNewTaskToReadyList( pxNewTCB, pxTaskCode, xCoreID );
		}
		else
		{
			/* No storage supplied - nothing was created. */
			xReturn = NULL;
		}

		return xReturn;
	}

#endif /* SUPPORT_STATIC_ALLOCATION */
717 /*-----------------------------------------------------------*/
#if( portUSING_MPU_WRAPPERS == 1 )

	/*
	 * Create an MPU-restricted task from a TaskParameters_t definition.  The
	 * stack buffer is caller supplied; the TCB is allocated dynamically.
	 * Returns pdPASS on success, otherwise
	 * errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY.
	 */
	BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
	{
	TCB_t *pxNewTCB;
	BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

		configASSERT( pxTaskDefinition->puxStackBuffer );

		if( pxTaskDefinition->puxStackBuffer != NULL )
		{
			/* Allocate space for the TCB.  Where the memory comes from depends
			on the implementation of the port malloc function and whether or
			not static allocation is being used. */
			pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );

			if( pxNewTCB != NULL )
			{
				/* Store the stack location in the TCB. */
				pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

				/* Tasks can be created statically or dynamically, so note
				this task had a statically allocated stack in case it is
				later deleted.  The TCB was allocated dynamically. */
				pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;

				prvInitialiseNewTask(	pxTaskDefinition->pvTaskCode,
										pxTaskDefinition->pcName,
										pxTaskDefinition->usStackDepth,
										pxTaskDefinition->pvParameters,
										pxTaskDefinition->uxPriority,
										pxCreatedTask, pxNewTCB,
										pxTaskDefinition->xRegions,
										tskNO_AFFINITY );

				prvAddNewTaskToReadyList( pxNewTCB, pxTaskDefinition->pvTaskCode, tskNO_AFFINITY );
				xReturn = pdPASS;
			}
		}

		return xReturn;
	}

#endif /* portUSING_MPU_WRAPPERS */
763 /*-----------------------------------------------------------*/
765 #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
/*
 * Create a task with dynamically allocated TCB and stack, optionally pinned
 * to a core (xCoreID; tskNO_AFFINITY leaves it unpinned).
 *
 * Allocation order depends on stack growth direction so the stack cannot
 * grow into the TCB.  If either allocation fails, anything already
 * allocated is freed and errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY is returned.
 * On success the task is initialised and added to the ready list; the
 * handle is passed out through pxCreatedTask by prvInitialiseNewTask().
 */
767 BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pxTaskCode,
768 const char * const pcName,
769 const uint32_t usStackDepth,
770 void * const pvParameters,
771 UBaseType_t uxPriority,
772 TaskHandle_t * const pxCreatedTask,
773 const BaseType_t xCoreID )
778 /* If the stack grows down then allocate the stack then the TCB so the stack
779 does not grow into the TCB. Likewise if the stack grows up then allocate
780 the TCB then the stack. */
781 #if( portSTACK_GROWTH > 0 )
783 /* Allocate space for the TCB. Where the memory comes from depends on
784 the implementation of the port malloc function and whether or not static
785 allocation is being used. */
786 pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );
788 if( pxNewTCB != NULL )
790 /* Allocate space for the stack used by the task being created.
791 The base of the stack memory stored in the TCB so the task can
792 be deleted later if required. */
793 pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
795 if( pxNewTCB->pxStack == NULL )
797 /* Could not allocate the stack. Delete the allocated TCB. */
798 vPortFree( pxNewTCB );
803 #else /* portSTACK_GROWTH */
805 StackType_t *pxStack;
807 /* Allocate space for the stack used by the task being created. */
808 pxStack = ( StackType_t * ) pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
810 if( pxStack != NULL )
812 /* Allocate space for the TCB. */
813 pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) ); /*lint !e961 MISRA exception as the casts are only redundant for some paths. */
815 if( pxNewTCB != NULL )
817 /* Store the stack location in the TCB. */
818 pxNewTCB->pxStack = pxStack;
822 /* The stack cannot be used as the TCB was not created. Free
824 vPortFree( pxStack );
832 #endif /* portSTACK_GROWTH */
834 if( pxNewTCB != NULL )
836 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
838 /* Tasks can be created statically or dynamically, so note this
839 task was created dynamically in case it is later deleted. */
840 pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
842 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
844 prvInitialiseNewTask( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL, xCoreID );
845 prvAddNewTaskToReadyList( pxNewTCB, pxTaskCode, xCoreID );
850 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
856 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
857 /*-----------------------------------------------------------*/
/*
 * Common initialisation for a newly created task, shared by all the
 * xTaskCreate* variants.  Fills the TCB and stack (fill pattern, top-of-stack
 * alignment, name copy, priority clamp, list items, per-feature fields such
 * as mutex base priority, TLS pointers, notification state and Newlib reent),
 * calls pxPortInitialiseStack() to fake an interrupted context, and finally
 * passes the new handle out through pxCreatedTask if it is non-NULL.
 *
 * pxNewTCB is not a visible parameter in this extract — presumably declared
 * in an elided parameter line between lines 864 and 866 (TODO confirm).
 * The task is NOT added to a ready list here; the caller does that via
 * prvAddNewTaskToReadyList().
 */
859 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
860 const char * const pcName,
861 const uint32_t ulStackDepth,
862 void * const pvParameters,
863 UBaseType_t uxPriority,
864 TaskHandle_t * const pxCreatedTask,
866 const MemoryRegion_t * const xRegions, const BaseType_t xCoreID ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
868 StackType_t *pxTopOfStack;
871 #if( portUSING_MPU_WRAPPERS == 1 )
872 /* Should the task be created in privileged mode? */
873 BaseType_t xRunPrivileged;
874 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
876 xRunPrivileged = pdTRUE;
880 xRunPrivileged = pdFALSE;
/* Strip the privilege flag so only the numeric priority remains. */
882 uxPriority &= ~portPRIVILEGE_BIT;
883 #endif /* portUSING_MPU_WRAPPERS == 1 */
885 /* Avoid dependency on memset() if it is not required. */
886 #if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
888 /* Fill the stack with a known value to assist debugging. */
889 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
891 #endif /* ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) ) */
893 /* Calculate the top of stack address. This depends on whether the stack
894 grows from high memory to low (as per the 80x86) or vice versa.
895 portSTACK_GROWTH is used to make the result positive or negative as required
897 #if( portSTACK_GROWTH < 0 )
899 pxTopOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
/* Round the top of stack DOWN to the required byte alignment. */
900 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. */
902 /* Check the alignment of the calculated top of stack is correct. */
903 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
904 #if ( configENABLE_TASK_SNAPSHOT == 1 )
906 /* need stack end for core dumps */
907 pxNewTCB->pxEndOfStack = pxTopOfStack;
911 #else /* portSTACK_GROWTH */
913 pxTopOfStack = pxNewTCB->pxStack;
915 /* Check the alignment of the stack buffer is correct. */
916 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
918 /* The other extreme of the stack space is required if stack checking is
920 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
922 #endif /* portSTACK_GROWTH */
924 /* Store the task name in the TCB. */
925 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
927 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
929 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
930 configMAX_TASK_NAME_LEN characters just in case the memory after the
931 string is not accessible (extremely unlikely). */
932 if( pcName[ x ] == 0x00 )
938 mtCOVERAGE_TEST_MARKER();
942 /* Ensure the name string is terminated in the case that the string length
943 was greater or equal to configMAX_TASK_NAME_LEN. */
944 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
946 /* This is used as an array index so must ensure it's not too large. First
947 remove the privilege bit if one is present. */
948 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
950 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
954 mtCOVERAGE_TEST_MARKER();
957 pxNewTCB->uxPriority = uxPriority;
/* SMP extension: remember which core (or tskNO_AFFINITY) the task is pinned to. */
958 pxNewTCB->xCoreID = xCoreID;
959 #if ( configUSE_MUTEXES == 1 )
/* Base priority is the priority before any mutex priority inheritance. */
961 pxNewTCB->uxBasePriority = uxPriority;
962 pxNewTCB->uxMutexesHeld = 0;
964 #endif /* configUSE_MUTEXES */
966 vListInitialiseItem( &( pxNewTCB->xGenericListItem ) );
967 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
969 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
970 back to the containing TCB from a generic item in a list. */
971 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xGenericListItem ), pxNewTCB );
973 /* Event lists are always in priority order. */
974 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
975 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
977 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
979 pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
981 #endif /* portCRITICAL_NESTING_IN_TCB */
983 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
985 pxNewTCB->pxTaskTag = NULL;
987 #endif /* configUSE_APPLICATION_TASK_TAG */
989 #if ( configGENERATE_RUN_TIME_STATS == 1 )
991 pxNewTCB->ulRunTimeCounter = 0UL;
993 #endif /* configGENERATE_RUN_TIME_STATS */
995 #if ( portUSING_MPU_WRAPPERS == 1 )
997 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
1001 /* Avoid compiler warning about unreferenced parameter. */
/* Clear all thread-local-storage pointers (and their deletion callbacks
when enabled) so stale pointers are never dereferenced. */
1006 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
1008 for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
1010 pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
1011 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1)
1012 pxNewTCB->pvThreadLocalStoragePointersDelCallback[ x ] = NULL;
1018 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
1020 pxNewTCB->ulNotifiedValue = 0;
1021 pxNewTCB->eNotifyState = eNotWaitingNotification;
1025 #if ( configUSE_NEWLIB_REENTRANT == 1 )
1027 /* Initialise this task's Newlib reent structure. */
1028 esp_reent_init(&pxNewTCB->xNewLib_reent);
1032 #if( INCLUDE_xTaskAbortDelay == 1 )
1034 pxNewTCB->ucDelayAborted = pdFALSE;
1038 /* Initialize the TCB stack to look as if the task was already running,
1039 but had been interrupted by the scheduler. The return address is set
1040 to the start of the task function. Once the stack has been initialised
1041 the top of stack variable is updated. */
1042 #if( portUSING_MPU_WRAPPERS == 1 )
1044 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1046 #else /* portUSING_MPU_WRAPPERS */
1048 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
1050 #endif /* portUSING_MPU_WRAPPERS */
1052 if( ( void * ) pxCreatedTask != NULL )
1054 /* Pass the handle out in an anonymous way. The handle can be used to
1055 change the created task's priority, delete the created task, etc.*/
1056 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
1060 mtCOVERAGE_TEST_MARKER();
1063 /*-----------------------------------------------------------*/
/*
 * Add a freshly initialised task to the ready list (SMP-aware).
 *
 * Inside the xTaskQueueMutex critical section: resolves tskNO_AFFINITY to a
 * concrete core (an idle core if one exists, else the core running the
 * lowest-priority task that the new task outranks, else the current core),
 * performs first-task initialisation of the kernel lists, and inserts the
 * task into the ready list.  After leaving the critical section, if the
 * scheduler is running and the new task outranks the task currently running
 * on its target core, a yield is triggered on that core.
 */
1065 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, BaseType_t xCoreID )
1067 TCB_t *curTCB, *tcb0, *tcb1;
1069 /* Assure that xCoreID is valid or we'll have an out-of-bounds on pxCurrentTCB
1070 You will assert here if e.g. you only have one CPU enabled in menuconfig and
1071 are trying to start a task on core 1. */
1072 configASSERT( xCoreID == tskNO_AFFINITY || xCoreID < portNUM_PROCESSORS);
1074 /* Ensure interrupts don't access the task lists while the lists are being
1076 taskENTER_CRITICAL(&xTaskQueueMutex);
1078 uxCurrentNumberOfTasks++;
1080 // Determine which core this task starts on
1081 if ( xCoreID == tskNO_AFFINITY )
1083 if ( portNUM_PROCESSORS == 1 )
1089 // if the task has no affinity, put it on either core if nothing is currently scheduled there. Failing that,
1090 // put it on the core where it will preempt the lowest priority running task. If neither of these are true,
1091 // queue it on the currently running core.
1092 tcb0 = pxCurrentTCB[0];
1093 tcb1 = pxCurrentTCB[1];
1098 else if ( tcb1 == NULL )
1102 else if ( tcb0->uxPriority < pxNewTCB->uxPriority && tcb0->uxPriority < tcb1->uxPriority )
1106 else if ( tcb1->uxPriority < pxNewTCB->uxPriority )
1112 xCoreID = xPortGetCoreID(); // Both CPU have higher priority tasks running on them, so this won't run yet
1117 // If nothing is running on this core, put the new task there now
1118 if( pxCurrentTCB[ xCoreID ] == NULL )
1120 /* There are no other tasks, or all the other tasks are in
1121 the suspended state - make this the current task. */
1122 pxCurrentTCB[ xCoreID ] = pxNewTCB;
1124 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
1126 #if portFIRST_TASK_HOOK
/* ESP-IDF extension: hook invoked for the very first task on core 0. */
1127 if ( xPortGetCoreID() == 0 ) {
1128 vPortFirstTaskHook(pxTaskCode);
1130 #endif /* portFIRST_TASK_HOOK */
1131 /* This is the first task to be created so do the preliminary
1132 initialisation required. We will not recover if this call
1133 fails, but we will report the failure. */
1134 prvInitialiseTaskLists();
1138 mtCOVERAGE_TEST_MARKER();
1143 /* If the scheduler is not already running, make this task the
1144 current task if it is the highest priority task to be created
1146 if( xSchedulerRunning == pdFALSE )
1148 /* Scheduler isn't running yet. We need to determine on which CPU to run this task.
1149 Schedule now if either nothing is scheduled yet or we can replace a task of lower prio. */
1150 if ( pxCurrentTCB[xCoreID] == NULL || pxCurrentTCB[xCoreID]->uxPriority <= pxNewTCB->uxPriority )
1152 pxCurrentTCB[xCoreID] = pxNewTCB;
1157 mtCOVERAGE_TEST_MARKER();
1163 #if ( configUSE_TRACE_FACILITY == 1 )
1165 /* Add a counter into the TCB for tracing only. */
1166 pxNewTCB->uxTCBNumber = uxTaskNumber;
1168 #endif /* configUSE_TRACE_FACILITY */
1169 traceTASK_CREATE( pxNewTCB );
1171 prvAddTaskToReadyList( pxNewTCB );
1173 portSETUP_TCB( pxNewTCB );
1176 taskEXIT_CRITICAL(&xTaskQueueMutex);
1178 if( xSchedulerRunning != pdFALSE )
1180 taskENTER_CRITICAL(&xTaskQueueMutex);
1182 curTCB = pxCurrentTCB[ xCoreID ];
1183 /* Scheduler is running. If the created task is of a higher priority than an executing task
1184 then it should run now.
1186 if( curTCB == NULL || curTCB->uxPriority < pxNewTCB->uxPriority )
/* Yield locally if the target core is this one, otherwise interrupt the other core. */
1188 if( xCoreID == xPortGetCoreID() )
1190 taskYIELD_IF_USING_PREEMPTION();
1193 taskYIELD_OTHER_CORE(xCoreID, pxNewTCB->uxPriority);
1198 mtCOVERAGE_TEST_MARKER();
1200 taskEXIT_CRITICAL(&xTaskQueueMutex);
1204 mtCOVERAGE_TEST_MARKER();
1207 /*-----------------------------------------------------------*/
1209 #if ( INCLUDE_vTaskDelete == 1 )
/*
 * Delete a task (NULL deletes the calling task).
 *
 * SMP-modified backport from FreeRTOS v9: if the target task is not running
 * on either core and is not pinned to the other core, its memory (TLS
 * deletion callbacks, then the TCB) is freed immediately, outside the
 * critical section.  Otherwise the task is moved to
 * xTasksWaitingTermination and the idle task performs the cleanup later.
 * Finally, if the deleted task was running, the affected core is yielded.
 */
1211 void vTaskDelete( TaskHandle_t xTaskToDelete )
1213 //The following vTaskDelete() is backported from FreeRTOS v9.0.0 and modified for SMP.
1214 //v9.0.0 vTaskDelete() will immediately free task memory if the task being deleted is
1215 //NOT currently running and not pinned to the other core. Otherwise, freeing of task memory
1216 //will still be delegated to the Idle Task.
1219 int core = xPortGetCoreID(); //Current core
1220 UBaseType_t free_now; //Flag to indicate if task memory can be freed immediately
1222 taskENTER_CRITICAL(&xTaskQueueMutex);
1224 /* If null is passed in here then it is the calling task that is
1226 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
1228 /* Remove task from the ready list. */
1229 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1231 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1235 mtCOVERAGE_TEST_MARKER();
1238 /* Is the task waiting on an event also? */
1239 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1241 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1245 mtCOVERAGE_TEST_MARKER();
1248 /* Increment the uxTaskNumber also so kernel aware debuggers can
1249 detect that the task lists need re-generating. This is done before
1250 portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
1254 //If task to be deleted is currently running on either core or is pinned to the other core. Let Idle free memory
1255 if( pxTCB == pxCurrentTCB[ core ] ||
1256 (portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !core ]) ||
1257 (portNUM_PROCESSORS > 1 && pxTCB->xCoreID == (!core)) )
1259 /* Deleting a currently running task. This cannot complete
1260 within the task itself, as a context switch to another task is
1261 required. Place the task in the termination list. The idle task
1262 will check the termination list and free up any memory allocated
1263 by the scheduler for the TCB and stack of the deleted task. */
1264 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xGenericListItem ) );
1266 /* Increment the ucTasksDeleted variable so the idle task knows
1267 there is a task that has been deleted and that it should therefore
1268 check the xTasksWaitingTermination list. */
1271 /* The pre-delete hook is primarily for the Windows simulator,
1272 in which Windows specific clean up operations are performed,
1273 after which it is not possible to yield away from this task -
1274 hence xYieldPending is used to latch that a context switch is
1276 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
1278 free_now = pdFALSE; //Let Idle Task free task memory
1280 else //Task is not currently running and not pinned to the other core
1282 --uxCurrentNumberOfTasks;
1284 /* Reset the next expected unblock time in case it referred to
1285 the task that has just been deleted. */
1286 prvResetNextTaskUnblockTime();
1287 free_now = pdTRUE; //Set flag to free task memory immediately
1290 traceTASK_DELETE( pxTCB );
1292 taskEXIT_CRITICAL(&xTaskQueueMutex);
1294 if(free_now == pdTRUE){ //Free task memory. Outside critical section due to deletion callbacks
1295 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
1296 prvDeleteTLS( pxTCB ); //Run deletion callbacks before deleting TCB
1298 prvDeleteTCB( pxTCB ); //Must only be called after del cb
1301 /* Force a reschedule if it is the currently running task that has just
1303 if( xSchedulerRunning != pdFALSE )
1305 //No mux; no harm done if this misfires. The deleted task won't get scheduled anyway.
1306 if( pxTCB == pxCurrentTCB[ core ] ) //If task was currently running on this core
1308 configASSERT( uxSchedulerSuspended[ core ] == 0 );
1310 /* The pre-delete hook is primarily for the Windows simulator,
1311 in which Windows specific clean up operations are performed,
1312 after which it is not possible to yield away from this task -
1313 hence xYieldPending is used to latch that a context switch is
1315 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[xPortGetCoreID()] );
1316 portYIELD_WITHIN_API();
1318 else if ( portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !core] ) //If task was currently running on the other core
1320 /* if task is running on the other CPU, force a yield on that CPU to take it off */
1321 vPortYieldOtherCore( !core );
1325 mtCOVERAGE_TEST_MARKER();
1330 #endif /* INCLUDE_vTaskDelete */
1331 /*-----------------------------------------------------------*/
1331 /*-----------------------------------------------------------*/
1333 #if ( INCLUDE_vTaskDelayUntil == 1 )
1335 /* ToDo: Make this multicore-compatible. */
/*
 * Block the calling task until an absolute wake time, giving a fixed
 * execution frequency regardless of how long the task's work took.
 *
 * *pxPreviousWakeTime is the time the task last woke (initialised by the
 * caller) and is advanced by xTimeIncrement each call.  Tick-counter
 * overflow is handled explicitly: the task only delays when the relative
 * ordering of wake time and tick count shows the wake time is still in the
 * future.  In this SMP port the suspend-all/resume-all pair of upstream
 * FreeRTOS is replaced by the xTaskQueueMutex critical section, so
 * xAlreadyYielded stays pdFALSE and the final yield always runs when a
 * delay was performed.
 */
1336 void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
1338 TickType_t xTimeToWake;
1339 BaseType_t xAlreadyYielded=pdFALSE, xShouldDelay = pdFALSE;
1341 configASSERT( pxPreviousWakeTime );
1342 configASSERT( ( xTimeIncrement > 0U ) );
1343 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
1345 taskENTER_CRITICAL(&xTaskQueueMutex);
1346 // vTaskSuspendAll();
1348 /* Minor optimisation. The tick count cannot change in this
1350 // portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
1351 const TickType_t xConstTickCount = xTickCount;
1352 // portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
1354 /* Generate the tick time at which the task wants to wake. */
1355 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
1357 if( xConstTickCount < *pxPreviousWakeTime )
1359 /* The tick count has overflowed since this function was
1360 lasted called. In this case the only time we should ever
1361 actually delay is if the wake time has also overflowed,
1362 and the wake time is greater than the tick time. When this
1363 is the case it is as if neither time had overflowed. */
1364 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
1366 xShouldDelay = pdTRUE;
1370 mtCOVERAGE_TEST_MARKER();
1375 /* The tick time has not overflowed. In this case we will
1376 delay if either the wake time has overflowed, and/or the
1377 tick time is less than the wake time. */
1378 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
1380 xShouldDelay = pdTRUE;
1384 mtCOVERAGE_TEST_MARKER();
1388 /* Update the wake time ready for the next call. */
1389 *pxPreviousWakeTime = xTimeToWake;
1391 if( xShouldDelay != pdFALSE )
1393 traceTASK_DELAY_UNTIL();
1395 /* Remove the task from the ready list before adding it to the
1396 blocked list as the same list item is used for both lists. */
1397 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1399 /* The current task must be in a ready list, so there is
1400 no need to check, and the port reset macro can be called
1402 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
1406 mtCOVERAGE_TEST_MARKER();
1409 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
1413 mtCOVERAGE_TEST_MARKER();
1416 // xAlreadyYielded = xTaskResumeAll();
1417 taskEXIT_CRITICAL(&xTaskQueueMutex);
1419 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1420 have put ourselves to sleep. */
1421 if( xAlreadyYielded == pdFALSE )
1423 portYIELD_WITHIN_API();
1427 mtCOVERAGE_TEST_MARKER();
1431 #endif /* INCLUDE_vTaskDelayUntil */
1432 /*-----------------------------------------------------------*/
1432 /*-----------------------------------------------------------*/
1434 #if ( INCLUDE_vTaskDelay == 1 )
/*
 * Block the calling task for xTicksToDelay ticks relative to now.
 *
 * A delay of zero skips the blocking step (upstream FreeRTOS then forces a
 * reschedule; the zero-tick branch here is elided).  The wake time
 * xTickCount + xTicksToDelay may deliberately wrap — the delayed-list
 * machinery handles tick overflow.  As in vTaskDelayUntil(), this SMP port
 * uses the xTaskQueueMutex critical section instead of vTaskSuspendAll()/
 * xTaskResumeAll(), so xAlreadyYielded remains pdFALSE and the trailing
 * portYIELD_WITHIN_API() performs the context switch.
 */
1435 void vTaskDelay( const TickType_t xTicksToDelay )
1437 TickType_t xTimeToWake;
1438 BaseType_t xAlreadyYielded = pdFALSE;
1440 /* A delay time of zero just forces a reschedule. */
1441 if( xTicksToDelay > ( TickType_t ) 0U )
1443 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
1444 taskENTER_CRITICAL(&xTaskQueueMutex);
1445 // vTaskSuspendAll();
1449 /* A task that is removed from the event list while the
1450 scheduler is suspended will not get placed in the ready
1451 list or removed from the blocked list until the scheduler
1454 This task cannot be in an event list as it is the currently
1457 /* Calculate the time to wake - this may overflow but this is
1459 // portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
1460 xTimeToWake = xTickCount + xTicksToDelay;
1461 // portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
1463 /* We must remove ourselves from the ready list before adding
1464 ourselves to the blocked list as the same list item is used for
1466 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1468 /* The current task must be in a ready list, so there is
1469 no need to check, and the port reset macro can be called
1471 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
1475 mtCOVERAGE_TEST_MARKER();
1477 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
1479 // xAlreadyYielded = xTaskResumeAll();
1480 taskEXIT_CRITICAL(&xTaskQueueMutex);
1484 mtCOVERAGE_TEST_MARKER();
1487 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1488 have put ourselves to sleep. */
1489 if( xAlreadyYielded == pdFALSE )
1491 portYIELD_WITHIN_API();
1495 mtCOVERAGE_TEST_MARKER();
1499 #endif /* INCLUDE_vTaskDelay */
1500 /*-----------------------------------------------------------*/
1500 /*-----------------------------------------------------------*/
1502 #if ( INCLUDE_eTaskGetState == 1 )
/*
 * Return the scheduler state of xTask (eRunning, eBlocked, eSuspended,
 * eDeleted or eReady).
 *
 * SMP note: the task counts as running if it is the current task on EITHER
 * core (xTaskGetCurrentTaskHandleForCPU(!core) returns NULL on unicore
 * builds, so the second comparison is then inert).  Otherwise the state is
 * derived from which kernel list holds the task's generic list item; a task
 * in the suspended list with a non-NULL event-list container is really
 * blocked on an event with an infinite timeout, not genuinely suspended.
 */
1503 eTaskState eTaskGetState( TaskHandle_t xTask )
1506 List_t *pxStateList;
1507 const TCB_t * const pxTCB = ( TCB_t * ) xTask;
1508 TCB_t * curTCBcurCore = xTaskGetCurrentTaskHandle();
1509 TCB_t * curTCBothrCore = xTaskGetCurrentTaskHandleForCPU(!xPortGetCoreID()); //Returns NULL if Unicore
1511 configASSERT( pxTCB );
1513 if( pxTCB == curTCBcurCore || pxTCB == curTCBothrCore )
1515 /* The task calling this function is querying its own state. */
1520 taskENTER_CRITICAL(&xTaskQueueMutex);
1522 pxStateList = ( List_t * ) listLIST_ITEM_CONTAINER( &( pxTCB->xGenericListItem ) );
1524 taskEXIT_CRITICAL(&xTaskQueueMutex);
1526 if( ( pxStateList == pxDelayedTaskList ) || ( pxStateList == pxOverflowDelayedTaskList ) )
1528 /* The task being queried is referenced from one of the Blocked
1533 #if ( INCLUDE_vTaskSuspend == 1 )
1534 else if( pxStateList == &xSuspendedTaskList )
1536 /* The task being queried is referenced from the suspended
1537 list. Is it genuinely suspended or is it block
1539 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
1541 eReturn = eSuspended;
1550 #if ( INCLUDE_vTaskDelete == 1 )
1551 else if( pxStateList == &xTasksWaitingTermination )
1553 /* The task being queried is referenced from the deleted
1559 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
1561 /* If the task is not in any other state, it must be in the
1562 Ready (including pending ready) state. */
1568 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1570 #endif /* INCLUDE_eTaskGetState */
1571 /*-----------------------------------------------------------*/
1571 /*-----------------------------------------------------------*/
1573 #if ( INCLUDE_uxTaskPriorityGet == 1 )
/*
 * Return the current (possibly inherited) priority of xTask.
 * Passing NULL queries the calling task's own priority.
 * The read is performed inside the xTaskQueueMutex critical section so the
 * TCB cannot be deleted or re-prioritised mid-read.
 */
1574 UBaseType_t uxTaskPriorityGet( TaskHandle_t xTask )
1577 UBaseType_t uxReturn;
1579 taskENTER_CRITICAL(&xTaskQueueMutex);
1581 /* If null is passed in here then we are changing the
1582 priority of the calling function. */
1583 pxTCB = prvGetTCBFromHandle( xTask );
1584 uxReturn = pxTCB->uxPriority;
1586 taskEXIT_CRITICAL(&xTaskQueueMutex);
1591 #endif /* INCLUDE_uxTaskPriorityGet */
1592 /*-----------------------------------------------------------*/
1592 /*-----------------------------------------------------------*/
1594 #if ( INCLUDE_uxTaskPriorityGet == 1 )
/*
 * ISR-safe variant of uxTaskPriorityGet(): returns the current priority of
 * xTask (NULL queries the interrupted task) using the ISR form of the
 * xTaskQueueMutex critical section so it may be called from an interrupt.
 */
1595 UBaseType_t uxTaskPriorityGetFromISR( TaskHandle_t xTask )
1598 UBaseType_t uxReturn;
1600 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
1602 /* If null is passed in here then it is the priority of the calling
1603 task that is being queried. */
1604 pxTCB = prvGetTCBFromHandle( xTask );
1605 uxReturn = pxTCB->uxPriority;
1607 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
1612 #endif /* INCLUDE_uxTaskPriorityGet */
1613 /*-----------------------------------------------------------*/
1613 /*-----------------------------------------------------------*/
1615 #if ( INCLUDE_vTaskPrioritySet == 1 )
/*
 * Change the priority of xTask (NULL changes the calling task).
 *
 * The new priority is clamped to configMAX_PRIORITIES-1.  With mutexes
 * enabled the comparison is made against uxBasePriority, and uxPriority is
 * only rewritten when the task is not currently running at an inherited
 * priority (the base priority is always updated).  If the task sits in a
 * ready list it is moved to the list matching its new priority.  A yield is
 * raised when the change could let a higher-priority task run — including,
 * in this SMP port, a cross-core yield via taskYIELD_OTHER_CORE() when the
 * boosted task is pinned to the other core.
 */
1617 void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
1620 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
1621 BaseType_t xYieldRequired = pdFALSE;
1623 configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
1625 /* Ensure the new priority is valid. */
1626 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
1628 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
1632 mtCOVERAGE_TEST_MARKER();
1635 taskENTER_CRITICAL(&xTaskQueueMutex);
1637 /* If null is passed in here then it is the priority of the calling
1638 task that is being changed. */
1639 pxTCB = prvGetTCBFromHandle( xTask );
1641 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
1643 #if ( configUSE_MUTEXES == 1 )
1645 uxCurrentBasePriority = pxTCB->uxBasePriority;
1649 uxCurrentBasePriority = pxTCB->uxPriority;
1653 if( uxCurrentBasePriority != uxNewPriority )
1655 /* The priority change may have readied a task of higher
1656 priority than the calling task. */
1657 if( uxNewPriority > uxCurrentBasePriority )
1659 if( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] )
1661 /* The priority of a task other than the currently
1662 running task is being raised. Is the priority being
1663 raised above that of the running task? */
1664 if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
1666 xYieldRequired = pdTRUE;
1668 else if ( pxTCB->xCoreID != xPortGetCoreID() )
/* Task is pinned to the other core: ask that core to yield if needed. */
1670 taskYIELD_OTHER_CORE( pxTCB->xCoreID, uxNewPriority );
1674 mtCOVERAGE_TEST_MARKER();
1679 /* The priority of the running task is being raised,
1680 but the running task must already be the highest
1681 priority task able to run so no yield is required. */
1684 else if( pxTCB == pxCurrentTCB[ xPortGetCoreID() ] )
1686 /* Setting the priority of the running task down means
1687 there may now be another task of higher priority that
1688 is ready to execute. */
1689 xYieldRequired = pdTRUE;
1693 /* Setting the priority of any other task down does not
1694 require a yield as the running task must be above the
1695 new priority of the task being modified. */
1698 /* Remember the ready list the task might be referenced from
1699 before its uxPriority member is changed so the
1700 taskRESET_READY_PRIORITY() macro can function correctly. */
1701 uxPriorityUsedOnEntry = pxTCB->uxPriority;
1703 #if ( configUSE_MUTEXES == 1 )
1705 /* Only change the priority being used if the task is not
1706 currently using an inherited priority. */
1707 if( pxTCB->uxBasePriority == pxTCB->uxPriority )
1709 pxTCB->uxPriority = uxNewPriority;
1713 mtCOVERAGE_TEST_MARKER();
1716 /* The base priority gets set whatever. */
1717 pxTCB->uxBasePriority = uxNewPriority;
1721 pxTCB->uxPriority = uxNewPriority;
1725 /* Only reset the event list item value if the value is not
1726 being used for anything else. */
1727 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
1729 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1733 mtCOVERAGE_TEST_MARKER();
1736 /* If the task is in the blocked or suspended list we need do
1737 nothing more than change it's priority variable. However, if
1738 the task is in a ready list it needs to be removed and placed
1739 in the list appropriate to its new priority. */
1740 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xGenericListItem ) ) != pdFALSE )
1742 /* The task is currently in its ready list - remove before adding
1743 it to it's new ready list. As we are in a critical section we
1744 can do this even if the scheduler is suspended. */
1745 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1747 /* It is known that the task is in its ready list so
1748 there is no need to check again and the port level
1749 reset macro can be called directly. */
1750 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
1754 mtCOVERAGE_TEST_MARKER();
1756 prvReaddTaskToReadyList( pxTCB );
1760 mtCOVERAGE_TEST_MARKER();
1763 if( xYieldRequired == pdTRUE )
1765 taskYIELD_IF_USING_PREEMPTION();
1769 mtCOVERAGE_TEST_MARKER();
1772 /* Remove compiler warning about unused variables when the port
1773 optimised task selection is not being used. */
1774 ( void ) uxPriorityUsedOnEntry;
1777 taskEXIT_CRITICAL(&xTaskQueueMutex);
1780 #endif /* INCLUDE_vTaskPrioritySet */
1781 /*-----------------------------------------------------------*/
1781 /*-----------------------------------------------------------*/
1783 #if ( INCLUDE_vTaskSuspend == 1 )
/*
 * Move xTask (NULL suspends the calling task) to the suspended list.
 *
 * The task is removed from its ready/delayed list and from any event list,
 * then appended to xSuspendedTaskList — all inside the xTaskQueueMutex
 * critical section.  If the calling task suspended itself and the scheduler
 * is running, a yield is forced; before the scheduler starts,
 * pxCurrentTCB[core] is instead repaired (NULL when every task is now
 * suspended, else vTaskSwitchContext() picks another).  When some other
 * task was suspended, the next-unblock time is recomputed in case it
 * referred to the suspended task.
 */
1784 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
1789 taskENTER_CRITICAL(&xTaskQueueMutex);
1791 /* If null is passed in here then it is the running task that is
1793 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
1795 traceTASK_SUSPEND( pxTCB );
1797 /* Remove task from the ready/delayed list and place in the
1799 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1801 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1805 mtCOVERAGE_TEST_MARKER();
1808 /* Is the task waiting on an event also? */
1809 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1811 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1815 mtCOVERAGE_TEST_MARKER();
1817 traceMOVED_TASK_TO_SUSPENDED_LIST(pxTCB);
1818 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) );
/* Snapshot the running task before leaving the critical section so the
comparison below is against the task that was current at suspend time. */
1819 curTCB = pxCurrentTCB[ xPortGetCoreID() ];
1821 taskEXIT_CRITICAL(&xTaskQueueMutex);
1823 if( pxTCB == curTCB )
1825 if( xSchedulerRunning != pdFALSE )
1827 /* The current task has just been suspended. */
1828 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
1829 portYIELD_WITHIN_API();
1833 /* The scheduler is not running, but the task that was pointed
1834 to by pxCurrentTCB has just been suspended and pxCurrentTCB
1835 must be adjusted to point to a different task. */
1836 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks )
1838 /* No other tasks are ready, so set pxCurrentTCB back to
1839 NULL so when the next task is created pxCurrentTCB will
1840 be set to point to it no matter what its relative priority
1842 taskENTER_CRITICAL(&xTaskQueueMutex);
1843 pxCurrentTCB[ xPortGetCoreID() ] = NULL;
1844 taskEXIT_CRITICAL(&xTaskQueueMutex);
1848 vTaskSwitchContext();
1854 if( xSchedulerRunning != pdFALSE )
1856 /* A task other than the currently running task was suspended,
1857 reset the next expected unblock time in case it referred to the
1858 task that is now in the Suspended state. */
1859 taskENTER_CRITICAL(&xTaskQueueMutex);
1861 prvResetNextTaskUnblockTime();
1863 taskEXIT_CRITICAL(&xTaskQueueMutex);
1867 mtCOVERAGE_TEST_MARKER();
1872 #endif /* INCLUDE_vTaskSuspend */
1873 /*-----------------------------------------------------------*/
1873 /*-----------------------------------------------------------*/
1875 #if ( INCLUDE_vTaskSuspend == 1 )
/*
 * Return pdTRUE only when xTask is GENUINELY suspended: its generic list
 * item is in xSuspendedTaskList, its event list item is in no list (a
 * non-NULL container would mean "blocked with infinite timeout"), and it
 * has not already been readied from an ISR (event item in this core's
 * xPendingReadyList).  Caller must hold xTaskQueueMutex.
 */
1876 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
1878 BaseType_t xReturn = pdFALSE;
1879 const TCB_t * const pxTCB = ( TCB_t * ) xTask;
1881 /* Accesses xPendingReadyList so must be called from a critical
1882 section (caller is required to hold xTaskQueueMutex). */
1884 /* It does not make sense to check if the calling task is suspended. */
1885 configASSERT( xTask );
1887 /* Is the task being resumed actually in the suspended list? */
1888 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) ) != pdFALSE )
1890 /* Has the task already been resumed from within an ISR? */
1891 if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ xPortGetCoreID() ], &( pxTCB->xEventListItem ) ) == pdFALSE )
1893 /* Is it in the suspended list because it is in the Suspended
1894 state, or because is is blocked with no timeout? */
1895 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE )
1901 mtCOVERAGE_TEST_MARKER();
1906 mtCOVERAGE_TEST_MARKER();
1911 mtCOVERAGE_TEST_MARKER();
1915 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1917 #endif /* INCLUDE_vTaskSuspend */
1918 /*-----------------------------------------------------------*/
1918 /*-----------------------------------------------------------*/
1920 #if ( INCLUDE_vTaskSuspend == 1 )
/*
 * Move a task previously suspended with vTaskSuspend() back to the Ready
 * state.  Safe to call from task context only (use xTaskResumeFromISR from
 * an ISR).  Resuming a task that is not Suspended has no effect.  May yield
 * this core, or signal the task's pinned core, if the resumed task should
 * run immediately.
 */
1922 void vTaskResume( TaskHandle_t xTaskToResume )
1924 TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
1926 /* It does not make sense to resume the calling task. */
1927 configASSERT( xTaskToResume );
1929 taskENTER_CRITICAL(&xTaskQueueMutex);
1930 /* The parameter cannot be NULL as it is impossible to resume the
1931 currently executing task. */
1932 if( ( pxTCB != NULL ) && ( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] ) )
1935 if( prvTaskIsTaskSuspended( pxTCB ) == pdTRUE )
1937 traceTASK_RESUME( pxTCB );
1939 /* As we are in a critical section we can access the ready
1940 lists even if the scheduler is suspended. */
1941 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
1942 prvAddTaskToReadyList( pxTCB );
1944 /* We may have just resumed a higher priority task. */
1945 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
1947 /* This yield may not cause the task just resumed to run,
1948 but will leave the lists in the correct state for the
1950 taskYIELD_IF_USING_PREEMPTION();
/* SMP: the task is pinned to the other core - poke that core so it can
reschedule if the resumed task's priority warrants it. */
1952 else if( pxTCB->xCoreID != xPortGetCoreID() )
1954 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
1958 mtCOVERAGE_TEST_MARKER();
1963 mtCOVERAGE_TEST_MARKER();
1969 mtCOVERAGE_TEST_MARKER();
1971 taskEXIT_CRITICAL(&xTaskQueueMutex);
1974 #endif /* INCLUDE_vTaskSuspend */
1976 /*-----------------------------------------------------------*/
1978 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
/*
 * ISR-safe variant of vTaskResume().  Returns pdTRUE if resuming the task
 * should trigger a context switch on this core (the caller performs the
 * actual yield on ISR exit); pdFALSE otherwise.  If this core's scheduler
 * is suspended the task is parked on the pending-ready list instead of
 * being moved to a ready list directly.
 */
1980 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
1982 BaseType_t xYieldRequired = pdFALSE;
1983 TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
1985 configASSERT( xTaskToResume );
1987 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
1990 if( prvTaskIsTaskSuspended( pxTCB ) == pdTRUE )
1992 traceTASK_RESUME_FROM_ISR( pxTCB );
1994 /* Check the ready lists can be accessed. */
1995 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
1997 /* Ready lists can be accessed so move the task from the
1998 suspended list to the ready list directly. */
1999 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
2000 prvAddTaskToReadyList( pxTCB );
/* Resumed task can run here and outranks (or equals) the current task:
ask the caller to yield.  Otherwise, if pinned elsewhere, poke that core. */
2002 if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2004 xYieldRequired = pdTRUE;
2006 else if ( pxTCB->xCoreID != xPortGetCoreID() )
2008 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
2012 mtCOVERAGE_TEST_MARKER();
2017 /* The delayed or ready lists cannot be accessed so the task
2018 is held in the pending ready list until the scheduler is
2020 vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
2025 mtCOVERAGE_TEST_MARKER();
2028 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
2030 return xYieldRequired;
2033 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
2034 /*-----------------------------------------------------------*/
/*
 * Start the FreeRTOS scheduler.  Creates one idle task per core (pinned to
 * that core) and, if configured, the timer service task, then hands control
 * to the port layer via xPortStartScheduler().  On success this function
 * never returns; it only falls through if task creation failed (out of
 * heap) or if xTaskEndScheduler() is later called.
 */
2036 void vTaskStartScheduler( void )
2041 /* Add the per-core idle tasks at the lowest priority. */
2042 for ( i=0; i<portNUM_PROCESSORS; i++) {
2043 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2045 /* Create the idle task, storing its handle in xIdleTaskHandle so it can
2046 be returned by the xTaskGetIdleTaskHandle() function. */
2047 xReturn = xTaskCreatePinnedToCore( prvIdleTask, "IDLE", tskIDLE_STACK_SIZE, ( void * ) NULL, ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), &xIdleTaskHandle[i], i ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2051 /* Create the idle task without storing its handle. */
2052 xReturn = xTaskCreatePinnedToCore( prvIdleTask, "IDLE", tskIDLE_STACK_SIZE, ( void * ) NULL, ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), NULL, i); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2054 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2057 #if ( configUSE_TIMERS == 1 )
/* The timer service task is only created if the idle tasks were. */
2059 if( xReturn == pdPASS )
2061 xReturn = xTimerCreateTimerTask();
2065 mtCOVERAGE_TEST_MARKER();
2068 #endif /* configUSE_TIMERS */
2070 if( xReturn == pdPASS )
2072 /* Interrupts are turned off here, to ensure a tick does not occur
2073 before or during the call to xPortStartScheduler(). The stacks of
2074 the created tasks contain a status word with interrupts switched on
2075 so interrupts will automatically get re-enabled when the first task
2077 portDISABLE_INTERRUPTS();
2080 xTickCount = ( TickType_t ) 0U;
2082 /* If configGENERATE_RUN_TIME_STATS is defined then the following
2083 macro must be defined to configure the timer/counter used to generate
2084 the run time counter time base. */
2085 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
2086 xSchedulerRunning = pdTRUE;
2088 /* Setting up the timer tick is hardware specific and thus in the
2089 portable interface. */
2090 if( xPortStartScheduler() != pdFALSE )
2092 /* Should not reach here as if the scheduler is running the
2093 function will not return. */
2097 /* Should only reach here if a task calls xTaskEndScheduler(). */
2102 /* This line will only be reached if the kernel could not be started,
2103 because there was not enough FreeRTOS heap to create the idle task
2104 or the timer task. */
2105 configASSERT( xReturn );
2108 /*-----------------------------------------------------------*/
/*
 * Stop the scheduler: disable interrupts, clear the running flag, then let
 * the port layer restore whatever state it saved when the scheduler was
 * started.  The statement order matters - interrupts must be off before the
 * flag changes and before the port teardown runs.
 */
2110 void vTaskEndScheduler( void )
2112 /* Stop the scheduler interrupts and call the portable scheduler end
2113 routine so the original ISRs can be restored if necessary. The port
2114 layer must ensure interrupts enable bit is left in the correct state. */
2115 portDISABLE_INTERRUPTS();
2116 xSchedulerRunning = pdFALSE;
2117 vPortEndScheduler();
2119 /*----------------------------------------------------------*/
2122 #if ( configUSE_NEWLIB_REENTRANT == 1 )
//Return global reent struct if FreeRTOS isn't running,
/* Newlib hook: resolve the per-context reentrancy structure.  Before the
scheduler runs (no current task) newlib's global _GLOBAL_REENT is used;
afterwards each task carries its own struct _reent inside its TCB so
errno and stdio state are task-local. */
2124 struct _reent* __getreent() {
2125 //No lock needed because if this changes, we won't be running anymore.
2126 TCB_t *currTask=xTaskGetCurrentTaskHandle();
2127 if (currTask==NULL) {
2128 //No task running. Return global struct.
2129 return _GLOBAL_REENT;
2131 //We have a task; return its reentrant struct.
2132 return &currTask->xNewLib_reent;
/*
 * Suspend the scheduler on the calling core only (other cores keep
 * scheduling).  Calls nest: each call increments this core's suspension
 * count and must be balanced by a call to xTaskResumeAll().  The increment
 * is done with interrupts disabled so a tick ISR cannot observe a torn
 * update on this core.
 */
2138 void vTaskSuspendAll( void )
2140 /* A critical section is not required as the variable is of type
2141 BaseType_t. Please read Richard Barry's reply in the following link to a
2142 post in the FreeRTOS support forum before reporting this as a bug! -
2143 http://goo.gl/wu4acr */
2146 state = portENTER_CRITICAL_NESTED();
2147 ++uxSchedulerSuspended[ xPortGetCoreID() ];
2148 portEXIT_CRITICAL_NESTED(state);
2150 /*----------------------------------------------------------*/
2152 #if ( configUSE_TICKLESS_IDLE != 0 )
/* Report whether any task above idle priority is on a ready list (on any
core's behalf - the ready lists are shared).  Used by
prvGetExpectedIdleTime() in the SMP case to avoid entering tickless idle
while the other core still has runnable work queued. */
2154 static BaseType_t xHaveReadyTasks()
2156 for (int i = tskIDLE_PRIORITY + 1; i < configMAX_PRIORITIES; ++i)
2158 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ i ] ) ) > 0 )
2164 mtCOVERAGE_TEST_MARKER();
/*
 * Called from the idle task when tickless idle is enabled: return how many
 * ticks may safely be suppressed (time until the next task unblocks).  The
 * early branches return a short/zero idle time when sleep must not be
 * entered: a higher-priority task is current, other ready tasks exist
 * (SMP), or more idle-priority tasks than cores are ready (time slicing
 * needed).
 */
2171 static TickType_t prvGetExpectedIdleTime( void )
2176 taskENTER_CRITICAL(&xTaskQueueMutex);
2177 if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
2181 #if portNUM_PROCESSORS > 1
2182 /* This function is called from Idle task; in single core case this
2183 * means that no higher priority tasks are ready to run, and we can
2184 * enter sleep. In SMP case, there might be ready tasks waiting for
2185 * the other CPU, so need to check all ready lists.
2187 else if( xHaveReadyTasks() )
2191 #endif // portNUM_PROCESSORS > 1
2192 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > portNUM_PROCESSORS )
2194 /* There are other idle priority tasks in the ready state. If
2195 time slicing is used then the very next tick interrupt must be
/* Otherwise sleep until the next scheduled unblock time.  The tick count
is read under its own mutex because TickType_t may be wider than the
native word on some configurations. */
2201 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2202 xReturn = xNextTaskUnblockTime - xTickCount;
2203 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2205 taskEXIT_CRITICAL(&xTaskQueueMutex);
2210 #endif /* configUSE_TICKLESS_IDLE */
2211 /*----------------------------------------------------------*/
/*
 * Undo one vTaskSuspendAll() on this core.  When the nesting count drops to
 * zero: move tasks an ISR readied while suspended from the pending-ready
 * list to the ready lists, replay any ticks that were pended, and yield if
 * required.  Returns pdTRUE if a yield was already performed inside this
 * function (so the caller need not yield again), pdFALSE otherwise.
 */
2213 BaseType_t xTaskResumeAll( void )
2216 BaseType_t xAlreadyYielded = pdFALSE;
2218 /* If uxSchedulerSuspended[ xPortGetCoreID() ] is zero then this function does not match a
2219 previous call to vTaskSuspendAll(). */
2220 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] );
2221 /* It is possible that an ISR caused a task to be removed from an event
2222 list while the scheduler was suspended. If this was the case then the
2223 removed task will have been added to the xPendingReadyList. Once the
2224 scheduler has been resumed it is safe to move all the pending ready
2225 tasks from this list into their appropriate ready list. */
2227 taskENTER_CRITICAL(&xTaskQueueMutex);
2229 --uxSchedulerSuspended[ xPortGetCoreID() ];
2231 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
2233 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
2235 /* Move any readied tasks from the pending list into the
2236 appropriate ready list. */
2237 while( listLIST_IS_EMPTY( &xPendingReadyList[ xPortGetCoreID() ] ) == pdFALSE )
2239 pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[ xPortGetCoreID() ] ) );
2240 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2241 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
2242 prvAddTaskToReadyList( pxTCB );
2244 /* If the moved task has a priority higher than the current
2245 task then a yield must be performed. */
2246 if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2248 /* We can schedule the awoken task on this CPU. */
2249 xYieldPending[xPortGetCoreID()] = pdTRUE;
2253 mtCOVERAGE_TEST_MARKER();
2257 /* If any ticks occurred while the scheduler was suspended then
2258 they should be processed now. This ensures the tick count does
2259 not slip, and that any delayed tasks are resumed at the correct
/* Each pended tick is replayed through xTaskIncrementTick() so delayed
tasks wake exactly as if the tick had fired normally. */
2261 if( uxPendedTicks > ( UBaseType_t ) 0U )
2263 while( uxPendedTicks > ( UBaseType_t ) 0U )
2265 if( xTaskIncrementTick() != pdFALSE )
2267 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
2271 mtCOVERAGE_TEST_MARKER();
2278 mtCOVERAGE_TEST_MARKER();
2281 if( xYieldPending[ xPortGetCoreID() ] == pdTRUE )
2283 #if( configUSE_PREEMPTION != 0 )
2285 xAlreadyYielded = pdTRUE;
2288 taskYIELD_IF_USING_PREEMPTION();
2292 mtCOVERAGE_TEST_MARKER();
2298 mtCOVERAGE_TEST_MARKER();
2301 taskEXIT_CRITICAL(&xTaskQueueMutex);
2303 return xAlreadyYielded;
2305 /*-----------------------------------------------------------*/
/*
 * Snapshot the current tick count from task context.  The tick-count mutex
 * guards the read because TickType_t can be wider than the native word
 * (e.g. on 16-bit ports) and so cannot be read atomically.
 */
2307 TickType_t xTaskGetTickCount( void )
2311 /* Critical section required if running on a 16 bit processor. */
2312 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2314 xTicks = xTickCount;
2316 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2320 /*-----------------------------------------------------------*/
/*
 * ISR-safe counterpart of xTaskGetTickCount(): same snapshot of the tick
 * count, but taken with the ISR variant of the critical-section macros.
 */
2322 TickType_t xTaskGetTickCountFromISR( void )
2326 taskENTER_CRITICAL_ISR(&xTickCountMutex);
2328 xReturn = xTickCount;
2329 // vPortCPUReleaseMutex( &xTickCountMutex );
2331 taskEXIT_CRITICAL_ISR(&xTickCountMutex);
2335 /*-----------------------------------------------------------*/
/*
 * Return the total number of tasks the kernel is tracking, in every state
 * (ready, blocked, suspended, and deleted-but-not-yet-cleaned-up).
 */
2337 UBaseType_t uxTaskGetNumberOfTasks( void )
2339 /* A critical section is not required because the variables are of type
2341 return uxCurrentNumberOfTasks;
2343 /*-----------------------------------------------------------*/
2345 #if ( INCLUDE_pcTaskGetTaskName == 1 )
/* Return a pointer to the human-readable name stored in the task's TCB.
Passing NULL queries the calling task (prvGetTCBFromHandle resolves it).
The returned pointer aliases TCB storage and is only valid while the task
exists. */
2346 char *pcTaskGetTaskName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2350 /* If null is passed in here then the name of the calling task is being queried. */
2351 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
2352 configASSERT( pxTCB );
2353 return &( pxTCB->pcTaskName[ 0 ] );
2356 #endif /* INCLUDE_pcTaskGetTaskName */
2357 /*-----------------------------------------------------------*/
2359 #if ( configUSE_TRACE_FACILITY == 1 )
/*
 * Fill pxTaskStatusArray with one TaskStatus_t per task in the system
 * (ready, blocked, deleted-pending-cleanup, suspended - in that order) and
 * optionally report the total run time.  Returns the number of entries
 * populated, which is 0 if uxArraySize is too small for all tasks.  The
 * whole walk happens inside one critical section, so it is expensive and
 * intended for debug/trace use.
 */
2361 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
2363 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
2365 taskENTER_CRITICAL(&xTaskQueueMutex);
2367 /* Is there a space in the array for each task in the system? */
2368 if( uxArraySize >= uxCurrentNumberOfTasks )
2370 /* Fill in an TaskStatus_t structure with information on each
2371 task in the Ready state. */
/* Walk the ready lists from highest priority down to idle. */
2375 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
2377 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2379 /* Fill in an TaskStatus_t structure with information on each
2380 task in the Blocked state. */
2381 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
2382 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
2384 #if( INCLUDE_vTaskDelete == 1 )
2386 /* Fill in an TaskStatus_t structure with information on
2387 each task that has been deleted but not yet cleaned up. */
2388 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
2392 #if ( INCLUDE_vTaskSuspend == 1 )
2394 /* Fill in an TaskStatus_t structure with information on
2395 each task in the Suspended state. */
2396 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
2400 #if ( configGENERATE_RUN_TIME_STATS == 1)
2402 if( pulTotalRunTime != NULL )
2404 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2405 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
2407 *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
/* Run-time stats disabled: report zero rather than leave the out
parameter uninitialised. */
2413 if( pulTotalRunTime != NULL )
2415 *pulTotalRunTime = 0;
2422 mtCOVERAGE_TEST_MARKER();
2425 taskEXIT_CRITICAL(&xTaskQueueMutex);
2429 #endif /* configUSE_TRACE_FACILITY */
2430 /*----------------------------------------------------------*/
2432 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
/* Return the idle task handle for the calling core.  Asserts (rather than
returning NULL) if called before vTaskStartScheduler() has created the
idle tasks. */
2434 TaskHandle_t xTaskGetIdleTaskHandle( void )
2436 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2437 started, then xIdleTaskHandle will be NULL. */
2438 configASSERT( ( xIdleTaskHandle[ xPortGetCoreID() ] != NULL ) );
2439 return xIdleTaskHandle[ xPortGetCoreID() ];
/* Variant taking an explicit core id.  Returns NULL for an out-of-range
cpuid; asserts if the scheduler has not yet created that core's idle
task. */
2442 TaskHandle_t xTaskGetIdleTaskHandleForCPU( UBaseType_t cpuid )
2444 TaskHandle_t xReturn = NULL;
2445 /* If xTaskGetIdleTaskHandleForCPU() is called before the scheduler has been
2446 started, then xIdleTaskHandle will be NULL. */
2447 if (cpuid < portNUM_PROCESSORS) {
2448 configASSERT( ( xIdleTaskHandle[ cpuid ] != NULL ) );
2449 xReturn = xIdleTaskHandle[ cpuid ];
2454 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2455 /*----------------------------------------------------------*/
2457 /* This conditional compilation should use inequality to 0, not equality to 1.
2458 This is to ensure vTaskStepTick() is available when user defined low power mode
2459 implementations require configUSE_TICKLESS_IDLE to be set to a value other than
2461 #if ( configUSE_TICKLESS_IDLE != 0 )
/*
 * Advance the tick count by xTicksToJump in one step, after a tickless-idle
 * sleep during which tick interrupts were suppressed.  The tick hook is NOT
 * invoked for the skipped ticks.  Asserts that the jump cannot overshoot
 * the next scheduled task unblock time.
 */
2463 void vTaskStepTick( const TickType_t xTicksToJump )
2465 /* Correct the tick count value after a period during which the tick
2466 was suppressed. Note this does *not* call the tick hook function for
2467 each stepped tick. */
2468 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2469 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
2470 xTickCount += xTicksToJump;
2471 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2472 traceINCREASE_TICK_COUNT( xTicksToJump );
2475 #endif /* configUSE_TICKLESS_IDLE */
2476 /*----------------------------------------------------------*/
/*
 * Tick-interrupt handler body, called by the port layer on every tick.
 * Only core 0 advances the shared tick count and unblocks timed-out tasks;
 * other cores just run the tick hooks and (per the ToDo below) assume a
 * context switch is wanted.  Returns pdTRUE when the caller should perform
 * a context switch on its core, pdFALSE otherwise.  If this core's
 * scheduler is suspended the tick is pended (replayed later by
 * xTaskResumeAll()) and only the hooks run.
 */
2478 BaseType_t xTaskIncrementTick( void )
2481 TickType_t xItemValue;
2482 BaseType_t xSwitchRequired = pdFALSE;
2484 /* Called by the portable layer each time a tick interrupt occurs.
2485 Increments the tick then checks to see if the new tick value will cause any
2486 tasks to be unblocked. */
2488 /* Only let core 0 increase the tick count, to keep accurate track of time. */
2489 /* ToDo: This doesn't really play nice with the logic below: it means when core 1 is
2490 running a low-priority task, it will keep running it until there is a context
2491 switch, even when this routine (running on core 0) unblocks a bunch of high-priority
2492 tasks... this is less than optimal -- JD. */
2493 if ( xPortGetCoreID()!=0 ) {
2494 #if ( configUSE_TICK_HOOK == 1 )
2495 vApplicationTickHook();
2496 #endif /* configUSE_TICK_HOOK */
2497 esp_vApplicationTickHook();
2500 We can't really calculate what we need, that's done on core 0... just assume we need a switch.
2501 ToDo: Make this more intelligent? -- JD
2507 traceTASK_INCREMENT_TICK( xTickCount );
2509 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
2511 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2512 /* Increment the RTOS tick, switching the delayed and overflowed
2513 delayed lists if it wraps to 0. */
2515 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2517 //The other CPU may decide to mess with the task queues, so this needs a mux.
2518 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
2520 /* Minor optimisation. The tick count cannot change in this
2522 const TickType_t xConstTickCount = xTickCount;
/* Tick count wrapped to zero: swap the delayed and overflow-delayed
lists so wake times remain ordered across the wrap. */
2524 if( xConstTickCount == ( TickType_t ) 0U )
2526 taskSWITCH_DELAYED_LISTS();
2530 mtCOVERAGE_TEST_MARKER();
2533 /* See if this tick has made a timeout expire. Tasks are stored in
2534 the queue in the order of their wake time - meaning once one task
2535 has been found whose block time has not expired there is no need to
2536 look any further down the list. */
2537 if( xConstTickCount >= xNextTaskUnblockTime )
2541 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
2543 /* The delayed list is empty. Set xNextTaskUnblockTime
2544 to the maximum possible value so it is extremely
2546 if( xTickCount >= xNextTaskUnblockTime ) test will pass
2547 next time through. */
2548 xNextTaskUnblockTime = portMAX_DELAY;
2553 /* The delayed list is not empty, get the value of the
2554 item at the head of the delayed list. This is the time
2555 at which the task at the head of the delayed list must
2556 be removed from the Blocked state. */
2557 pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
2558 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xGenericListItem ) );
2560 if( xConstTickCount < xItemValue )
2562 /* It is not time to unblock this item yet, but the
2563 item value is the time at which the task at the head
2564 of the blocked list must be removed from the Blocked
2565 state - so record the item value in
2566 xNextTaskUnblockTime. */
2567 xNextTaskUnblockTime = xItemValue;
2572 mtCOVERAGE_TEST_MARKER();
2575 /* It is time to remove the item from the Blocked state. */
2576 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
2578 /* Is the task waiting on an event also? If so remove
2579 it from the event list. */
2580 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2582 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2586 mtCOVERAGE_TEST_MARKER();
2589 /* Place the unblocked task into the appropriate ready
2591 prvAddTaskToReadyList( pxTCB );
2593 /* A task being unblocked cannot cause an immediate
2594 context switch if preemption is turned off. */
2595 #if ( configUSE_PREEMPTION == 1 )
2597 /* Preemption is on, but a context switch should
2598 only be performed if the unblocked task has a
2599 priority that is equal to or higher than the
2600 currently executing task. */
2601 if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2603 xSwitchRequired = pdTRUE;
2607 mtCOVERAGE_TEST_MARKER();
2610 #endif /* configUSE_PREEMPTION */
2616 /* Tasks of equal priority to the currently running task will share
2617 processing time (time slice) if preemption is on, and the application
2618 writer has not explicitly turned time slicing off. */
2619 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
2621 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
2623 xSwitchRequired = pdTRUE;
2627 mtCOVERAGE_TEST_MARKER();
2630 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
2633 /* Guard against the tick hook being called when the pended tick
2634 count is being unwound (when the scheduler is being unlocked). */
2635 if( uxPendedTicks == ( UBaseType_t ) 0U )
2637 #if ( configUSE_TICK_HOOK == 1 )
2638 vApplicationTickHook();
2639 #endif /* configUSE_TICK_HOOK */
2640 esp_vApplicationTickHook();
2644 mtCOVERAGE_TEST_MARKER();
2647 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
/* Scheduler suspended on this core: the tick itself is pended, but the
hooks still fire at tick rate. */
2653 /* The tick hook gets called at regular intervals, even if the
2654 scheduler is locked. */
2655 #if ( configUSE_TICK_HOOK == 1 )
2657 vApplicationTickHook();
2660 esp_vApplicationTickHook();
2663 #if ( configUSE_PREEMPTION == 1 )
/* A yield requested earlier (e.g. by an ISR) also forces a switch. */
2665 if( xYieldPending [ xPortGetCoreID() ] != pdFALSE )
2667 xSwitchRequired = pdTRUE;
2671 mtCOVERAGE_TEST_MARKER();
2674 #endif /* configUSE_PREEMPTION */
2676 return xSwitchRequired;
2678 /*-----------------------------------------------------------*/
2680 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Store an application-defined hook/tag function in a task's TCB.  A NULL
xTask targets the calling task.  The assignment is done inside a critical
section because the tag can also be read from interrupt context. */
2682 void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
2686 /* If xTask is NULL then it is the task hook of the calling task that is
2690 xTCB = ( TCB_t * ) pxCurrentTCB[ xPortGetCoreID() ];
2694 xTCB = ( TCB_t * ) xTask;
2697 /* Save the hook function in the TCB. A critical section is required as
2698 the value can be accessed from an interrupt. */
2699 taskENTER_CRITICAL(&xTaskQueueMutex);
2700 xTCB->pxTaskTag = pxHookFunction;
2701 taskEXIT_CRITICAL(&xTaskQueueMutex);
2704 #endif /* configUSE_APPLICATION_TASK_TAG */
2705 /*-----------------------------------------------------------*/
2707 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Read back the tag previously stored with vTaskSetApplicationTaskTag().
A NULL xTask queries the calling task.  Read under the task-queue mutex
for the same reason the setter writes under it. */
2709 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
2712 TaskHookFunction_t xReturn;
2714 /* If xTask is NULL then we are setting our own task hook. */
2717 xTCB = ( TCB_t * ) xTaskGetCurrentTaskHandle();
2721 xTCB = ( TCB_t * ) xTask;
2724 /* Save the hook function in the TCB. A critical section is required as
2725 the value can be accessed from an interrupt. */
2726 taskENTER_CRITICAL(&xTaskQueueMutex);
2728 xReturn = xTCB->pxTaskTag;
2730 taskEXIT_CRITICAL(&xTaskQueueMutex);
2735 #endif /* configUSE_APPLICATION_TASK_TAG */
2736 /*-----------------------------------------------------------*/
2738 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Invoke a task's tag as a hook function, passing pvParameter through, and
return the hook's result.  A NULL xTask targets the calling task; a task
with no tag set is simply not called. */
2740 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
2745 /* If xTask is NULL then we are calling our own task hook. */
2748 xTCB = ( TCB_t * ) xTaskGetCurrentTaskHandle();
2752 xTCB = ( TCB_t * ) xTask;
2755 if( xTCB->pxTaskTag != NULL )
2757 xReturn = xTCB->pxTaskTag( pvParameter );
2767 #endif /* configUSE_APPLICATION_TASK_TAG */
2768 /*-----------------------------------------------------------*/
/*
 * Core of the SMP scheduler: pick the next task for the calling core and
 * install it in pxCurrentTCB[core].  If this core's scheduler is suspended
 * the switch is deferred by setting xYieldPending instead.  Also updates
 * run-time statistics, checks for stack overflow, and skips over tasks that
 * are currently executing on the other core or pinned to it.
 */
2770 void vTaskSwitchContext( void )
2772 //Theoretically, this is only called from either the tick interrupt or the crosscore interrupt, so disabling
2773 //interrupts shouldn't be necessary anymore. Still, for safety we'll leave it in for now.
2774 int irqstate=portENTER_CRITICAL_NESTED();
2776 if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
2778 /* The scheduler is currently suspended - do not allow a context
2780 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
2784 xYieldPending[ xPortGetCoreID() ] = pdFALSE;
2785 xSwitchingContext[ xPortGetCoreID() ] = pdTRUE;
2786 traceTASK_SWITCHED_OUT();
2788 #if ( configGENERATE_RUN_TIME_STATS == 1 )
2790 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2791 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
2793 ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
2796 /* Add the amount of time the task has been running to the
2797 accumulated time so far. The time the task started running was
2798 stored in ulTaskSwitchedInTime. Note that there is no overflow
2799 protection here so count values are only valid until the timer
2800 overflows. The guard against negative values is to protect
2801 against suspect run time stat counter implementations - which
2802 are provided by the application, not the kernel. */
2803 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
2804 if( ulTotalRunTime > ulTaskSwitchedInTime[ xPortGetCoreID() ] )
2806 pxCurrentTCB[ xPortGetCoreID() ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xPortGetCoreID() ] );
2810 mtCOVERAGE_TEST_MARKER();
2812 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
2813 ulTaskSwitchedInTime[ xPortGetCoreID() ] = ulTotalRunTime;
2815 #endif /* configGENERATE_RUN_TIME_STATS */
2817 /* Check for stack overflow, if configured. */
2818 taskFIRST_CHECK_FOR_STACK_OVERFLOW();
2819 taskSECOND_CHECK_FOR_STACK_OVERFLOW();
2821 /* Select a new task to run */
2824 We cannot do taskENTER_CRITICAL_ISR(&xTaskQueueMutex); here because it saves the interrupt context to the task tcb, and we're
2825 swapping that out here. Instead, we're going to do the work here ourselves. Because interrupts are already disabled, we only
2826 need to acquire the mutex.
2828 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
2829 vPortCPUAcquireMutex( &xTaskQueueMutex, __FUNCTION__, __LINE__ );
2831 vPortCPUAcquireMutex( &xTaskQueueMutex );
2834 unsigned portBASE_TYPE foundNonExecutingWaiter = pdFALSE, ableToSchedule = pdFALSE, resetListHead;
2835 portBASE_TYPE uxDynamicTopReady = uxTopReadyPriority;
2836 unsigned portBASE_TYPE holdTop=pdFALSE;
2839 * ToDo: This scheduler doesn't correctly implement the round-robin scheduling as done in the single-core
2840 * FreeRTOS stack when multiple tasks have the same priority and are all ready; it just keeps grabbing the
2841 * first one. ToDo: fix this.
2842 * (Is this still true? if any, there's the issue with one core skipping over the processes for the other
2843 * core, potentially not giving the skipped-over processes any time.)
/* Outer loop: walk the ready lists from the highest known-populated
priority downwards until a schedulable task is found for this core. */
2846 while ( ableToSchedule == pdFALSE && uxDynamicTopReady >= 0 )
2848 resetListHead = pdFALSE;
2849 // Nothing to do for empty lists
2850 if (!listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxDynamicTopReady ] ) )) {
2852 ableToSchedule = pdFALSE;
2855 /* Remember the current list item so that we
2856 can detect if all items have been inspected.
2857 Once this happens, we move on to a lower
2858 priority list (assuming nothing is suitable
2859 for scheduling). Note: This can return NULL if
2860 the list index is at the listItem */
2861 pxRefTCB = pxReadyTasksLists[ uxDynamicTopReady ].pxIndex->pvOwner;
2863 if ((void*)pxReadyTasksLists[ uxDynamicTopReady ].pxIndex==(void*)&pxReadyTasksLists[ uxDynamicTopReady ].xListEnd) {
2864 //pxIndex points to the list end marker. Skip that and just get the next item.
2865 listGET_OWNER_OF_NEXT_ENTRY( pxRefTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2869 listGET_OWNER_OF_NEXT_ENTRY( pxTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2870 /* Find out if the next task in the list is
2871 already being executed by another core */
2872 foundNonExecutingWaiter = pdTRUE;
2873 portBASE_TYPE i = 0;
2874 for ( i=0; i<portNUM_PROCESSORS; i++ ) {
2875 if (i == xPortGetCoreID()) {
2877 } else if (pxCurrentTCB[i] == pxTCB) {
2878 holdTop=pdTRUE; //keep this as the top prio, for the other CPU
2879 foundNonExecutingWaiter = pdFALSE;
2884 if (foundNonExecutingWaiter == pdTRUE) {
2885 /* If the task is not being executed
2886 by another core and its affinity is
2887 compatible with the current one,
2888 prepare it to be swapped in */
2889 if (pxTCB->xCoreID == tskNO_AFFINITY) {
2890 pxCurrentTCB[xPortGetCoreID()] = pxTCB;
2891 ableToSchedule = pdTRUE;
2892 } else if (pxTCB->xCoreID == xPortGetCoreID()) {
2893 pxCurrentTCB[xPortGetCoreID()] = pxTCB;
2894 ableToSchedule = pdTRUE;
2896 ableToSchedule = pdFALSE;
2897 holdTop=pdTRUE; //keep this as the top prio, for the other CPU
2900 ableToSchedule = pdFALSE;
/* A task was skipped before one was chosen: rewind pxIndex to the
remembered reference entry so future walks see a consistent head. */
2903 if (ableToSchedule == pdFALSE) {
2904 resetListHead = pdTRUE;
2905 } else if ((ableToSchedule == pdTRUE) && (resetListHead == pdTRUE)) {
2906 tskTCB * pxResetTCB;
2908 listGET_OWNER_OF_NEXT_ENTRY( pxResetTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2909 } while(pxResetTCB != pxRefTCB);
2911 } while ((ableToSchedule == pdFALSE) && (pxTCB != pxRefTCB));
/* Priority level exhausted: lower the cached top priority unless a
task at this level was held back for the other core. */
2913 if (!holdTop) --uxTopReadyPriority;
2915 --uxDynamicTopReady;
2918 traceTASK_SWITCHED_IN();
2919 xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;
2921 //Exit critical region manually as well: release the mux now, interrupts will be re-enabled when we
2922 //exit the function.
2923 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
2924 vPortCPUReleaseMutex( &xTaskQueueMutex, __FUNCTION__, __LINE__ );
2926 vPortCPUReleaseMutex( &xTaskQueueMutex );
2929 #if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
2930 vPortSetStackWatchpoint(pxCurrentTCB[xPortGetCoreID()]->pxStack);
2934 portEXIT_CRITICAL_NESTED(irqstate);
2936 /*-----------------------------------------------------------*/
/*
 * Block the calling task on pxEventList for up to xTicksToWait ticks.
 * Inserts the task's event list item into the (priority-ordered) event
 * list, removes the task from its ready list, and then places it on a
 * delayed list - or, when xTicksToWait is portMAX_DELAY and vTaskSuspend is
 * available, on the suspended list so no timing event can wake it.
 * Called from queue/event code; the whole operation runs under the task
 * queue mutex.
 */
2938 void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
2940 TickType_t xTimeToWake;
2942 configASSERT( pxEventList );
2944 taskENTER_CRITICAL(&xTaskQueueMutex);
2946 /* Place the event list item of the TCB in the appropriate event list.
2947 This is placed in the list in priority order so the highest priority task
2948 is the first to be woken by the event. The queue that contains the event
2949 list is locked, preventing simultaneous access from interrupts. */
2950 vListInsert( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
2952 /* The task must be removed from from the ready list before it is added to
2953 the blocked list as the same list item is used for both lists. Exclusive
2954 access to the ready lists guaranteed because the scheduler is locked. */
2955 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
2957 /* The current task must be in a ready list, so there is no need to
2958 check, and the port reset macro can be called directly. */
2959 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
2963 mtCOVERAGE_TEST_MARKER();
2966 #if ( INCLUDE_vTaskSuspend == 1 )
2968 if( xTicksToWait == portMAX_DELAY )
2970 /* Add the task to the suspended task list instead of a delayed task
2971 list to ensure the task is not woken by a timing event. It will
2972 block indefinitely. */
2973 traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
2974 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
2978 /* Calculate the time at which the task should be woken if the event
2979 does not occur. This may overflow but this doesn't matter, the
2980 scheduler will handle it. */
2981 xTimeToWake = xTickCount + xTicksToWait;
2982 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
2985 #else /* INCLUDE_vTaskSuspend */
2987 /* Calculate the time at which the task should be woken if the event does
2988 not occur. This may overflow but this doesn't matter, the scheduler
2990 xTimeToWake = xTickCount + xTicksToWait;
2991 prvAddCurrentTaskToDelayedList( xTimeToWake );
2993 #endif /* INCLUDE_vTaskSuspend */
2995 taskEXIT_CRITICAL(&xTaskQueueMutex);
2998 /*-----------------------------------------------------------*/
/*
 * Block the calling task on an event-group wait list. Unlike
 * vTaskPlaceOnEventList() the event list item is appended (unordered) and
 * carries xItemValue (tagged with taskEVENT_LIST_ITEM_VALUE_IN_USE) so the
 * event-groups implementation can recover the wait bits later.
 * Must be called with the scheduler suspended (asserted below); the SMP
 * port additionally takes the xTaskQueueMutex critical section.
 */
3000 void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
3002 TickType_t xTimeToWake;
3004 configASSERT( pxEventList );
3006 taskENTER_CRITICAL(&xTaskQueueMutex);
3008 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3009 the event groups implementation. */
3010 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != 0 );
3012 /* Store the item value in the event list item. It is safe to access the
3013 event list item here as interrupts won't access the event list item of a
3014 task that is not in the Blocked state. */
3015 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3017 /* Place the event list item of the TCB at the end of the appropriate event
3018 list. It is safe to access the event list here because it is part of an
3019 event group implementation - and interrupts don't access event groups
3020 directly (instead they access them indirectly by pending function calls to
3022 vListInsertEnd( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
3024 /* The task must be removed from the ready list before it is added to the
3025 blocked list. Exclusive access can be assured to the ready list as the
3026 scheduler is locked. */
3027 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
3029 /* The current task must be in a ready list, so there is no need to
3030 check, and the port reset macro can be called directly. */
3031 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
3035 mtCOVERAGE_TEST_MARKER();
3038 #if ( INCLUDE_vTaskSuspend == 1 )
3040 if( xTicksToWait == portMAX_DELAY )
3042 /* Add the task to the suspended task list instead of a delayed task
3043 list to ensure it is not woken by a timing event. It will block
3045 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
3049 /* Calculate the time at which the task should be woken if the event
3050 does not occur. This may overflow but this doesn't matter, the
3051 kernel will manage it correctly. */
3052 xTimeToWake = xTickCount + xTicksToWait;
3053 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
3056 #else /* INCLUDE_vTaskSuspend */
3058 /* Calculate the time at which the task should be woken if the event does
3059 not occur. This may overflow but this doesn't matter, the kernel
3060 will manage it correctly. */
3061 xTimeToWake = xTickCount + xTicksToWait;
3062 prvAddCurrentTaskToDelayedList( xTimeToWake );
3064 #endif /* INCLUDE_vTaskSuspend */
3066 taskEXIT_CRITICAL(&xTaskQueueMutex);
3068 /*-----------------------------------------------------------*/
3070 #if configUSE_TIMERS == 1
/*
 * Kernel-internal variant used by the timer service task: blocks the
 * calling task on pxEventList with no indefinite-block path. Assumes this
 * task is the only waiter, so the cheaper vListInsertEnd() is used instead
 * of the priority-ordered vListInsert(). Not part of the public API.
 */
3072 void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, const TickType_t xTicksToWait )
3074 TickType_t xTimeToWake;
3076 taskENTER_CRITICAL(&xTaskQueueMutex);
3077 configASSERT( pxEventList );
3079 /* This function should not be called by application code hence the
3080 'Restricted' in its name. It is not part of the public API. It is
3081 designed for use by kernel code, and has special calling requirements -
3082 it should be called from a critical section. */
3085 /* Place the event list item of the TCB in the appropriate event list.
3086 In this case it is assume that this is the only task that is going to
3087 be waiting on this event list, so the faster vListInsertEnd() function
3088 can be used in place of vListInsert. */
3089 vListInsertEnd( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
3091 /* We must remove this task from the ready list before adding it to the
3092 blocked list as the same list item is used for both lists. This
3093 function is called form a critical section. */
3094 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
3096 /* The current task must be in a ready list, so there is no need to
3097 check, and the port reset macro can be called directly. */
3098 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
3102 mtCOVERAGE_TEST_MARKER();
3105 /* Calculate the time at which the task should be woken if the event does
3106 not occur. This may overflow but this doesn't matter. */
3107 xTimeToWake = xTickCount + xTicksToWait;
3109 traceTASK_DELAY_UNTIL();
3110 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
3111 taskEXIT_CRITICAL(&xTaskQueueMutex);
3115 #endif /* configUSE_TIMERS */
3116 /*-----------------------------------------------------------*/
/*
 * Unblock the highest-priority task waiting on pxEventList (the list is
 * priority-sorted, so the head entry is taken). The task is moved to the
 * ready list if a suitable scheduler is running, otherwise parked on the
 * per-CPU pending-ready list until that CPU's scheduler resumes.
 * Returns pdTRUE (via the elided branch — not visible in this extract)
 * when the woken task should preempt the caller; also pends a yield or
 * signals the other core as appropriate.
 * Must be called from a critical section (ISR-safe variant used here).
 */
3118 BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
3120 TCB_t *pxUnblockedTCB;
3122 BaseType_t xTaskCanBeReady;
3123 UBaseType_t i, uxTargetCPU;
3125 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
3126 called from a critical section within an ISR. */
3127 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
3128 /* The event list is sorted in priority order, so the first in the list can
3129 be removed as it is known to be the highest priority. Remove the TCB from
3130 the delayed list, and add it to the ready list.
3132 If an event is for a queue that is locked then this function will never
3133 get called - the lock count on the queue will get modified instead. This
3134 means exclusive access to the event list is guaranteed here.
3136 This function assumes that a check has already been made to ensure that
3137 pxEventList is not empty. */
3138 if ( ( listLIST_IS_EMPTY( pxEventList ) ) == pdFALSE ) {
3139 pxUnblockedTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
3140 configASSERT( pxUnblockedTCB );
3141 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
/* NOTE(review): an early critical-section exit here suggests an elided
   early-return branch for the empty-list case — confirm against the full
   source. */
3143 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
3147 /* Determine if the task can possibly be run on either CPU now, either because the scheduler
3148 the task is pinned to is running or because a scheduler is running on any CPU. */
3149 xTaskCanBeReady = pdFALSE;
3150 if ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY ) {
3151 uxTargetCPU = xPortGetCoreID();
3152 for (i = 0; i < portNUM_PROCESSORS; i++) {
3153 if ( uxSchedulerSuspended[ i ] == ( UBaseType_t ) pdFALSE ) {
3154 xTaskCanBeReady = pdTRUE;
3159 uxTargetCPU = pxUnblockedTCB->xCoreID;
3160 xTaskCanBeReady = uxSchedulerSuspended[ uxTargetCPU ] == ( UBaseType_t ) pdFALSE;
3164 if( xTaskCanBeReady )
3166 ( void ) uxListRemove( &( pxUnblockedTCB->xGenericListItem ) );
3167 prvAddTaskToReadyList( pxUnblockedTCB );
3171 /* The delayed and ready lists cannot be accessed, so hold this task
3172 pending until the scheduler is resumed on this CPU. */
3173 vListInsertEnd( &( xPendingReadyList[ uxTargetCPU ] ), &( pxUnblockedTCB->xEventListItem ) );
3176 if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
3178 /* Return true if the task removed from the event list has a higher
3179 priority than the calling task. This allows the calling task to know if
3180 it should force a context switch now. */
3183 /* Mark that a yield is pending in case the user is not using the
3184 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3185 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
3187 else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
3189 taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
3197 #if( configUSE_TICKLESS_IDLE == 1 )
3199 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
3200 might be set to the blocked task's time out time. If the task is
3201 unblocked for a reason other than a timeout xNextTaskUnblockTime is
3202 normally left unchanged, because it is automatically get reset to a new
3203 value when the tick count equals xNextTaskUnblockTime. However if
3204 tickless idling is used it might be more important to enter sleep mode
3205 at the earliest possible time - so reset xNextTaskUnblockTime here to
3206 ensure it is updated at the earliest possible time. */
3207 prvResetNextTaskUnblockTime();
3210 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
3214 /*-----------------------------------------------------------*/
/*
 * Unblock the task owning pxEventListItem (event-groups path). Stores the
 * new xItemValue (tagged with taskEVENT_LIST_ITEM_VALUE_IN_USE) back into
 * the item, removes the task from its event and delayed lists, and makes
 * it ready. Must be called with the scheduler suspended (asserted).
 * Pends a yield on this core or signals the other core if the woken task
 * should preempt; return value branch lines are elided in this extract.
 */
3216 BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
3218 TCB_t *pxUnblockedTCB;
3221 taskENTER_CRITICAL(&xTaskQueueMutex);
3222 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3223 the event flags implementation. */
3224 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != pdFALSE );
3226 /* Store the new item value in the event list. */
3227 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3229 /* Remove the event list form the event flag. Interrupts do not access
3231 pxUnblockedTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxEventListItem );
3232 configASSERT( pxUnblockedTCB );
3233 ( void ) uxListRemove( pxEventListItem );
3235 /* Remove the task from the delayed list and add it to the ready list. The
3236 scheduler is suspended so interrupts will not be accessing the ready
3238 ( void ) uxListRemove( &( pxUnblockedTCB->xGenericListItem ) );
3239 prvAddTaskToReadyList( pxUnblockedTCB );
3241 if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
3243 /* Return true if the task removed from the event list has
3244 a higher priority than the calling task. This allows
3245 the calling task to know if it should force a context
3249 /* Mark that a yield is pending in case the user is not using the
3250 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3251 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
3253 else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
3255 taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
3263 taskEXIT_CRITICAL(&xTaskQueueMutex);
3266 /*-----------------------------------------------------------*/
/*
 * Capture the current time state (tick count and overflow count) into
 * pxTimeOut, for later comparison by xTaskCheckForTimeOut().
 */
3268 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
3270 configASSERT( pxTimeOut );
3271 pxTimeOut->xOverflowCount = xNumOfOverflows;
3272 pxTimeOut->xTimeOnEntering = xTickCount;
3274 /*-----------------------------------------------------------*/
/*
 * Decide whether a block time recorded by vTaskSetTimeOutState() has
 * expired, handling tick-counter wraparound via the overflow count.
 * On a non-timeout, *pxTicksToWait is reduced by the elapsed time and the
 * timeout state is re-armed. With INCLUDE_vTaskSuspend == 1 a wait of
 * portMAX_DELAY never times out. Return statements are among the lines
 * elided from this extract.
 */
3276 BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
3280 configASSERT( pxTimeOut );
3281 configASSERT( pxTicksToWait );
3283 taskENTER_CRITICAL(&xTickCountMutex);
3285 /* Minor optimisation. The tick count cannot change in this block. */
3286 const TickType_t xConstTickCount = xTickCount;
3288 #if ( INCLUDE_vTaskSuspend == 1 )
3289 /* If INCLUDE_vTaskSuspend is set to 1 and the block time specified is
3290 the maximum block time then the task should block indefinitely, and
3291 therefore never time out. */
3292 if( *pxTicksToWait == portMAX_DELAY )
3296 else /* We are not blocking indefinitely, perform the checks below. */
3299 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
3301 /* The tick count is greater than the time at which vTaskSetTimeout()
3302 was called, but has also overflowed since vTaskSetTimeOut() was called.
3303 It must have wrapped all the way around and gone past us again. This
3304 passed since vTaskSetTimeout() was called. */
3307 else if( ( xConstTickCount - pxTimeOut->xTimeOnEntering ) < *pxTicksToWait )
3309 /* Not a genuine timeout. Adjust parameters for time remaining. */
3310 *pxTicksToWait -= ( xConstTickCount - pxTimeOut->xTimeOnEntering );
3311 vTaskSetTimeOutState( pxTimeOut );
3319 taskEXIT_CRITICAL(&xTickCountMutex);
3323 /*-----------------------------------------------------------*/
/*
 * Record that a yield was requested but deferred; the pending flag is
 * honoured later (e.g. by the tick handler) on this core.
 */
3325 void vTaskMissedYield( void )
3327 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
3329 /*-----------------------------------------------------------*/
3331 #if ( configUSE_TRACE_FACILITY == 1 )
/*
 * Trace facility accessor: return the uxTaskNumber previously assigned to
 * xTask via vTaskSetTaskNumber(). The NULL-handle branch is among the
 * lines elided from this extract.
 */
3333 UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
3335 UBaseType_t uxReturn;
3340 pxTCB = ( TCB_t * ) xTask;
3341 uxReturn = pxTCB->uxTaskNumber;
3351 #endif /* configUSE_TRACE_FACILITY */
3352 /*-----------------------------------------------------------*/
3354 #if ( configUSE_TRACE_FACILITY == 1 )
/*
 * Trace facility mutator: tag xTask with an application-chosen number,
 * readable back via uxTaskGetTaskNumber().
 */
3356 void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
3362 pxTCB = ( TCB_t * ) xTask;
3363 pxTCB->uxTaskNumber = uxHandle;
3367 #endif /* configUSE_TRACE_FACILITY */
3370 * -----------------------------------------------------------
3372 * ----------------------------------------------------------
3374 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3375 * language extensions. The equivalent prototype for this function is:
3377 * void prvIdleTask( void *pvParameters );
/*
 * The idle task body. Each iteration: reaps deleted tasks, optionally
 * yields (cooperative mode, or when another idle-priority task is ready),
 * runs the application and esp-idf idle hooks, and — when tickless idle is
 * enabled — attempts portSUPPRESS_TICKS_AND_SLEEP(), falling back to
 * esp_vApplicationWaitiHook() if sleep was not entered.
 * NOTE(review): the enclosing for(;;) loop and several braces are among
 * the lines elided from this extract.
 */
3380 static portTASK_FUNCTION( prvIdleTask, pvParameters )
3382 /* Stop warnings. */
3383 ( void ) pvParameters;
3387 /* See if any tasks have been deleted. */
3388 prvCheckTasksWaitingTermination();
3390 #if ( configUSE_PREEMPTION == 0 )
3392 /* If we are not using preemption we keep forcing a task switch to
3393 see if any other task has become available. If we are using
3394 preemption we don't need to do this as any task becoming available
3395 will automatically get the processor anyway. */
3398 #endif /* configUSE_PREEMPTION */
3400 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
3402 /* When using preemption tasks of equal priority will be
3403 timesliced. If a task that is sharing the idle priority is ready
3404 to run then the idle task should yield before the end of the
3407 A critical region is not required here as we are just reading from
3408 the list, and an occasional incorrect value will not matter. If
3409 the ready list at the idle priority contains more than one task
3410 then a task other than the idle task is ready to execute. */
3411 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
3417 mtCOVERAGE_TEST_MARKER();
3420 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
3422 #if ( configUSE_IDLE_HOOK == 1 )
3424 extern void vApplicationIdleHook( void );
3426 /* Call the user defined function from within the idle task. This
3427 allows the application designer to add background functionality
3428 without the overhead of a separate task.
3429 NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
3430 CALL A FUNCTION THAT MIGHT BLOCK. */
3431 vApplicationIdleHook();
3433 #endif /* configUSE_IDLE_HOOK */
3435 /* Call the esp-idf hook system */
3436 esp_vApplicationIdleHook();
3440 /* This conditional compilation should use inequality to 0, not equality
3441 to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
3442 user defined low power mode implementations require
3443 configUSE_TICKLESS_IDLE to be set to a value other than 1. */
3444 #if ( configUSE_TICKLESS_IDLE != 0 )
3446 TickType_t xExpectedIdleTime;
3447 BaseType_t xEnteredSleep = pdFALSE;
3449 /* It is not desirable to suspend then resume the scheduler on
3450 each iteration of the idle task. Therefore, a preliminary
3451 test of the expected idle time is performed without the
3452 scheduler suspended. The result here is not necessarily
3454 xExpectedIdleTime = prvGetExpectedIdleTime();
3456 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3458 taskENTER_CRITICAL(&xTaskQueueMutex);
3460 /* Now the scheduler is suspended, the expected idle
3461 time can be sampled again, and this time its value can
3463 configASSERT( xNextTaskUnblockTime >= xTickCount );
3464 xExpectedIdleTime = prvGetExpectedIdleTime();
3466 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3468 traceLOW_POWER_IDLE_BEGIN();
3469 xEnteredSleep = portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
3470 traceLOW_POWER_IDLE_END();
3474 mtCOVERAGE_TEST_MARKER();
3477 taskEXIT_CRITICAL(&xTaskQueueMutex);
3481 mtCOVERAGE_TEST_MARKER();
3483 /* It might be possible to enter tickless idle again, so skip
3484 * the fallback sleep hook if tickless idle was successful
3486 if ( !xEnteredSleep )
3488 esp_vApplicationWaitiHook();
3492 esp_vApplicationWaitiHook();
3493 #endif /* configUSE_TICKLESS_IDLE */
3496 /*-----------------------------------------------------------*/
3498 #if configUSE_TICKLESS_IDLE != 0
/*
 * Called from the port's sleep entry to confirm sleep is still safe:
 * returns eAbortSleep if a task became ready or a yield was pended on
 * this core while the scheduler was suspended; otherwise eStandardSleep,
 * upgraded to eNoTasksWaitingTimeout when (with timers disabled) every
 * application task is on the suspended list.
 */
3500 eSleepModeStatus eTaskConfirmSleepModeStatus( void )
3502 eSleepModeStatus eReturn = eStandardSleep;
3503 taskENTER_CRITICAL(&xTaskQueueMutex);
3505 if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xPortGetCoreID() ] ) != 0 )
3507 /* A task was made ready while the scheduler was suspended. */
3508 eReturn = eAbortSleep;
3510 else if( xYieldPending[ xPortGetCoreID() ] != pdFALSE )
3512 /* A yield was pended while the scheduler was suspended. */
3513 eReturn = eAbortSleep;
3517 #if configUSE_TIMERS == 0
3519 /* The idle task exists in addition to the application tasks. */
3520 const UBaseType_t uxNonApplicationTasks = 1;
3522 /* If timers are not being used and all the tasks are in the
3523 suspended list (which might mean they have an infinite block
3524 time rather than actually being suspended) then it is safe to
3525 turn all clocks off and just wait for external interrupts. */
3526 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
3528 eReturn = eNoTasksWaitingTimeout;
3532 mtCOVERAGE_TEST_MARKER();
3535 #endif /* configUSE_TIMERS */
3537 taskEXIT_CRITICAL(&xTaskQueueMutex);
3541 #endif /* configUSE_TICKLESS_IDLE */
3542 /*-----------------------------------------------------------*/
3544 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3546 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
/*
 * ESP-IDF extension: store pvValue in slot xIndex of the task's thread
 * local storage array together with a deletion callback that will be
 * invoked (by prvDeleteTLS) when the task is deleted. Out-of-range
 * indices are silently ignored.
 */
3548 void vTaskSetThreadLocalStoragePointerAndDelCallback( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue , TlsDeleteCallbackFunction_t xDelCallback)
3552 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3554 taskENTER_CRITICAL(&xTaskQueueMutex);
3555 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3556 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3557 pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
3558 taskEXIT_CRITICAL(&xTaskQueueMutex);
/* Convenience wrapper: set a TLS pointer with no deletion callback. */
3562 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
3564 vTaskSetThreadLocalStoragePointerAndDelCallback( xTaskToSet, xIndex, pvValue, (TlsDeleteCallbackFunction_t)NULL );
/* Variant compiled when delete callbacks are disabled: stores the TLS
   pointer directly, with no callback bookkeeping. */
3569 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
3573 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3575 taskENTER_CRITICAL(&xTaskQueueMutex);
3576 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3577 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3578 taskEXIT_CRITICAL(&xTaskQueueMutex);
3581 #endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS */
3583 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3584 /*-----------------------------------------------------------*/
3586 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
/*
 * Return the TLS pointer stored in slot xIndex of xTaskToQuery (NULL for
 * the calling task via prvGetTCBFromHandle). Returns NULL for
 * out-of-range indices.
 */
3588 void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
3590 void *pvReturn = NULL;
3593 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3595 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
3596 pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
3606 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3609 #if ( portUSING_MPU_WRAPPERS == 1 )
3610 /* ToDo: Check for multicore */
/*
 * Replace the MPU region configuration of xTaskToModify (NULL means the
 * calling task) by delegating to the port's vPortStoreTaskMPUSettings().
 * Marked UNTESTED_FUNCTION in this port.
 */
3611 void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
3615 UNTESTED_FUNCTION();
3616 /* If null is passed in here then we are deleting ourselves. */
3617 pxTCB = prvGetTCBFromHandle( xTaskToModify );
3619 vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
3622 #endif /* portUSING_MPU_WRAPPERS */
3623 /*-----------------------------------------------------------*/
/*
 * One-time initialisation of every scheduler list: the per-priority ready
 * lists, both delayed lists, the per-CPU pending-ready lists, and (when
 * configured) the termination and suspended lists. Finishes by pointing
 * pxDelayedTaskList/pxOverflowDelayedTaskList at list1/list2.
 */
3625 static void prvInitialiseTaskLists( void )
3627 UBaseType_t uxPriority;
3629 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
3631 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
3634 vListInitialise( &xDelayedTaskList1 );
3635 vListInitialise( &xDelayedTaskList2 );
3636 vListInitialise( &xPendingReadyList[ 0 ] );
3637 if (portNUM_PROCESSORS == 2) {
3638 vListInitialise( &xPendingReadyList[ 1 ] );
3641 #if ( INCLUDE_vTaskDelete == 1 )
3643 vListInitialise( &xTasksWaitingTermination );
3645 #endif /* INCLUDE_vTaskDelete */
3647 #if ( INCLUDE_vTaskSuspend == 1 )
3649 vListInitialise( &xSuspendedTaskList );
3651 #endif /* INCLUDE_vTaskSuspend */
3653 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
3655 pxDelayedTaskList = &xDelayedTaskList1;
3656 pxOverflowDelayedTaskList = &xDelayedTaskList2;
3658 /*-----------------------------------------------------------*/
/*
 * Idle-task helper: reap tasks on xTasksWaitingTermination. Only tasks
 * that ran on (or have no affinity away from) this core are freed here,
 * and a task still current on either CPU is skipped. The TCB is detached
 * inside the critical section, then its TLS delete callbacks and
 * prvDeleteTCB() run outside it. NOTE(review): the comment below says
 * "ucTasksDeleted" but the loop tests uxTasksDeleted — the code is the
 * authority.
 */
3660 static void prvCheckTasksWaitingTermination( void )
3662 #if ( INCLUDE_vTaskDelete == 1 )
3664 BaseType_t xListIsEmpty;
3665 int core = xPortGetCoreID();
3667 /* uxTasksDeleted is used to prevent vTaskSuspendAll() being called
3668 too often in the idle task. */
3669 while(uxTasksDeleted > ( UBaseType_t ) 0U )
3671 TCB_t *pxTCB = NULL;
3673 taskENTER_CRITICAL(&xTaskQueueMutex);
3675 xListIsEmpty = listLIST_IS_EMPTY( &xTasksWaitingTermination );
3676 if( xListIsEmpty == pdFALSE )
3678 /* We only want to kill tasks that ran on this core because e.g. _xt_coproc_release needs to
3679 be called on the core the process is pinned on, if any */
3680 ListItem_t *target = listGET_HEAD_ENTRY(&xTasksWaitingTermination);
3681 for( ; target != listGET_END_MARKER(&xTasksWaitingTermination); target = listGET_NEXT(target) ){ //Walk the list
3682 TCB_t *tgt_tcb = ( TCB_t * )listGET_LIST_ITEM_OWNER(target);
3683 int affinity = tgt_tcb->xCoreID;
3684 //Self deleting tasks are added to Termination List before they switch context. Ensure they aren't still currently running
3685 if( pxCurrentTCB[core] == tgt_tcb || (portNUM_PROCESSORS > 1 && pxCurrentTCB[!core] == tgt_tcb) ){
3686 continue; //Can't free memory of task that is still running
3688 if(affinity == core || affinity == tskNO_AFFINITY){ //Find first item not pinned to other core
3694 ( void ) uxListRemove( target ); //Remove list item from list
3695 --uxCurrentNumberOfTasks;
3700 taskEXIT_CRITICAL(&xTaskQueueMutex); //Need to call deletion callbacks outside critical section
3702 if (pxTCB != NULL) { //Call deletion callbacks and free TCB memory
3703 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
3704 prvDeleteTLS( pxTCB );
3706 prvDeleteTCB( pxTCB );
3710 mtCOVERAGE_TEST_MARKER();
3711 break; //No TCB found that could be freed by this core, break out of loop
3715 #endif /* vTaskDelete */
3717 /*-----------------------------------------------------------*/
3719 //This should be called with the taskqueuemutex grabbed. -JD
/*
 * Insert the current task of xCoreID into a delayed list, keyed by
 * xTimeToWake. A wake time below the current tick count means the
 * addition overflowed, so the overflow list is used instead; otherwise
 * the normal delayed list is used and xNextTaskUnblockTime is lowered if
 * this task now wakes earliest.
 */
3720 static void prvAddCurrentTaskToDelayedList( const BaseType_t xCoreID, const TickType_t xTimeToWake )
3722 /* The list item will be inserted in wake time order. */
3723 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xCoreID ]->xGenericListItem ), xTimeToWake );
3725 if( xTimeToWake < xTickCount )
3727 traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();
3728 /* Wake time has overflowed. Place this item in the overflow list. */
3729 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB[ xCoreID ]->xGenericListItem ) );
3733 traceMOVED_TASK_TO_DELAYED_LIST();
3734 /* The wake time has not overflowed, so the current block list is used. */
3735 vListInsert( pxDelayedTaskList, &( pxCurrentTCB[ xCoreID ]->xGenericListItem ) );
3737 /* If the task entering the blocked state was placed at the head of the
3738 list of blocked tasks then xNextTaskUnblockTime needs to be updated
3740 if( xTimeToWake < xNextTaskUnblockTime )
3742 xNextTaskUnblockTime = xTimeToWake;
3746 mtCOVERAGE_TEST_MARKER();
/*
 * ESP-IDF extension: return the core affinity (xCoreID, possibly
 * tskNO_AFFINITY) of xTask; NULL queries the calling task.
 */
3752 BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
3756 pxTCB = prvGetTCBFromHandle( xTask );
3758 return pxTCB->xCoreID;
3760 /*-----------------------------------------------------------*/
3763 #if ( configUSE_TRACE_FACILITY == 1 )
/*
 * Fill pxTaskStatusArray with one TaskStatus_t per task on pxList,
 * labelling each with eState (suspended tasks that sit on an event list
 * are reported as eBlocked instead). Returns the number of entries
 * written; the do/while walks the list via listGET_OWNER_OF_NEXT_ENTRY
 * until it wraps back to the first TCB.
 */
3765 static UBaseType_t prvListTaskWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
3767 volatile TCB_t *pxNextTCB, *pxFirstTCB;
3768 UBaseType_t uxTask = 0;
3770 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
3772 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );
3774 /* Populate an TaskStatus_t structure within the
3775 pxTaskStatusArray array for each task that is referenced from
3776 pxList. See the definition of TaskStatus_t in task.h for the
3777 meaning of each TaskStatus_t structure member. */
3780 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
3782 pxTaskStatusArray[ uxTask ].xHandle = ( TaskHandle_t ) pxNextTCB;
3783 pxTaskStatusArray[ uxTask ].pcTaskName = ( const char * ) &( pxNextTCB->pcTaskName [ 0 ] );
3784 pxTaskStatusArray[ uxTask ].xTaskNumber = pxNextTCB->uxTCBNumber;
3785 pxTaskStatusArray[ uxTask ].eCurrentState = eState;
3786 pxTaskStatusArray[ uxTask ].uxCurrentPriority = pxNextTCB->uxPriority;
3787 pxTaskStatusArray[ uxTask ].xCoreID = pxNextTCB->xCoreID;
3789 #if ( INCLUDE_vTaskSuspend == 1 )
3791 /* If the task is in the suspended list then there is a chance
3792 it is actually just blocked indefinitely - so really it should
3793 be reported as being in the Blocked state. */
3794 if( eState == eSuspended )
3796 if( listLIST_ITEM_CONTAINER( &( pxNextTCB->xEventListItem ) ) != NULL )
3798 pxTaskStatusArray[ uxTask ].eCurrentState = eBlocked;
3802 #endif /* INCLUDE_vTaskSuspend */
3804 #if ( configUSE_MUTEXES == 1 )
3806 pxTaskStatusArray[ uxTask ].uxBasePriority = pxNextTCB->uxBasePriority;
3810 pxTaskStatusArray[ uxTask ].uxBasePriority = 0;
3814 #if ( configGENERATE_RUN_TIME_STATS == 1 )
3816 pxTaskStatusArray[ uxTask ].ulRunTimeCounter = pxNextTCB->ulRunTimeCounter;
3820 pxTaskStatusArray[ uxTask ].ulRunTimeCounter = 0;
3824 #if ( portSTACK_GROWTH > 0 )
3826 pxTaskStatusArray[ uxTask ].usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxNextTCB->pxEndOfStack );
3830 pxTaskStatusArray[ uxTask ].usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxNextTCB->pxStack );
3836 } while( pxNextTCB != pxFirstTCB );
3840 mtCOVERAGE_TEST_MARKER();
3846 #endif /* configUSE_TRACE_FACILITY */
3847 /*-----------------------------------------------------------*/
3849 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
/*
 * Count how many StackType_t words at the far end of a stack still hold
 * the tskSTACK_FILL_BYTE pattern, i.e. were never touched. Walks from
 * pucStackByte against the direction of stack growth; the byte-count
 * increment inside the loop is among the lines elided from this extract.
 */
3851 static uint32_t prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
3853 uint32_t ulCount = 0U;
3855 while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
3857 pucStackByte -= portSTACK_GROWTH;
3861 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
3863 return ( uint32_t ) ulCount;
3866 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) */
3867 /*-----------------------------------------------------------*/
3869 #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
/*
 * Return the minimum free stack space (in words) xTask has ever had,
 * by scanning the unused end of its stack for the fill pattern. NULL
 * queries the calling task. For downward-growing stacks the scan starts
 * at pxStack (the low end); otherwise at pxEndOfStack.
 */
3871 UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
3874 uint8_t *pucEndOfStack;
3875 UBaseType_t uxReturn;
3877 pxTCB = prvGetTCBFromHandle( xTask );
3879 #if portSTACK_GROWTH < 0
3881 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3885 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3889 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
3894 #endif /* INCLUDE_uxTaskGetStackHighWaterMark */
3895 /*-----------------------------------------------------------*/
3897 #if (INCLUDE_pxTaskGetStackStart == 1)
/*
 * ESP-IDF extension: return the base (pxStack) of xTask's stack as a
 * byte pointer; NULL queries the calling task.
 */
3899 uint8_t* pxTaskGetStackStart( TaskHandle_t xTask)
3904 pxTCB = prvGetTCBFromHandle( xTask );
3905 uxReturn = (uint8_t*)pxTCB->pxStack;
3910 #endif /* INCLUDE_pxTaskGetStackStart */
3911 /*-----------------------------------------------------------*/
3913 #if ( INCLUDE_vTaskDelete == 1 )
/*
 * Final teardown of a TCB: port-specific cleanup, newlib reent
 * reclamation and MPU-settings release where configured, then freeing of
 * stack/TCB memory according to how they were allocated (always dynamic,
 * or decided per-task via ucStaticallyAllocated when static and dynamic
 * allocation are both possible).
 */
3915 static void prvDeleteTCB( TCB_t *pxTCB )
3917 /* This call is required for any port specific cleanup related to task.
3918 It must be above the vPortFree() calls. */
3919 portCLEAN_UP_TCB( pxTCB );
3921 /* Free up the memory allocated by the scheduler for the task. It is up
3922 to the task to free any memory allocated at the application level. */
3923 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3925 _reclaim_reent( &( pxTCB->xNewLib_reent ) );
3927 #endif /* configUSE_NEWLIB_REENTRANT */
3929 #if ( portUSING_MPU_WRAPPERS == 1 )
3930 vPortReleaseTaskMPUSettings( &( pxTCB->xMPUSettings) );
3933 #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
3935 /* The task can only have been allocated dynamically - free both
3936 the stack and TCB. */
3937 vPortFreeAligned( pxTCB->pxStack );
3940 #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
3942 /* The task could have been allocated statically or dynamically, so
3943 check what was statically allocated before trying to free the
3945 if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
3947 /* Both the stack and TCB were allocated dynamically, so both
3949 vPortFreeAligned( pxTCB->pxStack );
3952 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
3954 /* Only the stack was statically allocated, so the TCB is the
3955 only memory that must be freed. */
3960 /* Neither the stack nor the TCB were allocated dynamically, so
3961 nothing needs to be freed. */
3962 configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
3963 mtCOVERAGE_TEST_MARKER();
3966 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
3969 #endif /* INCLUDE_vTaskDelete */
3970 /*-----------------------------------------------------------*/
3972 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
/*
 * Invoke every registered TLS deletion callback for a task being deleted,
 * passing the slot index and the stored pointer. NOTE(review): the loop
 * compares a signed int against a UBaseType_t cast — works for sane
 * config values but will trip -Wsign-compare; consider UBaseType_t x.
 */
3974 static void prvDeleteTLS( TCB_t *pxTCB )
3976 configASSERT( pxTCB );
3977 for( int x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
3979 if (pxTCB->pvThreadLocalStoragePointersDelCallback[ x ] != NULL) //If del cb is set
3981 pxTCB->pvThreadLocalStoragePointersDelCallback[ x ](x, pxTCB->pvThreadLocalStoragePointers[ x ]); //Call del cb
3986 #endif /* ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) */
3987 /*-----------------------------------------------------------*/
/*
 * Recompute xNextTaskUnblockTime from the head of the current delayed
 * list: portMAX_DELAY when the list is empty, otherwise the wake time
 * stored in the head item's list-item value.
 */
3989 static void prvResetNextTaskUnblockTime( void )
3993 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
3995 /* The new current delayed list is empty. Set
3996 xNextTaskUnblockTime to the maximum possible value so it is
3997 extremely unlikely that the
3998 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
3999 there is an item in the delayed list. */
4000 xNextTaskUnblockTime = portMAX_DELAY;
4004 /* The new current delayed list is not empty, get the value of
4005 the item at the head of the delayed list. This is the time at
4006 which the task at the head of the delayed list should be removed
4007 from the Blocked state. */
4008 ( pxTCB ) = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
4009 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xGenericListItem ) );
4012 /*-----------------------------------------------------------*/
4014 #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
/* Return the handle of the task currently running on the calling core.
   The per-core TCB pointer is read with interrupts masked so the read
   cannot be torn by a context switch on this core. */
4016 TaskHandle_t xTaskGetCurrentTaskHandle( void )
4018 TaskHandle_t xReturn;
4021 state = portENTER_CRITICAL_NESTED();
4022 xReturn = pxCurrentTCB[ xPortGetCoreID() ];
4023 portEXIT_CRITICAL_NESTED(state);
/* Return the handle of the task currently running on core 'cpuid', or
   NULL when cpuid is out of range (>= portNUM_PROCESSORS). */
4028 TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t cpuid )
4030 TaskHandle_t xReturn=NULL;
4032 //Xtensa-specific: the pxCurrentTCB pointer is atomic so we shouldn't need a lock.
4033 if (cpuid < portNUM_PROCESSORS) {
4034 xReturn = pxCurrentTCB[ cpuid ];
4041 #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
4042 /*-----------------------------------------------------------*/
4044 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
/* Report the scheduler state as seen from this core:
   taskSCHEDULER_NOT_STARTED before vTaskStartScheduler() runs,
   taskSCHEDULER_SUSPENDED while this core's scheduler is suspended
   (vTaskSuspendAll), otherwise taskSCHEDULER_RUNNING. */
4046 BaseType_t xTaskGetSchedulerState( void )
4051 state = portENTER_CRITICAL_NESTED();
4052 if( xSchedulerRunning == pdFALSE )
4054 xReturn = taskSCHEDULER_NOT_STARTED;
/* Scheduler is running globally — distinguish per-core suspension. */
4058 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
4060 xReturn = taskSCHEDULER_RUNNING;
4064 xReturn = taskSCHEDULER_SUSPENDED;
4067 portEXIT_CRITICAL_NESTED(state);
4072 #endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
4073 /*-----------------------------------------------------------*/
4075 #if ( configUSE_MUTEXES == 1 )
/* Priority inheritance: if the task that currently holds a mutex
   (pxMutexHolder) has a lower priority than the task on this core that is
   trying to take the mutex, temporarily raise the holder to the caller's
   priority so it cannot be starved by medium-priority tasks.  If the
   holder is on a ready list it is moved to the list for its new priority. */
4077 void vTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
4079 TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
4081 taskENTER_CRITICAL(&xTickCountMutex);
4082 /* If the mutex was given back by an interrupt while the queue was
4083 locked then the mutex holder might now be NULL. */
4084 if( pxMutexHolder != NULL )
4086 if( pxTCB->uxPriority < pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
4088 taskENTER_CRITICAL(&xTaskQueueMutex);
4089 /* Adjust the mutex holder state to account for its new
4090 priority. Only reset the event list item value if the value is
4091 not being used for anything else. */
4092 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
4094 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4098 mtCOVERAGE_TEST_MARKER();
4101 /* If the task being modified is in the ready state it will need to
4102 be moved into a new list. */
4103 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxTCB->uxPriority ] ), &( pxTCB->xGenericListItem ) ) != pdFALSE )
4105 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
/* The holder was the last entry at its old priority — clear that
   priority's ready bit. */
4107 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4111 mtCOVERAGE_TEST_MARKER();
4114 /* Inherit the priority before being moved into the new list. */
4115 pxTCB->uxPriority = pxCurrentTCB[ xPortGetCoreID() ]->uxPriority;
4116 prvReaddTaskToReadyList( pxTCB );
4120 /* Just inherit the priority. */
4121 pxTCB->uxPriority = pxCurrentTCB[ xPortGetCoreID() ]->uxPriority;
4124 taskEXIT_CRITICAL(&xTaskQueueMutex);
4126 traceTASK_PRIORITY_INHERIT( pxTCB, pxCurrentTCB[ xPortGetCoreID() ]->uxPriority );
4130 mtCOVERAGE_TEST_MARKER();
4135 mtCOVERAGE_TEST_MARKER();
4138 taskEXIT_CRITICAL(&xTickCountMutex);
4142 #endif /* configUSE_MUTEXES */
4143 /*-----------------------------------------------------------*/
4145 #if ( configUSE_MUTEXES == 1 )
/* Undo priority inheritance when a mutex is released: decrement the
   holder's held-mutex count and, if this was its last mutex and it is
   running at an inherited (raised) priority, restore the base priority and
   re-queue the task on the correct ready list.  Returns pdTRUE when a
   context switch may be required as a result, otherwise pdFALSE. */
4147 BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
4149 TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
4150 BaseType_t xReturn = pdFALSE;
4151 taskENTER_CRITICAL(&xTickCountMutex);
4153 if( pxMutexHolder != NULL )
4155 configASSERT( pxTCB->uxMutexesHeld );
4156 ( pxTCB->uxMutexesHeld )--;
/* Priority only needs restoring if it was actually raised. */
4158 if( pxTCB->uxPriority != pxTCB->uxBasePriority )
4160 /* Only disinherit if no other mutexes are held. */
4161 if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
4163 taskENTER_CRITICAL(&xTaskQueueMutex);
4164 /* A task can only have an inherited priority if it holds
4165 the mutex. If the mutex is held by a task then it cannot be
4166 given from an interrupt, and if a mutex is given by the
4167 holding task then it must be the running state task. Remove
4168 the holding task from the ready list. */
4169 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
4171 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4175 mtCOVERAGE_TEST_MARKER();
4178 /* Disinherit the priority before adding the task into the
4180 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4181 pxTCB->uxPriority = pxTCB->uxBasePriority;
4183 /* Reset the event list item value. It cannot be in use for
4184 any other purpose if this task is running, and it must be
4185 running to give back the mutex. */
4186 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4187 prvReaddTaskToReadyList( pxTCB );
4189 /* Return true to indicate that a context switch is required.
4190 This is only actually required in the corner case whereby
4191 multiple mutexes were held and the mutexes were given back
4192 in an order different to that in which they were taken.
4193 If a context switch did not occur when the first mutex was
4194 returned, even if a task was waiting on it, then a context
4195 switch should occur when the last mutex is returned whether
4196 a task is waiting on it or not. */
4198 taskEXIT_CRITICAL(&xTaskQueueMutex);
4202 mtCOVERAGE_TEST_MARKER();
4207 mtCOVERAGE_TEST_MARKER();
4212 mtCOVERAGE_TEST_MARKER();
4215 taskEXIT_CRITICAL(&xTickCountMutex);
4219 #endif /* configUSE_MUTEXES */
4220 /*-----------------------------------------------------------*/
4222 /* For multicore, this assumes the vPortCPUAcquireMutex is recursive, that is, it can be called multiple
4223 times and the release call will have to be called as many times for the mux to unlock. */
4225 /* Gotcha (which seems to be deliberate in FreeRTOS, according to
4226 http://www.freertos.org/FreeRTOS_Support_Forum_Archive/December_2012/freertos_PIC32_Bug_-_vTaskEnterCritical_6400806.html
4227 ) is that calling vTaskEnterCritical followed by vTaskExitCritical will leave the interrupts DISABLED when the scheduler
4228 is not running. Re-enabling the scheduler will re-enable the interrupts instead.
4230 For ESP32 FreeRTOS, vTaskEnterCritical implements both portENTER_CRITICAL and portENTER_CRITICAL_ISR.
4233 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
4235 #include "portmux_impl.h"
/* Enter a critical section guarded by the given spinlock 'mux'.  On the
   ESP32 SMP port this implements both portENTER_CRITICAL and
   portENTER_CRITICAL_ISR: interrupts are masked on this core, the
   cross-core spinlock is taken (recursively), and the original interrupt
   state is recorded in the TCB at the first nesting level so that
   vTaskExitCritical can restore it when the outermost level exits.  The
   debug build variant additionally records the caller's function/line. */
4237 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
4238 void vTaskEnterCritical( portMUX_TYPE *mux, const char *function, int line )
4240 void vTaskEnterCritical( portMUX_TYPE *mux )
4243 BaseType_t oldInterruptLevel=0;
4244 BaseType_t schedulerRunning = xSchedulerRunning;
4245 if( schedulerRunning != pdFALSE )
4247 //Interrupts may already be disabled (because we're doing this recursively) but we can't get the interrupt level after
4248 //vPortCPUAcquireMutex, because it also may mess with interrupts. Get it here first, then later figure out if we're nesting
4249 //and save for real there.
4250 oldInterruptLevel=portENTER_CRITICAL_NESTED();
4252 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
4253 vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT, function, line );
4255 vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT );
4258 if( schedulerRunning != pdFALSE )
4260 TCB_t *tcb = pxCurrentTCB[xPortGetCoreID()];
4261 BaseType_t newNesting = tcb->uxCriticalNesting + 1;
4262 tcb->uxCriticalNesting = newNesting;
4263 if( newNesting == 1 )
4265 //This is the first time we get called. Save original interrupt level.
4266 tcb->uxOldInterruptState = oldInterruptLevel;
4269 /* Original FreeRTOS comment, saved for reference:
4270 This is not the interrupt safe version of the enter critical
4271 function so assert() if it is being called from an interrupt
4272 context. Only API functions that end in "FromISR" can be used in an
4273 interrupt. Only assert if the critical nesting count is 1 to
4274 protect against recursive calls if the assert function also uses a
4275 critical section. */
4277 /* DISABLED in the esp32 port - because of SMP, For ESP32
4278 FreeRTOS, vTaskEnterCritical implements both
4279 portENTER_CRITICAL and portENTER_CRITICAL_ISR. vTaskEnterCritical
4280 has to be used in way more places than before, and some are called
4281 both from ISR as well as non-ISR code, thus we re-organized
4282 vTaskEnterCritical to also work in ISRs. */
4284 if( newNesting == 1 )
4286 portASSERT_IF_IN_ISR();
4293 mtCOVERAGE_TEST_MARKER();
4297 #endif /* portCRITICAL_NESTING_IN_TCB */
4298 /*-----------------------------------------------------------*/
4302 For ESP32 FreeRTOS, vTaskExitCritical implements both portEXIT_CRITICAL and portEXIT_CRITICAL_ISR.
4304 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
/* Leave a critical section guarded by 'mux': release the cross-core
   spinlock and, when the outermost nesting level on this core is exited,
   restore the interrupt state that vTaskEnterCritical saved in the TCB.
   Implements both portEXIT_CRITICAL and portEXIT_CRITICAL_ISR on the
   ESP32 port. */
4306 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
4307 void vTaskExitCritical( portMUX_TYPE *mux, const char *function, int line )
4309 void vTaskExitCritical( portMUX_TYPE *mux )
4312 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
4313 vPortCPUReleaseMutexIntsDisabled( mux, function, line );
4315 vPortCPUReleaseMutexIntsDisabled( mux );
4317 if( xSchedulerRunning != pdFALSE )
4319 TCB_t *tcb = pxCurrentTCB[xPortGetCoreID()];
4320 BaseType_t nesting = tcb->uxCriticalNesting;
4324 tcb->uxCriticalNesting = nesting;
/* Outermost level exited — re-enable interrupts as they were on entry. */
4328 portEXIT_CRITICAL_NESTED(tcb->uxOldInterruptState);
4332 mtCOVERAGE_TEST_MARKER();
4337 mtCOVERAGE_TEST_MARKER();
4342 mtCOVERAGE_TEST_MARKER();
4346 #endif /* portCRITICAL_NESTING_IN_TCB */
4347 /*-----------------------------------------------------------*/
4349 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
/* Copy a task name into pcBuffer, pad with spaces up to
   configMAX_TASK_NAME_LEN so stats tables line up in columns, then
   NUL-terminate.  Returns a pointer to the new end of string so the caller
   can continue appending.  Assumes pcBuffer has room for at least
   configMAX_TASK_NAME_LEN bytes and pcTaskName fits within it (strcpy is
   unbounded — task names are bounded at creation time). */
4351 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
4355 /* Start by copying the entire string. */
4356 strcpy( pcBuffer, pcTaskName );
4358 /* Pad the end of the string with spaces to ensure columns line up when
4360 for( x = strlen( pcBuffer ); x < ( configMAX_TASK_NAME_LEN - 1 ); x++ )
4362 pcBuffer[ x ] = ' ';
4366 pcBuffer[ x ] = 0x00;
4368 /* Return the new end of string. */
4369 return &( pcBuffer[ x ] );
4372 #endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
4373 /*-----------------------------------------------------------*/
4375 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
/* Format a human-readable table (name, state char, priority, stack
   high-water mark, task number, and optionally core ID) for every task
   into pcWriteBuffer.  The caller must supply a buffer large enough for
   all tasks — the sprintf writes are unbounded.  Allocates a temporary
   TaskStatus_t array from the heap and frees it before returning. */
4377 void vTaskList( char * pcWriteBuffer )
4379 TaskStatus_t *pxTaskStatusArray;
4380 volatile UBaseType_t uxArraySize, x;
4386 * This function is provided for convenience only, and is used by many
4387 * of the demo applications. Do not consider it to be part of the
4390 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
4391 * uxTaskGetSystemState() output into a human readable table that
4392 * displays task names, states and stack usage.
4394 * vTaskList() has a dependency on the sprintf() C library function that
4395 * might bloat the code size, use a lot of stack, and provide different
4396 * results on different platforms. An alternative, tiny, third party,
4397 * and limited functionality implementation of sprintf() is provided in
4398 * many of the FreeRTOS/Demo sub-directories in a file called
4399 * printf-stdarg.c (note printf-stdarg.c does not provide a full
4400 * snprintf() implementation!).
4402 * It is recommended that production systems call uxTaskGetSystemState()
4403 * directly to get access to raw stats data, rather than indirectly
4404 * through a call to vTaskList().
4408 /* Make sure the write buffer does not contain a string. */
4409 *pcWriteBuffer = 0x00;
4411 /* Take a snapshot of the number of tasks in case it changes while this
4412 function is executing. */
4413 uxArraySize = uxCurrentNumberOfTasks;
4415 /* Allocate an array index for each task. NOTE! if
4416 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4418 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );
4420 if( pxTaskStatusArray != NULL )
4422 /* Generate the (binary) data. */
4423 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );
4425 /* Create a human readable table from the binary data. */
4426 for( x = 0; x < uxArraySize; x++ )
/* Map the task state enum to its single display character. */
4428 switch( pxTaskStatusArray[ x ].eCurrentState )
4430 case eReady: cStatus = tskREADY_CHAR;
4433 case eBlocked: cStatus = tskBLOCKED_CHAR;
4436 case eSuspended: cStatus = tskSUSPENDED_CHAR;
4439 case eDeleted: cStatus = tskDELETED_CHAR;
4442 default: /* Should not get here, but it is included
4443 to prevent static checking errors. */
4448 /* Write the task name to the string, padding with spaces so it
4449 can be printed in tabular form more easily. */
4450 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4452 /* Write the rest of the string. */
4453 #ifdef CONFIG_FREERTOS_VTASKLIST_INCLUDE_COREID
4454 sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\t%hd\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber, ( int ) pxTaskStatusArray[ x ].xCoreID );
4456 sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber );
4458 pcWriteBuffer += strlen( pcWriteBuffer );
4461 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4462 is 0 then vPortFree() will be #defined to nothing. */
4463 vPortFree( pxTaskStatusArray );
4467 mtCOVERAGE_TEST_MARKER();
4471 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
4472 /*----------------------------------------------------------*/
4474 #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
/* Format a human-readable run-time statistics table (absolute run-time
   counter and percentage of total run time per task) into pcWriteBuffer.
   The caller must supply a buffer large enough for all tasks — the sprintf
   writes are unbounded.  Per-task time is divided by portNUM_PROCESSORS
   because the total run time is summed across all cores.  Allocates a
   temporary TaskStatus_t array from the heap and frees it on exit. */
4476 void vTaskGetRunTimeStats( char *pcWriteBuffer )
4478 TaskStatus_t *pxTaskStatusArray;
4479 volatile UBaseType_t uxArraySize, x;
4480 uint32_t ulTotalTime, ulStatsAsPercentage;
4482 #if( configUSE_TRACE_FACILITY != 1 )
4484 #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
4491 * This function is provided for convenience only, and is used by many
4492 * of the demo applications. Do not consider it to be part of the
4495 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
4496 * of the uxTaskGetSystemState() output into a human readable table that
4497 * displays the amount of time each task has spent in the Running state
4498 * in both absolute and percentage terms.
4500 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
4501 * function that might bloat the code size, use a lot of stack, and
4502 * provide different results on different platforms. An alternative,
4503 * tiny, third party, and limited functionality implementation of
4504 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
4505 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
4506 * a full snprintf() implementation!).
4508 * It is recommended that production systems call uxTaskGetSystemState()
4509 * directly to get access to raw stats data, rather than indirectly
4510 * through a call to vTaskGetRunTimeStats().
4513 /* Make sure the write buffer does not contain a string. */
4514 *pcWriteBuffer = 0x00;
4516 /* Take a snapshot of the number of tasks in case it changes while this
4517 function is executing. */
4518 uxArraySize = uxCurrentNumberOfTasks;
4520 /* Allocate an array index for each task. NOTE! If
4521 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4523 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );
4525 if( pxTaskStatusArray != NULL )
4527 /* Generate the (binary) data. */
4528 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
4530 /* For percentage calculations. */
4531 ulTotalTime /= 100UL;
4533 /* Avoid divide by zero errors. */
4534 if( ulTotalTime > 0 )
4536 /* Create a human readable table from the binary data. */
4537 for( x = 0; x < uxArraySize; x++ )
4539 /* What percentage of the total run time has the task used?
4540 This will always be rounded down to the nearest integer.
4541 ulTotalRunTimeDiv100 has already been divided by 100. */
4542 /* Also need to consider total run time of all */
4543 ulStatsAsPercentage = (pxTaskStatusArray[ x ].ulRunTimeCounter/portNUM_PROCESSORS)/ ulTotalTime;
4545 /* Write the task name to the string, padding with
4546 spaces so it can be printed in tabular form more
4548 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4550 if( ulStatsAsPercentage > 0UL )
4552 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4554 sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
4558 /* sizeof( int ) == sizeof( long ) so a smaller
4559 printf() library can be used. */
4560 sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage );
4566 /* If the percentage is zero here then the task has
4567 consumed less than 1% of the total run time. */
4568 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4570 sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
4574 /* sizeof( int ) == sizeof( long ) so a smaller
4575 printf() library can be used. */
4576 sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter );
4581 pcWriteBuffer += strlen( pcWriteBuffer );
4586 mtCOVERAGE_TEST_MARKER();
4589 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4590 is 0 then vPortFree() will be #defined to nothing. */
4591 vPortFree( pxTaskStatusArray );
4595 mtCOVERAGE_TEST_MARKER();
4599 #endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
4600 /*-----------------------------------------------------------*/
/* Atomically read the current task's event-list item value, then reset the
   item to its normal priority-derived value so it can be reused with
   queues and semaphores.  Returns the value that was read. */
4602 TickType_t uxTaskResetEventItemValue( void )
4604 TickType_t uxReturn;
4605 taskENTER_CRITICAL(&xTaskQueueMutex);
4606 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
4608 /* Reset the event list item to its normal value - so it can be used with
4609 queues and semaphores. */
4610 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4611 taskEXIT_CRITICAL(&xTaskQueueMutex);
4615 /*-----------------------------------------------------------*/
4617 #if ( configUSE_MUTEXES == 1 )
/* Increment the current task's held-mutex count (bookkeeping for priority
   inheritance/disinheritance) and return the current TCB as the
   mutex-holder handle.  pxCurrentTCB is NULL if no task has been created
   yet (e.g. xSemaphoreCreateMutex() called before the first task), in
   which case nothing is counted. */
4619 void *pvTaskIncrementMutexHeldCount( void )
4623 /* If xSemaphoreCreateMutex() is called before any tasks have been created
4624 then pxCurrentTCB will be NULL. */
4625 taskENTER_CRITICAL(&xTaskQueueMutex);
4626 if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
4628 ( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
4630 curTCB = pxCurrentTCB[ xPortGetCoreID() ];
4631 taskEXIT_CRITICAL(&xTaskQueueMutex);
4636 #endif /* configUSE_MUTEXES */
4637 /*-----------------------------------------------------------*/
4639 #if( configUSE_TASK_NOTIFICATIONS == 1 )
/* Lightweight counting-semaphore "take" built on the task notification
   value: block (for up to xTicksToWait ticks, or indefinitely with
   portMAX_DELAY when INCLUDE_vTaskSuspend is enabled) until the calling
   task's notification value is non-zero, then either clear the whole value
   (xClearCountOnExit != pdFALSE) or decrement it by one.  Returns the
   notification value as it was before being cleared/decremented (0 on
   timeout). */
4641 uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
4643 TickType_t xTimeToWake;
4646 taskENTER_CRITICAL(&xTaskQueueMutex);
4648 /* Only block if the notification count is not already non-zero. */
4649 if( pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue == 0UL )
4651 /* Mark this task as waiting for a notification. */
4652 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eWaitingNotification;
4654 if( xTicksToWait > ( TickType_t ) 0 )
4656 /* The task is going to block. First it must be removed
4657 from the ready list. */
4658 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
4660 /* The current task must be in a ready list, so there is
4661 no need to check, and the port reset macro can be called
4663 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
4667 mtCOVERAGE_TEST_MARKER();
4670 #if ( INCLUDE_vTaskSuspend == 1 )
4672 if( xTicksToWait == portMAX_DELAY )
4674 /* Add the task to the suspended task list instead
4675 of a delayed task list to ensure the task is not
4676 woken by a timing event. It will block
4678 traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
4679 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
4683 /* Calculate the time at which the task should be
4684 woken if no notification events occur. This may
4685 overflow but this doesn't matter, the scheduler will
4687 xTimeToWake = xTickCount + xTicksToWait;
4688 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
4691 #else /* INCLUDE_vTaskSuspend */
4693 /* Calculate the time at which the task should be
4694 woken if the event does not occur. This may
4695 overflow but this doesn't matter, the scheduler will
4697 xTimeToWake = xTickCount + xTicksToWait;
4698 prvAddCurrentTaskToDelayedList( xTimeToWake );
4700 #endif /* INCLUDE_vTaskSuspend */
4702 /* All ports are written to allow a yield in a critical
4703 section (some will yield immediately, others wait until the
4704 critical section exits) - but it is not something that
4705 application code should ever do. */
4706 portYIELD_WITHIN_API();
4710 mtCOVERAGE_TEST_MARKER();
4715 mtCOVERAGE_TEST_MARKER();
4718 taskEXIT_CRITICAL(&xTaskQueueMutex);
/* Execution resumes here after unblocking (or immediately when the value
   was already non-zero / no wait was requested). */
4720 taskENTER_CRITICAL(&xTaskQueueMutex);
4722 ulReturn = pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue;
4724 if( ulReturn != 0UL )
4726 if( xClearCountOnExit != pdFALSE )
4728 pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue = 0UL;
4732 ( pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue )--;
4737 mtCOVERAGE_TEST_MARKER();
4740 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eNotWaitingNotification;
4742 taskEXIT_CRITICAL(&xTaskQueueMutex);
4747 #endif /* configUSE_TASK_NOTIFICATIONS */
4748 /*-----------------------------------------------------------*/
4750 #if( configUSE_TASK_NOTIFICATIONS == 1 )
/* Wait (for up to xTicksToWait ticks) for a notification to be sent to the
   calling task.  ulBitsToClearOnEntry is cleared from the notification
   value before waiting, ulBitsToClearOnExit is cleared after a successful
   wait; *pulNotificationValue (if non-NULL) receives the value as it was
   before the exit-clear.  Returns pdTRUE if a notification was received
   (or was already pending), pdFALSE on timeout. */
4752 BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
4754 TickType_t xTimeToWake;
4757 taskENTER_CRITICAL(&xTaskQueueMutex);
4759 /* Only block if a notification is not already pending. */
4760 if( pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState != eNotified )
4762 /* Clear bits in the task's notification value as bits may get
4763 set by the notifying task or interrupt. This can be used to
4764 clear the value to zero. */
4765 pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue &= ~ulBitsToClearOnEntry;
4767 /* Mark this task as waiting for a notification. */
4768 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eWaitingNotification;
4770 if( xTicksToWait > ( TickType_t ) 0 )
4772 /* The task is going to block. First it must be removed
4773 from the ready list. */
4774 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
4776 /* The current task must be in a ready list, so there is
4777 no need to check, and the port reset macro can be called
4779 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
4783 mtCOVERAGE_TEST_MARKER();
4786 #if ( INCLUDE_vTaskSuspend == 1 )
4788 if( xTicksToWait == portMAX_DELAY )
4790 /* Add the task to the suspended task list instead
4791 of a delayed task list to ensure the task is not
4792 woken by a timing event. It will block
4794 traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
4795 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
4799 /* Calculate the time at which the task should be
4800 woken if no notification events occur. This may
4801 overflow but this doesn't matter, the scheduler will
4803 xTimeToWake = xTickCount + xTicksToWait;
4804 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
4807 #else /* INCLUDE_vTaskSuspend */
4809 /* Calculate the time at which the task should be
4810 woken if the event does not occur. This may
4811 overflow but this doesn't matter, the scheduler will
4813 xTimeToWake = xTickCount + xTicksToWait;
4814 prvAddCurrentTaskToDelayedList( xTimeToWake );
4816 #endif /* INCLUDE_vTaskSuspend */
4818 /* All ports are written to allow a yield in a critical
4819 section (some will yield immediately, others wait until the
4820 critical section exits) - but it is not something that
4821 application code should ever do. */
4822 portYIELD_WITHIN_API();
4826 mtCOVERAGE_TEST_MARKER();
4831 mtCOVERAGE_TEST_MARKER();
4834 taskEXIT_CRITICAL(&xTaskQueueMutex);
/* Execution resumes here after unblocking (notification or timeout). */
4836 taskENTER_CRITICAL(&xTaskQueueMutex);
4838 if( pulNotificationValue != NULL )
4840 /* Output the current notification value, which may or may not
4842 *pulNotificationValue = pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue;
4845 /* If eNotifyValue is set then either the task never entered the
4846 blocked state (because a notification was already pending) or the
4847 task unblocked because of a notification. Otherwise the task
4848 unblocked because of a timeout. */
4849 if( pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState == eWaitingNotification )
4851 /* A notification was not received. */
4856 /* A notification was already pending or a notification was
4857 received while the task was waiting. */
4858 pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue &= ~ulBitsToClearOnExit;
4862 pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eNotWaitingNotification;
4864 taskEXIT_CRITICAL(&xTaskQueueMutex);
4869 #endif /* configUSE_TASK_NOTIFICATIONS */
4870 /*-----------------------------------------------------------*/
4872 #if( configUSE_TASK_NOTIFICATIONS == 1 )
/* Send a notification to xTaskToNotify, updating its 32-bit notification
   value according to eAction (OR in bits, increment, overwrite,
   overwrite-only-if-not-pending, or no change).  If the target task is
   blocked waiting for a notification it is made ready; a yield is
   requested locally or on the target's core when the target should
   preempt.  Returns pdPASS except when eSetValueWithoutOverwrite finds a
   notification already pending. */
4874 BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction )
4877 eNotifyValue eOriginalNotifyState;
4878 BaseType_t xReturn = pdPASS;
4880 configASSERT( xTaskToNotify );
4881 pxTCB = ( TCB_t * ) xTaskToNotify;
4883 taskENTER_CRITICAL(&xTaskQueueMutex);
4885 eOriginalNotifyState = pxTCB->eNotifyState;
4887 pxTCB->eNotifyState = eNotified;
4892 pxTCB->ulNotifiedValue |= ulValue;
4896 ( pxTCB->ulNotifiedValue )++;
4899 case eSetValueWithOverwrite :
4900 pxTCB->ulNotifiedValue = ulValue;
4903 case eSetValueWithoutOverwrite :
4904 if( eOriginalNotifyState != eNotified )
4906 pxTCB->ulNotifiedValue = ulValue;
4910 /* The value could not be written to the task. */
4916 /* The task is being notified without its notify value being
4922 /* If the task is in the blocked state specifically to wait for a
4923 notification then unblock it now. */
4924 if( eOriginalNotifyState == eWaitingNotification )
4926 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
4927 prvAddTaskToReadyList( pxTCB );
4929 /* The task should not have been on an event list. */
4930 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
4932 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
4934 /* The notified task has a priority above the currently
4935 executing task so a yield is required. */
4936 portYIELD_WITHIN_API();
/* Target is pinned to the other core — ask that core to reschedule. */
4938 else if ( pxTCB->xCoreID != xPortGetCoreID() )
4940 taskYIELD_OTHER_CORE(pxTCB->xCoreID, pxTCB->uxPriority);
4944 mtCOVERAGE_TEST_MARKER();
4949 mtCOVERAGE_TEST_MARKER();
4952 taskEXIT_CRITICAL(&xTaskQueueMutex);
4957 #endif /* configUSE_TASK_NOTIFICATIONS */
4958 /*-----------------------------------------------------------*/
4960 #if( configUSE_TASK_NOTIFICATIONS == 1 )
/* ISR-safe variant of xTaskNotify().  Updates the target's notification
   value per eAction and unblocks it if it was waiting; when the target's
   core has its scheduler suspended, the task is parked on that core's
   pending-ready list instead of the ready list.  *pxHigherPriorityTaskWoken
   (if non-NULL) is set to pdTRUE when the caller should request a context
   switch before exiting the ISR.  Returns pdPASS except when
   eSetValueWithoutOverwrite finds a notification already pending. */
4962 BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken )
4965 eNotifyValue eOriginalNotifyState;
4966 BaseType_t xReturn = pdPASS;
4968 configASSERT( xTaskToNotify );
4970 pxTCB = ( TCB_t * ) xTaskToNotify;
4972 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
4975 eOriginalNotifyState = pxTCB->eNotifyState;
4977 pxTCB->eNotifyState = eNotified;
4982 pxTCB->ulNotifiedValue |= ulValue;
4986 ( pxTCB->ulNotifiedValue )++;
4989 case eSetValueWithOverwrite :
4990 pxTCB->ulNotifiedValue = ulValue;
4993 case eSetValueWithoutOverwrite :
4994 if( eOriginalNotifyState != eNotified )
4996 pxTCB->ulNotifiedValue = ulValue;
5000 /* The value could not be written to the task. */
5006 /* The task is being notified without its notify value being
5012 /* If the task is in the blocked state specifically to wait for a
5013 notification then unblock it now. */
5014 if( eOriginalNotifyState == eWaitingNotification )
5016 /* The task should not have been on an event list. */
5017 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
5019 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
5021 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
5022 prvAddTaskToReadyList( pxTCB );
5026 /* The delayed and ready lists cannot be accessed, so hold
5027 this task pending until the scheduler is resumed. */
5028 vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
5031 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
5033 /* The notified task has a priority above the currently
5034 executing task so a yield is required. */
5035 if( pxHigherPriorityTaskWoken != NULL )
5037 *pxHigherPriorityTaskWoken = pdTRUE;
/* Target is pinned to the other core — ask that core to reschedule. */
5040 else if ( pxTCB->xCoreID != xPortGetCoreID() )
5042 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
5046 mtCOVERAGE_TEST_MARKER();
5050 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
5055 #endif /* configUSE_TASK_NOTIFICATIONS */
5056 /*-----------------------------------------------------------*/
5058 #if( configUSE_TASK_NOTIFICATIONS == 1 )
/* ISR-safe counterpart to the "give" half of ulTaskNotifyTake(): increment
   the target task's notification value (counting-semaphore semantics) and
   unblock it if it was waiting for a notification.  When the target core's
   scheduler is suspended the task is parked on the pending-ready list.
   *pxHigherPriorityTaskWoken (if non-NULL) is set to pdTRUE when the
   caller should request a context switch before exiting the ISR. */
5060 void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
5063 eNotifyValue eOriginalNotifyState;
5065 configASSERT( xTaskToNotify );
5068 pxTCB = ( TCB_t * ) xTaskToNotify;
5070 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
5072 eOriginalNotifyState = pxTCB->eNotifyState;
5073 pxTCB->eNotifyState = eNotified;
5075 /* 'Giving' is equivalent to incrementing a count in a counting
5077 ( pxTCB->ulNotifiedValue )++;
5079 /* If the task is in the blocked state specifically to wait for a
5080 notification then unblock it now. */
5081 if( eOriginalNotifyState == eWaitingNotification )
5083 /* The task should not have been on an event list. */
5084 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
5086 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
5088 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
5089 prvAddTaskToReadyList( pxTCB );
5093 /* The delayed and ready lists cannot be accessed, so hold
5094 this task pending until the scheduler is resumed. */
5095 vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
5098 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
5100 /* The notified task has a priority above the currently
5101 executing task so a yield is required. */
5102 if( pxHigherPriorityTaskWoken != NULL )
5104 *pxHigherPriorityTaskWoken = pdTRUE;
/* Target is pinned to the other core — ask that core to reschedule. */
5107 else if ( pxTCB->xCoreID != xPortGetCoreID() )
5109 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
5113 mtCOVERAGE_TEST_MARKER();
5117 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
5120 #endif /* configUSE_TASK_NOTIFICATIONS */
5122 #if ( configENABLE_TASK_SNAPSHOT == 1 )
/* Fill one TaskSnapshot_t entry (TCB pointer, top of stack, end of stack)
   for pxTCB; a NULL TCB is skipped.  Which field is the "end" of the stack
   depends on the port's stack growth direction. */
5123 static void prvTaskGetSnapshot( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, TCB_t *pxTCB )
5125 if (pxTCB == NULL) {
5128 pxTaskSnapshotArray[ *uxTask ].pxTCB = pxTCB;
5129 pxTaskSnapshotArray[ *uxTask ].pxTopOfStack = (StackType_t *)pxTCB->pxTopOfStack;
5130 #if( portSTACK_GROWTH < 0 )
5132 pxTaskSnapshotArray[ *uxTask ].pxEndOfStack = pxTCB->pxEndOfStack;
5136 pxTaskSnapshotArray[ *uxTask ].pxEndOfStack = pxTCB->pxStack;
/* Walks every task on pxList and appends a snapshot for each into
pxTaskSnapshotArray (via prvTaskGetSnapshot), advancing *uxTask as it goes.
Stops early once *uxTask reaches uxArraySize so the array is never overrun.
Uses the standard tasks.c two-pointer list walk: remember the first owner,
then iterate until the walk comes back around to it.
NOTE(review): elided listing - the braces of the do/while and the break/return
taken when *uxTask >= uxArraySize are not visible here. */
5142 static void prvTaskGetSnapshotsFromList( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, const UBaseType_t uxArraySize, List_t *pxList )
5144 	TCB_t *pxNextTCB, *pxFirstTCB;
/* Only walk non-empty lists; listGET_OWNER_OF_NEXT_ENTRY on an empty list
would have nothing to return. */
5146 	if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
5148 		listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );
/* Capacity guard: bail out before writing past the caller's array. */
5151 			if( *uxTask >= uxArraySize )
5154 			listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
5155 			prvTaskGetSnapshot( pxTaskSnapshotArray, uxTask, pxNextTCB );
5156 		} while( pxNextTCB != pxFirstTCB );
5160 		mtCOVERAGE_TEST_MARKER();
/* Fills pxTaskSnapshotArray with a snapshot of every task the kernel knows
about - ready (all priorities), delayed (both delay lists), pending-ready
(per core), and, when the options are compiled in, deleted-but-not-yet-freed
and suspended tasks.  *pxTcbSz receives sizeof(TCB_t) so an external debugger
(e.g. a core-dump or GDB-stub consumer) can parse the raw TCBs.
NOTE(review): elided listing - the do/while braces around the ready-list
walk, the loop-variable decrement, the return statement and the #endif lines
are not visible here; presumably the function returns uxTask (the number of
snapshots written) - confirm against the full source.  Also note no critical
section is visible in this extract: callers are expected to invoke this when
the scheduler is halted (e.g. from a panic/core-dump path) - confirm. */
5164 UBaseType_t uxTaskGetSnapshotAll( TaskSnapshot_t * const pxTaskSnapshotArray, const UBaseType_t uxArraySize, UBaseType_t * const pxTcbSz )
5166 	UBaseType_t uxTask = 0, i = 0;
/* Export the TCB size for external consumers of the snapshot. */
5169 	*pxTcbSz = sizeof(TCB_t);
5170 	/* Fill in an TaskStatus_t structure with information on each
5171 	task in the Ready state. */
/* Ready lists are walked from highest priority down to tskIDLE_PRIORITY
(the decrement of i is on an elided line). */
5172 	i = configMAX_PRIORITIES;
5176 		prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &( pxReadyTasksLists[ i ] ) );
5177 	} while( i > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
5179 	/* Fill in an TaskStatus_t structure with information on each
5180 	task in the Blocked state. */
5181 	prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, ( List_t * ) pxDelayedTaskList );
5182 	prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, ( List_t * ) pxOverflowDelayedTaskList );
/* One pending-ready list per core on this SMP port; stop once the caller's
array is full. */
5183 	for (i = 0; i < portNUM_PROCESSORS; i++) {
5184 		if( uxTask >= uxArraySize )
5186 		prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &( xPendingReadyList[ i ] ) );
5189 	#if( INCLUDE_vTaskDelete == 1 )
/* Tasks deleted by another task but whose memory the idle task has not yet
reclaimed still have valid TCBs - include them. */
5191 		prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &xTasksWaitingTermination );
5195 	#if ( INCLUDE_vTaskSuspend == 1 )
5197 		prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &xSuspendedTaskList );
5205 #ifdef FREERTOS_MODULE_TEST
5206 #include "tasks_test_access_functions.h"