2 FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.
5 VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
7 This file is part of the FreeRTOS distribution.
9 FreeRTOS is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License (version 2) as published by the
11 Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
13 ***************************************************************************
14 >>! NOTE: The modification to the GPL is included to allow you to !<<
15 >>! distribute a combined work that includes FreeRTOS without being !<<
16 >>! obliged to provide the source code for proprietary components !<<
17 >>! outside of the FreeRTOS kernel. !<<
18 ***************************************************************************
20 FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
21 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
22 FOR A PARTICULAR PURPOSE. Full license text is available on the following
23 link: http://www.freertos.org/a00114.html
25 ***************************************************************************
27 * FreeRTOS provides completely free yet professionally developed, *
28 * robust, strictly quality controlled, supported, and cross *
29 * platform software that is more than just the market leader, it *
30 * is the industry's de facto standard. *
32 * Help yourself get started quickly while simultaneously helping *
33 * to support the FreeRTOS project by purchasing a FreeRTOS *
34 * tutorial book, reference manual, or both: *
35 * http://www.FreeRTOS.org/Documentation *
37 ***************************************************************************
39 http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
40 the FAQ page "My application does not run, what could be wrong?". Have you
41 defined configASSERT()?
43 http://www.FreeRTOS.org/support - In return for receiving this top quality
44 embedded software for free we request you assist our global community by
45 participating in the support forum.
47 http://www.FreeRTOS.org/training - Investing in training allows your team to
48 be as productive as possible as early as possible. Now you can receive
49 FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
50 Ltd, and the world's leading authority on the world's leading RTOS.
52 http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
53 including FreeRTOS+Trace - an indispensable productivity tool, a DOS
54 compatible FAT file system, and our tiny thread aware UDP/IP stack.
56 http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
57 Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
59 http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
60 Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
61 licenses offer ticketed support, indemnification and commercial middleware.
63 http://www.SafeRTOS.com - High Integrity Systems also provide a safety
64 engineered and independently SIL3 certified version for use in safety and
65 mission critical applications that require provable dependability.
70 /* Standard includes. */
74 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
75 all the API functions to use the MPU wrappers. That should only be done when
76 task.h is included from an application file. */
77 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
79 #include "rom/ets_sys.h"
80 #include "esp_newlib.h"
81 #include "esp_panic.h"
83 /* FreeRTOS includes. */
87 #include "StackMacros.h"
88 #include "portmacro.h"
91 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the
92 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
93 header files above, but not in this file, in order to generate the correct
94 privileged Vs unprivileged linkage and placement. */
95 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
97 /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
98 functions but without including stdio.h here. */
99 #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
100 /* At the bottom of this file are two optional functions that can be used
101 to generate human readable text from the raw data generated by the
102 uxTaskGetSystemState() function. Note the formatting functions are provided
103 for convenience only, and are NOT considered part of the kernel. */
105 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
107 /* Sanity check the configuration. */
108 #if configUSE_TICKLESS_IDLE != 0
109 #if INCLUDE_vTaskSuspend != 1
/* Tickless idle stops the tick and relies on the suspend machinery being built in. */
110 #error INCLUDE_vTaskSuspend must be set to 1 if configUSE_TICKLESS_IDLE is not set to 0
111 #endif /* INCLUDE_vTaskSuspend */
112 #endif /* configUSE_TICKLESS_IDLE */
115 * Defines the size, in bytes, of the stack allocated to the idle task.
/* The idle task stack size is taken from the application's FreeRTOSConfig.h. */
117 #define tskIDLE_STACK_SIZE configIDLE_TASK_STACK_SIZE
#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define taskYIELD_IF_USING_PREEMPTION()
#else
	/* Preemption is in use - yield from within the API so a newly readied
	higher priority task runs immediately. */
	#define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
130 /* Value that can be assigned to the eNotifyState member of the TCB. */
/* NOTE(review): the enclosing "typedef enum" header, any further states and
the closing "} eNotifyValue;" are not visible in this extraction - confirm
against the full file before editing. */
133 eNotWaitingNotification = 0, /*< Task has no notification pending and is not waiting for one. */
134 eWaitingNotification, /*< Task is blocked waiting for a notification to arrive. */
138 /* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using
139 dynamically allocated RAM, in which case when any task is deleted it is known
140 that both the task's stack and TCB need to be freed. Sometimes the
141 FreeRTOSConfig.h settings only allow a task to be created using statically
142 allocated RAM, in which case when any task is deleted it is known that neither
143 the task's stack or TCB should be freed. Sometimes the FreeRTOSConfig.h
144 settings allow a task to be created using either statically or dynamically
145 allocated RAM, in which case a member of the TCB is used to record whether the
146 stack and/or TCB were allocated statically or dynamically, so when a task is
147 deleted the RAM that was allocated dynamically is freed again and no attempt is
148 made to free the RAM that was allocated statically.
149 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a
150 task to be created using either statically or dynamically allocated RAM. Note
151 that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with
152 a statically allocated stack and a dynamically allocated TCB. */
153 #define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE ( ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) || ( portUSING_MPU_WRAPPERS == 1 ) )
/* Values stored in a TCB's ucStaticallyAllocated member to record how the
stack/TCB pair was obtained, so task deletion knows which parts may be freed. */
154 #define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
155 #define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
156 #define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
/* NOTE(review): this extraction is missing interior lines of the structure
(the opening brace, several closing #endif lines and the terminating
"} tskTCB;") - compare with the full source before applying any change. */
159 * Task control block. A task control block (TCB) is allocated for each task,
160 * and stores task state information, including a pointer to the task's context
161 * (the task's run time environment, including register values)
163 typedef struct tskTaskControlBlock
165 volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
167 #if ( portUSING_MPU_WRAPPERS == 1 )
168 xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
171 ListItem_t xGenericListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
172 ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
173 UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
174 StackType_t *pxStack; /*< Points to the start of the stack. */
175 char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
176 BaseType_t xCoreID; /*< Core this task is pinned to */
177 /* If this moves around (other than pcTaskName size changes), please change the define in xtensa_vectors.S as well. */
178 #if ( portSTACK_GROWTH > 0 || configENABLE_TASK_SNAPSHOT == 1 )
179 StackType_t *pxEndOfStack; /*< Points to the end of the stack on architectures where the stack grows up from low memory. */
182 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
183 UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
184 uint32_t uxOldInterruptState; /*< Interrupt state before the outer taskEnterCritical was called */
187 #if ( configUSE_TRACE_FACILITY == 1 )
188 UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
189 UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
192 #if ( configUSE_MUTEXES == 1 )
193 UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
194 UBaseType_t uxMutexesHeld; /*< Count of mutexes currently held by the task. */
197 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
198 TaskHookFunction_t pxTaskTag; /*< Application-defined task tag/hook function. */
201 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
202 void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; /*< Per-task thread-local storage pointer array. */
203 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
204 TlsDeleteCallbackFunction_t pvThreadLocalStoragePointersDelCallback[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; /*< Optional per-slot cleanup callbacks - call site not visible here, confirm in full file. */
208 #if ( configGENERATE_RUN_TIME_STATS == 1 )
209 uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
212 #if ( configUSE_NEWLIB_REENTRANT == 1 )
213 /* Allocate a Newlib reent structure that is specific to this task.
214 Note Newlib support has been included by popular demand, but is not
215 used by the FreeRTOS maintainers themselves. FreeRTOS is not
216 responsible for resulting newlib operation. User must be familiar with
217 newlib and must provide system-wide implementations of the necessary
218 stubs. Be warned that (at the time of writing) the current newlib design
219 implements a system-wide malloc() that must be provided with locks. */
220 struct _reent xNewLib_reent;
223 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
224 volatile uint32_t ulNotifiedValue; /*< Value accompanying a task notification. */
225 volatile eNotifyValue eNotifyState; /*< Current notification state (see eNotifyValue above). */
228 /* See the comments above the definition of
229 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
230 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
231 uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
236 /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
237 below to enable the use of older kernel aware debuggers. */
238 typedef tskTCB TCB_t;
240 #if __GNUC_PREREQ(4, 6)
241 _Static_assert(sizeof(StaticTask_t) == sizeof(TCB_t), "StaticTask_t != TCB_t");
245 * Some kernel aware debuggers require the data the debugger needs access to to
246 * be global, rather than file scope.
/* NOTE(review): the "#define static" and matching #endif that normally follow
this #ifdef are not visible in this extraction - confirm against full file. */
248 #ifdef portREMOVE_STATIC_QUALIFIER
252 /*lint -e956 A manual analysis and inspection has been used to determine which
253 static variables must be declared volatile. */
/* One "currently running task" pointer per core (SMP: portNUM_PROCESSORS cores). */
255 PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB[ portNUM_PROCESSORS ] = { NULL };
257 /* Lists for ready and blocked tasks. --------------------*/
258 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
259 PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
260 PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
261 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
262 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
263 PRIVILEGED_DATA static List_t xPendingReadyList[ portNUM_PROCESSORS ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
/* NOTE(review): the #endif lines that close the three conditional blocks
below are not visible in this extraction. */
265 #if ( INCLUDE_vTaskDelete == 1 )
267 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. Protected by xTaskQueueMutex.*/
268 PRIVILEGED_DATA static volatile UBaseType_t uxTasksDeleted = ( UBaseType_t ) 0U;
272 #if ( INCLUDE_vTaskSuspend == 1 )
274 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
278 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
280 PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle[portNUM_PROCESSORS] = {NULL}; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
284 /* Other file private variables. --------------------------------*/
/* Scheduler bookkeeping: task count, tick count, top ready priority,
running/suspended state, pended ticks and per-core yield flags. */
285 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
286 PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) 0U;
287 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
288 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
289 PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
290 PRIVILEGED_DATA static volatile BaseType_t xYieldPending[portNUM_PROCESSORS] = {pdFALSE};
291 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
292 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
293 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = portMAX_DELAY;
295 /* Context switches are held pending while the scheduler is suspended. Also,
296 interrupts must not manipulate the xGenericListItem of a TCB, or any of the
297 lists the xGenericListItem can be referenced from, if the scheduler is suspended.
298 If an interrupt needs to unblock a task while the scheduler is suspended then it
299 moves the task's event list item into the xPendingReadyList, ready for the
300 kernel to move the task from the pending ready list into the real ready list
301 when the scheduler is unsuspended. The pending ready list itself can only be
302 accessed from a critical section. */
303 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
305 /* For now, we use just one mux for all the critical sections. ToDo: give everything a bit more granularity;
306 that could improve performance by not needlessly spinning in spinlocks for unrelated resources. */
307 PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
308 PRIVILEGED_DATA static portMUX_TYPE xTickCountMutex = portMUX_INITIALIZER_UNLOCKED;
/* NOTE(review): the #endif closing this run-time-stats block is not visible
in this extraction. */
310 #if ( configGENERATE_RUN_TIME_STATS == 1 )
312 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime[portNUM_PROCESSORS] = {0U}; /*< Holds the value of a timer/counter the last time a task was switched in on a particular core. */
313 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
318 // per-CPU flags indicating that we are doing context switch, it is used by apptrace and sysview modules
319 // in order to avoid calls of vPortYield from traceTASK_SWITCHED_IN/OUT when waiting
320 // for locks to be free or for host to read full trace buffer
321 PRIVILEGED_DATA static volatile BaseType_t xSwitchingContext[ portNUM_PROCESSORS ] = { pdFALSE };
325 /* Debugging and trace facilities private variables and macros. ------------*/
328 * The value used to fill the stack of a task when the task is created. This
329 * is used purely for checking the high water mark for tasks.
331 #define tskSTACK_FILL_BYTE ( 0xa5U )
334 * Macros used by vListTask to indicate which state a task is in.
/* Single-character task-state codes emitted by the task-list formatting code. */
336 #define tskBLOCKED_CHAR ( 'B' )
337 #define tskREADY_CHAR ( 'R' )
338 #define tskDELETED_CHAR ( 'D' )
339 #define tskSUSPENDED_CHAR ( 'S' )
341 /*-----------------------------------------------------------*/
/* NOTE(review): several brace-only continuation lines of the macros below are
not visible in this extraction - do not edit these macro bodies without
consulting the full file. */
344 #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
346 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
347 performed in a generic way that is not optimised to any particular
348 microcontroller architecture. */
/* Track the highest priority that has a ready task; only ever raised here and
lowered lazily by taskSELECT_HIGHEST_PRIORITY_TASK(). */
350 /* uxTopReadyPriority holds the priority of the highest priority ready
352 #define taskRECORD_READY_PRIORITY( uxPriority ) \
354 if( ( uxPriority ) > uxTopReadyPriority ) \
356 uxTopReadyPriority = ( uxPriority ); \
358 } /* taskRECORD_READY_PRIORITY */
360 /*-----------------------------------------------------------*/
362 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
364 /* Find the highest priority queue that contains ready tasks. */ \
365 while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopReadyPriority ] ) ) ) \
367 configASSERT( uxTopReadyPriority ); \
368 --uxTopReadyPriority; \
371 /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
372 the same priority get an equal share of the processor time. */ \
373 listGET_OWNER_OF_NEXT_ENTRY( xTaskGetCurrentTaskHandle(), &( pxReadyTasksLists[ uxTopReadyPriority ] ) ); \
374 } /* taskSELECT_HIGHEST_PRIORITY_TASK */
376 /*-----------------------------------------------------------*/
378 /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
379 they are only required when a port optimised method of task selection is
381 #define taskRESET_READY_PRIORITY( uxPriority )
382 #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
384 #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
386 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
387 performed in a way that is tailored to the particular microcontroller
388 architecture being used. */
390 /* A port optimised version is provided. Call the port defined macros. */
391 #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )
393 /*-----------------------------------------------------------*/
395 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
397 UBaseType_t uxTopPriority; \
399 /* Find the highest priority queue that contains ready tasks. */ \
400 portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
401 configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
402 listGET_OWNER_OF_NEXT_ENTRY( xTaskGetCurrentTaskHandle(), &( pxReadyTasksLists[ uxTopPriority ] ) ); \
403 } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
405 /*-----------------------------------------------------------*/
407 /* A port optimised version is provided, call it only if the TCB being reset
408 is being referenced from a ready list. If it is referenced from a delayed
409 or suspended list then it won't be in a ready list. */
410 #define taskRESET_READY_PRIORITY( uxPriority ) \
412 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
414 portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
418 #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
420 /*-----------------------------------------------------------*/
/* Swap the two delayed-task lists when the tick counter wraps, then recompute
the next unblock time. The outgoing list is asserted empty first.
NOTE(review): the macro's enclosing braces and the pxTemp declaration are on
lines not visible in this extraction. */
422 /* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
424 #define taskSWITCH_DELAYED_LISTS() \
428 /* The delayed tasks list should be empty when the lists are switched. */ \
429 configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \
431 pxTemp = pxDelayedTaskList; \
432 pxDelayedTaskList = pxOverflowDelayedTaskList; \
433 pxOverflowDelayedTaskList = pxTemp; \
435 prvResetNextTaskUnblockTime(); \
438 /*-----------------------------------------------------------*/
441 * Place the task represented by pxTCB into the appropriate ready list for
442 * the task. It is inserted at the end of the list.
444 #define prvAddTaskToReadyList( pxTCB ) \
445 traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
446 taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
447 vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xGenericListItem ) )
/* Same as prvAddTaskToReadyList() but emits the "re-added" trace event. */
449 * Place the task represented by pxTCB which has been in a ready list before
450 * into the appropriate ready list for the task.
451 * It is inserted at the end of the list.
453 #define prvReaddTaskToReadyList( pxTCB ) \
454 traceREADDED_TASK_TO_READY_STATE( pxTCB ); \
455 taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
456 vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xGenericListItem ) )
457 /*-----------------------------------------------------------*/
/* Evaluates to true when a task pinned to core 'cpuid' (or unpinned via
tskNO_AFFINITY) may run on the calling core. The argument is parenthesised
for macro hygiene so expression arguments expand safely. */
#define tskCAN_RUN_HERE( cpuid ) ( ( ( cpuid ) == xPortGetCoreID() ) || ( ( cpuid ) == tskNO_AFFINITY ) )
462 * Several functions take an TaskHandle_t parameter that can optionally be NULL,
463 * where NULL is used to indicate that the handle of the currently executing
464 * task should be used in place of the parameter. This macro simply checks to
465 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
/* A NULL handle means "the calling task": substitute the current task handle. */
467 /* ToDo: See if this still works for multicore. */
468 #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? ( TCB_t * ) xTaskGetCurrentTaskHandle() : ( TCB_t * ) ( pxHandle ) )
470 /* The item value of the event list item is normally used to hold the priority
471 of the task to which it belongs (coded to allow it to be held in reverse
472 priority order). However, it is occasionally borrowed for other purposes. It
473 is important its value is not updated due to a task priority change while it is
474 being used for another purpose. The following bit definition is used to inform
475 the scheduler that the value should not be changed - in which case it is the
476 responsibility of whichever module is using the value to ensure it gets set back
477 to its original value when it is released. */
/* Top bit of the tick type flags that the event list item value has been
borrowed for another purpose and must not be overwritten by priority changes
(see the explanatory comment above). */
#if configUSE_16_BIT_TICKS == 1
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x8000U
#else
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x80000000UL
#endif
484 /* Callback function prototypes. --------------------------*/
/* NOTE(review): the #endif lines closing these conditional blocks are not
visible in this extraction. */
485 #if configCHECK_FOR_STACK_OVERFLOW > 0
/* Application-supplied hook invoked when a stack overflow is detected. */
486 extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
489 #if configUSE_TICK_HOOK > 0
/* Application-supplied hook called from the tick machinery. */
490 extern void vApplicationTickHook( void );
/* ESP-IDF specific tick hook - call site is in the port layer (not visible here). */
492 extern void esp_vApplicationTickHook( void );
494 #if portFIRST_TASK_HOOK
/* Port hook invoked for the first task - TODO confirm call site in full file. */
495 extern void vPortFirstTaskHook(TaskFunction_t taskfn);
499 /* File private functions. --------------------------------*/
502 * Utility task that simply returns pdTRUE if the task referenced by xTask is
503 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
504 * is in any other state.
506 * Caller must hold xTaskQueueMutex before calling this function.
508 #if ( INCLUDE_vTaskSuspend == 1 )
509 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
510 #endif /* INCLUDE_vTaskSuspend */
513 * Utility to ready all the lists used by the scheduler. This is called
514 * automatically upon the creation of the first task.
516 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
519 * The idle task, which as all tasks is implemented as a never ending loop.
520 * The idle task is automatically created and added to the ready lists upon
521 * creation of the first user task.
523 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
524 * language extensions. The equivalent prototype for this function is:
526 * void prvIdleTask( void *pvParameters );
529 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
532 * Utility to free all memory allocated by the scheduler to hold a TCB,
533 * including the stack pointed to by the TCB.
535 * This does not free memory allocated by the task itself (i.e. memory
536 * allocated by calls to pvPortMalloc from within the tasks application code).
538 #if ( INCLUDE_vTaskDelete == 1 )
540 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
545 * Used only by the idle task. This checks to see if anything has been placed
546 * in the list of tasks waiting to be deleted. If so the task is cleaned up
547 * and its TCB deleted.
549 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
552 * The currently executing task is entering the Blocked state. Add the task to
553 * either the current or the overflow delayed task list.
555 static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID, const TickType_t xTimeToWake ) PRIVILEGED_FUNCTION;
558 * Fills an TaskStatus_t structure with information on each task that is
559 * referenced from the pxList list (which may be a ready list, a delayed list,
560 * a suspended list, etc.).
562 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
563 * NORMAL APPLICATION CODE.
565 #if ( configUSE_TRACE_FACILITY == 1 )
567 static UBaseType_t prvListTaskWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
572 * When a task is created, the stack of the task is filled with a known value.
573 * This function determines the 'high water mark' of the task stack by
574 * determining how much of the stack remains at the original preset value.
576 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
578 static uint32_t prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
583 * Return the amount of time, in ticks, that will pass before the kernel will
584 * next move a task from the Blocked state to the Running state.
586 * This conditional compilation should use inequality to 0, not equality to 1.
587 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
588 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
589 * set to a value other than 1.
591 #if ( configUSE_TICKLESS_IDLE != 0 )
593 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
598 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
599 * will exit the Blocked state.
601 static void prvResetNextTaskUnblockTime( void );
603 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
606 * Helper function used to pad task names with spaces when printing out
607 * human readable tables of task information.
609 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName );
614 * Called after a Task_t structure has been allocated either statically or
615 * dynamically to fill in the structure's members.
617 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
618 const char * const pcName,
619 const uint32_t ulStackDepth,
620 void * const pvParameters,
621 UBaseType_t uxPriority,
622 TaskHandle_t * const pxCreatedTask,
624 const MemoryRegion_t * const xRegions, const BaseType_t xCoreID) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
627 * Called after a new task has been created and initialised to place the task
628 * under the control of the scheduler.
630 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, const BaseType_t xCoreID ) PRIVILEGED_FUNCTION;
634 /*-----------------------------------------------------------*/
637 * This routine tries to send an interrupt to another core if needed to make it execute a task
638 * of higher priority. We try to figure out if needed first by inspecting the pxTCB of the
639 * other CPU first. Specifically for Xtensa, we can do this because pxTCB is an atomic pointer. It
640 * is possible that it is inaccurate because the other CPU just did a task switch, but in that case
641 * at most a superfluous interrupt is generated.
/* NOTE(review): the opening brace, the declaration of loop index 'i', the
"else" introducing the no-affinity path and the closing braces are on lines
not visible in this extraction - verify against the full file. */
643 void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
645 TCB_t *curTCB = pxCurrentTCB[xCoreID]; /* Snapshot of the TCB currently running on the target core. */
648 if (xCoreID != tskNO_AFFINITY) {
/* Pinned task: interrupt its core only if that core runs something lower priority. */
649 if ( curTCB->uxPriority < uxPriority ) {
650 vPortYieldOtherCore( xCoreID );
655 /* The task has no affinity. See if we can find a CPU to put it on.*/
656 for (i=0; i<portNUM_PROCESSORS; i++) {
657 if (i != xPortGetCoreID() && pxCurrentTCB[ i ]->uxPriority < uxPriority)
659 vPortYieldOtherCore( i );
666 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
/*
 * Create a task from caller-supplied TCB and stack buffers, pinned to core
 * xCoreID (or tskNO_AFFINITY), and return its handle via xReturn.
 * NOTE(review): the declaration of pxNewTCB, several braces, the NULL-buffer
 * else branch and the final "return xReturn;" are on lines not visible in
 * this extraction - verify against the full file.
 */
668 TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pxTaskCode,
669 const char * const pcName,
670 const uint32_t ulStackDepth,
671 void * const pvParameters,
672 UBaseType_t uxPriority,
673 StackType_t * const puxStackBuffer,
674 StaticTask_t * const pxTaskBuffer,
675 const BaseType_t xCoreID )
678 TaskHandle_t xReturn;
/* Both buffers must live in memory the port considers valid for a TCB/stack,
and the requested core must exist or be tskNO_AFFINITY. */
680 configASSERT( portVALID_TCB_MEM(pxTaskBuffer) );
681 configASSERT( portVALID_STACK_MEM(puxStackBuffer) );
682 configASSERT( (xCoreID>=0 && xCoreID<portNUM_PROCESSORS) || (xCoreID==tskNO_AFFINITY) );
684 if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
686 /* The memory used for the task's TCB and stack are passed into this
687 function - use them. */
688 pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
689 pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
691 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
693 /* Tasks can be created statically or dynamically, so note this
694 task was created statically in case the task is later deleted. */
695 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
697 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
699 prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID );
700 prvAddNewTaskToReadyList( pxNewTCB, pxTaskCode, xCoreID );
710 #endif /* SUPPORT_STATIC_ALLOCATION */
711 /*-----------------------------------------------------------*/
713 #if( portUSING_MPU_WRAPPERS == 1 )
/*
 * Create an MPU-restricted task from a TaskParameters_t definition. The stack
 * is caller-supplied; the TCB is allocated dynamically here.
 * NOTE(review): pxNewTCB's declaration, several braces/else paths, the
 * success assignment to xReturn and the final "return xReturn;" are on lines
 * not visible in this extraction - verify against the full file.
 */
715 BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
718 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
720 configASSERT( pxTaskDefinition->puxStackBuffer );
722 if( pxTaskDefinition->puxStackBuffer != NULL )
724 /* Allocate space for the TCB. Where the memory comes from depends
725 on the implementation of the port malloc function and whether or
726 not static allocation is being used. */
727 pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );
729 if( pxNewTCB != NULL )
731 /* Store the stack location in the TCB. */
732 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
734 /* Tasks can be created statically or dynamically, so note
735 this task had a statically allocated stack in case it is
736 later deleted. The TCB was allocated dynamically. */
737 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
739 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
740 pxTaskDefinition->pcName,
741 pxTaskDefinition->usStackDepth,
742 pxTaskDefinition->pvParameters,
743 pxTaskDefinition->uxPriority,
744 pxCreatedTask, pxNewTCB,
745 pxTaskDefinition->xRegions,
/* MPU-restricted tasks are added without core affinity. */
748 prvAddNewTaskToReadyList( pxNewTCB, pxTaskDefinition->pvTaskCode, tskNO_AFFINITY );
756 #endif /* portUSING_MPU_WRAPPERS */
757 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
759 #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
/* Create a task with a dynamically allocated TCB and stack, pinned to
   xCoreID (or tskNO_AFFINITY to allow either core). On success the new
   task is added to the ready list; on allocation failure xReturn is set
   to errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY (final return is elided). */
761 BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pxTaskCode,
762 const char * const pcName,
763 const uint32_t usStackDepth,
764 void * const pvParameters,
765 UBaseType_t uxPriority,
766 TaskHandle_t * const pxCreatedTask,
767 const BaseType_t xCoreID )
772 /* If the stack grows down then allocate the stack then the TCB so the stack
773 does not grow into the TCB. Likewise if the stack grows up then allocate
774 the TCB then the stack. */
775 #if( portSTACK_GROWTH > 0 )
777 /* Allocate space for the TCB. Where the memory comes from depends on
778 the implementation of the port malloc function and whether or not static
779 allocation is being used. */
780 pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );
782 if( pxNewTCB != NULL )
784 /* Allocate space for the stack used by the task being created.
785 The base of the stack memory stored in the TCB so the task can
786 be deleted later if required. */
787 pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
789 if( pxNewTCB->pxStack == NULL )
791 /* Could not allocate the stack. Delete the allocated TCB. */
792 vPortFree( pxNewTCB );
797 #else /* portSTACK_GROWTH */
799 StackType_t *pxStack;
801 /* Allocate space for the stack used by the task being created. */
802 pxStack = ( StackType_t * ) pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
804 if( pxStack != NULL )
806 /* Allocate space for the TCB. */
807 pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) ); /*lint !e961 MISRA exception as the casts are only redundant for some paths. */
809 if( pxNewTCB != NULL )
811 /* Store the stack location in the TCB. */
812 pxNewTCB->pxStack = pxStack;
816 /* The stack cannot be used as the TCB was not created. Free
818 vPortFree( pxStack );
826 #endif /* portSTACK_GROWTH */
/* Both allocations succeeded — initialise and schedule the task. */
828 if( pxNewTCB != NULL )
830 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
832 /* Tasks can be created statically or dynamically, so note this
833 task was created dynamically in case it is later deleted. */
834 pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
836 #endif /* configSUPPORT_STATIC_ALLOCATION */
838 prvInitialiseNewTask( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL, xCoreID );
839 prvAddNewTaskToReadyList( pxNewTCB, pxTaskCode, xCoreID );
844 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
850 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
851 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
/* Common TCB/stack initialisation used by every task-creation path.
   Fills the stack with a known pattern (debug builds), computes the
   aligned top-of-stack, copies the name, clamps the priority, initialises
   the list items, TLS pointers, notification state and (optionally) the
   newlib reent structure, then calls pxPortInitialiseStack() to fake an
   interrupted context so the task can be "resumed" for the first time.
   The pxNewTCB parameter declaration falls in an elided region. */
853 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
854 const char * const pcName,
855 const uint32_t ulStackDepth,
856 void * const pvParameters,
857 UBaseType_t uxPriority,
858 TaskHandle_t * const pxCreatedTask,
860 const MemoryRegion_t * const xRegions, const BaseType_t xCoreID ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
862 StackType_t *pxTopOfStack;
865 #if( portUSING_MPU_WRAPPERS == 1 )
866 /* Should the task be created in privileged mode? */
867 BaseType_t xRunPrivileged;
868 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
870 xRunPrivileged = pdTRUE;
874 xRunPrivileged = pdFALSE;
/* The privilege bit is encoded in the priority argument; strip it so the
   remaining value is a plain priority. */
876 uxPriority &= ~portPRIVILEGE_BIT;
877 #endif /* portUSING_MPU_WRAPPERS == 1 */
879 /* Avoid dependency on memset() if it is not required. */
880 #if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
882 /* Fill the stack with a known value to assist debugging. */
883 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
885 #endif /* ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) ) */
887 /* Calculate the top of stack address. This depends on whether the stack
888 grows from high memory to low (as per the 80x86) or vice versa.
889 portSTACK_GROWTH is used to make the result positive or negative as required
891 #if( portSTACK_GROWTH < 0 )
/* Descending stack: top is the last element, rounded DOWN to the port's
   byte-alignment boundary. */
893 pxTopOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
894 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. */
896 /* Check the alignment of the calculated top of stack is correct. */
897 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
898 #if ( configENABLE_TASK_SNAPSHOT == 1 )
900 /* need stack end for core dumps */
901 pxNewTCB->pxEndOfStack = pxTopOfStack;
905 #else /* portSTACK_GROWTH */
/* Ascending stack: top is the base; the buffer itself must already be
   correctly aligned. */
907 pxTopOfStack = pxNewTCB->pxStack;
909 /* Check the alignment of the stack buffer is correct. */
910 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
912 /* The other extreme of the stack space is required if stack checking is
914 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
916 #endif /* portSTACK_GROWTH */
918 /* Store the task name in the TCB. */
919 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
921 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
923 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
924 configMAX_TASK_NAME_LEN characters just in case the memory after the
925 string is not accessible (extremely unlikely). */
926 if( pcName[ x ] == 0x00 )
932 mtCOVERAGE_TEST_MARKER();
936 /* Ensure the name string is terminated in the case that the string length
937 was greater or equal to configMAX_TASK_NAME_LEN. */
938 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
940 /* This is used as an array index so must ensure it's not too large. First
941 remove the privilege bit if one is present. */
942 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
/* Clamp out-of-range priorities to the highest valid one rather than
   indexing past the ready-lists array. */
944 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
948 mtCOVERAGE_TEST_MARKER();
951 pxNewTCB->uxPriority = uxPriority;
952 pxNewTCB->xCoreID = xCoreID;
953 #if ( configUSE_MUTEXES == 1 )
955 pxNewTCB->uxBasePriority = uxPriority;
956 pxNewTCB->uxMutexesHeld = 0;
958 #endif /* configUSE_MUTEXES */
960 vListInitialiseItem( &( pxNewTCB->xGenericListItem ) );
961 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
963 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
964 back to the containing TCB from a generic item in a list. */
965 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xGenericListItem ), pxNewTCB );
967 /* Event lists are always in priority order. */
968 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
969 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
971 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
973 pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
975 #endif /* portCRITICAL_NESTING_IN_TCB */
977 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
979 pxNewTCB->pxTaskTag = NULL;
981 #endif /* configUSE_APPLICATION_TASK_TAG */
983 #if ( configGENERATE_RUN_TIME_STATS == 1 )
985 pxNewTCB->ulRunTimeCounter = 0UL;
987 #endif /* configGENERATE_RUN_TIME_STATS */
989 #if ( portUSING_MPU_WRAPPERS == 1 )
991 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
995 /* Avoid compiler warning about unreferenced parameter. */
/* Clear all thread-local-storage pointers (and their delete callbacks,
   an ESP-IDF extension) so stale pointers are never dereferenced. */
1000 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
1002 for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
1004 pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
1005 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1)
1006 pxNewTCB->pvThreadLocalStoragePointersDelCallback[ x ] = NULL;
1012 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
1014 pxNewTCB->ulNotifiedValue = 0;
1015 pxNewTCB->eNotifyState = eNotWaitingNotification;
1019 #if ( configUSE_NEWLIB_REENTRANT == 1 )
1021 /* Initialise this task's Newlib reent structure. */
1022 esp_reent_init(&pxNewTCB->xNewLib_reent);
1026 #if( INCLUDE_xTaskAbortDelay == 1 )
1028 pxNewTCB->ucDelayAborted = pdFALSE;
1032 /* Initialize the TCB stack to look as if the task was already running,
1033 but had been interrupted by the scheduler. The return address is set
1034 to the start of the task function. Once the stack has been initialised
1035 the top of stack variable is updated. */
1036 #if( portUSING_MPU_WRAPPERS == 1 )
1038 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1040 #else /* portUSING_MPU_WRAPPERS */
1042 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
1044 #endif /* portUSING_MPU_WRAPPERS */
1046 if( ( void * ) pxCreatedTask != NULL )
1048 /* Pass the handle out in an anonymous way. The handle can be used to
1049 change the created task's priority, delete the created task, etc.*/
1050 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
1054 mtCOVERAGE_TEST_MARKER();
1057 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
/* Make a freshly initialised task schedulable (SMP-aware ESP-IDF variant).
   Picks a target core for tskNO_AFFINITY tasks, installs the task as the
   "current" task of an idle core if appropriate, adds it to the ready
   list, and yields (locally or cross-core) if the new task should preempt
   a running one. Runs under xTaskQueueMutex. */
1059 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, BaseType_t xCoreID )
1061 TCB_t *curTCB, *tcb0, *tcb1;
1063 /* Assure that xCoreID is valid or we'll have an out-of-bounds on pxCurrentTCB
1064 You will assert here if e.g. you only have one CPU enabled in menuconfig and
1065 are trying to start a task on core 1. */
1066 configASSERT( xCoreID == tskNO_AFFINITY || xCoreID < portNUM_PROCESSORS);
1068 /* Ensure interrupts don't access the task lists while the lists are being
1070 taskENTER_CRITICAL(&xTaskQueueMutex);
1072 uxCurrentNumberOfTasks++;
1074 // Determine which core this task starts on
1075 if ( xCoreID == tskNO_AFFINITY )
1077 if ( portNUM_PROCESSORS == 1 )
1083 // if the task has no affinity, put it on either core if nothing is currently scheduled there. Failing that,
1084 // put it on the core where it will preempt the lowest priority running task. If neither of these are true,
1085 // queue it on the currently running core.
1086 tcb0 = pxCurrentTCB[0];
1087 tcb1 = pxCurrentTCB[1];
/* NOTE(review): the tcb0 == NULL branch falls in an elided region; the
   visible chain handles: core 1 idle, new task beats core 0's task and
   core 0's task is the lower-priority of the two, new task beats core 1. */
1092 else if ( tcb1 == NULL )
1096 else if ( tcb0->uxPriority < pxNewTCB->uxPriority && tcb0->uxPriority < tcb1->uxPriority )
1100 else if ( tcb1->uxPriority < pxNewTCB->uxPriority )
1106 xCoreID = xPortGetCoreID(); // Both CPU have higher priority tasks running on them, so this won't run yet
1111 // If nothing is running on this core, put the new task there now
1112 if( pxCurrentTCB[ xCoreID ] == NULL )
1114 /* There are no other tasks, or all the other tasks are in
1115 the suspended state - make this the current task. */
1116 pxCurrentTCB[ xCoreID ] = pxNewTCB;
1118 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
1120 #if portFIRST_TASK_HOOK
1121 if ( xPortGetCoreID() == 0 ) {
1122 vPortFirstTaskHook(pxTaskCode);
1124 #endif /* configFIRST_TASK_HOOK */
1125 /* This is the first task to be created so do the preliminary
1126 initialisation required. We will not recover if this call
1127 fails, but we will report the failure. */
1128 prvInitialiseTaskLists();
1132 mtCOVERAGE_TEST_MARKER();
1137 /* If the scheduler is not already running, make this task the
1138 current task if it is the highest priority task to be created
1140 if( xSchedulerRunning == pdFALSE )
1142 /* Scheduler isn't running yet. We need to determine on which CPU to run this task.
1143 Schedule now if either nothing is scheduled yet or we can replace a task of lower prio. */
1144 if ( pxCurrentTCB[xCoreID] == NULL || pxCurrentTCB[xCoreID]->uxPriority <= pxNewTCB->uxPriority )
1146 pxCurrentTCB[xCoreID] = pxNewTCB;
1151 mtCOVERAGE_TEST_MARKER();
1157 #if ( configUSE_TRACE_FACILITY == 1 )
1159 /* Add a counter into the TCB for tracing only. */
1160 pxNewTCB->uxTCBNumber = uxTaskNumber;
1162 #endif /* configUSE_TRACE_FACILITY */
1163 traceTASK_CREATE( pxNewTCB );
1165 prvAddTaskToReadyList( pxNewTCB );
1167 portSETUP_TCB( pxNewTCB );
1170 taskEXIT_CRITICAL(&xTaskQueueMutex);
/* Post-creation preemption check: re-enter the critical section because
   pxCurrentTCB may have changed since the exit above. */
1172 if( xSchedulerRunning != pdFALSE )
1174 taskENTER_CRITICAL(&xTaskQueueMutex);
1176 curTCB = pxCurrentTCB[ xCoreID ];
1177 /* Scheduler is running. If the created task is of a higher priority than an executing task
1178 then it should run now.
1180 if( curTCB == NULL || curTCB->uxPriority < pxNewTCB->uxPriority )
1182 if( xCoreID == xPortGetCoreID() )
1184 taskYIELD_IF_USING_PREEMPTION();
1187 taskYIELD_OTHER_CORE(xCoreID, pxNewTCB->uxPriority);
1192 mtCOVERAGE_TEST_MARKER();
1194 taskEXIT_CRITICAL(&xTaskQueueMutex);
1198 mtCOVERAGE_TEST_MARKER();
1201 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
1203 #if ( INCLUDE_vTaskDelete == 1 )
/* Delete a task (NULL => the calling task). The task is moved to
   xTasksWaitingTermination; the idle task later frees its TCB/stack.
   If the deleted task is currently running on either core, force a
   yield on that core so it stops executing. */
1204 void vTaskDelete( TaskHandle_t xTaskToDelete )
1207 taskENTER_CRITICAL(&xTaskQueueMutex);
1209 /* If null is passed in here then it is the calling task that is
1211 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
1213 /* Remove task from the ready list and place in the termination list.
1214 This will stop the task from be scheduled. The idle task will check
1215 the termination list and free up any memory allocated by the
1216 scheduler for the TCB and stack. */
1217 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1219 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1223 mtCOVERAGE_TEST_MARKER();
1226 /* Is the task waiting on an event also? */
1227 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1229 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1233 mtCOVERAGE_TEST_MARKER();
1236 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xGenericListItem ) );
1238 /* Increment the ucTasksDeleted variable so the idle task knows
1239 there is a task that has been deleted and that it should therefore
1240 check the xTasksWaitingTermination list. */
1243 /* Increment the uxTaskNumberVariable also so kernel aware debuggers
1244 can detect that the task lists need re-generating. */
1247 traceTASK_DELETE( pxTCB );
1249 taskEXIT_CRITICAL(&xTaskQueueMutex);
1251 /* Force a reschedule if it is the currently running task that has just
1253 if( xSchedulerRunning != pdFALSE )
1255 //No mux; no harm done if this misfires. The deleted task won't get scheduled anyway.
1256 if( pxTCB == pxCurrentTCB[ xPortGetCoreID() ] )
1258 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
1260 /* The pre-delete hook is primarily for the Windows simulator,
1261 in which Windows specific clean up operations are performed,
1262 after which it is not possible to yield away from this task -
1263 hence xYieldPending is used to latch that a context switch is
1265 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[xPortGetCoreID()] );
1266 portYIELD_WITHIN_API();
1268 else if ( portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !xPortGetCoreID() ] )
1270 /* if task is running on the other CPU, force a yield on that CPU to take it off */
1271 vPortYieldOtherCore( !xPortGetCoreID() );
1275 /* Reset the next expected unblock time in case it referred to
1276 the task that has just been deleted. */
1277 taskENTER_CRITICAL(&xTaskQueueMutex);
1279 prvResetNextTaskUnblockTime();
1281 taskEXIT_CRITICAL(&xTaskQueueMutex);
1286 #endif /* INCLUDE_vTaskDelete */
1287 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
1289 #if ( INCLUDE_vTaskDelayUntil == 1 )
1291 /* ToDo: Make this multicore-compatible. */
/* Block the calling task until the absolute tick time
   (*pxPreviousWakeTime + xTimeIncrement), giving a fixed-frequency
   periodic wakeup that does not drift. Handles tick-counter overflow:
   delay only when wake time and tick count overflow consistently.
   Updates *pxPreviousWakeTime for the next call.
   NOTE(review): this ESP-IDF port uses taskENTER_CRITICAL instead of
   vTaskSuspendAll (commented out), so xAlreadyYielded stays pdFALSE and
   the final portYIELD_WITHIN_API() always runs when the function exits
   the critical section. */
1292 void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
1294 TickType_t xTimeToWake;
1295 BaseType_t xAlreadyYielded=pdFALSE, xShouldDelay = pdFALSE;
1297 configASSERT( pxPreviousWakeTime );
1298 configASSERT( ( xTimeIncrement > 0U ) );
1299 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
1301 taskENTER_CRITICAL(&xTaskQueueMutex);
1302 // vTaskSuspendAll();
1304 /* Minor optimisation. The tick count cannot change in this
1306 // portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
1307 const TickType_t xConstTickCount = xTickCount;
1308 // portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
1310 /* Generate the tick time at which the task wants to wake. */
1311 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
1313 if( xConstTickCount < *pxPreviousWakeTime )
1315 /* The tick count has overflowed since this function was
1316 lasted called. In this case the only time we should ever
1317 actually delay is if the wake time has also overflowed,
1318 and the wake time is greater than the tick time. When this
1319 is the case it is as if neither time had overflowed. */
1320 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
1322 xShouldDelay = pdTRUE;
1326 mtCOVERAGE_TEST_MARKER();
1331 /* The tick time has not overflowed. In this case we will
1332 delay if either the wake time has overflowed, and/or the
1333 tick time is less than the wake time. */
1334 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
1336 xShouldDelay = pdTRUE;
1340 mtCOVERAGE_TEST_MARKER();
1344 /* Update the wake time ready for the next call. */
1345 *pxPreviousWakeTime = xTimeToWake;
1347 if( xShouldDelay != pdFALSE )
1349 traceTASK_DELAY_UNTIL();
1351 /* Remove the task from the ready list before adding it to the
1352 blocked list as the same list item is used for both lists. */
1353 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1355 /* The current task must be in a ready list, so there is
1356 no need to check, and the port reset macro can be called
1358 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
1362 mtCOVERAGE_TEST_MARKER();
1365 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
1369 mtCOVERAGE_TEST_MARKER();
1372 // xAlreadyYielded = xTaskResumeAll();
1373 taskEXIT_CRITICAL(&xTaskQueueMutex);
1375 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1376 have put ourselves to sleep. */
1377 if( xAlreadyYielded == pdFALSE )
1379 portYIELD_WITHIN_API();
1383 mtCOVERAGE_TEST_MARKER();
1387 #endif /* INCLUDE_vTaskDelayUntil */
1388 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
1390 #if ( INCLUDE_vTaskDelay == 1 )
/* Block the calling task for xTicksToDelay ticks relative to "now"
   (subject to drift, unlike vTaskDelayUntil). A zero delay only forces
   a reschedule via the trailing portYIELD_WITHIN_API(). */
1391 void vTaskDelay( const TickType_t xTicksToDelay )
1393 TickType_t xTimeToWake;
1394 BaseType_t xAlreadyYielded = pdFALSE;
1396 /* A delay time of zero just forces a reschedule. */
1397 if( xTicksToDelay > ( TickType_t ) 0U )
1399 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
1400 taskENTER_CRITICAL(&xTaskQueueMutex);
1401 // vTaskSuspendAll();
1405 /* A task that is removed from the event list while the
1406 scheduler is suspended will not get placed in the ready
1407 list or removed from the blocked list until the scheduler
1410 This task cannot be in an event list as it is the currently
1413 /* Calculate the time to wake - this may overflow but this is
1415 // portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
1416 xTimeToWake = xTickCount + xTicksToDelay;
1417 // portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
1419 /* We must remove ourselves from the ready list before adding
1420 ourselves to the blocked list as the same list item is used for
1422 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1424 /* The current task must be in a ready list, so there is
1425 no need to check, and the port reset macro can be called
1427 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
1431 mtCOVERAGE_TEST_MARKER();
1433 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
1435 // xAlreadyYielded = xTaskResumeAll();
1436 taskEXIT_CRITICAL(&xTaskQueueMutex);
1440 mtCOVERAGE_TEST_MARKER();
1443 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1444 have put ourselves to sleep. */
1445 if( xAlreadyYielded == pdFALSE )
1447 portYIELD_WITHIN_API();
1451 mtCOVERAGE_TEST_MARKER();
1455 #endif /* INCLUDE_vTaskDelay */
1456 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
1458 #if ( INCLUDE_eTaskGetState == 1 )
/* Return the state (Running/Ready/Blocked/Suspended/Deleted) of xTask by
   inspecting which kernel list its generic list item currently sits in.
   SMP-aware: a task running on EITHER core reports as running. */
1459 eTaskState eTaskGetState( TaskHandle_t xTask )
1462 List_t *pxStateList;
1463 const TCB_t * const pxTCB = ( TCB_t * ) xTask;
1464 TCB_t * curTCBcurCore = xTaskGetCurrentTaskHandle();
1465 TCB_t * curTCBothrCore = xTaskGetCurrentTaskHandleForCPU(!xPortGetCoreID()); //Returns NULL if Unicore
1467 configASSERT( pxTCB );
1469 if( pxTCB == curTCBcurCore || pxTCB == curTCBothrCore )
1471 /* The task calling this function is querying its own state. */
1476 taskENTER_CRITICAL(&xTaskQueueMutex);
1478 pxStateList = ( List_t * ) listLIST_ITEM_CONTAINER( &( pxTCB->xGenericListItem ) );
1480 taskEXIT_CRITICAL(&xTaskQueueMutex);
1482 if( ( pxStateList == pxDelayedTaskList ) || ( pxStateList == pxOverflowDelayedTaskList ) )
1484 /* The task being queried is referenced from one of the Blocked
1489 #if ( INCLUDE_vTaskSuspend == 1 )
1490 else if( pxStateList == &xSuspendedTaskList )
1492 /* The task being queried is referenced from the suspended
1493 list. Is it genuinely suspended or is it block
1495 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
/* Not on any event list => a true vTaskSuspend() suspension,
   not an indefinite block. */
1497 eReturn = eSuspended;
1506 #if ( INCLUDE_vTaskDelete == 1 )
1507 else if( pxStateList == &xTasksWaitingTermination )
1509 /* The task being queried is referenced from the deleted
1515 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
1517 /* If the task is not in any other state, it must be in the
1518 Ready (including pending ready) state. */
1524 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1526 #endif /* INCLUDE_eTaskGetState */
1527 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
1529 #if ( INCLUDE_uxTaskPriorityGet == 1 )
/* Return the (possibly inherited) priority of xTask, or of the calling
   task when xTask is NULL. Reads under the task-queue lock so the value
   is consistent at the time of the read. */
1530 UBaseType_t uxTaskPriorityGet( TaskHandle_t xTask )
1533 UBaseType_t uxReturn;
1535 taskENTER_CRITICAL(&xTaskQueueMutex);
1537 /* If null is passed in here then we are changing the
1538 priority of the calling function. */
1539 pxTCB = prvGetTCBFromHandle( xTask );
1540 uxReturn = pxTCB->uxPriority;
1542 taskEXIT_CRITICAL(&xTaskQueueMutex);
1547 #endif /* INCLUDE_uxTaskPriorityGet */
1548 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
1550 #if ( INCLUDE_uxTaskPriorityGet == 1 )
/* ISR-safe variant of uxTaskPriorityGet(): identical logic but uses the
   ISR-capable critical-section macros. */
1551 UBaseType_t uxTaskPriorityGetFromISR( TaskHandle_t xTask )
1554 UBaseType_t uxReturn;
1556 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
1558 /* If null is passed in here then it is the priority of the calling
1559 task that is being queried. */
1560 pxTCB = prvGetTCBFromHandle( xTask );
1561 uxReturn = pxTCB->uxPriority;
1563 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
1568 #endif /* INCLUDE_uxTaskPriorityGet */
1569 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
1571 #if ( INCLUDE_vTaskPrioritySet == 1 )
/* Change the priority of xTask (NULL => calling task). Mutex-aware: when
   the task holds an inherited priority only the base priority changes.
   Re-files the task in the correct ready list and yields (locally or to
   the other core) when the change means a different task should run. */
1573 void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
1576 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
1577 BaseType_t xYieldRequired = pdFALSE;
1579 configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
1581 /* Ensure the new priority is valid. */
1582 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
1584 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
1588 mtCOVERAGE_TEST_MARKER();
1591 taskENTER_CRITICAL(&xTaskQueueMutex);
1593 /* If null is passed in here then it is the priority of the calling
1594 task that is being changed. */
1595 pxTCB = prvGetTCBFromHandle( xTask );
1597 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
/* With mutexes enabled, compare against the BASE priority so a
   temporarily-inherited priority doesn't mask a real change. */
1599 #if ( configUSE_MUTEXES == 1 )
1601 uxCurrentBasePriority = pxTCB->uxBasePriority;
1605 uxCurrentBasePriority = pxTCB->uxPriority;
1609 if( uxCurrentBasePriority != uxNewPriority )
1611 /* The priority change may have readied a task of higher
1612 priority than the calling task. */
1613 if( uxNewPriority > uxCurrentBasePriority )
1615 if( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] )
1617 /* The priority of a task other than the currently
1618 running task is being raised. Is the priority being
1619 raised above that of the running task? */
1620 if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
1622 xYieldRequired = pdTRUE;
1624 else if ( pxTCB->xCoreID != xPortGetCoreID() )
/* Task is pinned to the other core — let that core decide
   whether the raise warrants a preemption there. */
1626 taskYIELD_OTHER_CORE( pxTCB->xCoreID, uxNewPriority );
1630 mtCOVERAGE_TEST_MARKER();
1635 /* The priority of the running task is being raised,
1636 but the running task must already be the highest
1637 priority task able to run so no yield is required. */
1640 else if( pxTCB == pxCurrentTCB[ xPortGetCoreID() ] )
1642 /* Setting the priority of the running task down means
1643 there may now be another task of higher priority that
1644 is ready to execute. */
1645 xYieldRequired = pdTRUE;
1649 /* Setting the priority of any other task down does not
1650 require a yield as the running task must be above the
1651 new priority of the task being modified. */
1654 /* Remember the ready list the task might be referenced from
1655 before its uxPriority member is changed so the
1656 taskRESET_READY_PRIORITY() macro can function correctly. */
1657 uxPriorityUsedOnEntry = pxTCB->uxPriority;
1659 #if ( configUSE_MUTEXES == 1 )
1661 /* Only change the priority being used if the task is not
1662 currently using an inherited priority. */
1663 if( pxTCB->uxBasePriority == pxTCB->uxPriority )
1665 pxTCB->uxPriority = uxNewPriority;
1669 mtCOVERAGE_TEST_MARKER();
1672 /* The base priority gets set whatever. */
1673 pxTCB->uxBasePriority = uxNewPriority;
1677 pxTCB->uxPriority = uxNewPriority;
1681 /* Only reset the event list item value if the value is not
1682 being used for anything else. */
1683 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
1685 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1689 mtCOVERAGE_TEST_MARKER();
1692 /* If the task is in the blocked or suspended list we need do
1693 nothing more than change it's priority variable. However, if
1694 the task is in a ready list it needs to be removed and placed
1695 in the list appropriate to its new priority. */
1696 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xGenericListItem ) ) != pdFALSE )
1698 /* The task is currently in its ready list - remove before adding
1699 it to it's new ready list. As we are in a critical section we
1700 can do this even if the scheduler is suspended. */
1701 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1703 /* It is known that the task is in its ready list so
1704 there is no need to check again and the port level
1705 reset macro can be called directly. */
1706 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
1710 mtCOVERAGE_TEST_MARKER();
1712 prvReaddTaskToReadyList( pxTCB );
1716 mtCOVERAGE_TEST_MARKER();
1719 if( xYieldRequired == pdTRUE )
1721 taskYIELD_IF_USING_PREEMPTION();
1725 mtCOVERAGE_TEST_MARKER();
1728 /* Remove compiler warning about unused variables when the port
1729 optimised task selection is not being used. */
1730 ( void ) uxPriorityUsedOnEntry;
1733 taskEXIT_CRITICAL(&xTaskQueueMutex);
1736 #endif /* INCLUDE_vTaskPrioritySet */
1737 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
1739 #if ( INCLUDE_vTaskSuspend == 1 )
/* Suspend xTaskToSuspend (NULL => calling task): remove it from the
   ready/delayed lists and any event list, and park it on
   xSuspendedTaskList. If the calling task suspended itself, yield; if
   the scheduler isn't running yet, repair pxCurrentTCB instead. */
1740 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
1745 taskENTER_CRITICAL(&xTaskQueueMutex);
1747 /* If null is passed in here then it is the running task that is
1749 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
1751 traceTASK_SUSPEND( pxTCB );
1753 /* Remove task from the ready/delayed list and place in the
1755 if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
1757 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1761 mtCOVERAGE_TEST_MARKER();
1764 /* Is the task waiting on an event also? */
1765 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1767 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1771 mtCOVERAGE_TEST_MARKER();
1773 traceMOVED_TASK_TO_SUSPENDED_LIST(pxTCB);
1774 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) );
/* Snapshot the current task before leaving the critical section so the
   self-suspension check below uses a consistent value. */
1775 curTCB = pxCurrentTCB[ xPortGetCoreID() ];
1777 taskEXIT_CRITICAL(&xTaskQueueMutex);
1779 if( pxTCB == curTCB )
1781 if( xSchedulerRunning != pdFALSE )
1783 /* The current task has just been suspended. */
1784 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
1785 portYIELD_WITHIN_API();
1789 /* The scheduler is not running, but the task that was pointed
1790 to by pxCurrentTCB has just been suspended and pxCurrentTCB
1791 must be adjusted to point to a different task. */
1792 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks )
1794 /* No other tasks are ready, so set pxCurrentTCB back to
1795 NULL so when the next task is created pxCurrentTCB will
1796 be set to point to it no matter what its relative priority
1798 taskENTER_CRITICAL(&xTaskQueueMutex);
1799 pxCurrentTCB[ xPortGetCoreID() ] = NULL;
1800 taskEXIT_CRITICAL(&xTaskQueueMutex);
1804 vTaskSwitchContext();
1810 if( xSchedulerRunning != pdFALSE )
1812 /* A task other than the currently running task was suspended,
1813 reset the next expected unblock time in case it referred to the
1814 task that is now in the Suspended state. */
1815 taskENTER_CRITICAL(&xTaskQueueMutex);
1817 prvResetNextTaskUnblockTime();
1819 taskEXIT_CRITICAL(&xTaskQueueMutex);
1823 mtCOVERAGE_TEST_MARKER();
1828 #endif /* INCLUDE_vTaskSuspend */
1829 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
1831 #if ( INCLUDE_vTaskSuspend == 1 )
/* Return pdTRUE only if xTask is genuinely suspended: it is on the
   suspended list, is NOT already pending a resume from an ISR, and its
   event list item is not attached to any event list (i.e. it is not
   merely blocked with an infinite timeout). Caller must hold
   xTaskQueueMutex. */
1832 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
1834 BaseType_t xReturn = pdFALSE;
1835 const TCB_t * const pxTCB = ( TCB_t * ) xTask;
1837 /* Accesses xPendingReadyList so must be called from a critical
1838 section (caller is required to hold xTaskQueueMutex). */
1840 /* It does not make sense to check if the calling task is suspended. */
1841 configASSERT( xTask );
1843 /* Is the task being resumed actually in the suspended list? */
1844 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) ) != pdFALSE )
1846 /* Has the task already been resumed from within an ISR? */
1847 if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ xPortGetCoreID() ], &( pxTCB->xEventListItem ) ) == pdFALSE )
1849 /* Is it in the suspended list because it is in the Suspended
1850 state, or because is is blocked with no timeout? */
1851 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE )
1857 mtCOVERAGE_TEST_MARKER();
1862 mtCOVERAGE_TEST_MARKER();
1867 mtCOVERAGE_TEST_MARKER();
1871 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1873 #endif /* INCLUDE_vTaskSuspend */
1874 /*-----------------------------------------------------------*/
/* NOTE(review): elided excerpt — code left byte-identical; comments only. */
1876 #if ( INCLUDE_vTaskSuspend == 1 )
/* Resume a previously suspended task: move it back to the ready list and
   preempt if it now outranks the running task — locally when it can run
   on this core, otherwise by signalling its pinned core. Resuming a task
   that is not suspended is a silent no-op. */
1878 void vTaskResume( TaskHandle_t xTaskToResume )
1880 TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
1882 /* It does not make sense to resume the calling task. */
1883 configASSERT( xTaskToResume );
1885 taskENTER_CRITICAL(&xTaskQueueMutex);
1886 /* The parameter cannot be NULL as it is impossible to resume the
1887 currently executing task. */
1888 if( ( pxTCB != NULL ) && ( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] ) )
1891 if( prvTaskIsTaskSuspended( pxTCB ) == pdTRUE )
1893 traceTASK_RESUME( pxTCB );
1895 /* As we are in a critical section we can access the ready
1896 lists even if the scheduler is suspended. */
1897 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
1898 prvAddTaskToReadyList( pxTCB );
1900 /* We may have just resumed a higher priority task. */
1901 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
1903 /* This yield may not cause the task just resumed to run,
1904 but will leave the lists in the correct state for the
1906 taskYIELD_IF_USING_PREEMPTION();
1908 else if( pxTCB->xCoreID != xPortGetCoreID() )
1910 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
1914 mtCOVERAGE_TEST_MARKER();
1919 mtCOVERAGE_TEST_MARKER();
1925 mtCOVERAGE_TEST_MARKER();
1927 taskEXIT_CRITICAL(&xTaskQueueMutex);
1930 #endif /* INCLUDE_vTaskSuspend */
1932 /*-----------------------------------------------------------*/
1934 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
/* ISR-safe counterpart of vTaskResume().  Returns pdTRUE when the caller
should request a context switch before exiting the ISR (a task of equal or
higher priority than the one running on this core was made ready). */
1936 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
1938 BaseType_t xYieldRequired = pdFALSE;
1939 TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
1941 configASSERT( xTaskToResume );
1943 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
1946 if( prvTaskIsTaskSuspended( pxTCB ) == pdTRUE )
1948 traceTASK_RESUME_FROM_ISR( pxTCB );
1950 /* Check the ready lists can be accessed. */
1951 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
1953 /* Ready lists can be accessed so move the task from the
1954 suspended list to the ready list directly. */
1955 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
1956 prvAddTaskToReadyList( pxTCB );
1958 if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
1960 xYieldRequired = pdTRUE;
/* Pinned to the other core: trigger a cross-core yield instead of
returning pdTRUE for this core. */
1962 else if ( pxTCB->xCoreID != xPortGetCoreID() )
1964 taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
1968 mtCOVERAGE_TEST_MARKER();
1973 /* The delayed or ready lists cannot be accessed so the task
1974 is held in the pending ready list until the scheduler is
1976 vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
1981 mtCOVERAGE_TEST_MARKER();
1984 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
1986 return xYieldRequired;
1989 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
1990 /*-----------------------------------------------------------*/
/* Create the per-core idle tasks (and, if enabled, the timer service task),
then hand control to the port layer via xPortStartScheduler().  Does not
return unless the kernel could not be started or xTaskEndScheduler() is
called.  NOTE(review): the declarations of 'i' and 'xReturn' are on lines
elided from this view. */
1992 void vTaskStartScheduler( void )
1997 /* Add the per-core idle tasks at the lowest priority. */
1998 for ( i=0; i<portNUM_PROCESSORS; i++) {
1999 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2001 /* Create the idle task, storing its handle in xIdleTaskHandle so it can
2002 be returned by the xTaskGetIdleTaskHandle() function. */
2003 xReturn = xTaskCreatePinnedToCore( prvIdleTask, "IDLE", tskIDLE_STACK_SIZE, ( void * ) NULL, ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), &xIdleTaskHandle[i], i ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2007 /* Create the idle task without storing its handle. */
2008 xReturn = xTaskCreatePinnedToCore( prvIdleTask, "IDLE", tskIDLE_STACK_SIZE, ( void * ) NULL, ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), NULL, i); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2010 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2013 #if ( configUSE_TIMERS == 1 )
/* The timer service task is only created if all idle tasks were
created successfully. */
2015 if( xReturn == pdPASS )
2017 xReturn = xTimerCreateTimerTask();
2021 mtCOVERAGE_TEST_MARKER();
2024 #endif /* configUSE_TIMERS */
2026 if( xReturn == pdPASS )
2028 /* Interrupts are turned off here, to ensure a tick does not occur
2029 before or during the call to xPortStartScheduler(). The stacks of
2030 the created tasks contain a status word with interrupts switched on
2031 so interrupts will automatically get re-enabled when the first task
2033 portDISABLE_INTERRUPTS();
2036 xTickCount = ( TickType_t ) 0U;
2038 /* If configGENERATE_RUN_TIME_STATS is defined then the following
2039 macro must be defined to configure the timer/counter used to generate
2040 the run time counter time base. */
2041 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
2042 xSchedulerRunning = pdTRUE;
2044 /* Setting up the timer tick is hardware specific and thus in the
2045 portable interface. */
2046 if( xPortStartScheduler() != pdFALSE )
2048 /* Should not reach here as if the scheduler is running the
2049 function will not return. */
2053 /* Should only reach here if a task calls xTaskEndScheduler(). */
2058 /* This line will only be reached if the kernel could not be started,
2059 because there was not enough FreeRTOS heap to create the idle task
2060 or the timer task. */
2061 configASSERT( xReturn );
2064 /*-----------------------------------------------------------*/
/* Stop the kernel: disable interrupts, mark the scheduler as not running,
and let the port layer restore the pre-scheduler environment. */
2066 void vTaskEndScheduler( void )
2068 /* Stop the scheduler interrupts and call the portable scheduler end
2069 routine so the original ISRs can be restored if necessary. The port
2070 layer must ensure interrupts enable bit is left in the correct state. */
2071 portDISABLE_INTERRUPTS();
2072 xSchedulerRunning = pdFALSE;
2073 vPortEndScheduler();
2075 /*----------------------------------------------------------*/
2078 #if ( configUSE_NEWLIB_REENTRANT == 1 )
2079 //Return global reent struct if FreeRTOS isn't running,
/* Newlib hook: returns the reentrancy context for the currently running
task, or the global one when no task is running yet. */
2080 struct _reent* __getreent() {
2081 //No lock needed because if this changes, we won't be running anymore.
2082 TCB_t *currTask=xTaskGetCurrentTaskHandle();
2083 if (currTask==NULL) {
2084 //No task running. Return global struct.
2085 return _GLOBAL_REENT;
2087 //We have a task; return its reentrant struct.
2088 return &currTask->xNewLib_reent;
/* Suspend the scheduler on the calling core only (nesting counter, one per
core).  Matched by a later call to xTaskResumeAll().  NOTE(review): the
declaration of 'state' is on a line elided from this view. */
2094 void vTaskSuspendAll( void )
2096 /* A critical section is not required as the variable is of type
2097 BaseType_t. Please read Richard Barry's reply in the following link to a
2098 post in the FreeRTOS support forum before reporting this as a bug! -
2099 http://goo.gl/wu4acr */
/* On this SMP port a short interrupt-disable window is still used so the
increment of the per-core counter is atomic w.r.t. local interrupts. */
2102 state = portENTER_CRITICAL_NESTED();
2103 ++uxSchedulerSuspended[ xPortGetCoreID() ];
2104 portEXIT_CRITICAL_NESTED(state);
2106 /*----------------------------------------------------------*/
2108 #if ( configUSE_TICKLESS_IDLE != 0 )
/* Tickless-idle helper: how many ticks may the tick interrupt be suppressed
for?  Returns 0 (set on lines elided from this view - TODO confirm) when a
task above idle priority is running or other idle-priority tasks are ready,
otherwise the time until the next task unblocks. */
2110 static TickType_t prvGetExpectedIdleTime( void )
2115 taskENTER_CRITICAL(&xTaskQueueMutex);
2116 if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
2120 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
2122 /* There are other idle priority tasks in the ready state. If
2123 time slicing is used then the very next tick interrupt must be
2129 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2130 xReturn = xNextTaskUnblockTime - xTickCount;
2131 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2133 taskEXIT_CRITICAL(&xTaskQueueMutex);
2138 #endif /* configUSE_TICKLESS_IDLE */
2139 /*----------------------------------------------------------*/
/* Undo one vTaskSuspendAll() on this core.  When the nesting count reaches
zero: drain this core's pending-ready list into the ready lists, replay any
ticks that arrived while suspended, and yield if a woken task warrants it.
Returns pdTRUE if a yield was performed inside this function. */
2141 BaseType_t xTaskResumeAll( void )
2144 BaseType_t xAlreadyYielded = pdFALSE;
2146 /* If uxSchedulerSuspended[ xPortGetCoreID() ] is zero then this function does not match a
2147 previous call to vTaskSuspendAll(). */
2148 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] );
2149 /* It is possible that an ISR caused a task to be removed from an event
2150 list while the scheduler was suspended. If this was the case then the
2151 removed task will have been added to the xPendingReadyList. Once the
2152 scheduler has been resumed it is safe to move all the pending ready
2153 tasks from this list into their appropriate ready list. */
2155 taskENTER_CRITICAL(&xTaskQueueMutex);
2157 --uxSchedulerSuspended[ xPortGetCoreID() ];
2159 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
2161 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
2163 /* Move any readied tasks from the pending list into the
2164 appropriate ready list. */
2165 while( listLIST_IS_EMPTY( &xPendingReadyList[ xPortGetCoreID() ] ) == pdFALSE )
2167 pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[ xPortGetCoreID() ] ) );
2168 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2169 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
2170 prvAddTaskToReadyList( pxTCB );
2172 /* If the moved task has a priority higher than the current
2173 task then a yield must be performed. */
2174 if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2176 /* We can schedule the awoken task on this CPU. */
2177 xYieldPending[xPortGetCoreID()] = pdTRUE;
2181 mtCOVERAGE_TEST_MARKER();
2185 /* If any ticks occurred while the scheduler was suspended then
2186 they should be processed now. This ensures the tick count does
2187 not slip, and that any delayed tasks are resumed at the correct
2189 if( uxPendedTicks > ( UBaseType_t ) 0U )
2191 while( uxPendedTicks > ( UBaseType_t ) 0U )
2193 if( xTaskIncrementTick() != pdFALSE )
2195 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
2199 mtCOVERAGE_TEST_MARKER();
2206 mtCOVERAGE_TEST_MARKER();
2209 if( xYieldPending[ xPortGetCoreID() ] == pdTRUE )
2211 #if( configUSE_PREEMPTION != 0 )
2213 xAlreadyYielded = pdTRUE;
2216 taskYIELD_IF_USING_PREEMPTION();
2220 mtCOVERAGE_TEST_MARKER();
2226 mtCOVERAGE_TEST_MARKER();
2229 taskEXIT_CRITICAL(&xTaskQueueMutex);
2231 return xAlreadyYielded;
2233 /*-----------------------------------------------------------*/
/* Return the current tick count.  The read is protected by the tick-count
mutex critical section so it is safe even when TickType_t is wider than the
native word.  NOTE(review): declaration of 'xTicks' and the return statement
are on lines elided from this view. */
2235 TickType_t xTaskGetTickCount( void )
2239 /* Critical section required if running on a 16 bit processor. */
2240 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2242 xTicks = xTickCount;
2244 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
/* ISR-safe variant of xTaskGetTickCount(): reads xTickCount under the
ISR-safe tick-count critical section. */
2250 TickType_t xTaskGetTickCountFromISR( void )
2254 taskENTER_CRITICAL_ISR(&xTickCountMutex);
2256 xReturn = xTickCount;
2257 // vPortCPUReleaseMutex( &xTickCountMutex );
2259 taskEXIT_CRITICAL_ISR(&xTickCountMutex);
/* Return the total number of tasks known to the kernel (all states,
including deleted-but-not-yet-cleaned-up). */
2265 UBaseType_t uxTaskGetNumberOfTasks( void )
2267 /* A critical section is not required because the variables are of type
2269 return uxCurrentNumberOfTasks;
2273 #if ( INCLUDE_pcTaskGetTaskName == 1 )
/* Return a pointer to the human-readable name stored in the task's TCB.
Passing NULL queries the calling task (resolved by prvGetTCBFromHandle). */
2274 char *pcTaskGetTaskName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2278 /* If null is passed in here then the name of the calling task is being queried. */
2279 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
2280 configASSERT( pxTCB );
2281 return &( pxTCB->pcTaskName[ 0 ] );
2284 #endif /* INCLUDE_pcTaskGetTaskName */
2285 /*-----------------------------------------------------------*/
2287 #if ( configUSE_TRACE_FACILITY == 1 )
/* Populate pxTaskStatusArray with a TaskStatus_t per task (ready, blocked,
deleted, suspended) and optionally report the total run time.  Returns the
number of entries filled; fills nothing if uxArraySize is too small. */
2289 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
2291 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
2293 taskENTER_CRITICAL(&xTaskQueueMutex);
2295 /* Is there a space in the array for each task in the system? */
2296 if( uxArraySize >= uxCurrentNumberOfTasks )
2298 /* Fill in an TaskStatus_t structure with information on each
2299 task in the Ready state. */
2303 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
2305 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2307 /* Fill in an TaskStatus_t structure with information on each
2308 task in the Blocked state. */
2309 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
2310 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
2312 #if( INCLUDE_vTaskDelete == 1 )
2314 /* Fill in an TaskStatus_t structure with information on
2315 each task that has been deleted but not yet cleaned up. */
2316 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
2320 #if ( INCLUDE_vTaskSuspend == 1 )
2322 /* Fill in an TaskStatus_t structure with information on
2323 each task in the Suspended state. */
2324 uxTask += prvListTaskWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
2328 #if ( configGENERATE_RUN_TIME_STATS == 1)
/* Run-time stats enabled: report the total run time via the port's
counter macro. */
2330 if( pulTotalRunTime != NULL )
2332 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2333 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
2335 *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
/* Stats disabled: report zero so callers get a defined value. */
2341 if( pulTotalRunTime != NULL )
2343 *pulTotalRunTime = 0;
2350 mtCOVERAGE_TEST_MARKER();
2353 taskEXIT_CRITICAL(&xTaskQueueMutex);
2357 #endif /* configUSE_TRACE_FACILITY */
2358 /*----------------------------------------------------------*/
2360 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
/* Return the idle task handle for the calling core.  Only valid after the
scheduler has been started (the handle is created in vTaskStartScheduler). */
2362 TaskHandle_t xTaskGetIdleTaskHandle( void )
2364 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2365 started, then xIdleTaskHandle will be NULL. */
2366 configASSERT( ( xIdleTaskHandle[ xPortGetCoreID() ] != NULL ) );
2367 return xIdleTaskHandle[ xPortGetCoreID() ];
/* Return the idle task handle for an explicit core, or NULL when cpuid is
out of range. */
2370 TaskHandle_t xTaskGetIdleTaskHandleForCPU( UBaseType_t cpuid )
2372 TaskHandle_t xReturn = NULL;
2373 /* If xTaskGetIdleTaskHandleForCPU() is called before the scheduler has been
2374 started, then xIdleTaskHandle will be NULL. */
2375 if (cpuid < portNUM_PROCESSORS) {
2376 configASSERT( ( xIdleTaskHandle[ cpuid ] != NULL ) );
2377 xReturn = xIdleTaskHandle[ cpuid ];
2382 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2383 /*----------------------------------------------------------*/
2385 /* This conditional compilation should use inequality to 0, not equality to 1.
2386 This is to ensure vTaskStepTick() is available when user defined low power mode
2387 implementations require configUSE_TICKLESS_IDLE to be set to a value other than
2389 #if ( configUSE_TICKLESS_IDLE != 0 )
/* Tickless idle: jump the tick count forward by xTicksToJump after the tick
interrupt was suppressed.  Must not jump past the next unblock time (the
assert guards this).  Tick hooks are NOT called for the skipped ticks. */
2391 void vTaskStepTick( const TickType_t xTicksToJump )
2393 /* Correct the tick count value after a period during which the tick
2394 was suppressed. Note this does *not* call the tick hook function for
2395 each stepped tick. */
2396 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2397 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
2398 xTickCount += xTicksToJump;
2399 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2400 traceINCREASE_TICK_COUNT( xTicksToJump );
2403 #endif /* configUSE_TICKLESS_IDLE */
2404 /*----------------------------------------------------------*/
/* Called from the tick interrupt on every core.  Only core 0 advances the
tick count and unblocks expired delayed tasks; other cores only run the tick
hooks (and, per the in-code comments, assume a switch is needed).  Returns
pdTRUE when the caller should perform a context switch. */
2406 BaseType_t xTaskIncrementTick( void )
2409 TickType_t xItemValue;
2410 BaseType_t xSwitchRequired = pdFALSE;
2412 /* Called by the portable layer each time a tick interrupt occurs.
2413 Increments the tick then checks to see if the new tick value will cause any
2414 tasks to be unblocked. */
2416 /* Only let core 0 increase the tick count, to keep accurate track of time. */
2417 /* ToDo: This doesn't really play nice with the logic below: it means when core 1 is
2418 running a low-priority task, it will keep running it until there is a context
2419 switch, even when this routine (running on core 0) unblocks a bunch of high-priority
2420 tasks... this is less than optimal -- JD. */
2421 if ( xPortGetCoreID()!=0 ) {
2422 #if ( configUSE_TICK_HOOK == 1 )
2423 vApplicationTickHook();
2424 #endif /* configUSE_TICK_HOOK */
2425 esp_vApplicationTickHook();
2428 We can't really calculate what we need, that's done on core 0... just assume we need a switch.
2429 ToDo: Make this more intelligent? -- JD
2435 traceTASK_INCREMENT_TICK( xTickCount );
2437 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
2439 portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
2440 /* Increment the RTOS tick, switching the delayed and overflowed
2441 delayed lists if it wraps to 0. */
2443 portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
2445 //The other CPU may decide to mess with the task queues, so this needs a mux.
2446 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
2448 /* Minor optimisation. The tick count cannot change in this
2450 const TickType_t xConstTickCount = xTickCount;
/* Tick wrapped to zero: swap the delayed and overflow-delayed lists. */
2452 if( xConstTickCount == ( TickType_t ) 0U )
2454 taskSWITCH_DELAYED_LISTS();
2458 mtCOVERAGE_TEST_MARKER();
2461 /* See if this tick has made a timeout expire. Tasks are stored in
2462 the queue in the order of their wake time - meaning once one task
2463 has been found whose block time has not expired there is no need to
2464 look any further down the list. */
2465 if( xConstTickCount >= xNextTaskUnblockTime )
2469 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
2471 /* The delayed list is empty. Set xNextTaskUnblockTime
2472 to the maximum possible value so it is extremely
2474 if( xTickCount >= xNextTaskUnblockTime ) test will pass
2475 next time through. */
2476 xNextTaskUnblockTime = portMAX_DELAY;
2481 /* The delayed list is not empty, get the value of the
2482 item at the head of the delayed list. This is the time
2483 at which the task at the head of the delayed list must
2484 be removed from the Blocked state. */
2485 pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
2486 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xGenericListItem ) );
2488 if( xConstTickCount < xItemValue )
2490 /* It is not time to unblock this item yet, but the
2491 item value is the time at which the task at the head
2492 of the blocked list must be removed from the Blocked
2493 state - so record the item value in
2494 xNextTaskUnblockTime. */
2495 xNextTaskUnblockTime = xItemValue;
2500 mtCOVERAGE_TEST_MARKER();
2503 /* It is time to remove the item from the Blocked state. */
2504 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
2506 /* Is the task waiting on an event also? If so remove
2507 it from the event list. */
2508 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2510 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2514 mtCOVERAGE_TEST_MARKER();
2517 /* Place the unblocked task into the appropriate ready
2519 prvAddTaskToReadyList( pxTCB );
2521 /* A task being unblocked cannot cause an immediate
2522 context switch if preemption is turned off. */
2523 #if ( configUSE_PREEMPTION == 1 )
2525 /* Preemption is on, but a context switch should
2526 only be performed if the unblocked task has a
2527 priority that is equal to or higher than the
2528 currently executing task. */
2529 if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2531 xSwitchRequired = pdTRUE;
2535 mtCOVERAGE_TEST_MARKER();
2538 #endif /* configUSE_PREEMPTION */
2544 /* Tasks of equal priority to the currently running task will share
2545 processing time (time slice) if preemption is on, and the application
2546 writer has not explicitly turned time slicing off. */
2547 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
2549 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
2551 xSwitchRequired = pdTRUE;
2555 mtCOVERAGE_TEST_MARKER();
2558 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
2561 /* Guard against the tick hook being called when the pended tick
2562 count is being unwound (when the scheduler is being unlocked). */
2563 if( uxPendedTicks == ( UBaseType_t ) 0U )
2565 #if ( configUSE_TICK_HOOK == 1 )
2566 vApplicationTickHook();
2567 #endif /* configUSE_TICK_HOOK */
2568 esp_vApplicationTickHook();
2572 mtCOVERAGE_TEST_MARKER();
2575 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
2581 /* The tick hook gets called at regular intervals, even if the
2582 scheduler is locked. */
2583 #if ( configUSE_TICK_HOOK == 1 )
2585 vApplicationTickHook();
2588 esp_vApplicationTickHook();
2591 #if ( configUSE_PREEMPTION == 1 )
/* A yield may have been requested (e.g. from an ISR) while the
scheduler was suspended - honour it now. */
2593 if( xYieldPending [ xPortGetCoreID() ] != pdFALSE )
2595 xSwitchRequired = pdTRUE;
2599 mtCOVERAGE_TEST_MARKER();
2602 #endif /* configUSE_PREEMPTION */
2604 return xSwitchRequired;
2606 /*-----------------------------------------------------------*/
2608 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Store an application-defined hook/tag function in a task's TCB.  Passing
NULL for xTask targets the calling task. */
2610 void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
2614 /* If xTask is NULL then it is the task hook of the calling task that is
2618 xTCB = ( TCB_t * ) pxCurrentTCB[ xPortGetCoreID() ];
2622 xTCB = ( TCB_t * ) xTask;
2625 /* Save the hook function in the TCB. A critical section is required as
2626 the value can be accessed from an interrupt. */
2627 taskENTER_CRITICAL(&xTaskQueueMutex);
2628 xTCB->pxTaskTag = pxHookFunction;
2629 taskEXIT_CRITICAL(&xTaskQueueMutex);
2632 #endif /* configUSE_APPLICATION_TASK_TAG */
2633 /*-----------------------------------------------------------*/
2635 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Read back the tag set by vTaskSetApplicationTaskTag().  NULL xTask
queries the calling task. */
2637 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
2640 TaskHookFunction_t xReturn;
2642 /* If xTask is NULL then we are setting our own task hook. */
2645 xTCB = ( TCB_t * ) xTaskGetCurrentTaskHandle();
2649 xTCB = ( TCB_t * ) xTask;
2652 /* Save the hook function in the TCB. A critical section is required as
2653 the value can be accessed from an interrupt. */
2654 taskENTER_CRITICAL(&xTaskQueueMutex);
2656 xReturn = xTCB->pxTaskTag;
2658 taskEXIT_CRITICAL(&xTaskQueueMutex);
2663 #endif /* configUSE_APPLICATION_TASK_TAG */
2664 /*-----------------------------------------------------------*/
2666 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
/* Invoke a task's tag function (if any) with pvParameter, returning its
result.  NULL xTask calls the calling task's hook.  NOTE(review): the
declaration of xReturn and its value when no hook is set are on lines
elided from this view - TODO confirm it defaults to pdFAIL. */
2668 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
2673 /* If xTask is NULL then we are calling our own task hook. */
2676 xTCB = ( TCB_t * ) xTaskGetCurrentTaskHandle();
2680 xTCB = ( TCB_t * ) xTask;
2683 if( xTCB->pxTaskTag != NULL )
2685 xReturn = xTCB->pxTaskTag( pvParameter );
2695 #endif /* configUSE_APPLICATION_TASK_TAG */
2696 /*-----------------------------------------------------------*/
/* Core of the SMP scheduler: pick the next task to run on the calling core.
If the scheduler is suspended on this core, just record a pending yield.
Otherwise: account run-time stats, check for stack overflow, then walk the
ready lists from the highest populated priority downwards looking for a task
that (a) is not currently executing on the other core and (b) is either
unpinned or pinned to this core.  The queue mutex is taken manually (not via
taskENTER_CRITICAL_ISR) because that macro saves interrupt context into the
TCB being swapped out - see the in-code comment. */
2698 void vTaskSwitchContext( void )
2700 //Theoretically, this is only called from either the tick interrupt or the crosscore interrupt, so disabling
2701 //interrupts shouldn't be necessary anymore. Still, for safety we'll leave it in for now.
2702 int irqstate=portENTER_CRITICAL_NESTED();
2704 if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
2706 /* The scheduler is currently suspended - do not allow a context
2708 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
2712 xYieldPending[ xPortGetCoreID() ] = pdFALSE;
2713 xSwitchingContext[ xPortGetCoreID() ] = pdTRUE;
2714 traceTASK_SWITCHED_OUT();
2716 #if ( configGENERATE_RUN_TIME_STATS == 1 )
2718 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2719 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
2721 ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
2724 /* Add the amount of time the task has been running to the
2725 accumulated time so far. The time the task started running was
2726 stored in ulTaskSwitchedInTime. Note that there is no overflow
2727 protection here so count values are only valid until the timer
2728 overflows. The guard against negative values is to protect
2729 against suspect run time stat counter implementations - which
2730 are provided by the application, not the kernel. */
2731 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
2732 if( ulTotalRunTime > ulTaskSwitchedInTime[ xPortGetCoreID() ] )
2734 pxCurrentTCB[ xPortGetCoreID() ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xPortGetCoreID() ] );
2738 mtCOVERAGE_TEST_MARKER();
2740 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
2741 ulTaskSwitchedInTime[ xPortGetCoreID() ] = ulTotalRunTime;
2743 #endif /* configGENERATE_RUN_TIME_STATS */
2745 /* Check for stack overflow, if configured. */
2746 taskFIRST_CHECK_FOR_STACK_OVERFLOW();
2747 taskSECOND_CHECK_FOR_STACK_OVERFLOW();
2749 /* Select a new task to run */
2752 We cannot do taskENTER_CRITICAL_ISR(&xTaskQueueMutex); here because it saves the interrupt context to the task tcb, and we're
2753 swapping that out here. Instead, we're going to do the work here ourselves. Because interrupts are already disabled, we only
2754 need to acquire the mutex.
2756 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
2757 vPortCPUAcquireMutex( &xTaskQueueMutex, __FUNCTION__, __LINE__ );
2759 vPortCPUAcquireMutex( &xTaskQueueMutex );
2762 unsigned portBASE_TYPE foundNonExecutingWaiter = pdFALSE, ableToSchedule = pdFALSE, resetListHead;
2763 portBASE_TYPE uxDynamicTopReady = uxTopReadyPriority;
2764 unsigned portBASE_TYPE holdTop=pdFALSE;
2767 * ToDo: This scheduler doesn't correctly implement the round-robin scheduling as done in the single-core
2768 * FreeRTOS stack when multiple tasks have the same priority and are all ready; it just keeps grabbing the
2769 * first one. ToDo: fix this.
2770 * (Is this still true? if any, there's the issue with one core skipping over the processes for the other
2771 * core, potentially not giving the skipped-over processes any time.)
/* Scan priority lists from the current top priority downwards until a
schedulable task is found for this core. */
2774 while ( ableToSchedule == pdFALSE && uxDynamicTopReady >= 0 )
2776 resetListHead = pdFALSE;
2777 // Nothing to do for empty lists
2778 if (!listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxDynamicTopReady ] ) )) {
2780 ableToSchedule = pdFALSE;
2783 /* Remember the current list item so that we
2784 can detect if all items have been inspected.
2785 Once this happens, we move on to a lower
2786 priority list (assuming nothing is suitable
2787 for scheduling). Note: This can return NULL if
2788 the list index is at the listItem */
2789 pxRefTCB = pxReadyTasksLists[ uxDynamicTopReady ].pxIndex->pvOwner;
2791 if ((void*)pxReadyTasksLists[ uxDynamicTopReady ].pxIndex==(void*)&pxReadyTasksLists[ uxDynamicTopReady ].xListEnd) {
2792 //pxIndex points to the list end marker. Skip that and just get the next item.
2793 listGET_OWNER_OF_NEXT_ENTRY( pxRefTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2797 listGET_OWNER_OF_NEXT_ENTRY( pxTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2798 /* Find out if the next task in the list is
2799 already being executed by another core */
2800 foundNonExecutingWaiter = pdTRUE;
2801 portBASE_TYPE i = 0;
2802 for ( i=0; i<portNUM_PROCESSORS; i++ ) {
2803 if (i == xPortGetCoreID()) {
2805 } else if (pxCurrentTCB[i] == pxTCB) {
2806 holdTop=pdTRUE; //keep this as the top prio, for the other CPU
2807 foundNonExecutingWaiter = pdFALSE;
2812 if (foundNonExecutingWaiter == pdTRUE) {
2813 /* If the task is not being executed
2814 by another core and its affinity is
2815 compatible with the current one,
2816 prepare it to be swapped in */
2817 if (pxTCB->xCoreID == tskNO_AFFINITY) {
2818 pxCurrentTCB[xPortGetCoreID()] = pxTCB;
2819 ableToSchedule = pdTRUE;
2820 } else if (pxTCB->xCoreID == xPortGetCoreID()) {
2821 pxCurrentTCB[xPortGetCoreID()] = pxTCB;
2822 ableToSchedule = pdTRUE;
2824 ableToSchedule = pdFALSE;
2825 holdTop=pdTRUE; //keep this as the top prio, for the other CPU
2828 ableToSchedule = pdFALSE;
2831 if (ableToSchedule == pdFALSE) {
2832 resetListHead = pdTRUE;
2833 } else if ((ableToSchedule == pdTRUE) && (resetListHead == pdTRUE)) {
/* We skipped over entries before settling on one; rewind the
list index back to the original reference entry. */
2834 tskTCB * pxResetTCB;
2836 listGET_OWNER_OF_NEXT_ENTRY( pxResetTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
2837 } while(pxResetTCB != pxRefTCB);
2839 } while ((ableToSchedule == pdFALSE) && (pxTCB != pxRefTCB));
/* Nothing runnable at this priority: lower the cached top priority
unless a task here is still relevant for the other core. */
2841 if (!holdTop) --uxTopReadyPriority;
2843 --uxDynamicTopReady;
2846 traceTASK_SWITCHED_IN();
2847 xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;
2849 //Exit critical region manually as well: release the mux now, interrupts will be re-enabled when we
2850 //exit the function.
2851 #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
2852 vPortCPUReleaseMutex( &xTaskQueueMutex, __FUNCTION__, __LINE__ );
2854 vPortCPUReleaseMutex( &xTaskQueueMutex );
2857 #if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
2858 vPortSetStackWatchpoint(pxCurrentTCB[xPortGetCoreID()]->pxStack);
2862 portEXIT_CRITICAL_NESTED(irqstate);
2864 /*-----------------------------------------------------------*/
/* Block the calling task on an event list (priority-ordered insert) and
move it to the delayed list - or, with portMAX_DELAY and INCLUDE_vTaskSuspend,
to the suspended list for an indefinite wait. */
2866 void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
2868 TickType_t xTimeToWake;
2870 configASSERT( pxEventList );
2872 taskENTER_CRITICAL(&xTaskQueueMutex);
2874 /* Place the event list item of the TCB in the appropriate event list.
2875 This is placed in the list in priority order so the highest priority task
2876 is the first to be woken by the event. The queue that contains the event
2877 list is locked, preventing simultaneous access from interrupts. */
2878 vListInsert( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
2880 /* The task must be removed from from the ready list before it is added to
2881 the blocked list as the same list item is used for both lists. Exclusive
2882 access to the ready lists guaranteed because the scheduler is locked. */
2883 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
2885 /* The current task must be in a ready list, so there is no need to
2886 check, and the port reset macro can be called directly. */
2887 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
2891 mtCOVERAGE_TEST_MARKER();
2894 #if ( INCLUDE_vTaskSuspend == 1 )
2896 if( xTicksToWait == portMAX_DELAY )
2898 /* Add the task to the suspended task list instead of a delayed task
2899 list to ensure the task is not woken by a timing event. It will
2900 block indefinitely. */
2901 traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
2902 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
2906 /* Calculate the time at which the task should be woken if the event
2907 does not occur. This may overflow but this doesn't matter, the
2908 scheduler will handle it. */
2909 xTimeToWake = xTickCount + xTicksToWait;
2910 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
2913 #else /* INCLUDE_vTaskSuspend */
2915 /* Calculate the time at which the task should be woken if the event does
2916 not occur. This may overflow but this doesn't matter, the scheduler
2918 xTimeToWake = xTickCount + xTicksToWait;
2919 prvAddCurrentTaskToDelayedList( xTimeToWake );
2921 #endif /* INCLUDE_vTaskSuspend */
2923 taskEXIT_CRITICAL(&xTaskQueueMutex);
2926 /*-----------------------------------------------------------*/
/* Event-group variant of vTaskPlaceOnEventList(): stores xItemValue in the
event list item and appends (unordered) rather than inserting by priority.
Must be called with the scheduler suspended on this core (asserted). */
2928 void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
2930 TickType_t xTimeToWake;
2932 configASSERT( pxEventList );
2934 taskENTER_CRITICAL(&xTaskQueueMutex);
2936 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
2937 the event groups implementation. */
2938 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != 0 );
2940 /* Store the item value in the event list item. It is safe to access the
2941 event list item here as interrupts won't access the event list item of a
2942 task that is not in the Blocked state. */
2943 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
2945 /* Place the event list item of the TCB at the end of the appropriate event
2946 list. It is safe to access the event list here because it is part of an
2947 event group implementation - and interrupts don't access event groups
2948 directly (instead they access them indirectly by pending function calls to
2950 vListInsertEnd( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
2952 /* The task must be removed from the ready list before it is added to the
2953 blocked list. Exclusive access can be assured to the ready list as the
2954 scheduler is locked. */
2955 if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
2957 /* The current task must be in a ready list, so there is no need to
2958 check, and the port reset macro can be called directly. */
2959 portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
2963 mtCOVERAGE_TEST_MARKER();
2966 #if ( INCLUDE_vTaskSuspend == 1 )
2968 if( xTicksToWait == portMAX_DELAY )
2970 /* Add the task to the suspended task list instead of a delayed task
2971 list to ensure it is not woken by a timing event. It will block
2973 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
2977 /* Calculate the time at which the task should be woken if the event
2978 does not occur. This may overflow but this doesn't matter, the
2979 kernel will manage it correctly. */
2980 xTimeToWake = xTickCount + xTicksToWait;
2981 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
2984 #else /* INCLUDE_vTaskSuspend */
2986 /* Calculate the time at which the task should be woken if the event does
2987 not occur. This may overflow but this doesn't matter, the kernel
2988 will manage it correctly. */
2989 xTimeToWake = xTickCount + xTicksToWait;
2990 prvAddCurrentTaskToDelayedList( xTimeToWake );
2992 #endif /* INCLUDE_vTaskSuspend */
2994 taskEXIT_CRITICAL(&xTaskQueueMutex);
2996 /*-----------------------------------------------------------*/
#if configUSE_TIMERS == 1

	void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, const TickType_t xTicksToWait )
	{
	TickType_t xWakeTime;

		taskENTER_CRITICAL( &xTaskQueueMutex );
		configASSERT( pxEventList );

		/* 'Restricted' because this is a kernel-internal helper, not part of
		the public API: it has special calling requirements and must be
		invoked from a critical section.

		Only one task will ever wait on the lists this function is used with,
		so the faster vListInsertEnd() can be used in place of vListInsert(). */
		vListInsertEnd( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );

		/* The same list item is used for both the ready and the blocked
		lists, so the task must leave the ready list before it joins the
		blocked list. */
		if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
		{
			/* The current task is known to be on a ready list, so the port
			level reset macro can be called without further checks. */
			portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		/* The time at which the task should be woken if the event does not
		occur first.  This may overflow; the delayed list mechanism handles
		the wrap-around. */
		xWakeTime = xTickCount + xTicksToWait;

		traceTASK_DELAY_UNTIL();
		prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xWakeTime );
		taskEXIT_CRITICAL( &xTaskQueueMutex );
	}

#endif /* configUSE_TIMERS */
3044 /*-----------------------------------------------------------*/
3046 BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
3048 TCB_t *pxUnblockedTCB;
3050 BaseType_t xTaskCanBeReady;
3051 UBaseType_t i, uxTargetCPU;
3053 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
3054 called from a critical section within an ISR. */
3055 taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
3056 /* The event list is sorted in priority order, so the first in the list can
3057 be removed as it is known to be the highest priority. Remove the TCB from
3058 the delayed list, and add it to the ready list.
3060 If an event is for a queue that is locked then this function will never
3061 get called - the lock count on the queue will get modified instead. This
3062 means exclusive access to the event list is guaranteed here.
3064 This function assumes that a check has already been made to ensure that
3065 pxEventList is not empty. */
3066 if ( ( listLIST_IS_EMPTY( pxEventList ) ) == pdFALSE ) {
3067 pxUnblockedTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
3068 configASSERT( pxUnblockedTCB );
3069 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
3071 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
3075 /* Determine if the task can possibly be run on either CPU now, either because the scheduler
3076 the task is pinned to is running or because a scheduler is running on any CPU. */
3077 xTaskCanBeReady = pdFALSE;
3078 if ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY ) {
3079 uxTargetCPU = xPortGetCoreID();
3080 for (i = 0; i < portNUM_PROCESSORS; i++) {
3081 if ( uxSchedulerSuspended[ i ] == ( UBaseType_t ) pdFALSE ) {
3082 xTaskCanBeReady = pdTRUE;
3087 uxTargetCPU = pxUnblockedTCB->xCoreID;
3088 xTaskCanBeReady = uxSchedulerSuspended[ uxTargetCPU ] == ( UBaseType_t ) pdFALSE;
3092 if( xTaskCanBeReady )
3094 ( void ) uxListRemove( &( pxUnblockedTCB->xGenericListItem ) );
3095 prvAddTaskToReadyList( pxUnblockedTCB );
3099 /* The delayed and ready lists cannot be accessed, so hold this task
3100 pending until the scheduler is resumed on this CPU. */
3101 vListInsertEnd( &( xPendingReadyList[ uxTargetCPU ] ), &( pxUnblockedTCB->xEventListItem ) );
3104 if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
3106 /* Return true if the task removed from the event list has a higher
3107 priority than the calling task. This allows the calling task to know if
3108 it should force a context switch now. */
3111 /* Mark that a yield is pending in case the user is not using the
3112 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3113 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
3115 else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
3117 taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
3125 #if( configUSE_TICKLESS_IDLE == 1 )
3127 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
3128 might be set to the blocked task's time out time. If the task is
3129 unblocked for a reason other than a timeout xNextTaskUnblockTime is
3130 normally left unchanged, because it is automatically get reset to a new
3131 value when the tick count equals xNextTaskUnblockTime. However if
3132 tickless idling is used it might be more important to enter sleep mode
3133 at the earliest possible time - so reset xNextTaskUnblockTime here to
3134 ensure it is updated at the earliest possible time. */
3135 prvResetNextTaskUnblockTime();
3138 taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
3142 /*-----------------------------------------------------------*/
3144 BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
3146 TCB_t *pxUnblockedTCB;
3149 taskENTER_CRITICAL(&xTaskQueueMutex);
3150 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3151 the event flags implementation. */
3152 configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != pdFALSE );
3154 /* Store the new item value in the event list. */
3155 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3157 /* Remove the event list form the event flag. Interrupts do not access
3159 pxUnblockedTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxEventListItem );
3160 configASSERT( pxUnblockedTCB );
3161 ( void ) uxListRemove( pxEventListItem );
3163 /* Remove the task from the delayed list and add it to the ready list. The
3164 scheduler is suspended so interrupts will not be accessing the ready
3166 ( void ) uxListRemove( &( pxUnblockedTCB->xGenericListItem ) );
3167 prvAddTaskToReadyList( pxUnblockedTCB );
3169 if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
3171 /* Return true if the task removed from the event list has
3172 a higher priority than the calling task. This allows
3173 the calling task to know if it should force a context
3177 /* Mark that a yield is pending in case the user is not using the
3178 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3179 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
3181 else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
3183 taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
3191 taskEXIT_CRITICAL(&xTaskQueueMutex);
3194 /*-----------------------------------------------------------*/
3196 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
3198 configASSERT( pxTimeOut );
3199 pxTimeOut->xOverflowCount = xNumOfOverflows;
3200 pxTimeOut->xTimeOnEntering = xTickCount;
3202 /*-----------------------------------------------------------*/
3204 BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
3208 configASSERT( pxTimeOut );
3209 configASSERT( pxTicksToWait );
3211 taskENTER_CRITICAL(&xTickCountMutex);
3213 /* Minor optimisation. The tick count cannot change in this block. */
3214 const TickType_t xConstTickCount = xTickCount;
3216 #if ( INCLUDE_vTaskSuspend == 1 )
3217 /* If INCLUDE_vTaskSuspend is set to 1 and the block time specified is
3218 the maximum block time then the task should block indefinitely, and
3219 therefore never time out. */
3220 if( *pxTicksToWait == portMAX_DELAY )
3224 else /* We are not blocking indefinitely, perform the checks below. */
3227 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
3229 /* The tick count is greater than the time at which vTaskSetTimeout()
3230 was called, but has also overflowed since vTaskSetTimeOut() was called.
3231 It must have wrapped all the way around and gone past us again. This
3232 passed since vTaskSetTimeout() was called. */
3235 else if( ( xConstTickCount - pxTimeOut->xTimeOnEntering ) < *pxTicksToWait )
3237 /* Not a genuine timeout. Adjust parameters for time remaining. */
3238 *pxTicksToWait -= ( xConstTickCount - pxTimeOut->xTimeOnEntering );
3239 vTaskSetTimeOutState( pxTimeOut );
3247 taskEXIT_CRITICAL(&xTickCountMutex);
3251 /*-----------------------------------------------------------*/
3253 void vTaskMissedYield( void )
3255 xYieldPending[ xPortGetCoreID() ] = pdTRUE;
3257 /*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the trace-facility task number assigned to xTask, or 0 for a
	NULL handle. */
	UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
	{
	UBaseType_t uxReturn;
	TCB_t *pxTCB;

		if( xTask != NULL )
		{
			pxTCB = ( TCB_t * ) xTask;
			uxReturn = pxTCB->uxTaskNumber;
		}
		else
		{
			uxReturn = 0U;
		}

		return uxReturn;
	}

#endif /* configUSE_TRACE_FACILITY */
3280 /*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Assign a trace-facility task number to xTask.  A NULL handle is
	ignored. */
	void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
	{
	TCB_t *pxTCB;

		if( xTask != NULL )
		{
			pxTCB = ( TCB_t * ) xTask;
			pxTCB->uxTaskNumber = uxHandle;
		}
	}

#endif /* configUSE_TRACE_FACILITY */
3298 * -----------------------------------------------------------
3300 * ----------------------------------------------------------
3302 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3303 * language extensions. The equivalent prototype for this function is:
3305 * void prvIdleTask( void *pvParameters );
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
	/* Stop warnings. */
	( void ) pvParameters;

	for( ;; )
	{
		/* See if any tasks have been deleted. */
		prvCheckTasksWaitingTermination();

		#if ( configUSE_PREEMPTION == 0 )
		{
			/* Without preemption a task switch is forced on every loop so
			any newly-ready task gets a chance to run; with preemption a
			ready task preempts the idle task automatically. */
			taskYIELD();
		}
		#endif /* configUSE_PREEMPTION */

		#if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
		{
			/* When tasks share the idle priority and time slicing is in use,
			yield before the end of the time slice if another idle-priority
			task is ready.  A critical section is not required - this is just
			a read, and an occasional stale value is harmless. */
			if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
			{
				taskYIELD();
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

		#if ( configUSE_IDLE_HOOK == 1 )
		{
			extern void vApplicationIdleHook( void );

			/* User-supplied background work.  NOTE: vApplicationIdleHook()
			MUST NOT, UNDER ANY CIRCUMSTANCES, CALL A FUNCTION THAT MIGHT
			BLOCK. */
			vApplicationIdleHook();
		}
		#endif /* configUSE_IDLE_HOOK */

		/* Call the esp-idf hook system */
		extern void esp_vApplicationIdleHook( void );
		esp_vApplicationIdleHook();

		/* This conditional compilation uses inequality to 0, not equality to
		1, so portSUPPRESS_TICKS_AND_SLEEP() is also reached when user
		defined low power implementations set configUSE_TICKLESS_IDLE to a
		value other than 1. */
		#if ( configUSE_TICKLESS_IDLE != 0 )
		{
		TickType_t xExpectedIdleTime;

			/* Preliminary test of the expected idle time without entering
			the critical section, to avoid taking it on every loop. */
			xExpectedIdleTime = prvGetExpectedIdleTime();

			if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
			{
				// vTaskSuspendAll();
				taskENTER_CRITICAL( &xTaskQueueMutex );
				{
					/* Re-sample now that the value cannot change under us. */
					configASSERT( xNextTaskUnblockTime >= xTickCount );
					xExpectedIdleTime = prvGetExpectedIdleTime();

					if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
					{
						traceLOW_POWER_IDLE_BEGIN();
						portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
						traceLOW_POWER_IDLE_END();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				taskEXIT_CRITICAL( &xTaskQueueMutex );
				// ( void ) xTaskResumeAll();
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_TICKLESS_IDLE */
	}
}
3417 /*-----------------------------------------------------------*/
#if configUSE_TICKLESS_IDLE != 0

	/* Decide, with interrupts masked, whether it is still safe to enter a
	low power sleep, and if so how deep that sleep may be. */
	eSleepModeStatus eTaskConfirmSleepModeStatus( void )
	{
	eSleepModeStatus eReturn = eStandardSleep;

		taskENTER_CRITICAL( &xTaskQueueMutex );
		if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xPortGetCoreID() ] ) != 0 )
		{
			/* A task was made ready while the scheduler was suspended. */
			eReturn = eAbortSleep;
		}
		else if( xYieldPending[ xPortGetCoreID() ] != pdFALSE )
		{
			/* A yield was pended while the scheduler was suspended. */
			eReturn = eAbortSleep;
		}
		else
		{
			#if configUSE_TIMERS == 0
			{
				/* The idle task exists in addition to the application tasks. */
				const UBaseType_t uxNonApplicationTasks = 1;

				/* With no timers, if every application task is on the
				suspended list (possibly blocked indefinitely rather than
				truly suspended) the clocks can be stopped entirely and only
				external interrupts need be waited on. */
				if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
				{
					eReturn = eNoTasksWaitingTimeout;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			#endif /* configUSE_TIMERS */
		}
		taskEXIT_CRITICAL( &xTaskQueueMutex );

		return eReturn;
	}

#endif /* configUSE_TICKLESS_IDLE */
3463 /*-----------------------------------------------------------*/
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

	#if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )

		/* Set TLS pointer xIndex of the given task (NULL = calling task) and
		register a callback to run when the task is deleted. */
		void vTaskSetThreadLocalStoragePointerAndDelCallback( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue , TlsDeleteCallbackFunction_t xDelCallback)
		{
		TCB_t *pxTCB;

			/* xIndex is signed: reject negative values as well as values at
			or above the configured count, otherwise a negative index would
			write outside the pvThreadLocalStoragePointers[] arrays. */
			if( ( xIndex >= 0 ) && ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
			{
				taskENTER_CRITICAL(&xTaskQueueMutex);
				pxTCB = prvGetTCBFromHandle( xTaskToSet );
				pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
				pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
				taskEXIT_CRITICAL(&xTaskQueueMutex);
			}
		}

		/* Set TLS pointer xIndex of the given task with no delete callback. */
		void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
		{
			vTaskSetThreadLocalStoragePointerAndDelCallback( xTaskToSet, xIndex, pvValue, (TlsDeleteCallbackFunction_t)NULL );
		}

	#else
		/* Set TLS pointer xIndex of the given task (NULL = calling task). */
		void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
		{
		TCB_t *pxTCB;

			/* xIndex is signed: reject negative values as well as values at
			or above the configured count, otherwise a negative index would
			write outside the pvThreadLocalStoragePointers[] array. */
			if( ( xIndex >= 0 ) && ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
			{
				taskENTER_CRITICAL(&xTaskQueueMutex);
				pxTCB = prvGetTCBFromHandle( xTaskToSet );
				pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
				taskEXIT_CRITICAL(&xTaskQueueMutex);
			}
		}
	#endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS */

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3505 /*-----------------------------------------------------------*/
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

	/* Return TLS pointer xIndex of the given task (NULL = calling task), or
	NULL for an out-of-range index. */
	void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
	{
	void *pvReturn = NULL;
	TCB_t *pxTCB;

		/* xIndex is signed: reject negative values as well as values at or
		above the configured count, otherwise a negative index would read
		outside the pvThreadLocalStoragePointers[] array. */
		if( ( xIndex >= 0 ) && ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
		{
			pxTCB = prvGetTCBFromHandle( xTaskToQuery );
			pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
		}
		else
		{
			pvReturn = NULL;
		}

		return pvReturn;
	}

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
#if ( portUSING_MPU_WRAPPERS == 1 )
	/* ToDo: Check for multicore */

	/* Replace the MPU regions of the given task.  A NULL handle means the
	calling task. */
	void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
	{
	TCB_t *pxTCB;

		UNTESTED_FUNCTION();
		pxTCB = prvGetTCBFromHandle( xTaskToModify );

		vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
	}

#endif /* portUSING_MPU_WRAPPERS */
3544 /*-----------------------------------------------------------*/
3546 static void prvInitialiseTaskLists( void )
3548 UBaseType_t uxPriority;
3550 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
3552 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
3555 vListInitialise( &xDelayedTaskList1 );
3556 vListInitialise( &xDelayedTaskList2 );
3557 vListInitialise( &xPendingReadyList[ 0 ] );
3558 if (portNUM_PROCESSORS == 2) {
3559 vListInitialise( &xPendingReadyList[ 1 ] );
3562 #if ( INCLUDE_vTaskDelete == 1 )
3564 vListInitialise( &xTasksWaitingTermination );
3566 #endif /* INCLUDE_vTaskDelete */
3568 #if ( INCLUDE_vTaskSuspend == 1 )
3570 vListInitialise( &xSuspendedTaskList );
3572 #endif /* INCLUDE_vTaskSuspend */
3574 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
3576 pxDelayedTaskList = &xDelayedTaskList1;
3577 pxOverflowDelayedTaskList = &xDelayedTaskList2;
3579 /*-----------------------------------------------------------*/
/* Called from the idle task: reclaim the TCB/stack of tasks that deleted
themselves (they could not free their own resources while still running). */
static void prvCheckTasksWaitingTermination( void )
{
	#if ( INCLUDE_vTaskDelete == 1 )
	{
	BaseType_t xListIsEmpty;

		/* uxTasksDeleted is used to prevent the critical section being
		entered too often in the idle task. */
		while( uxTasksDeleted > ( UBaseType_t ) 0U )
		{
		TCB_t *pxTCB = NULL;

			taskENTER_CRITICAL( &xTaskQueueMutex );
			{
				xListIsEmpty = listLIST_IS_EMPTY( &xTasksWaitingTermination );
				if( xListIsEmpty == pdFALSE )
				{
					pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );
					/* We only want to kill tasks that ran on this core because e.g.
					_xt_coproc_release needs to be called on the core the process is
					pinned on, if any. */
					if( pxTCB->xCoreID == tskNO_AFFINITY || pxTCB->xCoreID == xPortGetCoreID() )
					{
						( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
						--uxCurrentNumberOfTasks;
						--uxTasksDeleted;
					}
					else
					{
						/* Need to wait until the idle task on the other
						processor kills that task first. */
						taskEXIT_CRITICAL( &xTaskQueueMutex );
						break;
					}
				}
			}
			taskEXIT_CRITICAL( &xTaskQueueMutex );

			if( pxTCB != NULL )
			{
				/* Run any registered TLS delete callbacks, then free the
				task's memory outside the critical section. */
				#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
				{
				int x;
					for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
					{
						if( pxTCB->pvThreadLocalStoragePointersDelCallback[ x ] != NULL )
						{
							pxTCB->pvThreadLocalStoragePointersDelCallback[ x ]( x, pxTCB->pvThreadLocalStoragePointers[ x ] );
						}
					}
				}
				#endif

				prvDeleteTCB( pxTCB );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}
	#endif /* vTaskDelete */
}
3637 /*-----------------------------------------------------------*/
3639 //This should be called with the taskqueuemutex grabbed. -JD
3640 static void prvAddCurrentTaskToDelayedList( const BaseType_t xCoreID, const TickType_t xTimeToWake )
3642 /* The list item will be inserted in wake time order. */
3643 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xCoreID ]->xGenericListItem ), xTimeToWake );
3645 if( xTimeToWake < xTickCount )
3647 traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();
3648 /* Wake time has overflowed. Place this item in the overflow list. */
3649 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB[ xCoreID ]->xGenericListItem ) );
3653 traceMOVED_TASK_TO_DELAYED_LIST();
3654 /* The wake time has not overflowed, so the current block list is used. */
3655 vListInsert( pxDelayedTaskList, &( pxCurrentTCB[ xCoreID ]->xGenericListItem ) );
3657 /* If the task entering the blocked state was placed at the head of the
3658 list of blocked tasks then xNextTaskUnblockTime needs to be updated
3660 if( xTimeToWake < xNextTaskUnblockTime )
3662 xNextTaskUnblockTime = xTimeToWake;
3666 mtCOVERAGE_TEST_MARKER();
3670 /*-----------------------------------------------------------*/
3672 BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
3676 pxTCB = prvGetTCBFromHandle( xTask );
3678 return pxTCB->xCoreID;
3680 /*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Fill one TaskStatus_t entry per task on pxList into pxTaskStatusArray,
	reporting each task as being in eState.  Returns the number of entries
	written. */
	static UBaseType_t prvListTaskWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
	{
	volatile TCB_t *pxNextTCB, *pxFirstTCB;
	UBaseType_t uxTask = 0;

		if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
		{
			listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );

			/* Walk the (circular) list once, stopping when the first entry
			comes around again.  See the definition of TaskStatus_t in task.h
			for the meaning of each member populated here. */
			do
			{
				listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );

				pxTaskStatusArray[ uxTask ].xHandle = ( TaskHandle_t ) pxNextTCB;
				pxTaskStatusArray[ uxTask ].pcTaskName = ( const char * ) &( pxNextTCB->pcTaskName [ 0 ] );
				pxTaskStatusArray[ uxTask ].xTaskNumber = pxNextTCB->uxTCBNumber;
				pxTaskStatusArray[ uxTask ].eCurrentState = eState;
				pxTaskStatusArray[ uxTask ].uxCurrentPriority = pxNextTCB->uxPriority;

				#if ( INCLUDE_vTaskSuspend == 1 )
				{
					/* A task on the suspended list that still belongs to an
					event list is really blocked indefinitely, so report it
					as Blocked rather than Suspended. */
					if( eState == eSuspended )
					{
						if( listLIST_ITEM_CONTAINER( &( pxNextTCB->xEventListItem ) ) != NULL )
						{
							pxTaskStatusArray[ uxTask ].eCurrentState = eBlocked;
						}
					}
				}
				#endif /* INCLUDE_vTaskSuspend */

				#if ( configUSE_MUTEXES == 1 )
				{
					pxTaskStatusArray[ uxTask ].uxBasePriority = pxNextTCB->uxBasePriority;
				}
				#else
				{
					pxTaskStatusArray[ uxTask ].uxBasePriority = 0;
				}
				#endif

				#if ( configGENERATE_RUN_TIME_STATS == 1 )
				{
					pxTaskStatusArray[ uxTask ].ulRunTimeCounter = pxNextTCB->ulRunTimeCounter;
				}
				#else
				{
					pxTaskStatusArray[ uxTask ].ulRunTimeCounter = 0;
				}
				#endif

				#if ( portSTACK_GROWTH > 0 )
				{
					pxTaskStatusArray[ uxTask ].usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxNextTCB->pxEndOfStack );
				}
				#else
				{
					pxTaskStatusArray[ uxTask ].usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxNextTCB->pxStack );
				}
				#endif

				uxTask++;

			} while( pxNextTCB != pxFirstTCB );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return uxTask;
	}

#endif /* configUSE_TRACE_FACILITY */
3766 /*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )

	/* Count how many StackType_t words at the unused end of a stack still
	hold the fill byte - i.e. were never touched by the task. */
	static uint32_t prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
	{
	uint32_t ulCount = 0U;

		/* Walk away from the in-use end (against the direction of stack
		growth) while the fill pattern is intact. */
		while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
		{
			pucStackByte -= portSTACK_GROWTH;
			ulCount++;
		}

		ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */

		return ( uint32_t ) ulCount;
	}

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) */
3786 /*-----------------------------------------------------------*/
#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )

	/* Return the minimum free stack space (in StackType_t words) the task
	has ever had.  A NULL handle queries the calling task. */
	UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
	{
	TCB_t *pxTCB;
	uint8_t *pucEndOfStack;
	UBaseType_t uxReturn;

		pxTCB = prvGetTCBFromHandle( xTask );

		/* The "unused" end of the stack depends on which way it grows. */
		#if portSTACK_GROWTH < 0
		{
			pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
		}
		#else
		{
			pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
		}
		#endif

		uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );

		return uxReturn;
	}

#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
3814 /*-----------------------------------------------------------*/
#if (INCLUDE_pxTaskGetStackStart == 1)

	/* Return the base address of the task's stack allocation.  A NULL handle
	queries the calling task. */
	uint8_t* pxTaskGetStackStart( TaskHandle_t xTask)
	{
	TCB_t *pxTCB;
	uint8_t *pucStackBase;

		pxTCB = prvGetTCBFromHandle( xTask );
		pucStackBase = ( uint8_t * ) pxTCB->pxStack;

		return pucStackBase;
	}

#endif /* INCLUDE_pxTaskGetStackStart */
3830 /*-----------------------------------------------------------*/
#if ( INCLUDE_vTaskDelete == 1 )

	/* Free the memory the kernel allocated for a task.  Memory allocated at
	the application level is the application's responsibility. */
	static void prvDeleteTCB( TCB_t *pxTCB )
	{
		#if ( configUSE_NEWLIB_REENTRANT == 1 )
		{
			/* Release the task's newlib reentrancy structure. */
			_reclaim_reent( &( pxTCB->xNewLib_reent ) );
		}
		#endif /* configUSE_NEWLIB_REENTRANT */

		#if ( portUSING_MPU_WRAPPERS == 1 )
			vPortReleaseTaskMPUSettings( &( pxTCB->xMPUSettings) );
		#endif

		#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
		{
			/* The task can only have been allocated dynamically - free both
			the stack and TCB. */
			vPortFreeAligned( pxTCB->pxStack );
			vPortFree( pxTCB );
		}
		#elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
		{
			/* The task could have been allocated statically or dynamically,
			so check what was statically provided before freeing anything. */
			if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
			{
				/* Both the stack and TCB were allocated dynamically - free
				both. */
				vPortFreeAligned( pxTCB->pxStack );
				vPortFree( pxTCB );
			}
			else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
			{
				/* Only the stack was statically allocated, so the TCB is the
				only memory that must be freed. */
				vPortFree( pxTCB );
			}
			else
			{
				/* Neither the stack nor the TCB were allocated dynamically,
				so nothing needs to be freed. */
				configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
				portCLEAN_UP_TCB( pxTCB );
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
	}

#endif /* INCLUDE_vTaskDelete */
3887 /*-----------------------------------------------------------*/
3889 static void prvResetNextTaskUnblockTime( void )
3893 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
3895 /* The new current delayed list is empty. Set
3896 xNextTaskUnblockTime to the maximum possible value so it is
3897 extremely unlikely that the
3898 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
3899 there is an item in the delayed list. */
3900 xNextTaskUnblockTime = portMAX_DELAY;
3904 /* The new current delayed list is not empty, get the value of
3905 the item at the head of the delayed list. This is the time at
3906 which the task at the head of the delayed list should be removed
3907 from the Blocked state. */
3908 ( pxTCB ) = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
3909 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xGenericListItem ) );
3912 /*-----------------------------------------------------------*/
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )

	/* Return the handle of the task running on the calling core. */
	TaskHandle_t xTaskGetCurrentTaskHandle( void )
	{
	TaskHandle_t xReturn;
	unsigned state;

		/* Mask interrupts so the core ID and the pxCurrentTCB read are
		consistent (a context switch between the two would be harmless for
		the pointer itself, but the core could change). */
		state = portENTER_CRITICAL_NESTED();
		xReturn = pxCurrentTCB[ xPortGetCoreID() ];
		portEXIT_CRITICAL_NESTED(state);

		return xReturn;
	}

	/* Return the handle of the task running on the given core, or NULL for
	an invalid core ID. */
	TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t cpuid )
	{
	TaskHandle_t xReturn=NULL;

		//Xtensa-specific: the pxCurrentTCB pointer is atomic so we shouldn't need a lock.
		/* cpuid is signed, so the lower bound must be checked as well as the
		upper bound; a negative value would otherwise index outside
		pxCurrentTCB[]. */
		if (cpuid >= 0 && cpuid < portNUM_PROCESSORS) {
			xReturn = pxCurrentTCB[ cpuid ];
		}

		return xReturn;
	}

#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
3942 /*-----------------------------------------------------------*/
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )

	/* Report the scheduler state as seen from the calling core:
	NOT_STARTED, RUNNING, or SUSPENDED. */
	BaseType_t xTaskGetSchedulerState( void )
	{
	BaseType_t xReturn;
	unsigned state;

		state = portENTER_CRITICAL_NESTED();
		if( xSchedulerRunning == pdFALSE )
		{
			xReturn = taskSCHEDULER_NOT_STARTED;
		}
		else
		{
			if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
			{
				xReturn = taskSCHEDULER_RUNNING;
			}
			else
			{
				xReturn = taskSCHEDULER_SUSPENDED;
			}
		}
		portEXIT_CRITICAL_NESTED(state);

		return xReturn;
	}

#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
3973 /*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

	/* Raise the priority of a mutex holder to that of the calling task so
	the holder cannot be starved by mid-priority tasks (priority
	inheritance). */
	void vTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
	{
	TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;

		taskENTER_CRITICAL( &xTickCountMutex );
		/* If the mutex was given back by an interrupt while the queue was
		locked then the mutex holder might now be NULL. */
		if( pxMutexHolder != NULL )
		{
			/* Inheritance is only required when the holder's priority is
			below that of the task attempting to take the mutex. */
			if( pxTCB->uxPriority < pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
			{
				taskENTER_CRITICAL( &xTaskQueueMutex );

				/* Adjust the holder's event list item to match its new
				priority - but only if the value is not currently being used
				for something else (e.g. event group bits). */
				if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
				{
					listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				/* If the holder is in the Ready state it must move to the
				ready list that matches its new priority. */
				if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxTCB->uxPriority ] ), &( pxTCB->xGenericListItem ) ) != pdFALSE )
				{
					if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
					{
						taskRESET_READY_PRIORITY( pxTCB->uxPriority );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					/* Inherit the priority before being moved into the new
					list. */
					pxTCB->uxPriority = pxCurrentTCB[ xPortGetCoreID() ]->uxPriority;
					prvReaddTaskToReadyList( pxTCB );
				}
				else
				{
					/* Just inherit the priority. */
					pxTCB->uxPriority = pxCurrentTCB[ xPortGetCoreID() ]->uxPriority;
				}
				taskEXIT_CRITICAL( &xTaskQueueMutex );

				traceTASK_PRIORITY_INHERIT( pxTCB, pxCurrentTCB[ xPortGetCoreID() ]->uxPriority );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		taskEXIT_CRITICAL( &xTickCountMutex );
	}

#endif /* configUSE_MUTEXES */
4043 /*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

	/* Undo priority inheritance when a mutex is returned.  Returns pdTRUE if
	the caller should perform a context switch. */
	BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
	{
	TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
	BaseType_t xReturn = pdFALSE;

		taskENTER_CRITICAL( &xTickCountMutex );
		if( pxMutexHolder != NULL )
		{
			configASSERT( pxTCB->uxMutexesHeld );
			( pxTCB->uxMutexesHeld )--;

			/* Has this task inherited a priority? */
			if( pxTCB->uxPriority != pxTCB->uxBasePriority )
			{
				/* Only disinherit if no other mutexes are held. */
				if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
				{
					taskENTER_CRITICAL( &xTaskQueueMutex );

					/* A task can only have an inherited priority if it holds
					the mutex, and a held mutex can only be given back by the
					holding task - which must therefore be running.  Remove it
					from its ready list before changing its priority. */
					if( uxListRemove( &( pxTCB->xGenericListItem ) ) == ( UBaseType_t ) 0 )
					{
						taskRESET_READY_PRIORITY( pxTCB->uxPriority );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					/* Disinherit the priority before adding the task back
					into the ready list. */
					traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
					pxTCB->uxPriority = pxTCB->uxBasePriority;

					/* Reset the event list item value.  It cannot be in use
					for any other purpose if this task is running, and it must
					be running to give back the mutex. */
					listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
					prvReaddTaskToReadyList( pxTCB );

					/* Return true to indicate a context switch is required.
					This matters in the corner case where multiple mutexes
					were held and released in a different order than they were
					taken: if no switch occurred when the first mutex was
					returned, one must occur when the last is returned,
					whether or not a task is waiting on it. */
					xReturn = pdTRUE;
					taskEXIT_CRITICAL( &xTaskQueueMutex );
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		taskEXIT_CRITICAL( &xTickCountMutex );

		return xReturn;
	}

#endif /* configUSE_MUTEXES */
4120 /*-----------------------------------------------------------*/
4122 /* For multicore, this assumes the vPortCPUAquireMutex is recursive, that is, it can be called multiple
4123 times and the release call will have to be called as many times for the mux to unlock. */
4125 /* Gotcha (which seems to be deliberate in FreeRTOS, according to
4126 http://www.freertos.org/FreeRTOS_Support_Forum_Archive/December_2012/freertos_PIC32_Bug_-_vTaskEnterCritical_6400806.html
4127 ) is that calling vTaskEnterCritical followed by vTaskExitCritical will leave the interrupts DISABLED when the scheduler
4128 is not running. Re-enabling the scheduler will re-enable the interrupts instead.
4130 For ESP32 FreeRTOS, vTaskEnterCritical implements both portENTER_CRITICAL and portENTER_CRITICAL_ISR.
#if ( portCRITICAL_NESTING_IN_TCB == 1 )

	#include "portmux_impl.h"

	/* Enter a critical section protected by the given spinlock.  On this SMP
	port the same routine backs both portENTER_CRITICAL and
	portENTER_CRITICAL_ISR, so it must be safe to call from ISR context.
	Interrupt state is captured BEFORE taking the mutex (the acquire may
	itself touch interrupts) and stored in the TCB only on the outermost
	nesting level. */
	#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
	void vTaskEnterCritical( portMUX_TYPE *mux, const char *function, int line )
	#else
	void vTaskEnterCritical( portMUX_TYPE *mux )
	#endif
	{
		BaseType_t oldInterruptLevel = 0;
		BaseType_t schedulerRunning = xSchedulerRunning;

		if( schedulerRunning != pdFALSE )
		{
			/* Interrupts may already be disabled (recursive call), but the
			level must be sampled before vPortCPUAcquireMutexIntsDisabled()
			because that call can also mess with interrupts.  Whether this is
			the outermost level is decided after the mutex is taken. */
			oldInterruptLevel = portENTER_CRITICAL_NESTED();
		}

		#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
		vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT, function, line );
		#else
		vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT );
		#endif

		if( schedulerRunning != pdFALSE )
		{
			TCB_t *tcb = pxCurrentTCB[ xPortGetCoreID() ];
			BaseType_t newNesting = tcb->uxCriticalNesting + 1;
			tcb->uxCriticalNesting = newNesting;

			if( newNesting == 1 )
			{
				/* Outermost entry: remember the interrupt level so the
				matching vTaskExitCritical() can restore it. */
				tcb->uxOldInterruptState = oldInterruptLevel;
			}

			/* The original ISR-context assert is relaxed on this port: since
			this routine also implements portENTER_CRITICAL_ISR it is legal to
			arrive here from an ISR.  Only assert on the first nesting level to
			avoid recursion if the assert handler itself enters a critical
			section. */
			if( newNesting == 1 )
			{
				portASSERT_IF_IN_ISR();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

#endif /* portCRITICAL_NESTING_IN_TCB */
4198 /*-----------------------------------------------------------*/
4202 For ESP32 FreeRTOS, vTaskExitCritical implements both portEXIT_CRITICAL and portEXIT_CRITICAL_ISR.
#if ( portCRITICAL_NESTING_IN_TCB == 1 )

	/* Leave a critical section previously entered with vTaskEnterCritical().
	Releases the spinlock first, then unwinds the per-TCB nesting count; the
	saved interrupt state is restored only when the count returns to zero. */
	#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
	void vTaskExitCritical( portMUX_TYPE *mux, const char *function, int line )
	#else
	void vTaskExitCritical( portMUX_TYPE *mux )
	#endif
	{
		#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
		vPortCPUReleaseMutexIntsDisabled( mux, function, line );
		#else
		vPortCPUReleaseMutexIntsDisabled( mux );
		#endif

		if( xSchedulerRunning != pdFALSE )
		{
			TCB_t *tcb = pxCurrentTCB[ xPortGetCoreID() ];
			BaseType_t nesting = tcb->uxCriticalNesting;

			if( nesting > 0 )
			{
				nesting--;
				tcb->uxCriticalNesting = nesting;

				if( nesting == 0 )
				{
					/* Outermost exit: restore the interrupt level captured on
					the matching outermost vTaskEnterCritical(). */
					portEXIT_CRITICAL_NESTED( tcb->uxOldInterruptState );
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

#endif /* portCRITICAL_NESTING_IN_TCB */
4247 /*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

	/* Copy a task name into pcBuffer, space-padded to a fixed column width of
	configMAX_TASK_NAME_LEN - 1 characters, and return a pointer to the new
	string terminator so the caller can keep appending.  pcBuffer must be at
	least configMAX_TASK_NAME_LEN bytes; task names are bounded by the same
	constant so the unbounded strcpy() cannot overflow. */
	static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
	{
	size_t x;

		/* Start by copying the entire string. */
		strcpy( pcBuffer, pcTaskName );

		/* Pad the end of the string with spaces to ensure columns line up
		when printed in tabular form. */
		for( x = strlen( pcBuffer ); x < ( configMAX_TASK_NAME_LEN - 1 ); x++ )
		{
			pcBuffer[ x ] = ' ';
		}

		/* Terminate and hand back the new end of string. */
		pcBuffer[ x ] = 0x00;
		return &( pcBuffer[ x ] );
	}

#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
4273 /*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

	/* Write a human-readable table of every task (name, state, priority,
	stack high water mark, task number) into pcWriteBuffer.

	pcWriteBuffer must be large enough for one padded line per task — the
	caller provides no size, so this remains inherently unbounded (sprintf).
	Provided as a convenience for the demos; production code should call
	uxTaskGetSystemState() directly.

	FIX(review): the array was previously sized from the live
	uxCurrentNumberOfTasks even though uxArraySize had just been snapshotted
	to guard against the count changing mid-call; the allocation now uses the
	same snapshot so the size and the fill loop always agree. */
	void vTaskList( char * pcWriteBuffer )
	{
	TaskStatus_t *pxTaskStatusArray;
	volatile UBaseType_t uxArraySize, x;
	char cStatus;

		/* Make sure the write buffer does not contain a string. */
		*pcWriteBuffer = 0x00;

		/* Take a snapshot of the number of tasks in case it changes while
		this function is executing. */
		uxArraySize = uxCurrentNumberOfTasks;

		/* Allocate an array index for each task.  NOTE! if
		configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
		equate to NULL.  Use the snapshot taken above so the allocation size
		matches the loop bound below. */
		pxTaskStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );

		if( pxTaskStatusArray != NULL )
		{
			/* Generate the (binary) data. */
			uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );

			/* Create a human readable table from the binary data. */
			for( x = 0; x < uxArraySize; x++ )
			{
				switch( pxTaskStatusArray[ x ].eCurrentState )
				{
					case eReady:		cStatus = tskREADY_CHAR;
										break;

					case eBlocked:		cStatus = tskBLOCKED_CHAR;
										break;

					case eSuspended:	cStatus = tskSUSPENDED_CHAR;
										break;

					case eDeleted:		cStatus = tskDELETED_CHAR;
										break;

					default:			/* Should not get here, but it is
										included to prevent static checking
										errors. */
										cStatus = 0x00;
										break;
				}

				/* Write the task name to the string, padding with spaces so
				it can be printed in tabular form more easily. */
				pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

				/* Write the rest of the string. */
				sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber );
				pcWriteBuffer += strlen( pcWriteBuffer );
			}

			/* Free the array again.  NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
			is 0 then vPortFree() will be #defined to nothing. */
			vPortFree( pxTaskStatusArray );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
4368 /*----------------------------------------------------------*/
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

	/* Write a human-readable table of per-task run time (absolute counter and
	percentage of total) into pcWriteBuffer.  pcWriteBuffer must be large
	enough for one line per task — no size is passed, so sprintf use remains
	unbounded by design.  Convenience function for the demos; production code
	should call uxTaskGetSystemState() directly.

	FIX(review): the array was previously sized from the live
	uxCurrentNumberOfTasks despite uxArraySize having just been snapshotted to
	guard against the count changing mid-call; the allocation now uses the
	snapshot so size and loop bound always agree. */
	void vTaskGetRunTimeStats( char *pcWriteBuffer )
	{
	TaskStatus_t *pxTaskStatusArray;
	volatile UBaseType_t uxArraySize, x;
	uint32_t ulTotalTime, ulStatsAsPercentage;

		#if( configUSE_TRACE_FACILITY != 1 )
		{
			#error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
		}
		#endif

		/* Make sure the write buffer does not contain a string. */
		*pcWriteBuffer = 0x00;

		/* Take a snapshot of the number of tasks in case it changes while
		this function is executing. */
		uxArraySize = uxCurrentNumberOfTasks;

		/* Allocate an array index for each task.  NOTE! If
		configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
		equate to NULL.  Use the snapshot taken above so the allocation size
		matches the fill loop. */
		pxTaskStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );

		if( pxTaskStatusArray != NULL )
		{
			/* Generate the (binary) data. */
			uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );

			/* For percentage calculations. */
			ulTotalTime /= 100UL;

			/* Avoid divide by zero errors. */
			if( ulTotalTime > 0 )
			{
				/* Create a human readable table from the binary data. */
				for( x = 0; x < uxArraySize; x++ )
				{
					/* What percentage of the total run time has the task
					used?  This will always be rounded down to the nearest
					integer.  ulTotalTime has already been divided by 100.
					The per-task counter is divided by the processor count
					because the total accumulates across all cores. */
					ulStatsAsPercentage = ( pxTaskStatusArray[ x ].ulRunTimeCounter / portNUM_PROCESSORS ) / ulTotalTime;

					/* Write the task name to the string, padding with spaces
					so it can be printed in tabular form more easily. */
					pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

					if( ulStatsAsPercentage > 0UL )
					{
						#ifdef portLU_PRINTF_SPECIFIER_REQUIRED
						{
							sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
						}
						#else
						{
							/* sizeof( int ) == sizeof( long ) so a smaller
							printf() library can be used. */
							sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage );
						}
						#endif
					}
					else
					{
						/* If the percentage is zero here then the task has
						consumed less than 1% of the total run time. */
						#ifdef portLU_PRINTF_SPECIFIER_REQUIRED
						{
							sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
						}
						#else
						{
							/* sizeof( int ) == sizeof( long ) so a smaller
							printf() library can be used. */
							sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter );
						}
						#endif
					}

					pcWriteBuffer += strlen( pcWriteBuffer );
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			/* Free the array again.  NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
			is 0 then vPortFree() will be #defined to nothing. */
			vPortFree( pxTaskStatusArray );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
4496 /*-----------------------------------------------------------*/
4498 TickType_t uxTaskResetEventItemValue( void )
4500 TickType_t uxReturn;
4501 taskENTER_CRITICAL(&xTaskQueueMutex);
4502 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
4504 /* Reset the event list item to its normal value - so it can be used with
4505 queues and semaphores. */
4506 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4507 taskEXIT_CRITICAL(&xTaskQueueMutex);
4511 /*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

	/* Bump the held-mutex count of the running task and return its TCB (as an
	opaque pointer) so the mutex can record its holder.  May return NULL when
	called before any task exists (xSemaphoreCreateMutex() before the
	scheduler has a current task). */
	void *pvTaskIncrementMutexHeldCount( void )
	{
	TCB_t *curTCB;

		/* If xSemaphoreCreateMutex() is called before any tasks have been
		created then pxCurrentTCB will be NULL. */
		taskENTER_CRITICAL(&xTaskQueueMutex);
		if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
		{
			( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
		}
		curTCB = pxCurrentTCB[ xPortGetCoreID() ];
		taskEXIT_CRITICAL(&xTaskQueueMutex);

		return curTCB;
	}

#endif /* configUSE_MUTEXES */
4533 /*-----------------------------------------------------------*/
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Counting-semaphore style "take" on the calling task's notification
	value.  Blocks for up to xTicksToWait while the value is zero; on exit
	either zeroes the value (xClearCountOnExit != pdFALSE) or decrements it.
	Returns the value as it was before being cleared/decremented. */
	uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
	{
	TickType_t xTimeToWake;
	uint32_t ulReturn;

		taskENTER_CRITICAL(&xTaskQueueMutex);
		{
			/* Only block if the notification count is not already non-zero. */
			if( pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue == 0UL )
			{
				/* Mark this task as waiting for a notification. */
				pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eWaitingNotification;

				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* The task is going to block.  First it must be removed
					from the ready list. */
					if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
					{
						/* The current task must be in a ready list, so there
						is no need to check, and the port reset macro can be
						called directly. */
						portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					#if ( INCLUDE_vTaskSuspend == 1 )
					{
						if( xTicksToWait == portMAX_DELAY )
						{
							/* Add the task to the suspended task list instead
							of a delayed task list to ensure the task is not
							woken by a timing event.  It will block
							indefinitely. */
							traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
							vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
						}
						else
						{
							/* Calculate the time at which the task should be
							woken if no notification events occur.  This may
							overflow but this doesn't matter, the scheduler
							will handle it. */
							xTimeToWake = xTickCount + xTicksToWait;
							prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
						}
					}
					#else /* INCLUDE_vTaskSuspend */
					{
						/* Calculate the time at which the task should be
						woken if the event does not occur.  Overflow is
						handled by the scheduler. */
						xTimeToWake = xTickCount + xTicksToWait;
						prvAddCurrentTaskToDelayedList( xTimeToWake );
					}
					#endif /* INCLUDE_vTaskSuspend */

					/* All ports are written to allow a yield in a critical
					section (some will yield immediately, others wait until
					the critical section exits) - but it is not something that
					application code should ever do. */
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		taskEXIT_CRITICAL(&xTaskQueueMutex);

		taskENTER_CRITICAL(&xTaskQueueMutex);
		{
			ulReturn = pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue;

			if( ulReturn != 0UL )
			{
				if( xClearCountOnExit != pdFALSE )
				{
					pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue = 0UL;
				}
				else
				{
					( pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue )--;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eNotWaitingNotification;
		}
		taskEXIT_CRITICAL(&xTaskQueueMutex);

		return ulReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
4644 /*-----------------------------------------------------------*/
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Wait for a direct-to-task notification.  Clears ulBitsToClearOnEntry
	bits before any wait, blocks for up to xTicksToWait if no notification is
	pending, and on a successful wake clears ulBitsToClearOnExit bits.  The
	notification value (pre-exit-clear) is optionally written through
	pulNotificationValue.  Returns pdTRUE if a notification was received,
	pdFALSE on timeout. */
	BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
	{
	TickType_t xTimeToWake;
	BaseType_t xReturn;

		taskENTER_CRITICAL(&xTaskQueueMutex);
		{
			/* Only block if a notification is not already pending. */
			if( pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState != eNotified )
			{
				/* Clear bits in the task's notification value as bits may get
				set by the notifying task or interrupt.  This can be used to
				clear the value to zero. */
				pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue &= ~ulBitsToClearOnEntry;

				/* Mark this task as waiting for a notification. */
				pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eWaitingNotification;

				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* The task is going to block.  First it must be removed
					from the ready list. */
					if( uxListRemove( &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) ) == ( UBaseType_t ) 0 )
					{
						/* The current task must be in a ready list, so there
						is no need to check, and the port reset macro can be
						called directly. */
						portRESET_READY_PRIORITY( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority, uxTopReadyPriority );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					#if ( INCLUDE_vTaskSuspend == 1 )
					{
						if( xTicksToWait == portMAX_DELAY )
						{
							/* Add the task to the suspended task list instead
							of a delayed task list to ensure it is not woken
							by a timing event.  It will block indefinitely. */
							traceMOVED_TASK_TO_SUSPENDED_LIST(pxCurrentTCB);
							vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[ xPortGetCoreID() ]->xGenericListItem ) );
						}
						else
						{
							/* Calculate the time at which the task should be
							woken if no notification events occur.  Overflow
							is handled by the scheduler. */
							xTimeToWake = xTickCount + xTicksToWait;
							prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake );
						}
					}
					#else /* INCLUDE_vTaskSuspend */
					{
						/* Calculate the time at which the task should be
						woken if the event does not occur.  Overflow is
						handled by the scheduler. */
						xTimeToWake = xTickCount + xTicksToWait;
						prvAddCurrentTaskToDelayedList( xTimeToWake );
					}
					#endif /* INCLUDE_vTaskSuspend */

					/* All ports are written to allow a yield in a critical
					section (some will yield immediately, others wait until
					the critical section exits) - but it is not something that
					application code should ever do. */
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		taskEXIT_CRITICAL(&xTaskQueueMutex);

		taskENTER_CRITICAL(&xTaskQueueMutex);
		{
			if( pulNotificationValue != NULL )
			{
				/* Output the current notification value, which may or may not
				have changed while the task was blocked. */
				*pulNotificationValue = pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue;
			}

			/* If eNotifyState is still eWaitingNotification then the task
			never received a notification and unblocked because of a timeout;
			otherwise a notification was pending on entry or arrived while
			waiting. */
			if( pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState == eWaitingNotification )
			{
				/* A notification was not received. */
				xReturn = pdFALSE;
			}
			else
			{
				/* A notification was already pending or a notification was
				received while the task was waiting. */
				pxCurrentTCB[ xPortGetCoreID() ]->ulNotifiedValue &= ~ulBitsToClearOnExit;
				xReturn = pdTRUE;
			}

			pxCurrentTCB[ xPortGetCoreID() ]->eNotifyState = eNotWaitingNotification;
		}
		taskEXIT_CRITICAL(&xTaskQueueMutex);

		return xReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
4766 /*-----------------------------------------------------------*/
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Send a direct-to-task notification, updating the target's notification
	value according to eAction, and unblock the target if it was waiting for a
	notification.  Returns pdPASS except for eSetValueWithoutOverwrite on a
	task that already had an unread notification, which returns pdFAIL. */
	BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction )
	{
	TCB_t * pxTCB;
	eNotifyValue eOriginalNotifyState;
	BaseType_t xReturn = pdPASS;

		configASSERT( xTaskToNotify );
		pxTCB = ( TCB_t * ) xTaskToNotify;

		taskENTER_CRITICAL(&xTaskQueueMutex);
		{
			eOriginalNotifyState = pxTCB->eNotifyState;

			pxTCB->eNotifyState = eNotified;

			switch( eAction )
			{
				case eSetBits	:
					pxTCB->ulNotifiedValue |= ulValue;
					break;

				case eIncrement	:
					( pxTCB->ulNotifiedValue )++;
					break;

				case eSetValueWithOverwrite :
					pxTCB->ulNotifiedValue = ulValue;
					break;

				case eSetValueWithoutOverwrite :
					if( eOriginalNotifyState != eNotified )
					{
						pxTCB->ulNotifiedValue = ulValue;
					}
					else
					{
						/* The value could not be written to the task. */
						xReturn = pdFAIL;
					}
					break;

				case eNoAction:
					/* The task is being notified without its notify value
					being updated. */
					break;
			}

			/* If the task is in the blocked state specifically to wait for a
			notification then unblock it now. */
			if( eOriginalNotifyState == eWaitingNotification )
			{
				( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
				prvAddTaskToReadyList( pxTCB );

				/* The task should not have been on an event list. */
				configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

				if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
				{
					/* The notified task has a priority above the currently
					executing task so a yield is required. */
					portYIELD_WITHIN_API();
				}
				else if ( pxTCB->xCoreID != xPortGetCoreID() )
				{
					/* The woken task is pinned to (or last ran on) the other
					core - let that core decide whether to preempt. */
					taskYIELD_OTHER_CORE(pxTCB->xCoreID, pxTCB->uxPriority);
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		taskEXIT_CRITICAL(&xTaskQueueMutex);

		return xReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
4854 /*-----------------------------------------------------------*/
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* ISR-safe variant of xTaskNotify().  Performs the same notification
	action; if the scheduler is suspended on this core the woken task is
	parked on the pending-ready list instead of the ready list.  When the
	woken task outranks the running task, *pxHigherPriorityTaskWoken (if
	provided) is set so the ISR can request a context switch on exit. */
	BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken )
	{
	TCB_t * pxTCB;
	eNotifyValue eOriginalNotifyState;
	BaseType_t xReturn = pdPASS;

		configASSERT( xTaskToNotify );

		pxTCB = ( TCB_t * ) xTaskToNotify;

		taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
		{
			eOriginalNotifyState = pxTCB->eNotifyState;

			pxTCB->eNotifyState = eNotified;

			switch( eAction )
			{
				case eSetBits	:
					pxTCB->ulNotifiedValue |= ulValue;
					break;

				case eIncrement	:
					( pxTCB->ulNotifiedValue )++;
					break;

				case eSetValueWithOverwrite :
					pxTCB->ulNotifiedValue = ulValue;
					break;

				case eSetValueWithoutOverwrite :
					if( eOriginalNotifyState != eNotified )
					{
						pxTCB->ulNotifiedValue = ulValue;
					}
					else
					{
						/* The value could not be written to the task. */
						xReturn = pdFAIL;
					}
					break;

				case eNoAction :
					/* The task is being notified without its notify value
					being updated. */
					break;
			}

			/* If the task is in the blocked state specifically to wait for a
			notification then unblock it now. */
			if( eOriginalNotifyState == eWaitingNotification )
			{
				/* The task should not have been on an event list. */
				configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

				if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
				{
					( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
					prvAddTaskToReadyList( pxTCB );
				}
				else
				{
					/* The delayed and ready lists cannot be accessed, so hold
					this task pending until the scheduler is resumed. */
					vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
				}

				if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
				{
					/* The notified task has a priority above the currently
					executing task so a yield is required. */
					if( pxHigherPriorityTaskWoken != NULL )
					{
						*pxHigherPriorityTaskWoken = pdTRUE;
					}
				}
				else if ( pxTCB->xCoreID != xPortGetCoreID() )
				{
					/* Wake belongs to the other core - let it decide. */
					taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);

		return xReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
4952 /*-----------------------------------------------------------*/
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* ISR-safe counting-semaphore style "give": increments the target task's
	notification value and unblocks the task if it was waiting.  Sets
	*pxHigherPriorityTaskWoken (if provided) when the woken task outranks the
	task running on this core. */
	void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
	{
	TCB_t * pxTCB;
	eNotifyValue eOriginalNotifyState;

		configASSERT( xTaskToNotify );

		pxTCB = ( TCB_t * ) xTaskToNotify;

		taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
		{
			eOriginalNotifyState = pxTCB->eNotifyState;
			pxTCB->eNotifyState = eNotified;

			/* 'Giving' is equivalent to incrementing a count in a counting
			semaphore. */
			( pxTCB->ulNotifiedValue )++;

			/* If the task is in the blocked state specifically to wait for a
			notification then unblock it now. */
			if( eOriginalNotifyState == eWaitingNotification )
			{
				/* The task should not have been on an event list. */
				configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

				if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
				{
					( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
					prvAddTaskToReadyList( pxTCB );
				}
				else
				{
					/* The delayed and ready lists cannot be accessed, so hold
					this task pending until the scheduler is resumed. */
					vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
				}

				if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
				{
					/* The notified task has a priority above the currently
					executing task so a yield is required. */
					if( pxHigherPriorityTaskWoken != NULL )
					{
						*pxHigherPriorityTaskWoken = pdTRUE;
					}
				}
				else if ( pxTCB->xCoreID != xPortGetCoreID() )
				{
					/* Wake belongs to the other core - let it decide. */
					taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
5018 #if ( configENABLE_TASK_SNAPSHOT == 1 )
5019 static void prvTaskGetSnapshot( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, TCB_t *pxTCB )
5021 if (pxTCB == NULL) {
5024 pxTaskSnapshotArray[ *uxTask ].pxTCB = pxTCB;
5025 pxTaskSnapshotArray[ *uxTask ].pxTopOfStack = (StackType_t *)pxTCB->pxTopOfStack;
5026 #if( portSTACK_GROWTH < 0 )
5028 pxTaskSnapshotArray[ *uxTask ].pxEndOfStack = pxTCB->pxEndOfStack;
5032 pxTaskSnapshotArray[ *uxTask ].pxEndOfStack = pxTCB->pxStack;
5038 static void prvTaskGetSnapshotsFromList( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, const UBaseType_t uxArraySize, List_t *pxList )
5040 TCB_t *pxNextTCB, *pxFirstTCB;
5042 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
5044 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );
5047 if( *uxTask >= uxArraySize )
5050 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
5051 prvTaskGetSnapshot( pxTaskSnapshotArray, uxTask, pxNextTCB );
5052 } while( pxNextTCB != pxFirstTCB );
5056 mtCOVERAGE_TEST_MARKER();
5060 UBaseType_t uxTaskGetSnapshotAll( TaskSnapshot_t * const pxTaskSnapshotArray, const UBaseType_t uxArraySize, UBaseType_t * const pxTcbSz )
5062 UBaseType_t uxTask = 0, i = 0;
5065 *pxTcbSz = sizeof(TCB_t);
5066 /* Fill in an TaskStatus_t structure with information on each
5067 task in the Ready state. */
5068 i = configMAX_PRIORITIES;
5072 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &( pxReadyTasksLists[ i ] ) );
5073 } while( i > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
5075 /* Fill in an TaskStatus_t structure with information on each
5076 task in the Blocked state. */
5077 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, ( List_t * ) pxDelayedTaskList );
5078 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, ( List_t * ) pxOverflowDelayedTaskList );
5079 for (i = 0; i < portNUM_PROCESSORS; i++) {
5080 if( uxTask >= uxArraySize )
5082 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &( xPendingReadyList[ i ] ) );
5085 #if( INCLUDE_vTaskDelete == 1 )
5087 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &xTasksWaitingTermination );
5091 #if ( INCLUDE_vTaskSuspend == 1 )
5093 prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &xSuspendedTaskList );
5101 #ifdef FREERTOS_MODULE_TEST
5102 #include "tasks_test_access_functions.h"