2 FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.
5 VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
7 This file is part of the FreeRTOS distribution.
9 FreeRTOS is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License (version 2) as published by the
11 Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
13 ***************************************************************************
14 >>! NOTE: The modification to the GPL is included to allow you to !<<
15 >>! distribute a combined work that includes FreeRTOS without being !<<
16 >>! obliged to provide the source code for proprietary components !<<
17 >>! outside of the FreeRTOS kernel. !<<
18 ***************************************************************************
20 FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
21 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
22 FOR A PARTICULAR PURPOSE. Full license text is available on the following
23 link: http://www.freertos.org/a00114.html
25 ***************************************************************************
27 * FreeRTOS provides completely free yet professionally developed, *
28 * robust, strictly quality controlled, supported, and cross *
29 * platform software that is more than just the market leader, it *
30 * is the industry's de facto standard. *
32 * Help yourself get started quickly while simultaneously helping *
33 * to support the FreeRTOS project by purchasing a FreeRTOS *
34 * tutorial book, reference manual, or both: *
35 * http://www.FreeRTOS.org/Documentation *
37 ***************************************************************************
39 http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
40 the FAQ page "My application does not run, what could be wrong?". Have you
41 defined configASSERT()?
43 http://www.FreeRTOS.org/support - In return for receiving this top quality
44 embedded software for free we request you assist our global community by
45 participating in the support forum.
47 http://www.FreeRTOS.org/training - Investing in training allows your team to
48 be as productive as possible as early as possible. Now you can receive
49 FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
50 Ltd, and the world's leading authority on the world's leading RTOS.
52 http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
53 including FreeRTOS+Trace - an indispensable productivity tool, a DOS
54 compatible FAT file system, and our tiny thread aware UDP/IP stack.
56 http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
57 Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
59 http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
60 Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
61 licenses offer ticketed support, indemnification and commercial middleware.
63 http://www.SafeRTOS.com - High Integrity Systems also provide a safety
64 engineered and independently SIL3 certified version for use in safety and
65 mission critical applications that require provable dependability.
73 ToDo: The multicore implementation of this uses taskENTER_CRITICAL etc. to ensure the
74 queue structures are not accessed concurrently by another processor or core. It would be
75 useful to allow ISRs to schedule work while task-level queue operations are in progress,
76 which means converting the taskENTER_CRITICAL usage into a lock plus a scheduler suspension instead.
82 #include "rom/ets_sys.h"
84 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
85 all the API functions to use the MPU wrappers. That should only be done when
86 task.h is included from an application file. */
87 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
93 #if ( configUSE_CO_ROUTINES == 1 )
97 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the
98 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
99 header files above, but not in this file, in order to generate the correct
100 privileged Vs unprivileged linkage and placement. */
101 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
103 /* When the Queue_t structure is used to represent a base queue its pcHead and
104 pcTail members are used as pointers into the queue storage area. When the
105 Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
106 not necessary, and the pcHead pointer is set to NULL to indicate that the
107 pcTail pointer actually points to the mutex holder (if any). Map alternative
108 names to the pcHead and pcTail structure members to ensure the readability of
109 the code is maintained despite this dual use of two structure members. An
110 alternative implementation would be to use a union, but use of a union is
111 against the coding standard (although an exception to the standard has been
112 permitted where the dual use also significantly changes the type of the
113 structure member). */
/* When a queue is re-purposed as a mutex, pcTail holds the owning task's handle. */
114 #define pxMutexHolder pcTail
/* pcHead doubles as the type marker: queueQUEUE_IS_MUTEX (NULL) flags a mutex. */
115 #define uxQueueType pcHead
116 #define queueQUEUE_IS_MUTEX NULL
118 /* Semaphores do not actually store or copy data, so have an item size of
120 #define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
/* Giving a mutex back must never block - use a zero block time. */
121 #define queueMUTEX_GIVE_BLOCK_TIME ( ( TickType_t ) 0U )
123 #if( configUSE_PREEMPTION == 0 )
124 /* If the cooperative scheduler is being used then a yield should not be
125 performed just because a higher priority task has been woken. */
126 #define queueYIELD_IF_USING_PREEMPTION()
/* Preemptive build: request a context switch from within an API function. */
128 #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
132  * Definition of the queue used by the scheduler.
133  * Items are queued by copy, not reference. See the following link for the
134  * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
136 typedef struct QueueDefinition
138 	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
139 	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
140 	int8_t *pcWriteTo;				/*< Points to the free next place in the storage area. */
142 	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
144 		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
145 		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
148 	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
149 	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */
151 	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
152 	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
153 	UBaseType_t uxItemSize;			/*< The size of each items that the queue will hold. */
155 	#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
156 		uint8_t ucStaticallyAllocated;	/*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
159 	#if ( configUSE_QUEUE_SETS == 1 )
160 		struct QueueDefinition *pxQueueSetContainer;
163 	#if ( configUSE_TRACE_FACILITY == 1 )
164 		UBaseType_t uxQueueNumber;
/* Per-queue spinlock taken by taskENTER_CRITICAL(&mux) throughout this file
   so the structure is safe against concurrent access from the other core. */
168 	portMUX_TYPE mux;		//Mutex required due to SMP
172 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
173 name below to enable the use of older kernel aware debuggers. */
174 typedef xQUEUE Queue_t;
176 #if __GNUC_PREREQ(4, 6)
/* StaticQueue_t (public opaque type) must exactly overlay Queue_t so statically
   allocated queues can be cast to Queue_t - checked at compile time here. */
177 _Static_assert(sizeof(StaticQueue_t) == sizeof(Queue_t), "StaticQueue_t != Queue_t");
181 /*-----------------------------------------------------------*/
184  * The queue registry is just a means for kernel aware debuggers to locate
185  * queue structures.  It has no other purpose so is an optional component.
187 #if ( configQUEUE_REGISTRY_SIZE > 0 )
189 	/* The type stored within the queue registry array.  This allows a name
190 	to be assigned to each queue making kernel aware debugging a little
191 	more user friendly. */
192 	typedef struct QUEUE_REGISTRY_ITEM
194 		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
195 		QueueHandle_t xHandle;
196 	} xQueueRegistryItem;
198 	/* The old xQueueRegistryItem name is maintained above then typedefed to the
199 	new xQueueRegistryItem name below to enable the use of older kernel aware
201 	typedef xQueueRegistryItem QueueRegistryItem_t;
203 	/* The queue registry is simply an array of QueueRegistryItem_t structures.
204 	The pcQueueName member of a structure being NULL is indicative of the
205 	array position being vacant. */
206 	QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
208 	//Need to add queue registry mutex to protect against simultaneous access
/* Spinlock guarding xQueueRegistry[] against concurrent access from both cores. */
209 	static portMUX_TYPE queue_registry_spinlock = portMUX_INITIALIZER_UNLOCKED;
211 #endif /* configQUEUE_REGISTRY_SIZE */
/* Forward declarations of the file-private helper functions used below. */
215  * Uses a critical section to determine if there is any data in a queue.
217  * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
219 static BaseType_t prvIsQueueEmpty( Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
222  * Uses a critical section to determine if there is any space in a queue.
224  * @return pdTRUE if there is no space, otherwise pdFALSE;
226 static BaseType_t prvIsQueueFull( Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
229  * Copies an item into the queue, either at the front of the queue or the
/* Returns pdTRUE if copying the item requires a yield (priority inheritance case). */
232 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
235  * Copies an item out of a queue.
237 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
239 #if ( configUSE_QUEUE_SETS == 1 )
241 	 * Checks to see if a queue is a member of a queue set, and if so, notifies
242 	 * the queue set that the queue contains data.
244 	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
248  * Called after a Queue_t structure has been allocated either statically or
249  * dynamically to fill in the structure's members.
251 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
254  * Mutexes are a special type of queue.  When a mutex is created, first the
255  * queue is created, then prvInitialiseMutex() is called to configure the queue
258 #if( configUSE_MUTEXES == 1 )
259 	static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
/* Resets a queue to its empty state.  xNewQueue is pdTRUE when called from
   queue creation (first-time initialisation: also initialises the SMP spinlock
   and the event lists) and pdFALSE when an existing queue is being reset. */
262 BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
264 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
266 	configASSERT( pxQueue );
268 	if ( xNewQueue == pdTRUE )
/* Brand-new queue: the spinlock must be initialised before it can be taken. */
270 		vPortCPUInitializeMutex(&pxQueue->mux);
272 	taskENTER_CRITICAL(&pxQueue->mux);
/* pcTail marks one byte past the storage area; the queue is empty when
   uxMessagesWaiting is zero and pcWriteTo is back at pcHead. */
274 		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
275 		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
276 		pxQueue->pcWriteTo = pxQueue->pcHead;
/* pcReadFrom starts at the LAST item slot because reads pre-increment. */
277 		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
279 		if( xNewQueue == pdFALSE )
281 			/* If there are tasks blocked waiting to read from the queue, then
282 			the tasks will remain blocked as after this function exits the queue
283 			will still be empty.  If there are tasks blocked waiting to write to
284 			the queue, then one should be unblocked as after this function exits
285 			it will be possible to write to it. */
286 			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
288 				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
/* The unblocked writer has a higher priority - switch to it (preemptive only). */
290 					queueYIELD_IF_USING_PREEMPTION();
294 					mtCOVERAGE_TEST_MARKER();
299 				mtCOVERAGE_TEST_MARKER();
304 			/* Ensure the event queues start in the correct state. */
305 			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
306 			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
309 	taskEXIT_CRITICAL(&pxQueue->mux);
311 	/* A value is returned for calling semantic consistency with previous
317 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
/* Creates a queue using caller-supplied memory: pxStaticQueue holds the queue
   control structure, pucQueueStorage (NULL when uxItemSize is 0) holds the
   item storage.  Returns the handle, which aliases pxStaticQueue. */
319 	QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
323 		configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
325 		/* The StaticQueue_t structure and the queue storage area must be
327 		configASSERT( pxStaticQueue != NULL );
329 		/* A queue storage area should be provided if the item size is not 0, and
330 		should not be provided if the item size is 0. */
331 		configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
332 		configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );
334 		#if( configASSERT_DEFINED == 1 )
336 			/* Sanity check that the size of the structure used to declare a
337 			variable of type StaticQueue_t or StaticSemaphore_t equals the size of
338 			the real queue and semaphore structures. */
339 			volatile size_t xSize = sizeof( StaticQueue_t );
340 			configASSERT( xSize == sizeof( Queue_t ) );
342 		#endif /* configASSERT_DEFINED */
344 		/* The address of a statically allocated queue was passed in, use it.
345 		The address of a statically allocated storage area was also passed in
346 		but is already set. */
347 		pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
349 		if( pxNewQueue != NULL )
351 			#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
353 				/* Queues can be allocated wither statically or dynamically, so
354 				note this queue was allocated statically in case the queue is
/* Marking the queue static prevents vQueueDelete from trying to free it. */
356 				pxNewQueue->ucStaticallyAllocated = pdTRUE;
358 			#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
360 			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
366 #endif /* configSUPPORT_STATIC_ALLOCATION */
367 /*-----------------------------------------------------------*/
369 #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
/* Creates a queue on the heap.  A single pvPortMalloc covers both the Queue_t
   control structure and the item storage area that immediately follows it. */
371 	QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
374 	size_t xQueueSizeInBytes;
375 	uint8_t *pucQueueStorage;
377 		configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
379 		if( uxItemSize == ( UBaseType_t ) 0 )
381 			/* There is not going to be a queue storage area. */
382 			xQueueSizeInBytes = ( size_t ) 0;
386 			/* Allocate enough space to hold the maximum number of items that
387 			can be in the queue at any time. */
/* NOTE(review): uxQueueLength * uxItemSize is not checked for overflow here -
   presumably bounded by configASSERTs at the call sites; confirm for large queues. */
388 			xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
391 		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );
393 		if( pxNewQueue != NULL )
395 			/* Jump past the queue structure to find the location of the queue
397 			pucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );
399 			#if( configSUPPORT_STATIC_ALLOCATION == 1 )
401 				/* Queues can be created either statically or dynamically, so
402 				note this task was created dynamically in case it is later
404 				pxNewQueue->ucStaticallyAllocated = pdFALSE;
406 			#endif /* configSUPPORT_STATIC_ALLOCATION */
408 			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
414 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
415 /*-----------------------------------------------------------*/
/* Fills in the members of an already-allocated Queue_t (static or dynamic),
   then resets it via xQueueGenericReset( ..., pdTRUE ). */
417 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
419 	/* Remove compiler warnings about unused parameters should
420 	configUSE_TRACE_FACILITY not be set to 1. */
421 	( void ) ucQueueType;
423 	if( uxItemSize == ( UBaseType_t ) 0 )
425 		/* No RAM was allocated for the queue storage area, but PC head cannot
426 		be set to NULL because NULL is used as a key to say the queue is used as
427 		a mutex.  Therefore just set pcHead to point to the queue as a benign
428 		value that is known to be within the memory map. */
429 		pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
433 		/* Set the head to the start of the queue storage area. */
434 		pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
437 	/* Initialise the queue members as described where the queue type is
439 	pxNewQueue->uxLength = uxQueueLength;
440 	pxNewQueue->uxItemSize = uxItemSize;
/* pdTRUE: first-time reset also initialises the spinlock and event lists. */
441 	( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
443 	#if ( configUSE_TRACE_FACILITY == 1 )
445 		pxNewQueue->ucQueueType = ucQueueType;
447 	#endif /* configUSE_TRACE_FACILITY */
449 	#if( configUSE_QUEUE_SETS == 1 )
451 		pxNewQueue->pxQueueSetContainer = NULL;
453 	#endif /* configUSE_QUEUE_SETS */
455 	traceQUEUE_CREATE( pxNewQueue );
457 /*-----------------------------------------------------------*/
459 #if( configUSE_MUTEXES == 1 )
/* Converts a freshly created generic queue into a mutex: marks the type,
   clears the holder/recursion state, then "gives" once so the mutex starts
   in the available state. */
461 	static void prvInitialiseMutex( Queue_t *pxNewQueue )
463 		if( pxNewQueue != NULL )
465 			/* The queue create function will set all the queue structure members
466 			correctly for a generic queue, but this function is creating a
467 			mutex.  Overwrite those members that need to be set differently -
468 			in particular the information required for priority inheritance. */
469 			pxNewQueue->pxMutexHolder = NULL;
470 			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;
472 			/* In case this is a recursive mutex. */
473 			pxNewQueue->u.uxRecursiveCallCount = 0;
475 			vPortCPUInitializeMutex(&pxNewQueue->mux);
477 			traceCREATE_MUTEX( pxNewQueue );
479 			/* Start with the semaphore in the expected state. */
480 			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
484 			traceCREATE_MUTEX_FAILED();
488 #endif /* configUSE_MUTEXES */
489 /*-----------------------------------------------------------*/
491 #if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
/* Dynamically allocates a mutex: a one-slot, zero-item-size queue that is
   then specialised by prvInitialiseMutex(). */
493 	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
496 	const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
498 		pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
/* prvInitialiseMutex tolerates NULL, so no separate failure check is needed. */
499 		prvInitialiseMutex( pxNewQueue );
504 #endif /* configUSE_MUTEXES */
505 /*-----------------------------------------------------------*/
507 #if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
/* Statically allocates a mutex in caller-supplied pxStaticQueue memory; the
   mutex is a one-slot, zero-item-size queue specialised by prvInitialiseMutex(). */
509 	QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
512 	const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
514 		/* Prevent compiler warnings about unused parameters if
515 		configUSE_TRACE_FACILITY does not equal 1. */
516 		( void ) ucQueueType;
518 		pxNewQueue = ( Queue_t * ) xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
/* prvInitialiseMutex tolerates NULL, so no separate failure check is needed. */
519 		prvInitialiseMutex( pxNewQueue );
524 #endif /* configUSE_MUTEXES */
525 /*-----------------------------------------------------------*/
527 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
/* Returns the handle of the task currently holding the mutex, or presumably
   NULL/garbage-free default when xSemaphore is not a mutex (the non-mutex
   branch is not visible in this extract). */
529 	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
531 	Queue_t * const pxQueue = ( Queue_t * ) xSemaphore;
534 		/* This function is called by xSemaphoreGetMutexHolder(), and should not
535 		be called directly.  Note:  This is a good way of determining if the
536 		calling task is the mutex holder, but not a good way of determining the
537 		identity of the mutex holder, as the holder may change between the
538 		following critical section exiting and the function returning. */
539 		taskENTER_CRITICAL(&pxQueue->mux);
/* uxQueueType aliases pcHead; NULL identifies a mutex (see queueQUEUE_IS_MUTEX). */
541 			if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
543 				pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
550 		taskEXIT_CRITICAL(&pxQueue->mux);
553 	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */
556 /*-----------------------------------------------------------*/
558 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
/* Releases one level of a recursive mutex.  The underlying queue send only
   happens when the recursion count unwinds to zero.  Fails (without giving)
   when the caller is not the current holder. */
560 	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
563 	Queue_t * const pxMutex = ( Queue_t * ) xMutex;
565 		configASSERT( pxMutex );
567 		/* If this is the task that holds the mutex then pxMutexHolder will not
568 		change outside of this task.  If this task does not hold the mutex then
569 		pxMutexHolder can never coincidentally equal the tasks handle, and as
570 		this is the only condition we are interested in it does not matter if
571 		pxMutexHolder is accessed simultaneously by another task.  Therefore no
572 		mutual exclusion is required to test the pxMutexHolder variable. */
573 		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
575 			traceGIVE_MUTEX_RECURSIVE( pxMutex );
577 			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
578 			the task handle, therefore no underflow check is required.  Also,
579 			uxRecursiveCallCount is only modified by the mutex holder, and as
580 			there can only be one, no mutual exclusion is required to modify the
581 			uxRecursiveCallCount member. */
582 			( pxMutex->u.uxRecursiveCallCount )--;
584 			/* Have we unwound the call count? */
585 			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
587 				/* Return the mutex.  This will automatically unblock any other
588 				task that might be waiting to access the mutex. */
/* queueMUTEX_GIVE_BLOCK_TIME is 0: the give must complete without blocking. */
589 				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
593 				mtCOVERAGE_TEST_MARKER();
600 			/* The mutex cannot be given because the calling task is not the
604 			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
610 #endif /* configUSE_RECURSIVE_MUTEXES */
611 /*-----------------------------------------------------------*/
613 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
/* Takes a recursive mutex.  If the caller already holds it the recursion
   count is simply incremented; otherwise the underlying queue receive is
   attempted (blocking up to xTicksToWait) and the count is started on success. */
615 	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
618 	Queue_t * const pxMutex = ( Queue_t * ) xMutex;
620 		configASSERT( pxMutex );
622 		/* Comments regarding mutual exclusion as per those within
623 		xQueueGiveMutexRecursive(). */
625 		traceTAKE_MUTEX_RECURSIVE( pxMutex );
627 		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
629 			( pxMutex->u.uxRecursiveCallCount )++;
/* Not the holder: pdFALSE means receive (take), not peek. */
634 			xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );
636 			/* pdPASS will only be returned if the mutex was successfully
637 			obtained.  The calling task may have entered the Blocked state
638 			before reaching here. */
639 			if( xReturn == pdPASS )
641 				( pxMutex->u.uxRecursiveCallCount )++;
645 				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
652 #endif /* configUSE_RECURSIVE_MUTEXES */
653 /*-----------------------------------------------------------*/
655 #if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
/* Creates a counting semaphore in caller-supplied memory: a queue of
   uxMaxCount zero-size items whose message count is preloaded to uxInitialCount. */
657 	QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
659 	QueueHandle_t xHandle;
661 		configASSERT( uxMaxCount != 0 );
662 		configASSERT( uxInitialCount <= uxMaxCount );
664 		xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
666 		if( xHandle != NULL )
/* Direct write is safe here: the semaphore is not yet visible to other tasks. */
668 			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
670 			traceCREATE_COUNTING_SEMAPHORE();
674 			traceCREATE_COUNTING_SEMAPHORE_FAILED();
680 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
681 /*-----------------------------------------------------------*/
683 #if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
/* Heap-allocated counterpart of xQueueCreateCountingSemaphoreStatic(): a queue
   of uxMaxCount zero-size items, message count preloaded to uxInitialCount. */
685 	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
687 	QueueHandle_t xHandle;
689 		configASSERT( uxMaxCount != 0 );
690 		configASSERT( uxInitialCount <= uxMaxCount );
692 		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
694 		if( xHandle != NULL )
/* Direct write is safe here: the semaphore is not yet visible to other tasks. */
696 			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
698 			traceCREATE_COUNTING_SEMAPHORE();
702 			traceCREATE_COUNTING_SEMAPHORE_FAILED();
/* Asserts on allocation failure when configASSERT is defined. */
705 		configASSERT( xHandle );
709 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
710 /*-----------------------------------------------------------*/
/* Posts an item to a queue (task context only - never from an ISR).
   pvItemToQueue: item copied into the queue (NULL only when uxItemSize is 0).
   xTicksToWait: ticks to block when the queue is full (0 = fail immediately).
   xCopyPosition: queueSEND_TO_BACK / queueSEND_TO_FRONT / queueOVERWRITE.
   Returns pdPASS on success or errQUEUE_FULL on timeout.
   SMP note: mutual exclusion is the per-queue spinlock pxQueue->mux; the
   function loops { try-send / set-or-check timeout / block } until it either
   copies the data or the timeout expires. */
712 BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
714 BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
716 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
718 	configASSERT( pxQueue );
719 	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* queueOVERWRITE is only meaningful for single-slot (mailbox) queues. */
720 	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
721 	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
723 		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
728 	/* This function relaxes the coding standard somewhat to allow return
729 	statements within the function itself.  This is done in the interest
730 	of execution time efficiency. */
733 		taskENTER_CRITICAL(&pxQueue->mux);
735 			/* Is there room on the queue now?  The running task must be
736 			the highest priority task wanting to access the queue.  If
737 			the head item in the queue is to be overwritten then it does
738 			not matter if the queue is full. */
739 			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
741 				traceQUEUE_SEND( pxQueue );
/* xYieldRequired is pdTRUE only in the out-of-order mutex-give case below. */
742 				xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
744 				#if ( configUSE_QUEUE_SETS == 1 )
746 					if( pxQueue->pxQueueSetContainer != NULL )
748 						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
750 							/* The queue is a member of a queue set, and posting
751 							to the queue set caused a higher priority task to
752 							unblock.  A context switch is required. */
753 							queueYIELD_IF_USING_PREEMPTION();
757 							mtCOVERAGE_TEST_MARKER();
762 						/* If there was a task waiting for data to arrive on the
763 						queue then unblock it now. */
764 						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
766 							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
768 								/* The unblocked task has a priority higher than
769 								our own so yield immediately.  Yes it is ok to
770 								do this from within the critical section - the
771 								kernel takes care of that. */
772 								queueYIELD_IF_USING_PREEMPTION();
776 								mtCOVERAGE_TEST_MARKER();
779 						else if( xYieldRequired != pdFALSE )
781 							/* This path is a special case that will only get
782 							executed if the task was holding multiple mutexes
783 							and the mutexes were given back in an order that is
784 							different to that in which they were taken. */
785 							queueYIELD_IF_USING_PREEMPTION();
789 							mtCOVERAGE_TEST_MARKER();
793 				#else /* configUSE_QUEUE_SETS */
795 					/* If there was a task waiting for data to arrive on the
796 					queue then unblock it now. */
797 					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
799 						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
801 							/* The unblocked task has a priority higher than
802 							our own so yield immediately.  Yes it is ok to do
803 							this from within the critical section - the kernel
804 							takes care of that. */
805 							queueYIELD_IF_USING_PREEMPTION();
809 							mtCOVERAGE_TEST_MARKER();
812 					else if( xYieldRequired != pdFALSE )
814 						/* This path is a special case that will only get
815 						executed if the task was holding multiple mutexes and
816 						the mutexes were given back in an order that is
817 						different to that in which they were taken. */
818 						queueYIELD_IF_USING_PREEMPTION();
822 						mtCOVERAGE_TEST_MARKER();
825 				#endif /* configUSE_QUEUE_SETS */
/* Success path: data copied and any waiter woken. */
827 				taskEXIT_CRITICAL(&pxQueue->mux);
/* Queue was full - decide whether to fail now or arm/continue the timeout. */
832 				if( xTicksToWait == ( TickType_t ) 0 )
834 					/* The queue was full and no block time is specified (or
835 					the block time has expired) so leave now. */
836 					taskEXIT_CRITICAL(&pxQueue->mux);
838 					/* Return to the original privilege level before exiting
840 					traceQUEUE_SEND_FAILED( pxQueue );
841 					return errQUEUE_FULL;
843 				else if( xEntryTimeSet == pdFALSE )
845 					/* The queue was full and a block time was specified so
846 					configure the timeout structure. */
847 					vTaskSetTimeOutState( &xTimeOut );
848 					xEntryTimeSet = pdTRUE;
852 					/* Entry time was already set. */
853 					mtCOVERAGE_TEST_MARKER();
857 		taskEXIT_CRITICAL(&pxQueue->mux);
859 		/* Interrupts and other tasks can send to and receive from the queue
860 		now the critical section has been exited. */
862 		taskENTER_CRITICAL(&pxQueue->mux);
864 			/* Update the timeout state to see if it has expired yet. */
865 			if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
/* Re-check fullness: the queue may have drained while unlocked above. */
867 				if( prvIsQueueFull( pxQueue ) != pdFALSE )
869 					traceBLOCKING_ON_QUEUE_SEND( pxQueue );
870 					vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
873 					/* Resuming the scheduler will move tasks from the pending
874 					ready list into the ready list - so it is feasible that this
875 					task is already in a ready list before it yields - in which
876 					case the yield will not cause a context switch unless there
877 					is also a higher priority task in the pending ready list. */
878 					taskEXIT_CRITICAL(&pxQueue->mux);
879 					portYIELD_WITHIN_API();
/* Space appeared while unlocked - loop back and retry the send. */
884 					taskEXIT_CRITICAL(&pxQueue->mux);
889 				/* The timeout has expired. */
890 				taskEXIT_CRITICAL(&pxQueue->mux);
892 				/* Return to the original privilege level before exiting the
894 				traceQUEUE_SEND_FAILED( pxQueue );
895 				return errQUEUE_FULL;
899 /*-----------------------------------------------------------*/
901 #if ( configUSE_ALTERNATIVE_API == 1 )
/* Deprecated "alternative API" variant of xQueueGenericSend: the same
   try-send / timeout / block loop, but yields by explicitly leaving and
   re-entering the critical section rather than relying on the kernel. */
903 	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
905 	BaseType_t xEntryTimeSet = pdFALSE;
907 	Queue_t * const pxQueue = ( Queue_t * ) xQueue;
909 		configASSERT( pxQueue );
910 		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
914 			taskENTER_CRITICAL(&pxQueue->mux);
916 				/* Is there room on the queue now?  To be running we must be
917 				the highest priority task wanting to access the queue. */
918 				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
920 					traceQUEUE_SEND( pxQueue );
/* NOTE(review): unlike xQueueGenericSend, the return value (yield-required
   flag) of prvCopyDataToQueue is discarded here - verify intentional. */
921 					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
923 					/* If there was a task waiting for data to arrive on the
924 					queue then unblock it now. */
925 					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
927 						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
929 							/* The unblocked task has a priority higher than
930 							our own so yield immediately. */
/* Leave the spinlock before yielding, retake it afterwards. */
931 							taskEXIT_CRITICAL(&pxQueue->mux);
932 							portYIELD_WITHIN_API();
933 							taskENTER_CRITICAL(&pxQueue->mux);
937 							mtCOVERAGE_TEST_MARKER();
942 						mtCOVERAGE_TEST_MARKER();
945 					taskEXIT_CRITICAL(&pxQueue->mux);
/* Queue full: fail immediately, or arm the timeout on the first pass. */
950 					if( xTicksToWait == ( TickType_t ) 0 )
952 						taskEXIT_CRITICAL(&pxQueue->mux);
953 						return errQUEUE_FULL;
955 					else if( xEntryTimeSet == pdFALSE )
957 						vTaskSetTimeOutState( &xTimeOut );
958 						xEntryTimeSet = pdTRUE;
962 			taskEXIT_CRITICAL(&pxQueue->mux);
964 			taskENTER_CRITICAL(&pxQueue->mux);
966 				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
968 					if( prvIsQueueFull( pxQueue ) != pdFALSE )
970 						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
971 						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
972 						taskEXIT_CRITICAL(&pxQueue->mux);
973 						portYIELD_WITHIN_API();
974 						taskENTER_CRITICAL(&pxQueue->mux);
978 						mtCOVERAGE_TEST_MARKER();
983 					taskEXIT_CRITICAL(&pxQueue->mux);
984 					traceQUEUE_SEND_FAILED( pxQueue );
985 					return errQUEUE_FULL;
988 			taskEXIT_CRITICAL(&pxQueue->mux);
992 #endif /* configUSE_ALTERNATIVE_API */
993 /*-----------------------------------------------------------*/
#if ( configUSE_ALTERNATIVE_API == 1 )

	/*
	 * Alternative-API version of the generic queue receive.  Fix: the
	 * previous version still used the upstream taskENTER_CRITICAL()/
	 * taskEXIT_CRITICAL() calls with no argument, whereas this port's
	 * critical section macros take the queue's mux (compare
	 * xQueueAltGenericSend() above and every other function in this file),
	 * and it yielded via portYIELD_WITHIN_API() without first releasing
	 * that mux.  The function is now ported consistently: the whole
	 * attempt runs under taskENTER_CRITICAL( &pxQueue->mux ), which is
	 * released around every portYIELD_WITHIN_API() call.
	 *
	 * If xJustPeeking is pdFALSE the item is removed (and, for a mutex
	 * type queue, this task is recorded as the mutex holder); otherwise
	 * the read position is restored so the item stays on the queue.
	 * Returns pdPASS when an item was received, or errQUEUE_EMPTY if the
	 * queue stayed empty for xTicksToWait ticks.
	 */
	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		/* A NULL buffer is only legal for zero-item-size queues. */
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
		UNTESTED_FUNCTION();

		for( ;; )
		{
			taskENTER_CRITICAL( &pxQueue->mux );
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif /* configUSE_MUTEXES */

						/* A slot has been freed - unblock the highest priority
						task waiting to send, if there is one. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
							{
								/* The unblocked task has a higher priority -
								yield with the queue mux released, exactly as
								xQueueAltGenericSend() does. */
								taskEXIT_CRITICAL( &pxQueue->mux );
								portYIELD_WITHIN_API();
								taskENTER_CRITICAL( &pxQueue->mux );
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* The data is not being removed, so reset our read
						position. */
						pxQueue->u.pcReadFrom = pcOriginalReadPosition;

						/* The data is being left in the queue, so see if there are
						any other tasks waiting for the data. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority than this task. */
								taskEXIT_CRITICAL( &pxQueue->mux );
								portYIELD_WITHIN_API();
								taskENTER_CRITICAL( &pxQueue->mux );
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}

					taskEXIT_CRITICAL( &pxQueue->mux );
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						/* Empty and no block time - fail immediately. */
						taskEXIT_CRITICAL( &pxQueue->mux );
						traceQUEUE_RECEIVE_FAILED( pxQueue );
						return errQUEUE_EMPTY;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						/* First pass with a non-zero block time - capture the
						entry time so the timeout can be tracked. */
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL( &pxQueue->mux );

			taskENTER_CRITICAL( &pxQueue->mux );
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Raise the mutex holder to this task's priority
								while we wait, so the mutex is returned sooner. */
								vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif /* configUSE_MUTEXES */

						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
						taskEXIT_CRITICAL( &pxQueue->mux );
						portYIELD_WITHIN_API();
						taskENTER_CRITICAL( &pxQueue->mux );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* The timeout expired with the queue still empty. */
					taskEXIT_CRITICAL( &pxQueue->mux );
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL( &pxQueue->mux );
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
1148 /*-----------------------------------------------------------*/
/* Post an item to a queue from an ISR.  Never blocks; if posting unblocks a
task with a priority above that of the interrupted task, pdTRUE is written
through pxHigherPriorityTaskWoken (when non-NULL) so the ISR can request a
context switch on exit.  Fails with errQUEUE_FULL when no space is free and
xCopyPosition is not queueOVERWRITE.  (xReturn's declaration and the success
assignment fall between the excerpted lines.) */
1150 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
1153 UBaseType_t uxSavedInterruptStatus;
1154 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1156 configASSERT( pxQueue );
/* A NULL item is only legal for zero-item-size (semaphore style) queues. */
1157 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* queueOVERWRITE is only meaningful on single-slot queues. */
1158 configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
1160 /* RTOS ports that support interrupt nesting have the concept of a maximum
1161 system call (or maximum API call) interrupt priority. Interrupts that are
1162 above the maximum system call priority are kept permanently enabled, even
1163 when the RTOS kernel is in a critical section, but cannot make any calls to
1164 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1165 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1166 failure if a FreeRTOS API function is called from an interrupt that has been
1167 assigned a priority above the configured maximum system call priority.
1168 Only FreeRTOS functions that end in FromISR can be called from interrupts
1169 that have been assigned a priority at or (logically) below the maximum
1170 system call interrupt priority. FreeRTOS maintains a separate interrupt
1171 safe API to ensure interrupt entry is as fast and as simple as possible.
1172 More information (albeit Cortex-M specific) is provided on the following
1173 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1174 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1176 /* Similar to xQueueGenericSend, except without blocking if there is no room
1177 in the queue. Also don't directly wake a task that was blocked on a queue
1178 read, instead return a flag to say whether a context switch is required or
1179 not (i.e. has a task with a higher priority than us been woken by this
1181 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1183 taskENTER_CRITICAL_ISR(&pxQueue->mux);
/* Room free, or an overwrite send (which replaces the existing item)? */
1184 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
1186 traceQUEUE_SEND_FROM_ISR( pxQueue );
1188 /* A task can only have an inherited priority if it is a mutex
1189 holder - and if there is a mutex holder then the mutex cannot be
1190 given from an ISR. Therefore, unlike the xQueueGenericGive()
1191 function, there is no need to determine the need for priority
1192 disinheritance here or to clear the mutex holder TCB member. */
1193 ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
/* When the queue belongs to a queue set, notify the set instead of the
queue's own receive waiters. */
1195 #if ( configUSE_QUEUE_SETS == 1 )
1197 if( pxQueue->pxQueueSetContainer != NULL )
1199 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
1201 /* The queue is a member of a queue set, and posting
1202 to the queue set caused a higher priority task to
1203 unblock. A context switch is required. */
1204 if( pxHigherPriorityTaskWoken != NULL )
1206 *pxHigherPriorityTaskWoken = pdTRUE;
1210 mtCOVERAGE_TEST_MARKER();
1215 mtCOVERAGE_TEST_MARKER();
1220 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1222 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1224 /* The task waiting has a higher priority so
1225 record that a context switch is required. */
1226 if( pxHigherPriorityTaskWoken != NULL )
1228 *pxHigherPriorityTaskWoken = pdTRUE;
1232 mtCOVERAGE_TEST_MARKER();
1237 mtCOVERAGE_TEST_MARKER();
1242 mtCOVERAGE_TEST_MARKER();
1246 #else /* configUSE_QUEUE_SETS */
1248 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1250 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1252 /* The task waiting has a higher priority so record that a
1253 context switch is required. */
1254 if( pxHigherPriorityTaskWoken != NULL )
1256 *pxHigherPriorityTaskWoken = pdTRUE;
1260 mtCOVERAGE_TEST_MARKER();
1265 mtCOVERAGE_TEST_MARKER();
1270 mtCOVERAGE_TEST_MARKER();
1273 #endif /* configUSE_QUEUE_SETS */
/* Queue full and not an overwrite - report the failure. */
1278 traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
1279 xReturn = errQUEUE_FULL;
1281 taskEXIT_CRITICAL_ISR(&pxQueue->mux);
1283 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1287 /*-----------------------------------------------------------*/
/* Give a semaphore from an ISR.  This is the zero-item-size fast path of
xQueueGenericSendFromISR(): no data is copied, uxMessagesWaiting is simply
incremented.  pdTRUE is written through pxHigherPriorityTaskWoken (when
non-NULL) if a higher priority task was unblocked.  NOTE(review): later
upstream FreeRTOS versions also assert that this is not used on a mutex
that is currently held (priority disinheritance cannot run in an ISR) -
consider back-porting that assert; verify against upstream. */
1289 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
1292 UBaseType_t uxSavedInterruptStatus;
1293 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1295 configASSERT( pxQueue );
1297 /* xQueueGenericSendFromISR() should be used in the item size is not 0. */
1298 configASSERT( pxQueue->uxItemSize == 0 );
1300 /* RTOS ports that support interrupt nesting have the concept of a maximum
1301 system call (or maximum API call) interrupt priority. Interrupts that are
1302 above the maximum system call priority are kept permanently enabled, even
1303 when the RTOS kernel is in a critical section, but cannot make any calls to
1304 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1305 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1306 failure if a FreeRTOS API function is called from an interrupt that has been
1307 assigned a priority above the configured maximum system call priority.
1308 Only FreeRTOS functions that end in FromISR can be called from interrupts
1309 that have been assigned a priority at or (logically) below the maximum
1310 system call interrupt priority. FreeRTOS maintains a separate interrupt
1311 safe API to ensure interrupt entry is as fast and as simple as possible.
1312 More information (albeit Cortex-M specific) is provided on the following
1313 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1314 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1316 /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
1317 item size is 0. Don't directly wake a task that was blocked on a queue
1318 read, instead return a flag to say whether a context switch is required or
1319 not (i.e. has a task with a higher priority than us been woken by this
1321 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1323 taskENTER_CRITICAL_ISR(&pxQueue->mux);
1324 /* When the queue is used to implement a semaphore no data is ever
1325 moved through the queue but it is still valid to see if the queue 'has
1327 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
1329 traceQUEUE_SEND_FROM_ISR( pxQueue );
1331 /* A task can only have an inherited priority if it is a mutex
1332 holder - and if there is a mutex holder then the mutex cannot be
1333 given from an ISR. Therefore, unlike the xQueueGenericGive()
1334 function, there is no need to determine the need for priority
1335 disinheritance here or to clear the mutex holder TCB member. */
/* The whole "send": just bump the semaphore count. */
1337 ++( pxQueue->uxMessagesWaiting );
1339 #if ( configUSE_QUEUE_SETS == 1 )
1341 if( pxQueue->pxQueueSetContainer != NULL )
1343 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
1345 /* The semaphore is a member of a queue set, and
1346 posting to the queue set caused a higher priority
1347 task to unblock. A context switch is required. */
1348 if( pxHigherPriorityTaskWoken != NULL )
1350 *pxHigherPriorityTaskWoken = pdTRUE;
1354 mtCOVERAGE_TEST_MARKER();
1359 mtCOVERAGE_TEST_MARKER();
1364 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1366 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1368 /* The task waiting has a higher priority so
1369 record that a context switch is required. */
1370 if( pxHigherPriorityTaskWoken != NULL )
1372 *pxHigherPriorityTaskWoken = pdTRUE;
1376 mtCOVERAGE_TEST_MARKER();
1381 mtCOVERAGE_TEST_MARKER();
1386 mtCOVERAGE_TEST_MARKER();
1390 #else /* configUSE_QUEUE_SETS */
1392 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1394 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1396 /* The task waiting has a higher priority so record that a
1397 context switch is required. */
1398 if( pxHigherPriorityTaskWoken != NULL )
1400 *pxHigherPriorityTaskWoken = pdTRUE;
1404 mtCOVERAGE_TEST_MARKER();
1409 mtCOVERAGE_TEST_MARKER();
1414 mtCOVERAGE_TEST_MARKER();
1417 #endif /* configUSE_QUEUE_SETS */
/* The semaphore count was already at its maximum. */
1423 traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
1424 xReturn = errQUEUE_FULL;
1426 taskEXIT_CRITICAL_ISR(&pxQueue->mux);
1428 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1432 /*-----------------------------------------------------------*/
/* The fully-featured task-level receive/peek.  Blocks for up to
xTicksToWait ticks while the queue is empty.  When xJustPeeking is pdFALSE
the item is removed (and, for a mutex type queue, this task becomes the
holder); when pdTRUE the read position is restored so the item stays
queued.  Returns errQUEUE_EMPTY on timeout.  (The pdPASS success return and
some declarations fall between the excerpted lines.) */
1434 BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
1436 BaseType_t xEntryTimeSet = pdFALSE;
1438 int8_t *pcOriginalReadPosition;
1439 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1441 configASSERT( pxQueue );
/* A NULL buffer is only legal for zero-item-size queues. */
1442 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* Blocking with the scheduler suspended would never return. */
1443 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1445 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1449 /* This function relaxes the coding standard somewhat to allow return
1450 statements within the function itself. This is done in the interest
1451 of execution time efficiency. */
1455 taskENTER_CRITICAL(&pxQueue->mux);
1457 /* Is there data in the queue now? To be running the calling task
1458 must be the highest priority task wanting to access the queue. */
1459 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1461 /* Remember the read position in case the queue is only being
1463 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
1465 prvCopyDataFromQueue( pxQueue, pvBuffer );
1467 if( xJustPeeking == pdFALSE )
1469 traceQUEUE_RECEIVE( pxQueue );
1471 /* Actually removing data, not just peeking. */
1472 --( pxQueue->uxMessagesWaiting );
1474 #if ( configUSE_MUTEXES == 1 )
1476 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1478 /* Record the information required to implement
1479 priority inheritance should it become necessary. */
1480 pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
1484 mtCOVERAGE_TEST_MARKER();
1487 #endif /* configUSE_MUTEXES */
/* A slot was freed - wake the highest priority task waiting to send. */
1489 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1491 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
1493 queueYIELD_IF_USING_PREEMPTION();
1497 mtCOVERAGE_TEST_MARKER();
1502 mtCOVERAGE_TEST_MARKER();
1507 traceQUEUE_PEEK( pxQueue );
1509 /* The data is not being removed, so reset the read
1511 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
1513 /* The data is being left in the queue, so see if there are
1514 any other tasks waiting for the data. */
1515 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1517 /* Tasks that are removed from the event list will get added to
1518 the pending ready list as the scheduler is still suspended. */
1519 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1521 /* The task waiting has a higher priority than this task. */
1522 queueYIELD_IF_USING_PREEMPTION();
1526 mtCOVERAGE_TEST_MARKER();
1531 mtCOVERAGE_TEST_MARKER();
1535 taskEXIT_CRITICAL(&pxQueue->mux);
/* Queue empty: either fail now or prepare/maintain the block timeout. */
1540 if( xTicksToWait == ( TickType_t ) 0 )
1542 /* The queue was empty and no block time is specified (or
1543 the block time has expired) so leave now. */
1544 traceQUEUE_RECEIVE_FAILED( pxQueue );
1545 taskEXIT_CRITICAL(&pxQueue->mux);
1546 return errQUEUE_EMPTY;
1548 else if( xEntryTimeSet == pdFALSE )
1550 /* The queue was empty and a block time was specified so
1551 configure the timeout structure. */
1552 vTaskSetTimeOutState( &xTimeOut );
1553 xEntryTimeSet = pdTRUE;
1557 /* Entry time was already set. */
1558 mtCOVERAGE_TEST_MARKER();
1562 taskEXIT_CRITICAL(&pxQueue->mux);
1564 /* Interrupts and other tasks can send to and receive from the queue
1565 now the critical section has been exited. */
1567 taskENTER_CRITICAL(&pxQueue->mux);
1569 /* Update the timeout state to see if it has expired yet. */
1570 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1572 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1574 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
/* Waiting on a mutex: raise the holder to this task's priority so
the mutex is returned sooner. */
1576 #if ( configUSE_MUTEXES == 1 )
1578 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1580 vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
1584 mtCOVERAGE_TEST_MARKER();
1589 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1590 taskEXIT_CRITICAL(&pxQueue->mux);
1591 portYIELD_WITHIN_API();
1596 taskEXIT_CRITICAL(&pxQueue->mux);
/* Timed out with the queue still empty. */
1601 taskEXIT_CRITICAL(&pxQueue->mux);
1602 traceQUEUE_RECEIVE_FAILED( pxQueue );
1603 return errQUEUE_EMPTY;
1607 /*-----------------------------------------------------------*/
/* Receive an item from a queue inside an ISR.  Never blocks: fails
immediately when the queue is empty.  If removing the item unblocks a
higher priority task waiting to send, pdTRUE is written through
pxHigherPriorityTaskWoken (when non-NULL).  (xReturn handling falls between
the excerpted lines.) */
1609 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
1612 UBaseType_t uxSavedInterruptStatus;
1613 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1615 configASSERT( pxQueue );
/* A NULL buffer is only legal for zero-item-size queues. */
1616 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1618 /* RTOS ports that support interrupt nesting have the concept of a maximum
1619 system call (or maximum API call) interrupt priority. Interrupts that are
1620 above the maximum system call priority are kept permanently enabled, even
1621 when the RTOS kernel is in a critical section, but cannot make any calls to
1622 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1623 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1624 failure if a FreeRTOS API function is called from an interrupt that has been
1625 assigned a priority above the configured maximum system call priority.
1626 Only FreeRTOS functions that end in FromISR can be called from interrupts
1627 that have been assigned a priority at or (logically) below the maximum
1628 system call interrupt priority. FreeRTOS maintains a separate interrupt
1629 safe API to ensure interrupt entry is as fast and as simple as possible.
1630 More information (albeit Cortex-M specific) is provided on the following
1631 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1632 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1634 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1636 taskENTER_CRITICAL_ISR(&pxQueue->mux);
1637 /* Cannot block in an ISR, so check there is data available. */
1638 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1640 traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
1642 prvCopyDataFromQueue( pxQueue, pvBuffer );
1643 --( pxQueue->uxMessagesWaiting );
/* A slot was freed - see if a task blocked on sending can now run. */
1645 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1647 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1649 /* The task waiting has a higher priority than us so
1650 force a context switch. */
1651 if( pxHigherPriorityTaskWoken != NULL )
1653 *pxHigherPriorityTaskWoken = pdTRUE;
1657 mtCOVERAGE_TEST_MARKER();
1662 mtCOVERAGE_TEST_MARKER();
1667 mtCOVERAGE_TEST_MARKER();
/* Queue empty - nothing to receive. */
1675 traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
1677 taskEXIT_CRITICAL_ISR(&pxQueue->mux);
1679 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1683 /*-----------------------------------------------------------*/
/* Copy the next item out of a queue from an ISR without removing it: the
read position is saved and restored around the copy, and uxMessagesWaiting
is untouched, so no waiting sender can be unblocked by a peek.  Fails when
the queue is empty.  Not valid on semaphores (zero item size). */
1685 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
1688 UBaseType_t uxSavedInterruptStatus;
1689 int8_t *pcOriginalReadPosition;
1690 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1692 configASSERT( pxQueue );
/* A NULL buffer is only legal for zero-item-size queues. */
1693 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1694 configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
1696 /* RTOS ports that support interrupt nesting have the concept of a maximum
1697 system call (or maximum API call) interrupt priority. Interrupts that are
1698 above the maximum system call priority are kept permanently enabled, even
1699 when the RTOS kernel is in a critical section, but cannot make any calls to
1700 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1701 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1702 failure if a FreeRTOS API function is called from an interrupt that has been
1703 assigned a priority above the configured maximum system call priority.
1704 Only FreeRTOS functions that end in FromISR can be called from interrupts
1705 that have been assigned a priority at or (logically) below the maximum
1706 system call interrupt priority. FreeRTOS maintains a separate interrupt
1707 safe API to ensure interrupt entry is as fast and as simple as possible.
1708 More information (albeit Cortex-M specific) is provided on the following
1709 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1710 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1712 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1713 taskENTER_CRITICAL_ISR(&pxQueue->mux);
1715 /* Cannot block in an ISR, so check there is data available. */
1716 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1718 traceQUEUE_PEEK_FROM_ISR( pxQueue );
1720 /* Remember the read position so it can be reset as nothing is
1721 actually being removed from the queue. */
1722 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
1723 prvCopyDataFromQueue( pxQueue, pvBuffer );
1724 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
/* Queue empty - nothing to peek. */
1731 traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
1734 taskEXIT_CRITICAL_ISR(&pxQueue->mux);
1735 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1739 /*-----------------------------------------------------------*/
1741 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
1743 UBaseType_t uxReturn;
1744 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1746 configASSERT( xQueue );
1748 taskENTER_CRITICAL(&pxQueue->mux);
1750 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
1752 taskEXIT_CRITICAL(&pxQueue->mux);
1755 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1756 /*-----------------------------------------------------------*/
1758 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
1760 UBaseType_t uxReturn;
1763 pxQueue = ( Queue_t * ) xQueue;
1764 configASSERT( pxQueue );
1766 taskENTER_CRITICAL(&pxQueue->mux);
1768 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
1770 taskEXIT_CRITICAL(&pxQueue->mux);
1773 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1774 /*-----------------------------------------------------------*/
1776 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
1778 UBaseType_t uxReturn;
1779 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1781 configASSERT( xQueue );
1783 taskENTER_CRITICAL_ISR(&pxQueue->mux);
1784 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
1785 taskEXIT_CRITICAL_ISR(&pxQueue->mux);
1788 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1789 /*-----------------------------------------------------------*/
/* Delete a queue: remove it from the queue registry (when the registry is
enabled) and free its storage when - and only when - it was dynamically
allocated.  The caller must ensure no task is blocked on, or still using,
the queue. */
1791 void vQueueDelete( QueueHandle_t xQueue )
1793 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1795 configASSERT( pxQueue );
1797 traceQUEUE_DELETE( pxQueue );
1798 #if ( configQUEUE_REGISTRY_SIZE > 0 )
1800 vQueueUnregisterQueue( pxQueue );
/* Only dynamic allocation possible - the memory is always freed. */
1804 #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
1806 /* The queue can only have been allocated dynamically - free it
1808 vPortFree( pxQueue );
/* Both allocation schemes possible - consult the flag stored at
creation time before freeing. */
1810 #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
1812 /* The queue could have been allocated statically or dynamically, so
1813 check before attempting to free the memory. */
1814 if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
1816 vPortFree( pxQueue );
1820 mtCOVERAGE_TEST_MARKER();
1825 /* The queue must have been statically allocated, so is not going to be
1826 deleted. Avoid compiler warnings about the unused parameter. */
1829 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
1831 /*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the trace-facility number assigned to the queue (see
	vQueueSetQueueNumber() below). */
	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		return pxQueue->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
1841 /*-----------------------------------------------------------*/
1843 #if ( configUSE_TRACE_FACILITY == 1 )
1845 void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
1847 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
1850 #endif /* configUSE_TRACE_FACILITY */
1851 /*-----------------------------------------------------------*/
1853 #if ( configUSE_TRACE_FACILITY == 1 )
1855 uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
1857 return ( ( Queue_t * ) xQueue )->ucQueueType;
1860 #endif /* configUSE_TRACE_FACILITY */
1861 /*-----------------------------------------------------------*/
1863 //This routine assumes the queue has already been locked.
/* Copy an item into the queue at the front, back, or over the existing
item (queueOVERWRITE).  For zero-item-size queues nothing is copied; if
the queue is a mutex the "send" releases it, and xReturn then reports
whether xTaskPriorityDisinherit() says a yield is needed.  Always
increments uxMessagesWaiting (after first decrementing it for an
overwrite, so the net count is unchanged in that case). */
1864 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
1866 BaseType_t xReturn = pdFALSE;
1868 if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
1870 #if ( configUSE_MUTEXES == 1 )
1872 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1874 /* The mutex is no longer being held. */
1875 xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
1876 pxQueue->pxMutexHolder = NULL;
1880 mtCOVERAGE_TEST_MARKER();
1883 #endif /* configUSE_MUTEXES */
/* Send to back: copy at pcWriteTo, then advance and wrap the pointer. */
1885 else if( xPosition == queueSEND_TO_BACK )
1887 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
1888 pxQueue->pcWriteTo += pxQueue->uxItemSize;
1889 if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
1891 pxQueue->pcWriteTo = pxQueue->pcHead;
1895 mtCOVERAGE_TEST_MARKER();
/* Send to front (or overwrite): copy at pcReadFrom, then retreat and
wrap the pointer. */
1900 ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1901 pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
1902 if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
1904 pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
1908 mtCOVERAGE_TEST_MARKER();
1911 if( xPosition == queueOVERWRITE )
1913 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1915 /* An item is not being added but overwritten, so subtract
1916 one from the recorded number of items in the queue so when
1917 one is added again below the number of recorded items remains
1919 --( pxQueue->uxMessagesWaiting );
1923 mtCOVERAGE_TEST_MARKER();
1928 mtCOVERAGE_TEST_MARKER();
1932 ++( pxQueue->uxMessagesWaiting );
1936 /*-----------------------------------------------------------*/
/* Copy the next item out of the queue into pvBuffer.  The read pointer is
advanced (with wrap-around at pcTail) BEFORE the copy, i.e. u.pcReadFrom
points at the item most recently read out.  For zero-item-size queues this
is a no-op.  Callers that only peek save and restore u.pcReadFrom around
this call. */
1938 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
1940 if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
1942 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
1943 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
1945 pxQueue->u.pcReadFrom = pxQueue->pcHead;
1949 mtCOVERAGE_TEST_MARKER();
1951 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
1955 /*-----------------------------------------------------------*/
/* Return pdTRUE when the queue holds no items.  Performs a single
lock-free read of uxMessagesWaiting (see the comment below); callers that
need the answer to stay valid hold the queue's critical section. */
1957 static BaseType_t prvIsQueueEmpty( Queue_t *pxQueue )
1961 //No lock needed: we read a base type.
1963 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
1975 /*-----------------------------------------------------------*/
/* ISR-safe query: pdTRUE when the queue currently holds no items.  The
count is read inside an ISR critical section on the queue's mux. */
1977 BaseType_t xQueueIsQueueEmptyFromISR( QueueHandle_t xQueue )
1980 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1982 configASSERT( xQueue );
1983 taskENTER_CRITICAL_ISR(&pxQueue->mux);
1984 if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
1992 taskEXIT_CRITICAL_ISR(&pxQueue->mux);
1995 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
1996 /*-----------------------------------------------------------*/
/* Return pdTRUE when the queue has no free slots.  Unlike
prvIsQueueEmpty() above, the count is compared against uxLength inside an
ISR-form critical section on the queue's mux. */
1998 static BaseType_t prvIsQueueFull( Queue_t *pxQueue )
2002 taskENTER_CRITICAL_ISR(&pxQueue->mux);
2004 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2013 taskEXIT_CRITICAL_ISR(&pxQueue->mux);
/* ISR-safe query: pdTRUE when the queue is at capacity.  The comparison
is made inside an ISR critical section on the queue's mux. */
2019 BaseType_t xQueueIsQueueFullFromISR( QueueHandle_t xQueue )
2022 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2024 configASSERT( xQueue );
2025 taskENTER_CRITICAL_ISR(&pxQueue->mux);
2026 if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
2034 taskEXIT_CRITICAL_ISR(&pxQueue->mux);
2037 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2038 /*-----------------------------------------------------------*/
2040 #if ( configUSE_CO_ROUTINES == 1 )

/* Co-routine version of a queue send.  Co-routines cannot block inside a
function, so when the queue is full and a block time is given this adds
the co-routine to the queue's send event list and returns errQUEUE_BLOCKED
- the co-routine scheduler performs the actual blocking.  Critical
sections here are plain interrupt disables, not the queue mux. */
2042 BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
2045 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2047 UNTESTED_FUNCTION();
2048 /* If the queue is already full we may have to block. A critical section
2049 is required to prevent an interrupt removing something from the queue
2050 between the check to see if the queue is full and blocking on the queue. */
2051 portDISABLE_INTERRUPTS();
2053 if( prvIsQueueFull( pxQueue ) != pdFALSE )
2055 /* The queue is full - do we want to block or just leave without
2057 if( xTicksToWait > ( TickType_t ) 0 )
2059 /* As this is called from a coroutine we cannot block directly, but
2060 return indicating that we need to block. */
2061 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
2062 portENABLE_INTERRUPTS();
2063 return errQUEUE_BLOCKED;
2067 portENABLE_INTERRUPTS();
2068 return errQUEUE_FULL;
2072 portENABLE_INTERRUPTS();
/* Re-check under a fresh interrupt disable: the queue state may have
changed while interrupts were briefly enabled above. */
2074 portDISABLE_INTERRUPTS();
2076 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2078 /* There is room in the queue, copy the data into the queue. */
2079 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2082 /* Were any co-routines waiting for data to become available? */
2083 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2085 /* In this instance the co-routine could be placed directly
2086 into the ready list as we are within a critical section.
2087 Instead the same pending ready list mechanism is used as if
2088 the event were caused from within an interrupt. */
2089 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2091 /* The co-routine waiting has a higher priority so record
2092 that a yield might be appropriate. */
2093 xReturn = errQUEUE_YIELD;
2097 mtCOVERAGE_TEST_MARKER();
2102 mtCOVERAGE_TEST_MARKER();
2107 xReturn = errQUEUE_FULL;
2110 portENABLE_INTERRUPTS();
2115 #endif /* configUSE_CO_ROUTINES */
2116 /*-----------------------------------------------------------*/
2118 #if ( configUSE_CO_ROUTINES == 1 )

/* Co-routine version of a queue receive.  As with xQueueCRSend() the
co-routine cannot block here: with the queue empty and a non-zero block
time it is added to the receive event list and errQUEUE_BLOCKED is
returned for the co-routine scheduler to act on. */
2120 BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
2123 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2125 /* If the queue is already empty we may have to block. A critical section
2126 is required to prevent an interrupt adding something to the queue
2127 between the check to see if the queue is empty and blocking on the queue. */
2128 portDISABLE_INTERRUPTS();
2130 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2132 /* There are no messages in the queue, do we want to block or just
2133 leave with nothing? */
2134 if( xTicksToWait > ( TickType_t ) 0 )
2136 /* As this is a co-routine we cannot block directly, but return
2137 indicating that we need to block. */
2138 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
2139 portENABLE_INTERRUPTS();
2140 return errQUEUE_BLOCKED;
2144 portENABLE_INTERRUPTS();
/* NOTE(review): errQUEUE_FULL on an EMPTY queue matches upstream
FreeRTOS; both error macros map to the same value in projdefs.h, so
verify against projdefs.h before "correcting" this to errQUEUE_EMPTY. */
2145 return errQUEUE_FULL;
2150 mtCOVERAGE_TEST_MARKER();
2153 portENABLE_INTERRUPTS();
/* Re-check under a fresh interrupt disable: an interrupt may have added
an item while interrupts were briefly enabled above. */
2155 portDISABLE_INTERRUPTS();
2157 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2159 /* Data is available from the queue. */
2160 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
2161 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
2163 pxQueue->u.pcReadFrom = pxQueue->pcHead;
2167 mtCOVERAGE_TEST_MARKER();
2169 --( pxQueue->uxMessagesWaiting );
2170 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2174 /* Were any co-routines waiting for space to become available? */
2175 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2177 /* In this instance the co-routine could be placed directly
2178 into the ready list as we are within a critical section.
2179 Instead the same pending ready list mechanism is used as if
2180 the event were caused from within an interrupt. */
2181 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2183 xReturn = errQUEUE_YIELD;
2187 mtCOVERAGE_TEST_MARKER();
2192 mtCOVERAGE_TEST_MARKER();
2200 portENABLE_INTERRUPTS();
2205 #endif /* configUSE_CO_ROUTINES */
2206 /*-----------------------------------------------------------*/
2208 #if ( configUSE_CO_ROUTINES == 1 )

/* Send an item to a queue from an ISR, for use with co-routines only.
xCoRoutinePreviouslyWoken carries state across multiple calls from the same
ISR so that at most one co-routine is woken per interrupt.  The (possibly
updated) woken flag is returned for the caller to pass into subsequent calls.
If the queue is full the item is silently dropped - an ISR cannot block. */
2210 BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
2212 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2214 /* Cannot block within an ISR so if there is no space on the queue then
2215 exit without doing anything. */
2216 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2218 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2220 /* We only want to wake one co-routine per ISR, so check that a
2221 co-routine has not already been woken. */
2222 if( xCoRoutinePreviouslyWoken == pdFALSE )
2224 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2226 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
/* NOTE(review): the "return pdTRUE" taken when a co-routine is
woken here lies outside this excerpt - confirm in the full source. */
2232 mtCOVERAGE_TEST_MARKER();
2237 mtCOVERAGE_TEST_MARKER();
2242 mtCOVERAGE_TEST_MARKER();
2247 mtCOVERAGE_TEST_MARKER();
2250 return xCoRoutinePreviouslyWoken;
2253 #endif /* configUSE_CO_ROUTINES */
2254 /*-----------------------------------------------------------*/
2256 #if ( configUSE_CO_ROUTINES == 1 )

/* Receive an item from a queue from an ISR, for use with co-routines only.
*pxCoRoutineWoken is an in/out flag used to ensure only one co-routine is
woken across repeated calls from the same ISR; it is set to pdTRUE when a
higher-priority co-routine waiting to send is moved to the pending-ready
list.  If the queue is empty the function leaves without copying anything. */
2258 BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
2261 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2263 /* We cannot block from an ISR, so check there is data available. If
2264 not then just leave without doing anything. */
2265 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2267 /* Copy the data from the queue. */
/* Advance the read pointer first, wrapping at the end of the queue
storage area, then copy the item out. */
2268 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
2269 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
2271 pxQueue->u.pcReadFrom = pxQueue->pcHead;
2275 mtCOVERAGE_TEST_MARKER();
2277 --( pxQueue->uxMessagesWaiting );
2278 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
/* Only wake a co-routine if one has not already been woken by an
earlier call from this same ISR. */
2280 if( ( *pxCoRoutineWoken ) == pdFALSE )
2282 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2284 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2286 *pxCoRoutineWoken = pdTRUE;
2290 mtCOVERAGE_TEST_MARKER();
2295 mtCOVERAGE_TEST_MARKER();
2300 mtCOVERAGE_TEST_MARKER();
2313 #endif /* configUSE_CO_ROUTINES */
2314 /*-----------------------------------------------------------*/
2316 #if ( configQUEUE_REGISTRY_SIZE > 0 )

/* Add a queue handle / name pair to the first free slot of the debug queue
registry (xQueueRegistry).  The registry is purely a debugging aid; pcQueueName
is stored by pointer, so the caller's string must remain valid for the lifetime
of the registry entry.  If no free slot exists the call silently does nothing.
NOTE: this port (ESP-IDF style) guards the registry with a dedicated spinlock,
queue_registry_spinlock, rather than a plain critical section. */
2318 void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2322 portENTER_CRITICAL(&queue_registry_spinlock);
2323 /* See if there is an empty space in the registry. A NULL name denotes a free slot. */
2325 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
2327 if( xQueueRegistry[ ux ].pcQueueName == NULL )
2329 /* Store the information on this queue. */
2330 xQueueRegistry[ ux ].pcQueueName = pcQueueName;
2331 xQueueRegistry[ ux ].xHandle = xQueue;
2333 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
2338 mtCOVERAGE_TEST_MARKER();
2341 portEXIT_CRITICAL(&queue_registry_spinlock);
2344 #endif /* configQUEUE_REGISTRY_SIZE */
2345 /*-----------------------------------------------------------*/
2347 #if ( configQUEUE_REGISTRY_SIZE > 0 )

/* Look up the name under which xQueue was registered with
vQueueAddToRegistry().  Returns the stored name pointer, or NULL when the
handle is not present in the registry.  The full registry is scanned even
after a match is found (no early exit is visible in this excerpt), under
the protection of queue_registry_spinlock. */
2349 //This function is backported from FreeRTOS v9.0.0
2350 const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2353 const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2355 portENTER_CRITICAL(&queue_registry_spinlock);
2356 /* Note there is nothing here to protect against another task adding or
2357 removing entries from the registry while it is being searched. */
2358 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
2360 if( xQueueRegistry[ ux ].xHandle == xQueue )
2362 pcReturn = xQueueRegistry[ ux ].pcQueueName;
2367 mtCOVERAGE_TEST_MARKER();
2370 portEXIT_CRITICAL(&queue_registry_spinlock);
2375 #endif /* configQUEUE_REGISTRY_SIZE */
2376 /*-----------------------------------------------------------*/
2378 #if ( configQUEUE_REGISTRY_SIZE > 0 )

/* Remove xQueue from the debug queue registry by clearing the name pointer
of its slot (a NULL name marks a slot as free - see vQueueAddToRegistry).
Does nothing if the handle is not found.  Note only pcQueueName is cleared;
the stale xHandle value remains in the slot, which is harmless because free
slots are identified solely by a NULL name. */
2380 void vQueueUnregisterQueue( QueueHandle_t xQueue )
2384 portENTER_CRITICAL(&queue_registry_spinlock);
2385 /* See if the handle of the queue being unregistered is actually in the registry. */
2387 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
2389 if( xQueueRegistry[ ux ].xHandle == xQueue )
2391 /* Set the name to NULL to show that this slot if free again. */
2392 xQueueRegistry[ ux ].pcQueueName = NULL;
2397 mtCOVERAGE_TEST_MARKER();
2400 portEXIT_CRITICAL(&queue_registry_spinlock);
2402 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2404 #endif /* configQUEUE_REGISTRY_SIZE */
2405 /*-----------------------------------------------------------*/
2407 #if ( configUSE_TIMERS == 1 )

/* Kernel-internal helper (used by the timer task): if the queue is empty,
place the calling task on the queue's "waiting to receive" event list for up
to xTicksToWait ticks.  It does not itself block - the actual block happens
when the scheduler is next unlocked.  The per-queue spinlock (pxQueue->mux,
an ESP-IDF SMP modification) is taken around the check-and-wait sequence. */
2409 void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
2411 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2413 /* This function should not be called by application code hence the
2414 'Restricted' in its name. It is not part of the public API. It is
2415 designed for use by kernel code, and has special calling requirements.
2416 It can result in vListInsert() being called on a list that can only
2417 possibly ever have one item in it, so the list will be fast, but even
2418 so it should be called with the scheduler locked and not from a critical section. */
2421 /* Only do anything if there are no messages in the queue. This function
2422 will not actually cause the task to block, just place it on a blocked
2423 list. It will not block until the scheduler is unlocked - at which
2424 time a yield will be performed. */
2425 taskENTER_CRITICAL(&pxQueue->mux);
2426 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
2428 /* There is nothing in the queue, block for the specified period. */
2429 vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
2433 mtCOVERAGE_TEST_MARKER();
2435 taskEXIT_CRITICAL(&pxQueue->mux);
2438 #endif /* configUSE_TIMERS */
2439 /*-----------------------------------------------------------*/
2441 #if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

/* Create a queue set.  A queue set is implemented as an ordinary queue whose
items are Queue_t* handles (queueQUEUE_TYPE_SET): each member queue/semaphore
posts its own handle into the set when it receives data.  uxEventQueueLength
must be at least the sum of the lengths of the queues/semaphores that will be
added to the set.  NOTE(review): the return statement is outside this excerpt;
pxQueue is NULL on allocation failure per xQueueGenericCreate - confirm it is
returned unchanged. */
2443 QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
2445 QueueSetHandle_t pxQueue;
2447 pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
2452 #endif /* configUSE_QUEUE_SETS */
2453 /*-----------------------------------------------------------*/
2455 #if ( configUSE_QUEUE_SETS == 1 )

/* Add a queue or semaphore to a queue set.  Fails (the pdFAIL path lies in
lines outside this excerpt) when the member already belongs to a set, or when
it currently holds items - adding a non-empty member would leave the set out
of sync with its members.  The member's own spinlock (ESP-IDF SMP mux) guards
the check-and-assign of pxQueueSetContainer. */
2457 BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
2461 taskENTER_CRITICAL(&(((Queue_t * )xQueueOrSemaphore)->mux));
2463 if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
2465 /* Cannot add a queue/semaphore to more than one queue set. */
2468 else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
2470 /* Cannot add a queue/semaphore to a queue set if there are already
2471 items in the queue/semaphore. */
2476 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
2480 taskEXIT_CRITICAL(&(((Queue_t * )xQueueOrSemaphore)->mux));
2485 #endif /* configUSE_QUEUE_SETS */
2486 /*-----------------------------------------------------------*/
2488 #if ( configUSE_QUEUE_SETS == 1 )

/* Remove a queue or semaphore from a queue set.  Fails (pdFAIL paths lie in
lines outside this excerpt) when the member is not actually in xQueueSet, or
when it is not empty - removing a non-empty member would leave stale handle
events inside the set.  On success pxQueueSetContainer is cleared under the
member's own spinlock. */
2490 BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
2493 Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
2495 if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
2497 /* The queue was not a member of the set. */
2500 else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
2502 /* It is dangerous to remove a queue from a set when the queue is
2503 not empty because the queue set will still hold pending events for the queue. */
2509 taskENTER_CRITICAL(&(pxQueueOrSemaphore->mux));
2511 /* The queue is no longer contained in the set. */
2512 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
2514 taskEXIT_CRITICAL(&(pxQueueOrSemaphore->mux));
2519 } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
2521 #endif /* configUSE_QUEUE_SETS */
2522 /*-----------------------------------------------------------*/
2524 #if ( configUSE_QUEUE_SETS == 1 )

/* Block for up to xTicksToWait on a queue set and return the handle of a
member queue/semaphore that contains data, or NULL on timeout.  This is a
thin wrapper: the set is itself a queue of Queue_t* handles, so a plain
receive (non-peeking, pdFALSE) yields the next ready member's handle. */
2526 QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
2528 QueueSetMemberHandle_t xReturn = NULL;
2530 ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
2534 #endif /* configUSE_QUEUE_SETS */
2535 /*-----------------------------------------------------------*/
2537 #if ( configUSE_QUEUE_SETS == 1 )

/* ISR-safe variant of xQueueSelectFromSet: return the handle of a member
queue/semaphore that contains data, or NULL when the set is empty.  Cannot
block; the pxHigherPriorityTaskWoken argument of the underlying receive is
deliberately NULL. */
2539 QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
2541 QueueSetMemberHandle_t xReturn = NULL;
2543 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
2547 #endif /* configUSE_QUEUE_SETS */
2548 /*-----------------------------------------------------------*/
2550 #if ( configUSE_QUEUE_SETS == 1 )

/* Post the handle of pxQueue (a member queue/semaphore that has just received
data) into its containing queue set, and wake any task blocked on the set.
Returns pdTRUE (via xReturn) when a woken task has higher priority and a
context switch should be requested; pdFALSE otherwise. */
2552 static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
2554 Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
2555 BaseType_t xReturn = pdFALSE;
/*
2558 * This function is called with a Queue's / Semaphore's spinlock already
2559 * acquired. Acquiring the Queue set's spinlock is still necessary.
*/
2562 configASSERT( pxQueueSetContainer );
2564 //Acquire the Queue set's spinlock
2565 portENTER_CRITICAL(&(pxQueueSetContainer->mux));
/* The set should have been sized to hold one event per member slot, so
running out of space here indicates a configuration error. */
2566 configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
2568 if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
2570 traceQUEUE_SEND( pxQueueSetContainer );
2571 /* The data copied is the handle of the queue that contains data. */
2572 xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
2574 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
2576 if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
2578 /* The task waiting has a higher priority */
/* NOTE(review): the "xReturn = pdTRUE" taken on this branch lies
outside this excerpt - confirm in the full source. */
2583 mtCOVERAGE_TEST_MARKER();
2588 mtCOVERAGE_TEST_MARKER();
2593 mtCOVERAGE_TEST_MARKER();
2596 //Release the Queue set's spinlock
2597 portEXIT_CRITICAL(&(pxQueueSetContainer->mux));
2602 #endif /* configUSE_QUEUE_SETS */