Updated revision that uses TI's MQTT and FreeRTOS.

Dependencies: mbed client server

Fork of cc3100_Test_mqtt_CM3 by David Fletcher




queue.c

/*
    FreeRTOS V8.2.1 - Copyright (C) 2015 Real Time Engineers Ltd.
    All rights reserved

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

    ***************************************************************************
    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
    >>!   distribute a combined work that includes FreeRTOS without being   !<<
    >>!   obliged to provide the source code for proprietary components     !<<
    >>!   outside of the FreeRTOS kernel.                                   !<<
    ***************************************************************************

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *                                                                       *
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that is more than just the market leader, it     *
     *    is the industry's de facto standard.                               *
     *                                                                       *
     *    Help yourself get started quickly while simultaneously helping     *
     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
     *    tutorial book, reference manual, or both:                          *
     *    http://www.FreeRTOS.org/Documentation                              *
     *                                                                       *
    ***************************************************************************

    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
    the FAQ page "My application does not run, what could be wrong?".  Have you
    defined configASSERT()?

    http://www.FreeRTOS.org/support - In return for receiving this top quality
    embedded software for free we request you assist our global community by
    participating in the support forum.

    http://www.FreeRTOS.org/training - Investing in training allows your team to
    be as productive as possible as early as possible.  Now you can receive
    FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
    Ltd, and the world's leading authority on the world's leading RTOS.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and commercial middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.

    1 tab == 4 spaces!
*/

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */


/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED                   ( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED          ( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                   pcTail
#define uxQueueType                     pcHead
#define queueQUEUE_IS_MUTEX             NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME       ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile BaseType_t xRxLock;    /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile BaseType_t xTxLock;    /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to the
    new QueueRegistryItem_t name below to enable the use of older kernel aware
    debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding items to or removing items from the queue, but
 * does prevent an ISR from removing tasks from the queue event lists.  If an
 * ISR finds a queue is locked it will instead increment the appropriate queue
 * lock count to indicate that a task may require unblocking.  When the queue
 * is unlocked these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                             \
    taskENTER_CRITICAL();                                   \
    {                                                       \
        if( ( pxQueue )->xRxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
        if( ( pxQueue )->xTxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
    }                                                       \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->xRxLock = queueUNLOCKED;
        pxQueue->xTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
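/* Editor's example (not part of the original file): applications normally
   reach xQueueGenericReset() through the xQueueReset() macro in queue.h,
   which passes pdFALSE for xNewQueue.  A minimal sketch, assuming a queue
   created elsewhere; illustrative only, so it is compiled out. */
#if 0
void vExampleResetQueue( QueueHandle_t xQueue )
{
    /* Empties the queue; any tasks blocked waiting to send get a chance to
    run again because space has become available. */
    ( void ) xQueueReset( xQueue );
}
#endif
/*-----------------------------------------------------------*/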

QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;
int8_t *pcAllocatedBuffer;

    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* There is not going to be a queue storage area. */
        xQueueSizeInBytes = ( size_t ) 0;
    }
    else
    {
        /* The queue is one byte longer than asked for to make wrap checking
        easier/faster. */
        xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    }

    /* Allocate the new queue structure and storage area. */
    pcAllocatedBuffer = ( int8_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

    if( pcAllocatedBuffer != NULL )
    {
        pxNewQueue = ( Queue_t * ) pcAllocatedBuffer; /*lint !e826 MISRA The buffer cannot be too small because it was dimensioned by sizeof( Queue_t ) + xQueueSizeInBytes. */

        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* No RAM was allocated for the queue storage area, but pcHead
            cannot be set to NULL because NULL is used as a key to say the queue
            is used as a mutex.  Therefore just set pcHead to point to the queue
            as a benign value that is known to be within the memory map. */
            pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
        }
        else
        {
            /* Jump past the queue structure to find the location of the queue
            storage area - adding the padding bytes to get a better alignment. */
            pxNewQueue->pcHead = pcAllocatedBuffer + sizeof( Queue_t );
        }

        /* Initialise the queue members as described above where the queue type
        is defined. */
        pxNewQueue->uxLength = uxQueueLength;
        pxNewQueue->uxItemSize = uxItemSize;
        ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

        #if ( configUSE_TRACE_FACILITY == 1 )
        {
            pxNewQueue->ucQueueType = ucQueueType;
        }
        #endif /* configUSE_TRACE_FACILITY */

        #if( configUSE_QUEUE_SETS == 1 )
        {
            pxNewQueue->pxQueueSetContainer = NULL;
        }
        #endif /* configUSE_QUEUE_SETS */

        traceQUEUE_CREATE( pxNewQueue );
        xReturn = pxNewQueue;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    configASSERT( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
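/* Editor's example (not part of the original file): applications create base
   queues through the xQueueCreate() macro in queue.h, which calls
   xQueueGenericCreate() with queueQUEUE_TYPE_BASE.  A minimal sketch showing
   that items are queued by copy, not reference; the Message_t type and the
   queue length of 10 are hypothetical, and the block is compiled out. */
#if 0
typedef struct
{
    uint32_t ulId;
    uint32_t ulValue;
} Message_t;

void vExampleCreateAndSend( void )
{
QueueHandle_t xQueue;
Message_t xMessage = { 1UL, 42UL };

    /* Room for 10 Message_t items; each send copies sizeof( Message_t )
    bytes into the queue storage area allocated above. */
    xQueue = xQueueCreate( 10, sizeof( Message_t ) );

    if( xQueue != NULL )
    {
        /* xMessage can be reused immediately after this call returns
        because the queue holds its own copy. */
        ( void ) xQueueSend( xQueue, &xMessage, portMAX_DELAY );
    }
}
#endif
/*-----------------------------------------------------------*/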

#if ( configUSE_MUTEXES == 1 )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        /* Allocate the new queue structure. */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
        if( pxNewQueue != NULL )
        {
            /* Information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* When a queue is used as a mutex no data is actually copied
            into or out of the queue. */
            pxNewQueue->pcWriteTo = NULL;
            pxNewQueue->u.pcReadFrom = NULL;

            /* Each mutex has a length of 1 (like a binary semaphore) and
            an item size of 0 as nothing is actually copied into or out
            of the mutex. */
            pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxNewQueue->uxLength = ( UBaseType_t ) 1U;
            pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
            pxNewQueue->xRxLock = queueUNLOCKED;
            pxNewQueue->xTxLock = queueUNLOCKED;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                pxNewQueue->ucQueueType = ucQueueType;
            }
            #endif

            #if ( configUSE_QUEUE_SETS == 1 )
            {
                pxNewQueue->pxQueueSetContainer = NULL;
            }
            #endif

            /* Ensure the event queues start with the correct state. */
            vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }

        configASSERT( pxNewQueue );
        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
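/* Editor's example (not part of the original file): mutexes are normally
   created and used through the semphr.h wrappers, which route to the queue
   functions above.  A minimal sketch guarding a hypothetical shared
   resource; the 100 ms timeout is arbitrary, and the block is compiled out. */
#if 0
#include "semphr.h"

static SemaphoreHandle_t xResourceMutex;

void vExampleMutexUser( void )
{
    xResourceMutex = xSemaphoreCreateMutex(); /* Calls xQueueCreateMutex(). */

    if( xSemaphoreTake( xResourceMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
    {
        /* Access the shared resource here; priority inheritance takes
        effect if a higher priority task starts waiting for the mutex. */
        ( void ) xSemaphoreGive( xResourceMutex );
    }
}
#endif
/*-----------------------------------------------------------*/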

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
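/* Editor's example (not part of the original file): per the comment above,
   the reliable use of xSemaphoreGetMutexHolder() is asking "does the calling
   task hold this mutex?".  A minimal sketch, assuming both
   INCLUDE_xSemaphoreGetMutexHolder and INCLUDE_xTaskGetCurrentTaskHandle are
   set to 1; compiled out, illustrative only. */
#if 0
#include "semphr.h"

BaseType_t xExampleCallerHoldsMutex( SemaphoreHandle_t xMutex )
{
    /* Safe: the holder cannot change from "this task" to something else
    while this task is the one running the comparison. */
    return ( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() ) ? pdTRUE : pdFALSE;
}
#endif
/*-----------------------------------------------------------*/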

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the task's handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Have we unwound the call count? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn == pdPASS )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
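/* Editor's example (not part of the original file): the semphr.h recursive
   wrappers map onto the two functions above.  A minimal sketch of nested
   takes by the same task, which must be balanced by the same number of gives
   before the mutex is actually returned; compiled out, illustrative only. */
#if 0
#include "semphr.h"

void vExampleRecursiveMutex( void )
{
SemaphoreHandle_t xRecursiveMutex = xSemaphoreCreateRecursiveMutex();

    if( xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY ) == pdPASS )
    {
        /* Taking again from the same task just bumps uxRecursiveCallCount. */
        ( void ) xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY );

        /* Each take needs a matching give; the mutex is only released when
        the call count unwinds to zero. */
        ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
        ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
    }
}
#endif
/*-----------------------------------------------------------*/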

#if ( configUSE_COUNTING_SEMAPHORES == 1 )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        configASSERT( xHandle );
        return xHandle;
    }

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
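/* Editor's example (not part of the original file): a counting semaphore is
   just a queue of zero-sized items, so its count lives in uxMessagesWaiting.
   A minimal sketch of the usual resource-pool pattern via semphr.h, assuming
   configUSE_COUNTING_SEMAPHORES is 1; the pool size of 5 is hypothetical,
   and the block is compiled out. */
#if 0
#include "semphr.h"

void vExampleCountingSemaphore( void )
{
/* Up to 5 'resources', all available initially. */
SemaphoreHandle_t xPool = xSemaphoreCreateCounting( 5, 5 );

    if( xSemaphoreTake( xPool, portMAX_DELAY ) == pdPASS )
    {
        /* Use one resource, then return it to the pool. */
        ( void ) xSemaphoreGive( xPool );
    }
}
#endif
/*-----------------------------------------------------------*/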

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif


    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );
                xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock. A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            /* Return to the original privilege level before exiting the
            function. */
            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
/*-----------------------------------------------------------*/
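/* Editor's example (not part of the original file): xQueueSend(),
   xQueueSendToBack(), xQueueSendToFront() and xQueueOverwrite() are queue.h
   macros that all land in xQueueGenericSend() with different xCopyPosition
   values; queueOVERWRITE never blocks and is only valid on a length-1
   "mailbox" queue, as asserted above.  A minimal sketch of handling a full
   queue; ulValue and the 10 ms timeout are hypothetical, and the block is
   compiled out. */
#if 0
void vExampleSendWithTimeout( QueueHandle_t xQueue, uint32_t ulValue )
{
    /* Block for at most 10 ms waiting for space before giving up. */
    if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 10 ) ) != pdPASS )
    {
        /* errQUEUE_FULL: the queue stayed full for the whole block time. */
    }
}
#endif
/*-----------------------------------------------------------*/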
00812 
00813 #if ( configUSE_ALTERNATIVE_API == 1 )
00814 
00815     BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
00816     {
00817     BaseType_t xEntryTimeSet = pdFALSE;
00818     TimeOut_t xTimeOut;
00819     Queue_t * const pxQueue = ( Queue_t * ) xQueue;
00820 
00821         configASSERT( pxQueue );
00822         configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
00823 
00824         for( ;; )
00825         {
00826             taskENTER_CRITICAL();
00827             {
00828                 /* Is there room on the queue now?  To be running we must be
00829                 the highest priority task wanting to access the queue. */
00830                 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
00831                 {
00832                     traceQUEUE_SEND( pxQueue );
00833                     prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
00834 
00835                     /* If there was a task waiting for data to arrive on the
00836                     queue then unblock it now. */
00837                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
00838                     {
00839                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
00840                         {
00841                             /* The unblocked task has a priority higher than
00842                             our own so yield immediately. */
00843                             portYIELD_WITHIN_API();
00844                         }
00845                         else
00846                         {
00847                             mtCOVERAGE_TEST_MARKER();
00848                         }
00849                     }
00850                     else
00851                     {
00852                         mtCOVERAGE_TEST_MARKER();
00853                     }
00854 
00855                     taskEXIT_CRITICAL();
00856                     return pdPASS;
00857                 }
00858                 else
00859                 {
00860                     if( xTicksToWait == ( TickType_t ) 0 )
00861                     {
00862                         taskEXIT_CRITICAL();
00863                         return errQUEUE_FULL;
00864                     }
00865                     else if( xEntryTimeSet == pdFALSE )
00866                     {
00867                         vTaskSetTimeOutState( &xTimeOut );
00868                         xEntryTimeSet = pdTRUE;
00869                     }
00870                 }
00871             }
00872             taskEXIT_CRITICAL();
00873 
00874             taskENTER_CRITICAL();
00875             {
00876                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
00877                 {
00878                     if( prvIsQueueFull( pxQueue ) != pdFALSE )
00879                     {
00880                         traceBLOCKING_ON_QUEUE_SEND( pxQueue );
00881                         vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
00882                         portYIELD_WITHIN_API();
00883                     }
00884                     else
00885                     {
00886                         mtCOVERAGE_TEST_MARKER();
00887                     }
00888                 }
00889                 else
00890                 {
00891                     taskEXIT_CRITICAL();
00892                     traceQUEUE_SEND_FAILED( pxQueue );
00893                     return errQUEUE_FULL;
00894                 }
00895             }
00896             taskEXIT_CRITICAL();
00897         }
00898     }
00899 
00900 #endif /* configUSE_ALTERNATIVE_API */
00901 /*-----------------------------------------------------------*/
00902 
00903 #if ( configUSE_ALTERNATIVE_API == 1 )
00904 
00905     BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
00906     {
00907     BaseType_t xEntryTimeSet = pdFALSE;
00908     TimeOut_t xTimeOut;
00909     int8_t *pcOriginalReadPosition;
00910     Queue_t * const pxQueue = ( Queue_t * ) xQueue;
00911 
00912         configASSERT( pxQueue );
00913         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
00914 
00915         for( ;; )
00916         {
00917             taskENTER_CRITICAL();
00918             {
00919                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
00920                 {
00921                     /* Remember our read position in case we are just peeking. */
00922                     pcOriginalReadPosition = pxQueue->u.pcReadFrom;
00923 
00924                     prvCopyDataFromQueue( pxQueue, pvBuffer );
00925 
00926                     if( xJustPeeking == pdFALSE )
00927                     {
00928                         traceQUEUE_RECEIVE( pxQueue );
00929 
00930                         /* Data is actually being removed (not just peeked). */
00931                         --( pxQueue->uxMessagesWaiting );
00932 
00933                         #if ( configUSE_MUTEXES == 1 )
00934                         {
00935                             if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
00936                             {
00937                                 /* Record the information required to implement
00938                                 priority inheritance should it become necessary. */
00939                                 pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
00940                             }
00941                             else
00942                             {
00943                                 mtCOVERAGE_TEST_MARKER();
00944                             }
00945                         }
00946                         #endif
00947 
00948                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
00949                         {
00950                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
00951                             {
00952                                 portYIELD_WITHIN_API();
00953                             }
00954                             else
00955                             {
00956                                 mtCOVERAGE_TEST_MARKER();
00957                             }
00958                         }
00959                     }
00960                     else
00961                     {
00962                         traceQUEUE_PEEK( pxQueue );
00963 
00964                         /* The data is not being removed, so reset our read
00965                         pointer. */
00966                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;
00967 
00968                         /* The data is being left in the queue, so see if there are
00969                         any other tasks waiting for the data. */
00970                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
00971                         {
00972                             /* Tasks that are removed from the event list will get added to
00973                             the pending ready list as the scheduler is still suspended. */
00974                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
00975                             {
00976                                 /* The task waiting has a higher priority than this task. */
00977                                 portYIELD_WITHIN_API();
00978                             }
00979                             else
00980                             {
00981                                 mtCOVERAGE_TEST_MARKER();
00982                             }
00983                         }
00984                         else
00985                         {
00986                             mtCOVERAGE_TEST_MARKER();
00987                         }
00988                     }
00989 
00990                     taskEXIT_CRITICAL();
00991                     return pdPASS;
00992                 }
00993                 else
00994                 {
00995                     if( xTicksToWait == ( TickType_t ) 0 )
00996                     {
00997                         taskEXIT_CRITICAL();
00998                         traceQUEUE_RECEIVE_FAILED( pxQueue );
00999                         return errQUEUE_EMPTY;
01000                     }
01001                     else if( xEntryTimeSet == pdFALSE )
01002                     {
01003                         vTaskSetTimeOutState( &xTimeOut );
01004                         xEntryTimeSet = pdTRUE;
01005                     }
01006                 }
01007             }
01008             taskEXIT_CRITICAL();
01009 
01010             taskENTER_CRITICAL();
01011             {
01012                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
01013                 {
01014                     if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
01015                     {
01016                         traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
01017 
01018                         #if ( configUSE_MUTEXES == 1 )
01019                         {
01020                             if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
01021                             {
01022                                 taskENTER_CRITICAL();
01023                                 {
01024                                     vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
01025                                 }
01026                                 taskEXIT_CRITICAL();
01027                             }
01028                             else
01029                             {
01030                                 mtCOVERAGE_TEST_MARKER();
01031                             }
01032                         }
01033                         #endif
01034 
01035                         vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
01036                         portYIELD_WITHIN_API();
01037                     }
01038                     else
01039                     {
01040                         mtCOVERAGE_TEST_MARKER();
01041                     }
01042                 }
01043                 else
01044                 {
01045                     taskEXIT_CRITICAL();
01046                     traceQUEUE_RECEIVE_FAILED( pxQueue );
01047                     return errQUEUE_EMPTY;
01048                 }
01049             }
01050             taskEXIT_CRITICAL();
01051         }
01052     }
01053 
01054 
01055 #endif /* configUSE_ALTERNATIVE_API */
01056 /*-----------------------------------------------------------*/
01057 
01058 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
01059 {
01060 BaseType_t xReturn;
01061 UBaseType_t uxSavedInterruptStatus;
01062 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
01063 
01064     configASSERT( pxQueue );
01065     configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
01066     configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
01067 
01068     /* RTOS ports that support interrupt nesting have the concept of a maximum
01069     system call (or maximum API call) interrupt priority.  Interrupts that are
01070     above the maximum system call priority are kept permanently enabled, even
01071     when the RTOS kernel is in a critical section, but cannot make any calls to
01072     FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
01073     then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
01074     failure if a FreeRTOS API function is called from an interrupt that has been
01075     assigned a priority above the configured maximum system call priority.
01076     Only FreeRTOS functions that end in FromISR can be called from interrupts
01077     that have been assigned a priority at or (logically) below the maximum
01078     system call interrupt priority.  FreeRTOS maintains a separate interrupt
01079     safe API to ensure interrupt entry is as fast and as simple as possible.
01080     More information (albeit Cortex-M specific) is provided on the following
01081     link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
01082     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
01083 
01084     /* Similar to xQueueGenericSend, except without blocking if there is no room
01085     in the queue.  Also don't directly wake a task that was blocked on a queue
01086     read, instead return a flag to say whether a context switch is required or
01087     not (i.e. has a task with a higher priority than us been woken by this
01088     post). */
01089     uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
01090     {
01091         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
01092         {
01093             traceQUEUE_SEND_FROM_ISR( pxQueue );
01094 
01095             /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
01096             semaphore or mutex.  That means prvCopyDataToQueue() cannot result
01097             in a task disinheriting a priority and prvCopyDataToQueue() can be
01098             called here even though the disinherit function does not check if
01099             the scheduler is suspended before accessing the ready lists. */
01100             ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
01101 
01102             /* The event list is not altered if the queue is locked.  This will
01103             be done when the queue is unlocked later. */
01104             if( pxQueue->xTxLock == queueUNLOCKED )
01105             {
01106                 #if ( configUSE_QUEUE_SETS == 1 )
01107                 {
01108                     if( pxQueue->pxQueueSetContainer != NULL )
01109                     {
01110                         if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
01111                         {
01112                             /* The queue is a member of a queue set, and posting
01113                             to the queue set caused a higher priority task to
01114                             unblock.  A context switch is required. */
01115                             if( pxHigherPriorityTaskWoken != NULL )
01116                             {
01117                                 *pxHigherPriorityTaskWoken = pdTRUE;
01118                             }
01119                             else
01120                             {
01121                                 mtCOVERAGE_TEST_MARKER();
01122                             }
01123                         }
01124                         else
01125                         {
01126                             mtCOVERAGE_TEST_MARKER();
01127                         }
01128                     }
01129                     else
01130                     {
01131                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
01132                         {
01133                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
01134                             {
01135                                 /* The task waiting has a higher priority so
01136                                 record that a context switch is required. */
01137                                 if( pxHigherPriorityTaskWoken != NULL )
01138                                 {
01139                                     *pxHigherPriorityTaskWoken = pdTRUE;
01140                                 }
01141                                 else
01142                                 {
01143                                     mtCOVERAGE_TEST_MARKER();
01144                                 }
01145                             }
01146                             else
01147                             {
01148                                 mtCOVERAGE_TEST_MARKER();
01149                             }
01150                         }
01151                         else
01152                         {
01153                             mtCOVERAGE_TEST_MARKER();
01154                         }
01155                     }
01156                 }
01157                 #else /* configUSE_QUEUE_SETS */
01158                 {
01159                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
01160                     {
01161                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
01162                         {
01163                             /* The task waiting has a higher priority so record that a
01164                             context switch is required. */
01165                             if( pxHigherPriorityTaskWoken != NULL )
01166                             {
01167                                 *pxHigherPriorityTaskWoken = pdTRUE;
01168                             }
01169                             else
01170                             {
01171                                 mtCOVERAGE_TEST_MARKER();
01172                             }
01173                         }
01174                         else
01175                         {
01176                             mtCOVERAGE_TEST_MARKER();
01177                         }
01178                     }
01179                     else
01180                     {
01181                         mtCOVERAGE_TEST_MARKER();
01182                     }
01183                 }
01184                 #endif /* configUSE_QUEUE_SETS */
01185             }
01186             else
01187             {
01188                 /* Increment the lock count so the task that unlocks the queue
01189                 knows that data was posted while it was locked. */
01190                 ++( pxQueue->xTxLock );
01191             }
01192 
01193             xReturn = pdPASS;
01194         }
01195         else
01196         {
01197             traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
01198             xReturn = errQUEUE_FULL;
01199         }
01200     }
01201     portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
01202 
01203     return xReturn;
01204 }
01205 /*-----------------------------------------------------------*/
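/* Usage sketch - illustrative only, not part of the FreeRTOS sources.
Application code typically reaches xQueueGenericSendFromISR() through the
xQueueSendFromISR() macro.  The handler name, prvReadRxedChar() and the
xRxQueue handle are hypothetical and assumed to be created elsewhere;
portYIELD_FROM_ISR() is provided by most ports.

    void vExampleRxISR( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    char cRxedChar;

        cRxedChar = prvReadRxedChar();
        ( void ) xQueueSendFromISR( xRxQueue, &cRxedChar, &xHigherPriorityTaskWoken );

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/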
01206 
01207 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
01208 {
01209 BaseType_t xReturn;
01210 UBaseType_t uxSavedInterruptStatus;
01211 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
01212 
01213     /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
01214     item size is 0.  Don't directly wake a task that was blocked on a queue
01215     read, instead return a flag to say whether a context switch is required or
01216     not (i.e. has a task with a higher priority than us been woken by this
01217     post). */
01218 
01219     configASSERT( pxQueue );
01220 
01221     /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
01222     if the item size is not 0. */
01223     configASSERT( pxQueue->uxItemSize == 0 );
01224 
01225     /* Normally a mutex would not be given from an interrupt, and doing so is
01226     definitely wrong if there is a mutex holder as priority inheritance makes no
01227     sense for an interrupt, only tasks. */
01228     configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );
01229 
01230     /* RTOS ports that support interrupt nesting have the concept of a maximum
01231     system call (or maximum API call) interrupt priority.  Interrupts that are
01232     above the maximum system call priority are kept permanently enabled, even
01233     when the RTOS kernel is in a critical section, but cannot make any calls to
01234     FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
01235     then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
01236     failure if a FreeRTOS API function is called from an interrupt that has been
01237     assigned a priority above the configured maximum system call priority.
01238     Only FreeRTOS functions that end in FromISR can be called from interrupts
01239     that have been assigned a priority at or (logically) below the maximum
01240     system call interrupt priority.  FreeRTOS maintains a separate interrupt
01241     safe API to ensure interrupt entry is as fast and as simple as possible.
01242     More information (albeit Cortex-M specific) is provided on the following
01243     link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
01244     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
01245 
01246     uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
01247     {
01248         /* When the queue is used to implement a semaphore no data is ever
01249         moved through the queue but it is still valid to see if the queue 'has
01250         space'. */
01251         if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
01252         {
01253             traceQUEUE_SEND_FROM_ISR( pxQueue );
01254 
01255             /* A task can only have an inherited priority if it is a mutex
01256             holder - and if there is a mutex holder then the mutex cannot be
01257             given from an ISR.  As this is the ISR version of the function it
01258             can be assumed there is no mutex holder and no need to determine if
01259             priority disinheritance is needed.  Simply increase the count of
01260             messages (semaphores) available. */
01261             ++( pxQueue->uxMessagesWaiting );
01262 
01263             /* The event list is not altered if the queue is locked.  This will
01264             be done when the queue is unlocked later. */
01265             if( pxQueue->xTxLock == queueUNLOCKED )
01266             {
01267                 #if ( configUSE_QUEUE_SETS == 1 )
01268                 {
01269                     if( pxQueue->pxQueueSetContainer != NULL )
01270                     {
01271                         if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
01272                         {
01273                             /* The semaphore is a member of a queue set, and
01274                             posting to the queue set caused a higher priority
01275                             task to unblock.  A context switch is required. */
01276                             if( pxHigherPriorityTaskWoken != NULL )
01277                             {
01278                                 *pxHigherPriorityTaskWoken = pdTRUE;
01279                             }
01280                             else
01281                             {
01282                                 mtCOVERAGE_TEST_MARKER();
01283                             }
01284                         }
01285                         else
01286                         {
01287                             mtCOVERAGE_TEST_MARKER();
01288                         }
01289                     }
01290                     else
01291                     {
01292                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
01293                         {
01294                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
01295                             {
01296                                 /* The task waiting has a higher priority so
01297                                 record that a context switch is required. */
01298                                 if( pxHigherPriorityTaskWoken != NULL )
01299                                 {
01300                                     *pxHigherPriorityTaskWoken = pdTRUE;
01301                                 }
01302                                 else
01303                                 {
01304                                     mtCOVERAGE_TEST_MARKER();
01305                                 }
01306                             }
01307                             else
01308                             {
01309                                 mtCOVERAGE_TEST_MARKER();
01310                             }
01311                         }
01312                         else
01313                         {
01314                             mtCOVERAGE_TEST_MARKER();
01315                         }
01316                     }
01317                 }
01318                 #else /* configUSE_QUEUE_SETS */
01319                 {
01320                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
01321                     {
01322                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
01323                         {
01324                             /* The task waiting has a higher priority so record that a
01325                             context switch is required. */
01326                             if( pxHigherPriorityTaskWoken != NULL )
01327                             {
01328                                 *pxHigherPriorityTaskWoken = pdTRUE;
01329                             }
01330                             else
01331                             {
01332                                 mtCOVERAGE_TEST_MARKER();
01333                             }
01334                         }
01335                         else
01336                         {
01337                             mtCOVERAGE_TEST_MARKER();
01338                         }
01339                     }
01340                     else
01341                     {
01342                         mtCOVERAGE_TEST_MARKER();
01343                     }
01344                 }
01345                 #endif /* configUSE_QUEUE_SETS */
01346             }
01347             else
01348             {
01349                 /* Increment the lock count so the task that unlocks the queue
01350                 knows that data was posted while it was locked. */
01351                 ++( pxQueue->xTxLock );
01352             }
01353 
01354             xReturn = pdPASS;
01355         }
01356         else
01357         {
01358             traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
01359             xReturn = errQUEUE_FULL;
01360         }
01361     }
01362     portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
01363 
01364     return xReturn;
01365 }
01366 /*-----------------------------------------------------------*/
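/* Usage sketch - illustrative only, not part of the FreeRTOS sources.
xQueueGiveFromISR() is normally reached through the xSemaphoreGiveFromISR()
macro in semphr.h.  The handler name and the xBinarySemaphore handle are
hypothetical placeholders created elsewhere.

    void vExampleEventISR( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        ( void ) xQueueGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/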
01367 
01368 BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
01369 {
01370 BaseType_t xEntryTimeSet = pdFALSE;
01371 TimeOut_t xTimeOut;
01372 int8_t *pcOriginalReadPosition;
01373 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
01374 
01375     configASSERT( pxQueue );
01376     configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
01377     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
01378     {
01379         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
01380     }
01381     #endif
01382 
01383     /* This function relaxes the coding standard somewhat to allow return
01384     statements within the function itself.  This is done in the interest
01385     of execution time efficiency. */
01386 
01387     for( ;; )
01388     {
01389         taskENTER_CRITICAL();
01390         {
01391             /* Is there data in the queue now?  To be running the calling task
01392             must be the highest priority task wanting to access the queue. */
01393             if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
01394             {
01395                 /* Remember the read position in case the queue is only being
01396                 peeked. */
01397                 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
01398 
01399                 prvCopyDataFromQueue( pxQueue, pvBuffer );
01400 
01401                 if( xJustPeeking == pdFALSE )
01402                 {
01403                     traceQUEUE_RECEIVE( pxQueue );
01404 
01405                     /* Actually removing data, not just peeking. */
01406                     --( pxQueue->uxMessagesWaiting );
01407 
01408                     #if ( configUSE_MUTEXES == 1 )
01409                     {
01410                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
01411                         {
01412                             /* Record the information required to implement
01413                             priority inheritance should it become necessary. */
01414                             pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
01415                         }
01416                         else
01417                         {
01418                             mtCOVERAGE_TEST_MARKER();
01419                         }
01420                     }
01421                     #endif /* configUSE_MUTEXES */
01422 
01423                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
01424                     {
01425                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
01426                         {
01427                             queueYIELD_IF_USING_PREEMPTION();
01428                         }
01429                         else
01430                         {
01431                             mtCOVERAGE_TEST_MARKER();
01432                         }
01433                     }
01434                     else
01435                     {
01436                         mtCOVERAGE_TEST_MARKER();
01437                     }
01438                 }
01439                 else
01440                 {
01441                     traceQUEUE_PEEK( pxQueue );
01442 
01443                     /* The data is not being removed, so reset the read
01444                     pointer. */
01445                     pxQueue->u.pcReadFrom = pcOriginalReadPosition;
01446 
01447                     /* The data is being left in the queue, so see if there are
01448                     any other tasks waiting for the data. */
01449                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
01450                     {
01451                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
01452                         {
01453                             /* The task waiting has a higher priority than this task. */
01454                             queueYIELD_IF_USING_PREEMPTION();
01455                         }
01456                         else
01457                         {
01458                             mtCOVERAGE_TEST_MARKER();
01459                         }
01460                     }
01461                     else
01462                     {
01463                         mtCOVERAGE_TEST_MARKER();
01464                     }
01465                 }
01466 
01467                 taskEXIT_CRITICAL();
01468                 return pdPASS;
01469             }
01470             else
01471             {
01472                 if( xTicksToWait == ( TickType_t ) 0 )
01473                 {
01474                     /* The queue was empty and no block time is specified (or
01475                     the block time has expired) so leave now. */
01476                     taskEXIT_CRITICAL();
01477                     traceQUEUE_RECEIVE_FAILED( pxQueue );
01478                     return errQUEUE_EMPTY;
01479                 }
01480                 else if( xEntryTimeSet == pdFALSE )
01481                 {
01482                     /* The queue was empty and a block time was specified so
01483                     configure the timeout structure. */
01484                     vTaskSetTimeOutState( &xTimeOut );
01485                     xEntryTimeSet = pdTRUE;
01486                 }
01487                 else
01488                 {
01489                     /* Entry time was already set. */
01490                     mtCOVERAGE_TEST_MARKER();
01491                 }
01492             }
01493         }
01494         taskEXIT_CRITICAL();
01495 
01496         /* Interrupts and other tasks can send to and receive from the queue
01497         now the critical section has been exited. */
01498 
01499         vTaskSuspendAll();
01500         prvLockQueue( pxQueue );
01501 
01502         /* Update the timeout state to see if it has expired yet. */
01503         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
01504         {
01505             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
01506             {
01507                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
01508 
01509                 #if ( configUSE_MUTEXES == 1 )
01510                 {
01511                     if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
01512                     {
01513                         taskENTER_CRITICAL();
01514                         {
01515                             vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
01516                         }
01517                         taskEXIT_CRITICAL();
01518                     }
01519                     else
01520                     {
01521                         mtCOVERAGE_TEST_MARKER();
01522                     }
01523                 }
01524                 #endif
01525 
01526                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
01527                 prvUnlockQueue( pxQueue );
01528                 if( xTaskResumeAll() == pdFALSE )
01529                 {
01530                     portYIELD_WITHIN_API();
01531                 }
01532                 else
01533                 {
01534                     mtCOVERAGE_TEST_MARKER();
01535                 }
01536             }
01537             else
01538             {
01539                 /* Try again. */
01540                 prvUnlockQueue( pxQueue );
01541                 ( void ) xTaskResumeAll();
01542             }
01543         }
01544         else
01545         {
01546             prvUnlockQueue( pxQueue );
01547             ( void ) xTaskResumeAll();
01548             traceQUEUE_RECEIVE_FAILED( pxQueue );
01549             return errQUEUE_EMPTY;
01550         }
01551     }
01552 }
01553 /*-----------------------------------------------------------*/
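/* Usage sketch - illustrative only, not part of the FreeRTOS sources.
Application code reaches xQueueGenericReceive() through the xQueueReceive()
macro (xJustPeeking = pdFALSE, the item is removed) or the xQueuePeek()
macro (xJustPeeking = pdTRUE, the item is left on the queue).  The task
name, xRxQueue handle and prvProcessChar() are hypothetical.

    void vExampleRxTask( void *pvParameters )
    {
    char cRxedChar;

        for( ;; )
        {
            if( xQueueReceive( xRxQueue, &cRxedChar, ( TickType_t ) 100 ) == pdPASS )
            {
                prvProcessChar( cRxedChar );
            }
        }
    }
*/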
01554 
01555 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
01556 {
01557 BaseType_t xReturn;
01558 UBaseType_t uxSavedInterruptStatus;
01559 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
01560 
01561     configASSERT( pxQueue );
01562     configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
01563 
01564     /* RTOS ports that support interrupt nesting have the concept of a maximum
01565     system call (or maximum API call) interrupt priority.  Interrupts that are
01566     above the maximum system call priority are kept permanently enabled, even
01567     when the RTOS kernel is in a critical section, but cannot make any calls to
01568     FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
01569     then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
01570     failure if a FreeRTOS API function is called from an interrupt that has been
01571     assigned a priority above the configured maximum system call priority.
01572     Only FreeRTOS functions that end in FromISR can be called from interrupts
01573     that have been assigned a priority at or (logically) below the maximum
01574     system call interrupt priority.  FreeRTOS maintains a separate interrupt
01575     safe API to ensure interrupt entry is as fast and as simple as possible.
01576     More information (albeit Cortex-M specific) is provided on the following
01577     link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
01578     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
01579 
01580     uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
01581     {
01582         /* Cannot block in an ISR, so check there is data available. */
01583         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
01584         {
01585             traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
01586 
01587             prvCopyDataFromQueue( pxQueue, pvBuffer );
01588             --( pxQueue->uxMessagesWaiting );
01589 
01590             /* If the queue is locked the event list will not be modified.
01591             Instead update the lock count so the task that unlocks the queue
01592             will know that an ISR has removed data while the queue was
01593             locked. */
01594             if( pxQueue->xRxLock == queueUNLOCKED )
01595             {
01596                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
01597                 {
01598                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
01599                     {
01600                         /* The task waiting has a higher priority than us so
01601                         force a context switch. */
01602                         if( pxHigherPriorityTaskWoken != NULL )
01603                         {
01604                             *pxHigherPriorityTaskWoken = pdTRUE;
01605                         }
01606                         else
01607                         {
01608                             mtCOVERAGE_TEST_MARKER();
01609                         }
01610                     }
01611                     else
01612                     {
01613                         mtCOVERAGE_TEST_MARKER();
01614                     }
01615                 }
01616                 else
01617                 {
01618                     mtCOVERAGE_TEST_MARKER();
01619                 }
01620             }
01621             else
01622             {
01623                 /* Increment the lock count so the task that unlocks the queue
01624                 knows that data was removed while it was locked. */
01625                 ++( pxQueue->xRxLock );
01626             }
01627 
01628             xReturn = pdPASS;
01629         }
01630         else
01631         {
01632             xReturn = pdFAIL;
01633             traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
01634         }
01635     }
01636     portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
01637 
01638     return xReturn;
01639 }
01640 /*-----------------------------------------------------------*/
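/* Usage sketch - illustrative only, not part of the FreeRTOS sources.
Removing an item from a queue inside an interrupt, for example to refill a
peripheral transmit register.  The handler name, xTxQueue handle and
prvWriteTxChar() are hypothetical.

    void vExampleTxISR( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    char cTxChar;

        if( xQueueReceiveFromISR( xTxQueue, &cTxChar, &xHigherPriorityTaskWoken ) == pdPASS )
        {
            prvWriteTxChar( cTxChar );
        }

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/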
01641 
01642 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,  void * const pvBuffer )
01643 {
01644 BaseType_t xReturn;
01645 UBaseType_t uxSavedInterruptStatus;
01646 int8_t *pcOriginalReadPosition;
01647 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
01648 
01649     configASSERT( pxQueue );
01650     configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
01651     configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
01652 
01653     /* RTOS ports that support interrupt nesting have the concept of a maximum
01654     system call (or maximum API call) interrupt priority.  Interrupts that are
01655     above the maximum system call priority are kept permanently enabled, even
01656     when the RTOS kernel is in a critical section, but cannot make any calls to
01657     FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
01658     then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
01659     failure if a FreeRTOS API function is called from an interrupt that has been
01660     assigned a priority above the configured maximum system call priority.
01661     Only FreeRTOS functions that end in FromISR can be called from interrupts
01662     that have been assigned a priority at or (logically) below the maximum
01663     system call interrupt priority.  FreeRTOS maintains a separate interrupt
01664     safe API to ensure interrupt entry is as fast and as simple as possible.
01665     More information (albeit Cortex-M specific) is provided on the following
01666     link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
01667     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
01668 
01669     uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
01670     {
01671         /* Cannot block in an ISR, so check there is data available. */
01672         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
01673         {
01674             traceQUEUE_PEEK_FROM_ISR( pxQueue );
01675 
01676             /* Remember the read position so it can be reset as nothing is
01677             actually being removed from the queue. */
01678             pcOriginalReadPosition = pxQueue->u.pcReadFrom;
01679             prvCopyDataFromQueue( pxQueue, pvBuffer );
01680             pxQueue->u.pcReadFrom = pcOriginalReadPosition;
01681 
01682             xReturn = pdPASS;
01683         }
01684         else
01685         {
01686             xReturn = pdFAIL;
01687             traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
01688         }
01689     }
01690     portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
01691 
01692     return xReturn;
01693 }
01694 /*-----------------------------------------------------------*/
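/* Usage sketch - illustrative only, not part of the FreeRTOS sources.
Peeking leaves the item on the queue, so a task level reader will still
receive it.  xStatusQueue, StatusMessage_t and prvLogStatus() are
hypothetical.

    StatusMessage_t xSnapshot;

    if( xQueuePeekFromISR( xStatusQueue, &xSnapshot ) == pdPASS )
    {
        prvLogStatus( &xSnapshot );
    }
*/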
01695 
01696 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
01697 {
01698 UBaseType_t uxReturn;
01699 
01700     configASSERT( xQueue );
01701 
01702     taskENTER_CRITICAL();
01703     {
01704         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
01705     }
01706     taskEXIT_CRITICAL();
01707 
01708     return uxReturn;
01709 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
01710 /*-----------------------------------------------------------*/
01711 
01712 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
01713 {
01714 UBaseType_t uxReturn;
01715 Queue_t *pxQueue;
01716 
01717     pxQueue = ( Queue_t * ) xQueue;
01718     configASSERT( pxQueue );
01719 
01720     taskENTER_CRITICAL();
01721     {
01722         uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
01723     }
01724     taskEXIT_CRITICAL();
01725 
01726     return uxReturn;
01727 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
01728 /*-----------------------------------------------------------*/
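/* Usage note - illustrative only, not part of the FreeRTOS sources.  For a
given queue the two queries below sum to the queue length, although each
call takes its own critical section so the combined snapshot can be stale
by the time both have returned.  xQueue is a hypothetical handle.

    UBaseType_t uxUsed = uxQueueMessagesWaiting( xQueue );
    UBaseType_t uxFree = uxQueueSpacesAvailable( xQueue );
*/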
01729 
01730 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
01731 {
01732 UBaseType_t uxReturn;
01733 
01734     configASSERT( xQueue );
01735 
01736     uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
01737 
01738     return uxReturn;
01739 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
01740 /*-----------------------------------------------------------*/
01741 
01742 void vQueueDelete( QueueHandle_t xQueue )
01743 {
01744 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
01745 
01746     configASSERT( pxQueue );
01747 
01748     traceQUEUE_DELETE( pxQueue );
01749     #if ( configQUEUE_REGISTRY_SIZE > 0 )
01750     {
01751         vQueueUnregisterQueue( pxQueue );
01752     }
01753     #endif
01754     vPortFree( pxQueue );
01755 }
01756 /*-----------------------------------------------------------*/
01757 
01758 #if ( configUSE_TRACE_FACILITY == 1 )
01759 
01760     UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
01761     {
01762         return ( ( Queue_t * ) xQueue )->uxQueueNumber;
01763     }
01764 
01765 #endif /* configUSE_TRACE_FACILITY */
01766 /*-----------------------------------------------------------*/
01767 
01768 #if ( configUSE_TRACE_FACILITY == 1 )
01769 
01770     void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
01771     {
01772         ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
01773     }
01774 
01775 #endif /* configUSE_TRACE_FACILITY */
01776 /*-----------------------------------------------------------*/
01777 
01778 #if ( configUSE_TRACE_FACILITY == 1 )
01779 
01780     uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
01781     {
01782         return ( ( Queue_t * ) xQueue )->ucQueueType;
01783     }
01784 
01785 #endif /* configUSE_TRACE_FACILITY */
01786 /*-----------------------------------------------------------*/
01787 
01788 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
01789 {
01790 BaseType_t xReturn = pdFALSE;
01791 
01792     if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
01793     {
01794         #if ( configUSE_MUTEXES == 1 )
01795         {
01796             if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
01797             {
01798                 /* The mutex is no longer being held. */
01799                 xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
01800                 pxQueue->pxMutexHolder = NULL;
01801             }
01802             else
01803             {
01804                 mtCOVERAGE_TEST_MARKER();
01805             }
01806         }
01807         #endif /* configUSE_MUTEXES */
01808     }
01809     else if( xPosition == queueSEND_TO_BACK )
01810     {
01811         ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
01812         pxQueue->pcWriteTo += pxQueue->uxItemSize;
01813         if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
01814         {
01815             pxQueue->pcWriteTo = pxQueue->pcHead;
01816         }
01817         else
01818         {
01819             mtCOVERAGE_TEST_MARKER();
01820         }
01821     }
01822     else
01823     {
01824         ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
01825         pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
01826         if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
01827         {
01828             pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
01829         }
01830         else
01831         {
01832             mtCOVERAGE_TEST_MARKER();
01833         }
01834 
01835         if( xPosition == queueOVERWRITE )
01836         {
01837             if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
01838             {
01839                 /* An item is not being added but overwritten, so subtract
01840                 one from the recorded number of items in the queue so when
01841                 one is added again below the number of recorded items remains
01842                 correct. */
01843                 --( pxQueue->uxMessagesWaiting );
01844             }
01845             else
01846             {
01847                 mtCOVERAGE_TEST_MARKER();
01848             }
01849         }
01850         else
01851         {
01852             mtCOVERAGE_TEST_MARKER();
01853         }
01854     }
01855 
01856     ++( pxQueue->uxMessagesWaiting );
01857 
01858     return xReturn;
01859 }
01860 /*-----------------------------------------------------------*/
01861 
01862 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
01863 {
01864     if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
01865     {
01866         pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
01867         if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
01868         {
01869             pxQueue->u.pcReadFrom = pxQueue->pcHead;
01870         }
01871         else
01872         {
01873             mtCOVERAGE_TEST_MARKER();
01874         }
01875         ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
01876     }
01877 }
01878 /*-----------------------------------------------------------*/
01879 
01880 static void prvUnlockQueue( Queue_t * const pxQueue )
01881 {
01882     /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
01883 
01884     /* The lock counts contain the number of extra data items placed on or
01885     removed from the queue while the queue was locked.  When a queue is
01886     locked items can be added or removed, but the event lists cannot be
01887     updated. */
01888     taskENTER_CRITICAL();
01889     {
01890         /* See if data was added to the queue while it was locked. */
01891         while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
01892         {
01893             /* Data was posted while the queue was locked.  Are any tasks
01894             blocked waiting for data to become available? */
01895             #if ( configUSE_QUEUE_SETS == 1 )
01896             {
01897                 if( pxQueue->pxQueueSetContainer != NULL )
01898                 {
01899                     if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
01900                     {
01901                         /* The queue is a member of a queue set, and posting to
01902                         the queue set caused a higher priority task to unblock.
01903                         A context switch is required. */
01904                         vTaskMissedYield();
01905                     }
01906                     else
01907                     {
01908                         mtCOVERAGE_TEST_MARKER();
01909                     }
01910                 }
01911                 else
01912                 {
01913                     /* Tasks that are removed from the event list will get added to
01914                     the pending ready list as the scheduler is still suspended. */
01915                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
01916                     {
01917                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
01918                         {
01919                             /* The task waiting has a higher priority so record that a
01920                             context switch is required. */
01921                             vTaskMissedYield();
01922                         }
01923                         else
01924                         {
01925                             mtCOVERAGE_TEST_MARKER();
01926                         }
01927                     }
01928                     else
01929                     {
01930                         break;
01931                     }
01932                 }
01933             }
01934             #else /* configUSE_QUEUE_SETS */
01935             {
01936                 /* Tasks that are removed from the event list will get added to
01937                 the pending ready list as the scheduler is still suspended. */
01938                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
01939                 {
01940                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
01941                     {
01942                         /* The task waiting has a higher priority so record that a
01943                         context switch is required. */
01944                         vTaskMissedYield();
01945                     }
01946                     else
01947                     {
01948                         mtCOVERAGE_TEST_MARKER();
01949                     }
01950                 }
01951                 else
01952                 {
01953                     break;
01954                 }
01955             }
01956             #endif /* configUSE_QUEUE_SETS */
01957 
01958             --( pxQueue->xTxLock );
01959         }
01960 
01961         pxQueue->xTxLock = queueUNLOCKED;
01962     }
01963     taskEXIT_CRITICAL();
01964 
01965     /* Do the same for the Rx lock. */
01966     taskENTER_CRITICAL();
01967     {
01968         while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
01969         {
01970             if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
01971             {
01972                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
01973                 {
01974                     vTaskMissedYield();
01975                 }
01976                 else
01977                 {
01978                     mtCOVERAGE_TEST_MARKER();
01979                 }
01980 
01981                 --( pxQueue->xRxLock );
01982             }
01983             else
01984             {
01985                 break;
01986             }
01987         }
01988 
01989         pxQueue->xRxLock = queueUNLOCKED;
01990     }
01991     taskEXIT_CRITICAL();
01992 }
01993 /*-----------------------------------------------------------*/
01994 
01995 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
01996 {
01997 BaseType_t xReturn;
01998 
01999     taskENTER_CRITICAL();
02000     {
02001         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
02002         {
02003             xReturn = pdTRUE;
02004         }
02005         else
02006         {
02007             xReturn = pdFALSE;
02008         }
02009     }
02010     taskEXIT_CRITICAL();
02011 
02012     return xReturn;
02013 }
02014 /*-----------------------------------------------------------*/
02015 
02016 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
02017 {
02018 BaseType_t xReturn;
02019 
02020     configASSERT( xQueue );
02021     if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
02022     {
02023         xReturn = pdTRUE;
02024     }
02025     else
02026     {
02027         xReturn = pdFALSE;
02028     }
02029 
02030     return xReturn;
02031 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
02032 /*-----------------------------------------------------------*/
02033 
02034 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
02035 {
02036 BaseType_t xReturn;
02037 
02038     taskENTER_CRITICAL();
02039     {
02040         if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
02041         {
02042             xReturn = pdTRUE;
02043         }
02044         else
02045         {
02046             xReturn = pdFALSE;
02047         }
02048     }
02049     taskEXIT_CRITICAL();
02050 
02051     return xReturn;
02052 }
02053 /*-----------------------------------------------------------*/
02054 
02055 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
02056 {
02057 BaseType_t xReturn;
02058 
02059     configASSERT( xQueue );
02060     if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
02061     {
02062         xReturn = pdTRUE;
02063     }
02064     else
02065     {
02066         xReturn = pdFALSE;
02067     }
02068 
02069     return xReturn;
02070 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
02071 /*-----------------------------------------------------------*/
02072 
02073 #if ( configUSE_CO_ROUTINES == 1 )
02074 
02075     BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
02076     {
02077     BaseType_t xReturn;
02078     Queue_t * const pxQueue = ( Queue_t * ) xQueue;
02079 
02080         /* If the queue is already full we may have to block.  A critical section
02081         is required to prevent an interrupt removing something from the queue
02082         between the check to see if the queue is full and blocking on the queue. */
02083         portDISABLE_INTERRUPTS();
02084         {
02085             if( prvIsQueueFull( pxQueue ) != pdFALSE )
02086             {
02087                 /* The queue is full - do we want to block or just leave without
02088                 posting? */
02089                 if( xTicksToWait > ( TickType_t ) 0 )
02090                 {
02091                     /* As this is called from a co-routine we cannot block directly, but
02092                     return indicating that we need to block. */
02093                     vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
02094                     portENABLE_INTERRUPTS();
02095                     return errQUEUE_BLOCKED;
02096                 }
02097                 else
02098                 {
02099                     portENABLE_INTERRUPTS();
02100                     return errQUEUE_FULL;
02101                 }
02102             }
02103         }
02104         portENABLE_INTERRUPTS();
02105 
02106         portDISABLE_INTERRUPTS();
02107         {
02108             if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
02109             {
02110                 /* There is room in the queue, copy the data into the queue. */
02111                 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
02112                 xReturn = pdPASS;
02113 
02114                 /* Were any co-routines waiting for data to become available? */
02115                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
02116                 {
02117                     /* In this instance the co-routine could be placed directly
02118                     into the ready list as we are within a critical section.
02119                     Instead the same pending ready list mechanism is used as if
02120                     the event were caused from within an interrupt. */
02121                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
02122                     {
02123                         /* The co-routine waiting has a higher priority so record
02124                         that a yield might be appropriate. */
02125                         xReturn = errQUEUE_YIELD;
02126                     }
02127                     else
02128                     {
02129                         mtCOVERAGE_TEST_MARKER();
02130                     }
02131                 }
02132                 else
02133                 {
02134                     mtCOVERAGE_TEST_MARKER();
02135                 }
02136             }
02137             else
02138             {
02139                 xReturn = errQUEUE_FULL;
02140             }
02141         }
02142         portENABLE_INTERRUPTS();
02143 
02144         return xReturn;
02145     }
02146 
02147 #endif /* configUSE_CO_ROUTINES */
02148 /*-----------------------------------------------------------*/
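/* Usage sketch - illustrative only, not part of the FreeRTOS sources.
Co-routine code does not call xQueueCRSend() directly; it uses the
crQUEUE_SEND() macro from croutine.h, which turns an errQUEUE_BLOCKED
return into a co-routine yield.  Local state must be static because a
co-routine's stack is not preserved across a blocking call.  All names are
hypothetical.

    void vExampleTxCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
    {
    static BaseType_t xResult;
    static uint8_t ucValueToPost = 0;

        crSTART( xHandle );

        for( ;; )
        {
            crQUEUE_SEND( xHandle, xCoRoutineQueue, &ucValueToPost, 10, &xResult );
            ucValueToPost++;
        }

        crEND();
    }
*/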
02149 
02150 #if ( configUSE_CO_ROUTINES == 1 )
02151 
02152     BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
02153     {
02154     BaseType_t xReturn;
02155     Queue_t * const pxQueue = ( Queue_t * ) xQueue;
02156 
02157         /* If the queue is already empty we may have to block.  A critical section
02158         is required to prevent an interrupt adding something to the queue
02159         between the check to see if the queue is empty and blocking on the queue. */
02160         portDISABLE_INTERRUPTS();
02161         {
02162             if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
02163             {
02164                 /* There are no messages in the queue, do we want to block or just
02165                 leave with nothing? */
02166                 if( xTicksToWait > ( TickType_t ) 0 )
02167                 {
02168                     /* As this is a co-routine we cannot block directly, but return
02169                     indicating that we need to block. */
02170                     vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
02171                     portENABLE_INTERRUPTS();
02172                     return errQUEUE_BLOCKED;
02173                 }
02174                 else
02175                 {
02176                     portENABLE_INTERRUPTS();
02177                     return errQUEUE_FULL;
02178                 }
02179             }
02180             else
02181             {
02182                 mtCOVERAGE_TEST_MARKER();
02183             }
02184         }
02185         portENABLE_INTERRUPTS();
02186 
02187         portDISABLE_INTERRUPTS();
02188         {
02189             if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
02190             {
02191                 /* Data is available from the queue. */
02192                 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
02193                 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
02194                 {
02195                     pxQueue->u.pcReadFrom = pxQueue->pcHead;
02196                 }
02197                 else
02198                 {
02199                     mtCOVERAGE_TEST_MARKER();
02200                 }
02201                 --( pxQueue->uxMessagesWaiting );
02202                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
02203 
02204                 xReturn = pdPASS;
02205 
02206                 /* Were any co-routines waiting for space to become available? */
02207                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
02208                 {
02209                     /* In this instance the co-routine could be placed directly
02210                     into the ready list as we are within a critical section.
02211                     Instead the same pending ready list mechanism is used as if
02212                     the event were caused from within an interrupt. */
02213                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
02214                     {
02215                         xReturn = errQUEUE_YIELD;
02216                     }
02217                     else
02218                     {
02219                         mtCOVERAGE_TEST_MARKER();
02220                     }
02221                 }
02222                 else
02223                 {
02224                     mtCOVERAGE_TEST_MARKER();
02225                 }
02226             }
02227             else
02228             {
02229                 xReturn = pdFAIL;
02230             }
02231         }
02232         portENABLE_INTERRUPTS();
02233 
02234         return xReturn;
02235     }
02236 
02237 #endif /* configUSE_CO_ROUTINES */
02238 /*-----------------------------------------------------------*/
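/* Usage sketch - illustrative only, not part of the FreeRTOS sources.  The
receiving side uses the crQUEUE_RECEIVE() macro from croutine.h, again from
inside a crSTART()/crEND() pair, with static locals for the same reason as
on the sending side.  All names are hypothetical.

    static BaseType_t xResult;
    static uint8_t ucReceivedValue;

    crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &ucReceivedValue, portMAX_DELAY, &xResult );
    if( xResult == pdPASS )
    {
        prvProcessValue( ucReceivedValue );
    }
*/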
02239 
02240 #if ( configUSE_CO_ROUTINES == 1 )
02241 
02242     BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
02243     {
02244     Queue_t * const pxQueue = ( Queue_t * ) xQueue;
02245 
02246         /* Cannot block within an ISR so if there is no space on the queue then
02247         exit without doing anything. */
02248         if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
02249         {
02250             prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
02251 
02252             /* We only want to wake one co-routine per ISR, so check that a
02253             co-routine has not already been woken. */
02254             if( xCoRoutinePreviouslyWoken == pdFALSE )
02255             {
02256                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
02257                 {
02258                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
02259                     {
02260                         return pdTRUE;
02261                     }
02262                     else
02263                     {
02264                         mtCOVERAGE_TEST_MARKER();
02265                     }
02266                 }
02267                 else
02268                 {
02269                     mtCOVERAGE_TEST_MARKER();
02270                 }
02271             }
02272             else
02273             {
02274                 mtCOVERAGE_TEST_MARKER();
02275             }
02276         }
02277         else
02278         {
02279             mtCOVERAGE_TEST_MARKER();
02280         }
02281 
02282         return xCoRoutinePreviouslyWoken;
02283     }
02284 
02285 #endif /* configUSE_CO_ROUTINES */
02286 /*-----------------------------------------------------------*/
02287 
02288 #if ( configUSE_CO_ROUTINES == 1 )
02289 
02290     BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
02291     {
02292     BaseType_t xReturn;
02293     Queue_t * const pxQueue = ( Queue_t * ) xQueue;
02294 
02295         /* We cannot block from an ISR, so check there is data available. If
02296         not then just leave without doing anything. */
02297         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
02298         {
02299             /* Copy the data from the queue. */
02300             pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
02301             if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
02302             {
02303                 pxQueue->u.pcReadFrom = pxQueue->pcHead;
02304             }
02305             else
02306             {
02307                 mtCOVERAGE_TEST_MARKER();
02308             }
02309             --( pxQueue->uxMessagesWaiting );
02310             ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
02311 
02312             if( ( *pxCoRoutineWoken ) == pdFALSE )
02313             {
02314                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
02315                 {
02316                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
02317                     {
02318                         *pxCoRoutineWoken = pdTRUE;
02319                     }
02320                     else
02321                     {
02322                         mtCOVERAGE_TEST_MARKER();
02323                     }
02324                 }
02325                 else
02326                 {
02327                     mtCOVERAGE_TEST_MARKER();
02328                 }
02329             }
02330             else
02331             {
02332                 mtCOVERAGE_TEST_MARKER();
02333             }
02334 
02335             xReturn = pdPASS;
02336         }
02337         else
02338         {
02339             xReturn = pdFAIL;
02340         }
02341 
02342         return xReturn;
02343     }
02344 
02345 #endif /* configUSE_CO_ROUTINES */
02346 /*-----------------------------------------------------------*/
02347 
02348 #if ( configQUEUE_REGISTRY_SIZE > 0 )
02349 
02350     void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
02351     {
02352     UBaseType_t ux;
02353 
02354         /* See if there is an empty space in the registry.  A NULL name denotes
02355         a free slot. */
02356         for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
02357         {
02358             if( xQueueRegistry[ ux ].pcQueueName == NULL )
02359             {
02360                 /* Store the information on this queue. */
02361                 xQueueRegistry[ ux ].pcQueueName = pcQueueName;
02362                 xQueueRegistry[ ux ].xHandle = xQueue;
02363 
02364                 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
02365                 break;
02366             }
02367             else
02368             {
02369                 mtCOVERAGE_TEST_MARKER();
02370             }
02371         }
02372     }
02373 
02374 #endif /* configQUEUE_REGISTRY_SIZE */
02375 /*-----------------------------------------------------------*/
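/* Usage sketch - illustrative only, not part of the FreeRTOS sources.
Registering a queue lets a kernel aware debugger display it by name; the
registry stores the string pointer, so the name must remain valid for the
life of the queue.  Requires configQUEUE_REGISTRY_SIZE > 0 in
FreeRTOSConfig.h.  xRxQueue is a hypothetical handle.

    vQueueAddToRegistry( xRxQueue, "RxQueue" );
*/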
02376 
02377 #if ( configQUEUE_REGISTRY_SIZE > 0 )
02378 
02379     void vQueueUnregisterQueue( QueueHandle_t xQueue )
02380     {
02381     UBaseType_t ux;
02382 
02383         /* See if the handle of the queue being unregistered is actually in the
02384         registry. */
02385         for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
02386         {
02387             if( xQueueRegistry[ ux ].xHandle == xQueue )
02388             {
02389                 /* Set the name to NULL to show that this slot is free again. */
02390                 xQueueRegistry[ ux ].pcQueueName = NULL;
02391                 break;
02392             }
02393             else
02394             {
02395                 mtCOVERAGE_TEST_MARKER();
02396             }
02397         }
02398 
02399     } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
02400 
02401 #endif /* configQUEUE_REGISTRY_SIZE */
02402 /*-----------------------------------------------------------*/
02403 
02404 #if ( configUSE_TIMERS == 1 )
02405 
02406     void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
02407     {
02408     Queue_t * const pxQueue = ( Queue_t * ) xQueue;
02409 
02410         /* This function should not be called by application code hence the
02411         'Restricted' in its name.  It is not part of the public API.  It is
02412         designed for use by kernel code, and has special calling requirements.
02413         It can result in vListInsert() being called on a list that can only
02414         possibly ever have one item in it, so the list will be fast, but even
02415         so it should be called with the scheduler locked and not from a critical
02416         section. */
02417 
02418         /* Only do anything if there are no messages in the queue.  This function
02419         will not actually cause the task to block, just place it on a blocked
02420         list.  It will not block until the scheduler is unlocked - at which
02421         time a yield will be performed.  If an item is added to the queue while
02422         the queue is locked, and the calling task blocks on the queue, then the
02423         calling task will be immediately unblocked when the queue is unlocked. */
02424         prvLockQueue( pxQueue );
02425         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
02426         {
02427             /* There is nothing in the queue, block for the specified period. */
02428             vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
02429         }
02430         else
02431         {
02432             mtCOVERAGE_TEST_MARKER();
02433         }
02434         prvUnlockQueue( pxQueue );
02435     }
02436 
02437 #endif /* configUSE_TIMERS */
02438 /*-----------------------------------------------------------*/
02439 
02440 #if ( configUSE_QUEUE_SETS == 1 )
02441 
02442     QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
02443     {
02444     QueueSetHandle_t pxQueue;
02445 
02446         pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
02447 
02448         return pxQueue;
02449     }
02450 
02451 #endif /* configUSE_QUEUE_SETS */
02452 /*-----------------------------------------------------------*/
02453 
02454 #if ( configUSE_QUEUE_SETS == 1 )
02455 
02456     BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
02457     {
02458     BaseType_t xReturn;
02459 
02460         taskENTER_CRITICAL();
02461         {
02462             if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
02463             {
02464                 /* Cannot add a queue/semaphore to more than one queue set. */
02465                 xReturn = pdFAIL;
02466             }
02467             else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
02468             {
02469                 /* Cannot add a queue/semaphore to a queue set if there are already
02470                 items in the queue/semaphore. */
02471                 xReturn = pdFAIL;
02472             }
02473             else
02474             {
02475                 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
02476                 xReturn = pdPASS;
02477             }
02478         }
02479         taskEXIT_CRITICAL();
02480 
02481         return xReturn;
02482     }
02483 
02484 #endif /* configUSE_QUEUE_SETS */
02485 /*-----------------------------------------------------------*/
02486 
02487 #if ( configUSE_QUEUE_SETS == 1 )
02488 
02489     BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
02490     {
02491     BaseType_t xReturn;
02492     Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
02493 
02494         if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
02495         {
02496             /* The queue was not a member of the set. */
02497             xReturn = pdFAIL;
02498         }
02499         else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
02500         {
02501             /* It is dangerous to remove a queue from a set when the queue is
02502             not empty because the queue set will still hold pending events for
02503             the queue. */
02504             xReturn = pdFAIL;
02505         }
02506         else
02507         {
02508             taskENTER_CRITICAL();
02509             {
02510                 /* The queue is no longer contained in the set. */
02511                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
02512             }
02513             taskEXIT_CRITICAL();
02514             xReturn = pdPASS;
02515         }
02516 
02517         return xReturn;
02518     } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
02519 
02520 #endif /* configUSE_QUEUE_SETS */
02521 /*-----------------------------------------------------------*/
02522 
02523 #if ( configUSE_QUEUE_SETS == 1 )
02524 
02525     QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
02526     {
02527     QueueSetMemberHandle_t xReturn = NULL;
02528 
02529         ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
02530         return xReturn;
02531     }
02532 
02533 #endif /* configUSE_QUEUE_SETS */
02534 /*-----------------------------------------------------------*/
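/* Usage sketch - illustrative only, not part of the FreeRTOS sources.
Typical queue set flow: create a set large enough to hold the combined
lengths of its members (here a queue of length 10 plus a binary semaphore),
add the members while they are empty, then block on the set and service
whichever member handle is returned.  All names are hypothetical, and
configUSE_QUEUE_SETS must be 1.

    QueueSetMemberHandle_t xActivated;
    QueueSetHandle_t xSet;
    char cRxedChar;

    xSet = xQueueCreateSet( 10 + 1 );
    ( void ) xQueueAddToSet( xRxQueue, xSet );
    ( void ) xQueueAddToSet( xBinarySemaphore, xSet );

    for( ;; )
    {
        xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

        if( xActivated == ( QueueSetMemberHandle_t ) xRxQueue )
        {
            ( void ) xQueueReceive( xRxQueue, &cRxedChar, 0 );
        }
        else
        {
            ( void ) xSemaphoreTake( xBinarySemaphore, 0 );
        }
    }
*/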
02535 
02536 #if ( configUSE_QUEUE_SETS == 1 )
02537 
02538     QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
02539     {
02540     QueueSetMemberHandle_t xReturn = NULL;
02541 
02542         ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
02543         return xReturn;
02544     }
02545 
02546 #endif /* configUSE_QUEUE_SETS */
02547 /*-----------------------------------------------------------*/
02548 
02549 #if ( configUSE_QUEUE_SETS == 1 )
02550 
02551     static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
02552     {
02553     Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
02554     BaseType_t xReturn = pdFALSE;
02555 
02556         /* This function must be called from a critical section. */
02557 
02558         configASSERT( pxQueueSetContainer );
02559         configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
02560 
02561         if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
02562         {
02563             traceQUEUE_SEND( pxQueueSetContainer );
02564 
02565             /* The data copied is the handle of the queue that contains data. */
02566             xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
02567 
02568             if( pxQueueSetContainer->xTxLock == queueUNLOCKED )
02569             {
02570                 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
02571                 {
02572                     if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
02573                     {
02574                         /* The task waiting has a higher priority. */
02575                         xReturn = pdTRUE;
02576                     }
02577                     else
02578                     {
02579                         mtCOVERAGE_TEST_MARKER();
02580                     }
02581                 }
02582                 else
02583                 {
02584                     mtCOVERAGE_TEST_MARKER();
02585                 }
02586             }
02587             else
02588             {
02589                 ( pxQueueSetContainer->xTxLock )++;
02590             }
02591         }
02592         else
02593         {
02594             mtCOVERAGE_TEST_MARKER();
02595         }
02596 
02597         return xReturn;
02598     }
02599 
02600 #endif /* configUSE_QUEUE_SETS */
02601 