// there is a procedure in progress that can schedule a task via this
// message queue. This causes |btu_bta_msg_queue| to get cleaned up before
// it gets used here; hence we check for NULL before using it.
- if (btu_task_post(SIG_BTU_BTA_MSG, p_msg, OSI_THREAD_BLOCKING) == false) {
+ if (btu_task_post(SIG_BTU_BTA_MSG, p_msg, OSI_THREAD_MAX_TIMEOUT) == false) {
osi_free(p_msg);
}
}
assert(data != NULL);
TIMER_LIST_ENT *p_tle = (TIMER_LIST_ENT *)data;
- btu_task_post(SIG_BTU_BTA_ALARM, p_tle, OSI_THREAD_BLOCKING);
+ btu_task_post(SIG_BTU_BTA_ALARM, p_tle, OSI_THREAD_MAX_TIMEOUT);
}
void bta_sys_start_timer(TIMER_LIST_ENT *p_tle, UINT16 type, INT32 timeout_ms)
osi_free(msg);
}
-static bt_status_t btc_task_post(btc_msg_t *msg, osi_thread_blocking_t blocking)
+static bt_status_t btc_task_post(btc_msg_t *msg, uint32_t timeout)
{
btc_msg_t *lmsg;
memcpy(lmsg, msg, sizeof(btc_msg_t));
- if (osi_thread_post(btc_thread, btc_thread_handler, lmsg, 2, blocking) == false) {
+ if (osi_thread_post(btc_thread, btc_thread_handler, lmsg, 2, timeout) == false) {
return BT_STATUS_BUSY;
}
lmsg.arg = NULL;
}
- return btc_task_post(&lmsg, OSI_THREAD_BLOCKING);
+ return btc_task_post(&lmsg, OSI_THREAD_MAX_TIMEOUT);
}
evt->sig = sig;
evt->param = param;
- return osi_thread_post(a2dp_sink_local_param.btc_aa_snk_task_hdl, btc_a2dp_sink_ctrl_handler, evt, 0, OSI_THREAD_BLOCKING);
+ return osi_thread_post(a2dp_sink_local_param.btc_aa_snk_task_hdl, btc_a2dp_sink_ctrl_handler, evt, 0, OSI_THREAD_MAX_TIMEOUT);
}
static void btc_a2dp_sink_ctrl_handler(void *arg)
static void btc_a2dp_sink_data_post(void)
{
- osi_thread_post(a2dp_sink_local_param.btc_aa_snk_task_hdl, btc_a2dp_sink_data_ready, NULL, 1, OSI_THREAD_BLOCKING);
+ osi_thread_post(a2dp_sink_local_param.btc_aa_snk_task_hdl, btc_a2dp_sink_data_ready, NULL, 1, OSI_THREAD_MAX_TIMEOUT);
}
/*******************************************************************************
return;
}
btc_a2dp_sink_handle_inc_media(p_msg);
- p_msg = (tBT_SBC_HDR *)fixed_queue_try_dequeue(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ);
+ p_msg = (tBT_SBC_HDR *)fixed_queue_dequeue(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ, 0);
if ( p_msg == NULL ) {
APPL_TRACE_ERROR("Insufficient data in que ");
break;
memcpy(p_msg, p_pkt, (sizeof(BT_HDR) + p_pkt->offset + p_pkt->len));
p_msg->num_frames_to_be_processed = (*((UINT8 *)(p_msg + 1) + p_msg->offset)) & 0x0f;
APPL_TRACE_VERBOSE("btc_a2dp_sink_enque_buf %d + \n", p_msg->num_frames_to_be_processed);
- fixed_queue_enqueue(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ, p_msg);
+ fixed_queue_enqueue(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ, p_msg, FIXED_QUEUE_MAX_TIMEOUT);
btc_a2dp_sink_data_post();
} else {
/* let caller deal with a failed allocation */
static void btc_a2dp_sink_flush_q(fixed_queue_t *p_q)
{
while (! fixed_queue_is_empty(p_q)) {
- osi_free(fixed_queue_try_dequeue(p_q));
+ osi_free(fixed_queue_dequeue(p_q, 0));
}
}
evt->sig = sig;
evt->param = param;
- return osi_thread_post(a2dp_source_local_param.btc_aa_src_task_hdl, btc_a2dp_source_ctrl_handler, evt, 0, OSI_THREAD_BLOCKING);
+ return osi_thread_post(a2dp_source_local_param.btc_aa_src_task_hdl, btc_a2dp_source_ctrl_handler, evt, 0, OSI_THREAD_MAX_TIMEOUT);
}
static void btc_a2dp_source_ctrl_handler(void *arg)
static void btc_a2dp_source_data_post(void)
{
- osi_thread_post(a2dp_source_local_param.btc_aa_src_task_hdl, btc_a2dp_source_handle_timer, NULL, 1, OSI_THREAD_BLOCKING);
+ osi_thread_post(a2dp_source_local_param.btc_aa_src_task_hdl, btc_a2dp_source_handle_timer, NULL, 1, OSI_THREAD_MAX_TIMEOUT);
}
static UINT64 time_now_us()
if (btc_a2dp_source_state != BTC_A2DP_SOURCE_STATE_ON){
return NULL;
}
- return fixed_queue_try_dequeue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ);
+ return fixed_queue_dequeue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ, 0);
}
/*******************************************************************************
}
/* Enqueue the encoded SBC frame in AA Tx Queue */
- fixed_queue_enqueue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ, p_buf);
+ fixed_queue_enqueue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
} else {
osi_free(p_buf);
}
}
while (fixed_queue_length(a2dp_source_local_param.btc_aa_src_cb.TxAaQ) > (MAX_OUTPUT_A2DP_SRC_FRAME_QUEUE_SZ - nb_frame)) {
- osi_free(fixed_queue_try_dequeue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ));
+ osi_free(fixed_queue_dequeue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ, 0));
}
// Transcode frame
static void btc_a2dp_source_flush_q(fixed_queue_t *p_q)
{
while (! fixed_queue_is_empty(p_q)) {
- osi_free(fixed_queue_try_dequeue(p_q));
+ osi_free(fixed_queue_dequeue(p_q, 0));
}
}
fixed_queue_process(hci_hal_env.rx_q);
}
-bool hci_hal_h4_task_post(osi_thread_blocking_t blocking)
+bool hci_hal_h4_task_post(uint32_t timeout)
{
- return osi_thread_post(hci_h4_thread, hci_hal_h4_rx_handler, NULL, 1, blocking);
+ return osi_thread_post(hci_h4_thread, hci_hal_h4_rx_handler, NULL, 1, timeout);
}
#if (C2H_FLOW_CONTROL_INCLUDED == TRUE)
{
BT_HDR *packet;
while (!fixed_queue_is_empty(queue)) {
- packet = fixed_queue_dequeue(queue);
+ packet = fixed_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);
hci_hal_h4_hdl_rx_packet(packet);
}
}
{
//Controller rx cache buffer is ready for receiving new host packet
//Just Call Host main thread task to process pending packets.
- hci_host_task_post(OSI_THREAD_BLOCKING);
+ hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
}
static int host_recv_pkt_cb(uint8_t *data, uint16_t len)
pkt->len = len;
pkt->layer_specific = 0;
memcpy(pkt->data, data, len);
- fixed_queue_enqueue(hci_hal_env.rx_q, pkt);
- hci_hal_h4_task_post(OSI_THREAD_NON_BLOCKING);
+ fixed_queue_enqueue(hci_hal_env.rx_q, pkt, FIXED_QUEUE_MAX_TIMEOUT);
+ hci_hal_h4_task_post(0);
BTTRC_DUMP_BUFFER("Recv Pkt", pkt->data, len);
}
-bool hci_host_task_post(osi_thread_blocking_t blocking)
+bool hci_host_task_post(uint32_t timeout)
{
- return osi_thread_post(hci_host_thread, hci_host_thread_handler, NULL, 0, blocking);
+ return osi_thread_post(hci_host_thread, hci_host_thread_handler, NULL, 0, timeout);
}
static int hci_layer_init_env(void)
HCI_TRACE_DEBUG("HCI Enqueue Comamnd opcode=0x%x\n", wait_entry->opcode);
BTTRC_DUMP_BUFFER(NULL, command->data + command->offset, command->len);
- fixed_queue_enqueue(hci_host_env.command_queue, wait_entry);
- hci_host_task_post(OSI_THREAD_BLOCKING);
+ fixed_queue_enqueue(hci_host_env.command_queue, wait_entry, FIXED_QUEUE_MAX_TIMEOUT);
+ hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
}
// in case the upper layer didn't already
command->event = MSG_STACK_TO_HC_HCI_CMD;
- fixed_queue_enqueue(hci_host_env.command_queue, wait_entry);
- hci_host_task_post(OSI_THREAD_BLOCKING);
+ fixed_queue_enqueue(hci_host_env.command_queue, wait_entry, FIXED_QUEUE_MAX_TIMEOUT);
+ hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
return future;
}
transmit_command((BT_HDR *)data, NULL, NULL, NULL);
HCI_TRACE_WARNING("%s legacy transmit of command. Use transmit_command instead.\n", __func__);
} else {
- fixed_queue_enqueue(hci_host_env.packet_queue, data);
+ fixed_queue_enqueue(hci_host_env.packet_queue, data, FIXED_QUEUE_MAX_TIMEOUT);
}
- hci_host_task_post(OSI_THREAD_BLOCKING);
+ hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
}
waiting_command_t *wait_entry = NULL;
command_waiting_response_t *cmd_wait_q = &hci_host_env.cmd_waiting_q;
- wait_entry = fixed_queue_dequeue(queue);
+ wait_entry = fixed_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);
if(wait_entry->opcode == HCI_HOST_NUM_PACKETS_DONE
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
static void event_packet_ready(fixed_queue_t *queue)
{
- BT_HDR *packet = (BT_HDR *)fixed_queue_dequeue(queue);
+ BT_HDR *packet = (BT_HDR *)fixed_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);
// The queue may be the command queue or the packet queue, we don't care
packet_fragmenter->fragment_and_dispatch(packet);
/*Tell HCI Host Task to continue TX Pending commands*/
if (hci_host_env.command_credits &&
!fixed_queue_is_empty(hci_host_env.command_queue)) {
- hci_host_task_post(OSI_THREAD_BLOCKING);
+ hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
}
if (wait_entry) {
{
// Events should already have been dispatched before this point
//Tell Up-layer received packet.
- if (btu_task_post(SIG_BTU_HCI_MSG, packet, OSI_THREAD_BLOCKING) == false) {
+ if (btu_task_post(SIG_BTU_HCI_MSG, packet, OSI_THREAD_MAX_TIMEOUT) == false) {
osi_free(packet);
}
}
int hci_start_up(void);
void hci_shut_down(void);
-bool hci_host_task_post(osi_thread_blocking_t blocking);
+bool hci_host_task_post(uint32_t timeout);
#endif /* _HCI_LAYER_H_ */
return queue->capacity;
}
-void fixed_queue_enqueue(fixed_queue_t *queue, void *data)
+bool fixed_queue_enqueue(fixed_queue_t *queue, void *data, uint32_t timeout)
{
assert(queue != NULL);
assert(data != NULL);
- osi_sem_take(&queue->enqueue_sem, OSI_SEM_MAX_TIMEOUT);
-
- osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
-
- list_append(queue->list, data);
- osi_mutex_unlock(&queue->lock);
-
- osi_sem_give(&queue->dequeue_sem);
-}
-
-void *fixed_queue_dequeue(fixed_queue_t *queue)
-{
- void *ret = NULL;
-
- assert(queue != NULL);
-
- osi_sem_take(&queue->dequeue_sem, OSI_SEM_MAX_TIMEOUT);
-
- osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
- ret = list_front(queue->list);
- list_remove(queue->list, ret);
- osi_mutex_unlock(&queue->lock);
-
- osi_sem_give(&queue->enqueue_sem);
-
- return ret;
-}
-
-bool fixed_queue_try_enqueue(fixed_queue_t *queue, void *data)
-{
- assert(queue != NULL);
- assert(data != NULL);
-
- if (osi_sem_take(&queue->enqueue_sem, 0) != 0) {
+ if (osi_sem_take(&queue->enqueue_sem, timeout) != 0) {
return false;
}
return true;
}
-void *fixed_queue_try_dequeue(fixed_queue_t *queue)
+void *fixed_queue_dequeue(fixed_queue_t *queue, uint32_t timeout)
{
void *ret = NULL;
- if (queue == NULL) {
- return NULL;
- }
+ assert(queue != NULL);
- if (osi_sem_take(queue->dequeue_sem, 0) != 0) {
+ if (osi_sem_take(queue->dequeue_sem, timeout) != 0) {
return NULL;
}
#include <stdbool.h>
#include "osi/list.h"
+#include "osi/semaphore.h"
#ifndef QUEUE_SIZE_MAX
#define QUEUE_SIZE_MAX 254
#endif
+#define FIXED_QUEUE_MAX_TIMEOUT OSI_SEM_MAX_TIMEOUT
+
struct fixed_queue_t;
typedef struct fixed_queue_t fixed_queue_t;
// not be NULL.
size_t fixed_queue_capacity(fixed_queue_t *queue);
-// Enqueues the given |data| into the |queue|. The caller will be blocked
-// if nore more space is available in the queue. Neither |queue| nor |data|
-// may be NULL.
-void fixed_queue_enqueue(fixed_queue_t *queue, void *data);
+// Enqueues the given |data| into the |queue|. Depending on |timeout|, the caller
+// blocks indefinitely, returns immediately, or waits up to the timeout.
+// Returns false if the enqueue failed, true otherwise. Neither |queue| nor |data| may be NULL.
+bool fixed_queue_enqueue(fixed_queue_t *queue, void *data, uint32_t timeout);
// Dequeues the next element from |queue|. If the queue is currently empty,
-// this function will block the caller until an item is enqueued. This
-// function will never return NULL. |queue| may not be NULL.
-void *fixed_queue_dequeue(fixed_queue_t *queue);
-
-// Tries to enqueue |data| into the |queue|. This function will never block
-// the caller. If the queue capacity would be exceeded by adding one more
-// element, this function returns false immediately. Otherwise, this function
-// returns true. Neither |queue| nor |data| may be NULL.
-bool fixed_queue_try_enqueue(fixed_queue_t *queue, void *data);
-
-// Tries to dequeue an element from |queue|. This function will never block
-// the caller. If the queue is empty, this function returns NULL immediately.
-// Otherwise, the next element in the queue is returned. |queue| may not be
-// NULL.
-void *fixed_queue_try_dequeue(fixed_queue_t *queue);
+// this function blocks the caller, returns immediately, or waits up to the
+// timeout, according to the |timeout| parameter.
+// Returns NULL if the dequeue failed, otherwise a pointer to the dequeued element.
+void *fixed_queue_dequeue(fixed_queue_t *queue, uint32_t timeout);
// Returns the first element from |queue|, if present, without dequeuing it.
// This function will never block the caller. Returns NULL if there are no
#include "freertos/task.h"
#include "freertos/queue.h"
#include "freertos/semphr.h"
+#include "osi/semaphore.h"
-
-#define OSI_MUTEX_MAX_TIMEOUT 0xffffffffUL
+#define OSI_MUTEX_MAX_TIMEOUT OSI_SEM_MAX_TIMEOUT
#define osi_mutex_valid( x ) ( ( ( *x ) == NULL) ? pdFALSE : pdTRUE )
#define osi_mutex_set_invalid( x ) ( ( *x ) = NULL )
#include "freertos/task.h"
#include "esp_task.h"
#include "common/bt_defs.h"
+#include "osi/semaphore.h"
#define portBASE_TYPE int
+#define OSI_THREAD_MAX_TIMEOUT OSI_SEM_MAX_TIMEOUT
+
struct osi_thread;
typedef struct osi_thread osi_thread_t;
OSI_THREAD_CORE_AFFINITY,
} osi_thread_core_t;
-typedef enum {
- OSI_THREAD_NON_BLOCKING = 0,
- OSI_THREAD_BLOCKING,
-} osi_thread_blocking_t;
-
+/*
+ * brief: Create a thread or task
+ * param name: thread name
+ * param stack_size: thread stack size
+ * param priority: thread priority
+ * param core: the CPU core on which this thread runs; OSI_THREAD_CORE_AFFINITY means no specific CPU core
+ * param work_queue_num: specify the number of queues; queue[0] has the highest priority, and the priority decreases with index
+ * return : the thread handle on success; otherwise NULL.
+ */
osi_thread_t *osi_thread_create(const char *name, size_t stack_size, int priority, osi_thread_core_t core, uint8_t work_queue_num);
+/*
+ * brief: Destroy a thread or task
+ * param thread: pointer to the thread handle
+ */
void osi_thread_free(osi_thread_t *thread);
-bool osi_thread_post(osi_thread_t *thread, osi_thread_func_t func, void *context, int queue_idx, osi_thread_blocking_t blocking);
-
+/*
+ * brief: Post a message to a thread and ask the thread to call the given function
+ * param thread: pointer to the thread handle
+ * param func: callback function to be called by the target thread
+ * param context: argument passed to the callback function
+ * param queue_idx: index of the queue the message is sent to
+ * param timeout: post timeout; OSI_THREAD_MAX_TIMEOUT means block forever, 0 means never block, other values mean block for that many milliseconds
+ * return : true if the post succeeded, otherwise false
+ */
+bool osi_thread_post(osi_thread_t *thread, osi_thread_func_t func, void *context, int queue_idx, uint32_t timeout);
+
+/*
+ * brief: Set the priority of a thread
+ * param thread: pointer to the thread handle
+ * param priority: the new priority
+ * return : true if the priority was set successfully, otherwise false
+ */
bool osi_thread_set_priority(osi_thread_t *thread, int priority);
+/* brief: Get the thread name
+ * param thread: pointer to the thread handle
+ * return: constant pointer to the thread name
+ */
const char *osi_thread_name(osi_thread_t *thread);
+/* brief: Get the size of the specified queue
+ * param thread: pointer to the thread handle
+ * param wq_idx: the queue index of the thread
+ * return: queue size
+ */
int osi_thread_queue_wait_size(osi_thread_t *thread, int wq_idx);
#endif /* __THREAD_H__ */
}
while (!thread->stop && idx < thread->work_queue_num) {
- work_item_t *item = fixed_queue_try_dequeue(thread->work_queues[idx]);
+ work_item_t *item = fixed_queue_dequeue(thread->work_queues[idx], 0);
if (item) {
item->func(item->context);
osi_free(item);
osi_free(thread);
}
-bool osi_thread_post(osi_thread_t *thread, osi_thread_func_t func, void *context, int queue_idx, osi_thread_blocking_t blocking)
+bool osi_thread_post(osi_thread_t *thread, osi_thread_func_t func, void *context, int queue_idx, uint32_t timeout)
{
assert(thread != NULL);
assert(func != NULL);
item->func = func;
item->context = context;
- if (blocking == OSI_THREAD_BLOCKING) {
- fixed_queue_enqueue(thread->work_queues[queue_idx], item);
- } else {
- if (fixed_queue_try_enqueue(thread->work_queues[queue_idx], item) == false) {
- osi_free(item);
- return false;
- }
+ if (fixed_queue_enqueue(thread->work_queues[queue_idx], item, timeout) == false) {
+ osi_free(item);
+ return false;
}
osi_sem_give(&thread->work_sem);
if (p_lcb->cong == FALSE && !fixed_queue_is_empty(p_lcb->tx_q))
{
while (!p_lcb->cong &&
- (p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_lcb->tx_q)) != NULL)
+ (p_buf = (BT_HDR *)fixed_queue_dequeue(p_lcb->tx_q, 0)) != NULL)
{
if (L2CA_DataWrite(p_lcb->ch_lcid, p_buf) == L2CAP_DW_CONGESTED)
{
}
if (p_lcb->cong == TRUE) {
- fixed_queue_enqueue(p_lcb->tx_q, p_buf);
+ fixed_queue_enqueue(p_lcb->tx_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
/* send message to L2CAP */
}
/* clear out response queue */
- while ((p_buf = (BT_HDR *) fixed_queue_try_dequeue(p_ccb->rsp_q)) != NULL) {
+ while ((p_buf = (BT_HDR *) fixed_queue_dequeue(p_ccb->rsp_q, 0)) != NULL) {
osi_free(p_buf);
}
}
avdt_ccb_cmd_fail(p_ccb, (tAVDT_CCB_EVT *) &err_code);
/* set up next message */
- p_ccb->p_curr_cmd = (BT_HDR *) fixed_queue_try_dequeue(p_ccb->cmd_q);
+ p_ccb->p_curr_cmd = (BT_HDR *) fixed_queue_dequeue(p_ccb->cmd_q, 0);
} while (p_ccb->p_curr_cmd != NULL);
** not congested, not sending fragment, not waiting for response
*/
if ((!p_ccb->cong) && (p_ccb->p_curr_msg == NULL) && (p_ccb->p_curr_cmd == NULL)) {
- if ((p_msg = (BT_HDR *) fixed_queue_try_dequeue(p_ccb->cmd_q)) != NULL) {
+ if ((p_msg = (BT_HDR *) fixed_queue_dequeue(p_ccb->cmd_q, 0)) != NULL) {
/* make a copy of buffer in p_curr_cmd */
if ((p_ccb->p_curr_cmd = (BT_HDR *) osi_malloc(AVDT_CMD_BUF_SIZE)) != NULL) {
memcpy(p_ccb->p_curr_cmd, p_msg, (sizeof(BT_HDR) + p_msg->offset + p_msg->len));
}
/* do we have responses to send? send them */
else if (!fixed_queue_is_empty(p_ccb->rsp_q)) {
- while ((p_msg = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->rsp_q)) != NULL) {
+ while ((p_msg = (BT_HDR *)fixed_queue_dequeue(p_ccb->rsp_q, 0)) != NULL) {
if (avdt_msg_send(p_ccb, p_msg) == TRUE) {
/* break out if congested */
break;
p_ccb->label = (p_ccb->label + 1) % 16;
/* queue message and trigger ccb to send it */
- fixed_queue_enqueue(p_ccb->cmd_q, p_buf);
+ fixed_queue_enqueue(p_ccb->cmd_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
AVDT_BLD_LAYERSPEC(p_buf->layer_specific, AVDT_MSG_TYPE_RSP, p_params->hdr.label);
/* queue message and trigger ccb to send it */
- fixed_queue_enqueue(p_ccb->rsp_q, p_buf);
+ fixed_queue_enqueue(p_ccb->rsp_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
AVDT_BLD_LAYERSPEC(p_buf->layer_specific, AVDT_MSG_TYPE_REJ, p_params->hdr.label);
/* queue message and trigger ccb to send it */
- fixed_queue_enqueue(p_ccb->rsp_q, p_buf);
+ fixed_queue_enqueue(p_ccb->rsp_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
AVDT_TRACE_DEBUG("avdt_msg_send_grej");
/* queue message and trigger ccb to send it */
- fixed_queue_enqueue(p_ccb->rsp_q, p_buf);
+ fixed_queue_enqueue(p_ccb->rsp_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
/* this shouldn't be happening */
AVDT_TRACE_WARNING("*** Dropped media packet; congested");
BT_HDR *p_frag;
- while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL)
+ while ((p_frag = (BT_HDR*)fixed_queue_dequeue(p_scb->frag_q, 0)) != NULL)
osi_free(p_frag);
}
/* clean fragments queue */
BT_HDR *p_frag;
- while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
+ while ((p_frag = (BT_HDR*)fixed_queue_dequeue(p_scb->frag_q, 0)) != NULL) {
osi_free(p_frag);
}
p_scb->frag_off = 0;
#if AVDT_MULTIPLEXING == TRUE
/* clean fragments queue */
BT_HDR *p_frag;
- while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
+ while ((p_frag = (BT_HDR*)fixed_queue_dequeue(p_scb->frag_q, 0)) != NULL) {
osi_free(p_frag);
}
#endif
AVDT_TRACE_DEBUG("Dropped fragments queue");
/* clean fragments queue */
BT_HDR *p_frag;
- while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
+ while ((p_frag = (BT_HDR*)fixed_queue_dequeue(p_scb->frag_q, 0)) != NULL) {
osi_free(p_frag);
}
p_scb->frag_off = 0;
L2CA_FlushChannel(avdt_cb.ad.rt_tbl[avdt_ccb_to_idx(p_scb->p_ccb)][avdt_ad_type_to_tcid(AVDT_CHAN_MEDIA, p_scb)].lcid),
L2CAP_FLUSH_CHANS_GET);
#endif
- while ((p_pkt = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
+ while ((p_pkt = (BT_HDR*)fixed_queue_dequeue(p_scb->frag_q, 0)) != NULL) {
sent = TRUE;
AVDT_TRACE_DEBUG("Send fragment len=%d\n", p_pkt->len);
/* fragments queue contains fragment to send */
UINT16_TO_BE_STREAM(p, p_frag->layer_specific );
}
/* put fragment into gueue */
- fixed_queue_enqueue(p_scb->frag_q, p_frag);
+ fixed_queue_enqueue(p_scb->frag_q, p_frag, FIXED_QUEUE_MAX_TIMEOUT);
num_frag--;
}
}
BD_ADDR bda;
BTM_TRACE_DEBUG ("btm_acl_resubmit_page\n");
/* If there were other page request schedule can start the next one */
- if ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(btm_cb.page_queue)) != NULL) {
+ if ((p_buf = (BT_HDR *)fixed_queue_dequeue(btm_cb.page_queue, 0)) != NULL) {
/* skip 3 (2 bytes opcode and 1 byte len) to get to the bd_addr
* for both create_conn and rmt_name */
pp = (UINT8 *)(p_buf + 1) + p_buf->offset + 3;
BT_HDR *p;
BTM_TRACE_DEBUG ("btm_acl_reset_paging\n");
/* If we sent reset we are definitely not paging any more */
- while ((p = (BT_HDR *)fixed_queue_try_dequeue(btm_cb.page_queue)) != NULL) {
+ while ((p = (BT_HDR *)fixed_queue_dequeue(btm_cb.page_queue, 0)) != NULL) {
osi_free (p);
}
(bda[0] << 16) + (bda[1] << 8) + bda[2], (bda[3] << 16) + (bda[4] << 8) + bda[5]);
if (btm_cb.discing) {
btm_cb.paging = TRUE;
- fixed_queue_enqueue(btm_cb.page_queue, p);
+ fixed_queue_enqueue(btm_cb.page_queue, p, FIXED_QUEUE_MAX_TIMEOUT);
} else {
if (!BTM_ACL_IS_CONNECTED (bda)) {
BTM_TRACE_DEBUG ("connecting_bda: %06x%06x\n",
btm_cb.connecting_bda[5]);
if (btm_cb.paging &&
memcmp (bda, btm_cb.connecting_bda, BD_ADDR_LEN) != 0) {
- fixed_queue_enqueue(btm_cb.page_queue, p);
+ fixed_queue_enqueue(btm_cb.page_queue, p, FIXED_QUEUE_MAX_TIMEOUT);
} else {
p_dev_rec = btm_find_or_alloc_dev (bda);
memcpy (btm_cb.connecting_bda, p_dev_rec->bd_addr, BD_ADDR_LEN);
p->p_param = p_param;
- fixed_queue_enqueue(btm_cb.ble_ctr_cb.conn_pending_q, p);
+ fixed_queue_enqueue(btm_cb.ble_ctr_cb.conn_pending_q, p, FIXED_QUEUE_MAX_TIMEOUT);
}
/*******************************************************************************
**
tBTM_BLE_CONN_REQ *p_req;
BOOLEAN rt = FALSE;
- p_req = (tBTM_BLE_CONN_REQ*)fixed_queue_try_dequeue(btm_cb.ble_ctr_cb.conn_pending_q);
+ p_req = (tBTM_BLE_CONN_REQ*)fixed_queue_dequeue(btm_cb.ble_ctr_cb.conn_pending_q, 0);
if (p_req != NULL) {
rt = l2cble_init_direct_conn((tL2C_LCB *)(p_req->p_param));
if (sco_inx < BTM_MAX_SCO_LINKS) {
p = &btm_cb.sco_cb.sco_db[sco_inx];
- while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p->xmit_data_q)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p->xmit_data_q, 0)) != NULL) {
osi_free(p_buf);
}
}
BT_HDR *p_buf;
while (p_cb->xmit_window_size != 0)
{
- if ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_data_q)) == NULL) {
+ if ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_data_q, 0)) == NULL) {
break;
}
#if BTM_SCO_HCI_DEBUG
p_buf->len += HCI_SCO_PREAMBLE_SIZE;
if (fixed_queue_length(p_ccb->xmit_data_q) < BTM_SCO_XMIT_QUEUE_THRS) {
- fixed_queue_enqueue(p_ccb->xmit_data_q, p_buf);
+ fixed_queue_enqueue(p_ccb->xmit_data_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
btm_sco_check_send_pkts (sco_inx);
} else {
BTM_TRACE_WARNING ("SCO xmit Q overflow, pkt dropped");
btm_cb.sec_pending_q = fixed_queue_new(QUEUE_SIZE_MAX);
- while ((p_e = (tBTM_SEC_QUEUE_ENTRY *)fixed_queue_try_dequeue(bq)) != NULL) {
+ while ((p_e = (tBTM_SEC_QUEUE_ENTRY *)fixed_queue_dequeue(bq, 0)) != NULL) {
/* Check that the ACL is still up before starting security procedures */
if (btm_bda_to_acl(p_e->bd_addr, p_e->transport) != NULL) {
if (p_e->psm != 0) {
BTM_TRACE_EVENT ("%s() PSM: 0x%04x Is_Orig: %u mx_proto_id: %u mx_chan_id: %u\n",
__func__, psm, is_orig, mx_proto_id, mx_chan_id);
- fixed_queue_enqueue(btm_cb.sec_pending_q, p_e);
+ fixed_queue_enqueue(btm_cb.sec_pending_q, p_e, FIXED_QUEUE_MAX_TIMEOUT);
return (TRUE);
}
*(UINT8 *)p_e->p_ref_data = *(UINT8 *)(p_ref_data);
p_e->transport = transport;
memcpy(p_e->bd_addr, bd_addr, BD_ADDR_LEN);
- fixed_queue_enqueue(btm_cb.sec_pending_q, p_e);
+ fixed_queue_enqueue(btm_cb.sec_pending_q, p_e, FIXED_QUEUE_MAX_TIMEOUT);
return TRUE;
}
event->event = BTU_POST_TO_TASK_NO_GOOD_HORRIBLE_HACK;
- btu_task_post(SIG_BTU_HCI_MSG, event, OSI_THREAD_BLOCKING);
+ btu_task_post(SIG_BTU_HCI_MSG, event, OSI_THREAD_MAX_TIMEOUT);
}
event->event = BTU_POST_TO_TASK_NO_GOOD_HORRIBLE_HACK;
- btu_task_post(SIG_BTU_HCI_MSG, event, OSI_THREAD_BLOCKING);
+ btu_task_post(SIG_BTU_HCI_MSG, event, OSI_THREAD_MAX_TIMEOUT);
}
/*******************************************************************************
goto error_exit;
}
- if (btu_task_post(SIG_BTU_START_UP, NULL, OSI_THREAD_BLOCKING) == false) {
+ if (btu_task_post(SIG_BTU_START_UP, NULL, OSI_THREAD_MAX_TIMEOUT) == false) {
goto error_exit;
}
osi_free(evt);
}
-bool btu_task_post(uint32_t sig, void *param, osi_thread_blocking_t blocking)
+bool btu_task_post(uint32_t sig, void *param, uint32_t timeout)
{
btu_thread_evt_t *evt;
evt->sig = sig;
evt->param = param;
- return osi_thread_post(btu_thread, btu_thread_handler, evt, 0, blocking);
+ return osi_thread_post(btu_thread, btu_thread_handler, evt, 0, timeout);
}
void btu_task_start_up(void)
assert(data != NULL);
TIMER_LIST_ENT *p_tle = (TIMER_LIST_ENT *)data;
- btu_task_post(SIG_BTU_GENERAL_ALARM, p_tle, OSI_THREAD_BLOCKING);
+ btu_task_post(SIG_BTU_GENERAL_ALARM, p_tle, OSI_THREAD_MAX_TIMEOUT);
}
void btu_start_timer(TIMER_LIST_ENT *p_tle, UINT16 type, UINT32 timeout_sec)
assert(data != NULL);
TIMER_LIST_ENT *p_tle = (TIMER_LIST_ENT *)data;
- btu_task_post(SIG_BTU_L2CAP_ALARM, p_tle, OSI_THREAD_BLOCKING);
+ btu_task_post(SIG_BTU_L2CAP_ALARM, p_tle, OSI_THREAD_MAX_TIMEOUT);
}
void btu_start_quick_timer(TIMER_LIST_ENT *p_tle, UINT16 type, UINT32 timeout_ticks)
btu_stop_timer_oneshot(p_tle);
- btu_task_post(SIG_BTU_ONESHOT_ALARM, p_tle, OSI_THREAD_BLOCKING);
+ btu_task_post(SIG_BTU_ONESHOT_ALARM, p_tle, OSI_THREAD_MAX_TIMEOUT);
}
/*
{
tGAP_BLE_REQ *p_q;
- while ((p_q = (tGAP_BLE_REQ *)fixed_queue_try_dequeue(p_clcb->pending_req_q)) != NULL) {
+ while ((p_q = (tGAP_BLE_REQ *)fixed_queue_dequeue(p_clcb->pending_req_q, 0)) != NULL) {
/* send callback to all pending requests if being removed*/
if (p_q->p_cback != NULL) {
(*p_q->p_cback)(FALSE, p_clcb->bda, 0, NULL);
if (p_q != NULL) {
p_q->p_cback = p_cback;
p_q->uuid = uuid;
- fixed_queue_enqueue(p_clcb->pending_req_q, p_q);
+ fixed_queue_enqueue(p_clcb->pending_req_q, p_q, FIXED_QUEUE_MAX_TIMEOUT);
return TRUE;
}
*******************************************************************************/
BOOLEAN gap_ble_dequeue_request (tGAP_CLCB *p_clcb, UINT16 *p_uuid, tGAP_BLE_CMPL_CBACK **p_cback)
{
- tGAP_BLE_REQ *p_q = (tGAP_BLE_REQ *)fixed_queue_try_dequeue(p_clcb->pending_req_q);;
+ tGAP_BLE_REQ *p_q = (tGAP_BLE_REQ *)fixed_queue_dequeue(p_clcb->pending_req_q, 0);;
if (p_q != NULL) {
*p_cback = p_q->p_cback;
p_buf->len -= copy_len;
break;
}
- osi_free(fixed_queue_try_dequeue(p_ccb->rx_queue));
+ osi_free(fixed_queue_dequeue(p_ccb->rx_queue, 0));
}
p_ccb->rx_queue_size -= *p_len;
return (GAP_ERR_BAD_HANDLE);
}
- p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->rx_queue);
+ p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->rx_queue, 0);
if (p_buf) {
*pp_buf = p_buf;
return (GAP_ERR_BUF_OFFSET);
}
- fixed_queue_enqueue(p_ccb->tx_queue, p_buf);
+ fixed_queue_enqueue(p_ccb->tx_queue, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
if (p_ccb->is_congested) {
return (BT_PASS);
#if (GAP_CONN_POST_EVT_INCLUDED == TRUE)
gap_send_event (gap_handle);
#else
- while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->tx_queue)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->tx_queue, 0)) != NULL) {
UINT8 status = L2CA_DATA_WRITE (p_ccb->connection_id, p_buf);
if (status == L2CAP_DW_CONGESTED) {
GAP_TRACE_EVENT ("GAP_WriteData %d bytes", p_buf->len);
- fixed_queue_enqueue(p_ccb->tx_queue, p_buf);
+ fixed_queue_enqueue(p_ccb->tx_queue, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
if (p_ccb->is_congested) {
#if (GAP_CONN_POST_EVT_INCLUDED == TRUE)
gap_send_event (gap_handle);
#else
- while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->tx_queue)) != NULL)
+ while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->tx_queue, 0)) != NULL)
{
UINT8 status = L2CA_DATA_WRITE (p_ccb->connection_id, p_buf);
}
if (p_ccb->con_state == GAP_CCB_STATE_CONNECTED) {
- fixed_queue_enqueue(p_ccb->rx_queue, p_msg);
+ fixed_queue_enqueue(p_ccb->rx_queue, p_msg, FIXED_QUEUE_MAX_TIMEOUT);
p_ccb->rx_queue_size += p_msg->len;
/*
p_ccb->p_callback (p_ccb->gap_handle, event);
if (!is_congested) {
- while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->tx_queue)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->tx_queue, 0)) != NULL) {
status = L2CA_DATA_WRITE (p_ccb->connection_id, p_buf);
if (status == L2CAP_DW_CONGESTED) {
p_ccb->rx_queue_size = 0;
while (!fixed_queue_is_empty(p_ccb->rx_queue)) {
- osi_free(fixed_queue_try_dequeue(p_ccb->rx_queue));
+ osi_free(fixed_queue_dequeue(p_ccb->rx_queue, 0));
}
fixed_queue_free(p_ccb->rx_queue, NULL);
p_ccb->rx_queue = NULL;
while (!fixed_queue_is_empty(p_ccb->tx_queue)) {
- osi_free(fixed_queue_try_dequeue(p_ccb->tx_queue));
+ osi_free(fixed_queue_dequeue(p_ccb->tx_queue, 0));
}
fixed_queue_free(p_ccb->tx_queue, NULL);
p_ccb->tx_queue = NULL;
return;
}
tGATT_PENDING_ENC_CLCB *p_buf =
- (tGATT_PENDING_ENC_CLCB *)fixed_queue_try_dequeue(p_tcb->pending_enc_clcb);
+ (tGATT_PENDING_ENC_CLCB *)fixed_queue_dequeue(p_tcb->pending_enc_clcb, 0);
if (p_buf != NULL) {
if (result == BTM_SUCCESS) {
if (gatt_get_sec_act(p_tcb) == GATT_SEC_ENCRYPT_MITM ) {
/* start all other pending operation in queue */
for (size_t count = fixed_queue_length(p_tcb->pending_enc_clcb);
count > 0; count--) {
- p_buf = (tGATT_PENDING_ENC_CLCB *)fixed_queue_try_dequeue(p_tcb->pending_enc_clcb);
+ p_buf = (tGATT_PENDING_ENC_CLCB *)fixed_queue_dequeue(p_tcb->pending_enc_clcb, 0);
if (p_buf != NULL) {
gatt_security_check_start(p_buf->p_clcb);
osi_free(p_buf);
size_t count = fixed_queue_length(p_tcb->pending_enc_clcb);
for (; count > 0; count--) {
tGATT_PENDING_ENC_CLCB *p_buf =
- (tGATT_PENDING_ENC_CLCB *)fixed_queue_try_dequeue(p_tcb->pending_enc_clcb);
+ (tGATT_PENDING_ENC_CLCB *)fixed_queue_dequeue(p_tcb->pending_enc_clcb, 0);
if (p_buf != NULL) {
gatt_security_check_start(p_buf->p_clcb);
osi_free(p_buf);
p_db->p_free_mem = (UINT8 *) p_buf;
p_db->mem_free = GATT_DB_BUF_SIZE;
- fixed_queue_enqueue(p_db->svc_buffer, p_buf);
+ fixed_queue_enqueue(p_db->svc_buffer, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
return TRUE;
if (p_tcb->sr_cmd.multi_rsp_q) {
while (!fixed_queue_is_empty(p_tcb->sr_cmd.multi_rsp_q)) {
- osi_free(fixed_queue_try_dequeue(p_tcb->sr_cmd.multi_rsp_q));
+ osi_free(fixed_queue_dequeue(p_tcb->sr_cmd.multi_rsp_q, 0));
}
fixed_queue_free(p_tcb->sr_cmd.multi_rsp_q, NULL);
}
}
memcpy((void *)p_buf, (const void *)p_msg, sizeof(tGATTS_RSP));
- fixed_queue_enqueue(p_cmd->multi_rsp_q, p_buf);
+ fixed_queue_enqueue(p_cmd->multi_rsp_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
p_cmd->status = status;
if (status == GATT_SUCCESS) {
//dequeue prepare write data
while(fixed_queue_try_peek_first(prepare_record->queue)) {
- queue_data = fixed_queue_dequeue(prepare_record->queue);
+ queue_data = fixed_queue_dequeue(prepare_record->queue, FIXED_QUEUE_MAX_TIMEOUT);
if (is_prepare_write_valid){
if((queue_data->p_attr->p_value != NULL) && (queue_data->p_attr->p_value->attr_val.attr_val != NULL)){
if(is_first) {
if (prepare_record->queue == NULL) {
prepare_record->queue = fixed_queue_new(QUEUE_SIZE_MAX);
}
- fixed_queue_enqueue(prepare_record->queue, queue_data);
+ fixed_queue_enqueue(prepare_record->queue, queue_data, FIXED_QUEUE_MAX_TIMEOUT);
}
}
/* release all queued indications */
while (!fixed_queue_is_empty(p_tcb->pending_ind_q)) {
- osi_free(fixed_queue_try_dequeue(p_tcb->pending_ind_q));
+ osi_free(fixed_queue_dequeue(p_tcb->pending_ind_q, 0));
}
fixed_queue_free(p_tcb->pending_ind_q, NULL);
p_tcb->pending_ind_q = NULL;
/* release all queued indications */
while (!fixed_queue_is_empty(p_tcb->pending_enc_clcb)) {
- osi_free(fixed_queue_try_dequeue(p_tcb->pending_enc_clcb));
+ osi_free(fixed_queue_dequeue(p_tcb->pending_enc_clcb, 0));
}
fixed_queue_free(p_tcb->pending_enc_clcb, NULL);
p_tcb->pending_enc_clcb = NULL;
if (p_tcb->prepare_write_record.queue) {
/* release all queued prepare write packets */
while (!fixed_queue_is_empty(p_tcb->prepare_write_record.queue)) {
- osi_free(fixed_queue_dequeue(p_tcb->prepare_write_record.queue));
+ osi_free(fixed_queue_dequeue(p_tcb->prepare_write_record.queue, FIXED_QUEUE_MAX_TIMEOUT));
}
fixed_queue_free(p_tcb->prepare_write_record.queue, NULL);
p_tcb->prepare_write_record.queue = NULL;
if ((p_buf = (tGATT_VALUE *)osi_malloc((UINT16)sizeof(tGATT_VALUE))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a pending indication");
memcpy(p_buf, p_ind, sizeof(tGATT_VALUE));
- fixed_queue_enqueue(p_tcb->pending_ind_q, p_buf);
+ fixed_queue_enqueue(p_tcb->pending_ind_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
return p_buf;
}
if ((p_buf = (tGATTS_PENDING_NEW_SRV_START *)osi_malloc((UINT16)sizeof(tGATTS_PENDING_NEW_SRV_START))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a new pending new srv start");
p_buf->p_new_srv_start = p_new_srv_start;
- fixed_queue_enqueue(gatt_cb.pending_new_srv_start_q, p_buf);
+ fixed_queue_enqueue(gatt_cb.pending_new_srv_start_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
return p_buf;
}
if ((p_buf = (tGATTS_SRV_CHG *)osi_malloc((UINT16)sizeof(tGATTS_SRV_CHG))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a srv chg client");
memcpy(p_buf, p_srv_chg, sizeof(tGATTS_SRV_CHG));
- fixed_queue_enqueue(gatt_cb.srv_chg_clt_q, p_buf);
+ fixed_queue_enqueue(gatt_cb.srv_chg_clt_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
return p_buf;
if (p) {
while (!fixed_queue_is_empty(p->svc_db.svc_buffer)) {
- osi_free(fixed_queue_try_dequeue(p->svc_db.svc_buffer));
+ osi_free(fixed_queue_dequeue(p->svc_db.svc_buffer, 0));
}
fixed_queue_free(p->svc_db.svc_buffer, NULL);
memset(p, 0, sizeof(tGATT_HDL_LIST_ELEM));
if (memcmp(p_app_id, &p_elem->asgn_range.app_uuid128, sizeof(tBT_UUID)) == 0) {
gatt_free_attr_value_buffer(p_elem);
while (!fixed_queue_is_empty(p_elem->svc_db.svc_buffer)) {
- osi_free(fixed_queue_try_dequeue(p_elem->svc_db.svc_buffer));
+ osi_free(fixed_queue_dequeue(p_elem->svc_db.svc_buffer, 0));
}
fixed_queue_free(p_elem->svc_db.svc_buffer, NULL);
p_elem->svc_db.svc_buffer = NULL;
if ((p_buf = (tGATT_PENDING_ENC_CLCB *)osi_malloc((UINT16)sizeof(tGATT_PENDING_ENC_CLCB))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a new pending encryption channel clcb");
p_buf->p_clcb = p_clcb;
- fixed_queue_enqueue(p_tcb->pending_enc_clcb, p_buf);
+ fixed_queue_enqueue(p_tcb->pending_enc_clcb, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
return p_buf;
}
UINT16 BTU_BleAclPktSize(void);
-bool btu_task_post(uint32_t sig, void *param, osi_thread_blocking_t blocking);
+bool btu_task_post(uint32_t sig, void *param, uint32_t timeout);
/*
#ifdef __cplusplus
/* If needed, flush buffers in the CCB xmit hold queue */
while ( (num_to_flush != 0) && (!fixed_queue_is_empty(p_ccb->xmit_hold_q))) {
- BT_HDR *p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
+ BT_HDR *p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_hold_q, 0);
if (p_buf) {
osi_free (p_buf);
}
if (!fixed_queue_is_empty(p_lcb->le_sec_pending_q))
{
- p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q);
+ p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q, FIXED_QUEUE_MAX_TIMEOUT);
if (!p_buf)
{
L2CAP_TRACE_WARNING ("%s Security complete for request not initiated from L2CAP",
while (!fixed_queue_is_empty(p_lcb->le_sec_pending_q))
{
- p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q);
+ p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q, FIXED_QUEUE_MAX_TIMEOUT);
if (status != BTM_SUCCESS) {
(*(p_buf->p_callback))(p_bda, BT_TRANSPORT_LE, p_buf->p_ref_data, status);
p_buf->is_originator = is_originator;
p_buf->p_callback = p_callback;
p_buf->p_ref_data = p_ref_data;
- fixed_queue_enqueue(p_lcb->le_sec_pending_q, p_buf);
+ fixed_queue_enqueue(p_lcb->le_sec_pending_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
status = btm_ble_start_sec_check(bd_addr, psm, is_originator, &l2cble_sec_comp, p_ref_data);
return status;
UINT16_TO_STREAM (p, p_ccb->remote_cid);
}
- fixed_queue_enqueue(p_ccb->xmit_hold_q, p_buf);
+ fixed_queue_enqueue(p_ccb->xmit_hold_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
l2cu_check_channel_congestion (p_ccb);
fixed_queue_t *temp_q = p_ccb->fcrb.srej_rcv_hold_q;
p_ccb->fcrb.srej_rcv_hold_q = fixed_queue_new(QUEUE_SIZE_MAX);
- while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(temp_q)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_dequeue(temp_q, 0)) != NULL) {
if (p_ccb->in_use && (p_ccb->chnl_state == CST_OPEN)) {
/* Get the control word */
p = ((UINT8 *)(p_buf + 1)) + p_buf->offset - L2CAP_FCR_OVERHEAD;
#endif
for (xx = 0; xx < num_bufs_acked; xx++) {
- BT_HDR *p_tmp = (BT_HDR *)fixed_queue_try_dequeue(p_fcrb->waiting_for_ack_q);
+ BT_HDR *p_tmp = (BT_HDR *)fixed_queue_dequeue(p_fcrb->waiting_for_ack_q, 0);
ls = p_tmp->layer_specific & L2CAP_FCR_SAR_BITS;
if ( (ls == L2CAP_FCR_UNSEG_SDU) || (ls == L2CAP_FCR_END_SDU) ) {
num_lost, tx_seq, p_fcrb->next_seq_expected, p_fcrb->rej_sent);
p_buf->layer_specific = tx_seq;
- fixed_queue_enqueue(p_fcrb->srej_rcv_hold_q, p_buf);
+ fixed_queue_enqueue(p_fcrb->srej_rcv_hold_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
} else {
L2CAP_TRACE_WARNING ("process_i_frame() CID: 0x%04x frame dropped in Srej Sent next_srej:%u hold_q.count:%u win_sz:%u",
p_ccb->local_cid, next_srej, fixed_queue_length(p_fcrb->srej_rcv_hold_q), p_ccb->our_cfg.fcr.tx_win_sz);
p_ccb->local_cid, tx_seq, fixed_queue_length(p_fcrb->srej_rcv_hold_q));
}
p_buf->layer_specific = tx_seq;
- fixed_queue_enqueue(p_fcrb->srej_rcv_hold_q, p_buf);
+ fixed_queue_enqueue(p_fcrb->srej_rcv_hold_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
p_fcrb->srej_sent = TRUE;
l2c_fcr_send_S_frame (p_ccb, L2CAP_FCR_SUP_SREJ, 0);
}
/* Also flush our retransmission queue */
while (!fixed_queue_is_empty(p_ccb->fcrb.retrans_q)) {
- osi_free(fixed_queue_try_dequeue(p_ccb->fcrb.retrans_q));
+ osi_free(fixed_queue_dequeue(p_ccb->fcrb.retrans_q, 0));
}
if (list_ack != NULL) {
{
p_buf2->layer_specific = p_buf->layer_specific;
- fixed_queue_enqueue(p_ccb->fcrb.retrans_q, p_buf2);
+ fixed_queue_enqueue(p_ccb->fcrb.retrans_q, p_buf2, FIXED_QUEUE_MAX_TIMEOUT);
}
if ( (tx_seq != L2C_FCR_RETX_ALL_PKTS) || (p_buf2 == NULL) ) {
/* If there is anything in the retransmit queue, that goes first
*/
- p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->fcrb.retrans_q);
+ p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->fcrb.retrans_q, 0);
if (p_buf != NULL) {
/* Update Rx Seq and FCS if we acked some packets while this one was queued */
prepare_I_frame (p_ccb, p_buf, TRUE);
return (NULL);
}
} else { /* Use the original buffer if no segmentation, or the last segment */
- p_xmit = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
+ p_xmit = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_hold_q, 0);
if (p_xmit->event != 0) {
last_seg = TRUE;
}
/* Pretend we sent it and it got lost */
- fixed_queue_enqueue(p_ccb->fcrb.waiting_for_ack_q, p_xmit);
+ fixed_queue_enqueue(p_ccb->fcrb.waiting_for_ack_q, p_xmit, FIXED_QUEUE_MAX_TIMEOUT);
return (NULL);
} else {
#if (L2CAP_ERTM_STATS == TRUE)
}
p_wack->layer_specific = p_xmit->layer_specific;
- fixed_queue_enqueue(p_ccb->fcrb.waiting_for_ack_q, p_wack);
+ fixed_queue_enqueue(p_ccb->fcrb.waiting_for_ack_q, p_wack, FIXED_QUEUE_MAX_TIMEOUT);
}
#if (L2CAP_ERTM_STATS == TRUE)
{
/* clean up any security pending UCD */
while (p_lcb->ucd_out_sec_pending_q.p_first) {
- osi_free(fixed_queue_try_dequeue(p_lcb->ucd_out_sec_pending_q));
+ osi_free(fixed_queue_dequeue(p_lcb->ucd_out_sec_pending_q, 0));
}
fixed_queue_free(p_lcb->ucd_out_sec_pending_q, NULL);
p_lcb->ucd_out_sec_pending_q = NULL;
while (! fixed_queue_is_empty(p_lcb->ucd_in_sec_pending_q)) {
- osi_free(fixed_queue_try_dequeue(p_lcb->ucd_in_sec_pending_q));
+ osi_free(fixed_queue_dequeue(p_lcb->ucd_in_sec_pending_q, 0));
}
fixed_queue_free(p_lcb->ucd_in_sec_pending_q, NULL);
p_lcb->ucd_in_sec_pending_q = NULL;
*******************************************************************************/
void l2c_ucd_enqueue_pending_out_sec_q(tL2C_CCB *p_ccb, void *p_data)
{
- fixed_queue_enqueue(p_ccb->p_lcb->ucd_out_sec_pending_q, p_data);
+ fixed_queue_enqueue(p_ccb->p_lcb->ucd_out_sec_pending_q, p_data, FIXED_QUEUE_MAX_TIMEOUT);
l2cu_check_channel_congestion (p_ccb);
}
*******************************************************************************/
void l2c_ucd_send_pending_out_sec_q(tL2C_CCB *p_ccb)
{
- BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_out_sec_pending_q);
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_dequeue(p_ccb->p_lcb->ucd_out_sec_pending_q, 0);
if (p_buf != NULL) {
l2c_enqueue_peer_data (p_ccb, (BT_HDR *)p_buf);
*******************************************************************************/
void l2c_ucd_discard_pending_out_sec_q(tL2C_CCB *p_ccb)
{
- BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_out_sec_pending_q);
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_dequeue(p_ccb->p_lcb->ucd_out_sec_pending_q, 0);
/* we may need to report to application */
*******************************************************************************/
BOOLEAN l2c_ucd_check_pending_in_sec_q(tL2C_CCB *p_ccb)
{
- BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q);
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q, 0);
if (p_buf != NULL) {
UINT16 psm;
*******************************************************************************/
void l2c_ucd_send_pending_in_sec_q(tL2C_CCB *p_ccb)
{
- BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q);
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q, 0);
if (p_buf != NULL) {
p_ccb->p_rcb->ucd.cb_info.pL2CA_UCD_Data_Cb(p_ccb->p_lcb->remote_bd_addr, (BT_HDR *)p_buf);
*******************************************************************************/
void l2c_ucd_discard_pending_in_sec_q(tL2C_CCB *p_ccb)
{
- BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q);
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q, 0);
if (p_buf) {
osi_free (p_buf);
break;
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
- fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
+ fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data, FIXED_QUEUE_MAX_TIMEOUT);
break;
case L2CEVT_L2CA_DATA_WRITE: /* Upper layer data to send */
break;
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
- fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
+ fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data, FIXED_QUEUE_MAX_TIMEOUT);
break;
case L2CEVT_L2CAP_INFO_RSP:
break;
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
- fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
+ fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data, FIXED_QUEUE_MAX_TIMEOUT);
break;
case L2CEVT_SEC_RE_SEND_CMD: /* BTM has enough info to proceed */
/* stop idle timer of UCD */
btu_stop_timer (&p_ccb->timer_entry);
- fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
+ fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data, FIXED_QUEUE_MAX_TIMEOUT);
l2c_ucd_check_pending_in_sec_q (p_ccb);
break;
{
while (!fixed_queue_is_empty(p_lcb->le_sec_pending_q))
{
- tL2CAP_SEC_DATA *p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q);
+ tL2CAP_SEC_DATA *p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q, FIXED_QUEUE_MAX_TIMEOUT);
if (p_buf->p_callback) {
p_buf->p_callback(p_lcb->remote_bd_addr, p_lcb->transport, p_buf->p_ref_data, BTM_DEV_RESET);
}
layer checks that all buffers are sent before disconnecting.
*/
if (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_BASIC_MODE) {
- while ((p_buf2 = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q)) != NULL) {
+ while ((p_buf2 = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_hold_q, 0)) != NULL) {
l2cu_set_acl_hci_header (p_buf2, p_ccb);
l2c_link_check_send_pkts (p_ccb->p_lcb, p_ccb, p_buf2);
}
} else {
if (!fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
- p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
+ p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_hold_q, 0);
if (NULL == p_buf) {
L2CAP_TRACE_ERROR("l2cu_get_buffer_to_send: No data to be sent");
return (NULL);
}
} else {
- p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
+ p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_hold_q, 0);
if (NULL == p_buf) {
L2CAP_TRACE_ERROR("l2cu_get_buffer_to_send() #2: No data to be sent");
return (NULL);
count = fixed_queue_length(p_port->rx.queue);
- while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->rx.queue, 0)) != NULL) {
osi_free (p_buf);
}
if (purge_flags & PORT_PURGE_TXCLEAR) {
osi_mutex_global_lock(); /* to prevent tx.queue_size from being negative */
- while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->tx.queue)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->tx.queue, 0)) != NULL) {
osi_free (p_buf);
}
p_data += p_buf->len;
}
- osi_free(fixed_queue_try_dequeue(p_port->rx.queue));
+ osi_free(fixed_queue_dequeue(p_port->rx.queue, 0));
osi_mutex_global_unlock();
osi_mutex_global_lock();
- p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue);
+ p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->rx.queue, 0);
if (p_buf) {
p_port->rx.queue_size -= p_buf->len;
p_port->rfc.state,
p_port->port_ctrl);
- fixed_queue_enqueue(p_port->tx.queue, p_buf);
+ fixed_queue_enqueue(p_port->tx.queue, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
p_port->tx.queue_size += p_buf->len;
return (PORT_CMD_PENDING);
osi_mutex_global_lock();
- fixed_queue_enqueue(p_port->rx.queue, p_buf);
+ fixed_queue_enqueue(p_port->rx.queue, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
p_port->rx.queue_size += p_buf->len;
osi_mutex_global_unlock();
/* get data from tx queue and send it */
osi_mutex_global_lock();
- if ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->tx.queue)) != NULL) {
+ if ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->tx.queue, 0)) != NULL) {
p_port->tx.queue_size -= p_buf->len;
osi_mutex_global_unlock();
osi_mutex_global_lock();
RFCOMM_TRACE_DEBUG("port_release_port, p_port:%p", p_port);
- while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->rx.queue, 0)) != NULL) {
osi_free (p_buf);
}
p_port->rx.queue_size = 0;
- while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->tx.queue)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->tx.queue, 0)) != NULL) {
osi_free (p_buf);
}
rfc_save_lcid_mcb (p_mcb, p_mcb->lcid);
/* clean up before reuse it */
- while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_mcb->cmd_q)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_mcb->cmd_q, 0)) != NULL) {
osi_free(p_buf);
}
__func__, p_mcb, p_mcb->lcid,
rfc_find_lcid_mcb(p_mcb->lcid));
}
- fixed_queue_enqueue(p_mcb->cmd_q, p_buf);
+ fixed_queue_enqueue(p_mcb->cmd_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
/* handle queue if L2CAP not congested */
while (p_mcb->l2cap_congested == FALSE) {
- if ((p = (BT_HDR *)fixed_queue_try_dequeue(p_mcb->cmd_q)) == NULL) {
+ if ((p = (BT_HDR *)fixed_queue_dequeue(p_mcb->cmd_q, 0)) == NULL) {
break;
}