#include "btc_main.h"
#include "bt.h"
#include "future.h"
+#include "allocator.h"
static bool bd_already_enable = false;
static bool bd_already_init = false;
#include "bt_trace.h"
#include <string.h>
-// #include <cutils/properties.h>
+#include "allocator.h"
#include "bta_av_int.h"
#include "avdt_api.h"
/* we got a stream; get its capabilities */
if (p_scb->p_cap == NULL) {
- p_scb->p_cap = (tAVDT_CFG *) GKI_getbuf(sizeof(tAVDT_CFG));
+ p_scb->p_cap = (tAVDT_CFG *) osi_malloc(sizeof(tAVDT_CFG));
}
if (p_scb->p_cap == NULL) {
i = p_scb->num_seps;
}
}
- if (p_scb && (p_msg = (tBTA_AV_STR_MSG *) GKI_getbuf((UINT16) (sizeof(tBTA_AV_STR_MSG) + sec_len))) != NULL) {
+ if (p_scb && (p_msg = (tBTA_AV_STR_MSG *) osi_malloc((UINT16) (sizeof(tBTA_AV_STR_MSG) + sec_len))) != NULL) {
/* copy event data, bd addr, and handle to event message buffer */
p_msg->hdr.offset = 0;
}
}
if (index == BTA_AV_NUM_STRS) { /* cannot find correct handler */
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
return;
}
p_pkt->event = BTA_AV_MEDIA_DATA_EVT;
p_scb->seps[p_scb->sep_idx].p_app_data_cback(BTA_AV_MEDIA_DATA_EVT, (tBTA_AV_MEDIA *)p_pkt);
- GKI_freebuf(p_pkt); /* a copy of packet had been delivered, we free this buffer */
+ osi_free(p_pkt); /* a copy of packet had been delivered, we free this buffer */
}
/*******************************************************************************
tBTA_AV_SDP_RES *p_msg;
tBTA_AV_SCB *p_scb;
- if ((p_msg = (tBTA_AV_SDP_RES *) GKI_getbuf(sizeof(tBTA_AV_SDP_RES))) != NULL) {
+ if ((p_msg = (tBTA_AV_SDP_RES *) osi_malloc(sizeof(tBTA_AV_SDP_RES))) != NULL) {
p_msg->hdr.event = (found) ? BTA_AV_SDP_DISC_OK_EVT : BTA_AV_SDP_DISC_FAIL_EVT;
p_scb = bta_av_hndl_to_scb(bta_av_cb.handle);
/* allocate discovery database */
if (p_scb->p_disc_db == NULL) {
- p_scb->p_disc_db = (tSDP_DISCOVERY_DB *) GKI_getbuf(BTA_AV_DISC_BUF_SIZE);
+ p_scb->p_disc_db = (tSDP_DISCOVERY_DB *) osi_malloc(BTA_AV_DISC_BUF_SIZE);
}
/* only one A2D find service is active at a time */
while (!list_is_empty(p_scb->a2d_list)) {
p_buf = (BT_HDR *)list_front(p_scb->a2d_list);
list_remove(p_scb->a2d_list, p_buf);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
/* drop the audio buffers queued in L2CAP */
p_scb->num_recfg = 0;
/* store the new configuration in control block */
if (p_scb->p_cap == NULL) {
- p_scb->p_cap = (tAVDT_CFG *) GKI_getbuf(sizeof(tAVDT_CFG));
+ p_scb->p_cap = (tAVDT_CFG *) osi_malloc(sizeof(tAVDT_CFG));
}
if ((p_cfg = p_scb->p_cap) == NULL) {
/* report failure */
} else {
/* too many buffers in a2d_list, drop it. */
bta_av_co_audio_drop(p_scb->hndl);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
}
p_scb->coll_mask = 0;
bta_av_set_scb_sst_init (p_scb);
- if ((p_buf = (tBTA_AV_API_OPEN *) GKI_getbuf(sizeof(tBTA_AV_API_OPEN))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_OPEN *) osi_malloc(sizeof(tBTA_AV_API_OPEN))) != NULL) {
memcpy(p_buf, &(p_scb->open_api), sizeof(tBTA_AV_API_OPEN));
bta_sys_sendmsg(p_buf);
}
#include "avdt_api.h"
#include "utl.h"
#include "l2c_api.h"
-// #include "osi/include/list.h"
+#include "allocator.h"
#include "list.h"
#if( defined BTA_AR_INCLUDED ) && (BTA_AR_INCLUDED == TRUE)
#include "bta_ar_api.h"
BT_HDR *p_msg;
UNUSED(status);
- if ((p_msg = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_msg = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_msg->event = BTA_AV_SDP_AVRC_DISC_EVT;
bta_sys_sendmsg(p_msg);
}
}
if (msg_event) {
- if ((p_msg = (tBTA_AV_RC_CONN_CHG *) GKI_getbuf(sizeof(tBTA_AV_RC_CONN_CHG))) != NULL) {
+ if ((p_msg = (tBTA_AV_RC_CONN_CHG *) osi_malloc(sizeof(tBTA_AV_RC_CONN_CHG))) != NULL) {
p_msg->hdr.event = msg_event;
p_msg->handle = handle;
if (peer_addr) {
/* Create a copy of the message */
tBTA_AV_RC_MSG *p_buf =
- (tBTA_AV_RC_MSG *)GKI_getbuf((UINT16)(sizeof(tBTA_AV_RC_MSG) + data_len));
+ (tBTA_AV_RC_MSG *)osi_malloc((UINT16)(sizeof(tBTA_AV_RC_MSG) + data_len));
if (p_buf != NULL) {
p_buf->hdr.event = BTA_AV_AVRC_MSG_EVT;
p_buf->handle = handle;
}
if (do_free) {
- GKI_freebuf (p_data->api_meta_rsp.p_pkt);
+ osi_free (p_data->api_meta_rsp.p_pkt);
}
}
{
UNUSED(p_cb);
- GKI_freebuf (p_data->api_meta_rsp.p_pkt);
+ osi_free (p_data->api_meta_rsp.p_pkt);
}
/*******************************************************************************
p_scb->coll_mask &= ~BTA_AV_COLL_API_CALLED;
/* BTA_AV_API_OPEN_EVT */
- if ((p_buf = (tBTA_AV_API_OPEN *) GKI_getbuf(sizeof(tBTA_AV_API_OPEN))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_OPEN *) osi_malloc(sizeof(tBTA_AV_API_OPEN))) != NULL) {
memcpy(p_buf, &(p_scb->open_api), sizeof(tBTA_AV_API_OPEN));
bta_sys_sendmsg(p_buf);
}
if (p_addr) {
/* allocate discovery database */
if (p_cb->p_disc_db == NULL) {
- p_cb->p_disc_db = (tSDP_DISCOVERY_DB *) GKI_getbuf(BTA_AV_DISC_BUF_SIZE);
+ p_cb->p_disc_db = (tSDP_DISCOVERY_DB *) osi_malloc(BTA_AV_DISC_BUF_SIZE);
}
if (p_cb->p_disc_db) {
while (!list_is_empty(p_scb->a2d_list)) {
p_buf = (BT_HDR *)list_front(p_scb->a2d_list);
list_remove(p_scb->a2d_list, p_buf);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
#include "bt_target.h"
#if defined(BTA_AV_INCLUDED) && (BTA_AV_INCLUDED == TRUE)
+#include "allocator.h"
#include "bta_api.h"
#include "bta_sys.h"
#include "bta_av_api.h"
#include "bta_av_int.h"
-#include "gki.h"
#include <string.h>
/*****************************************************************************
/* register with BTA system manager */
bta_sys_register(BTA_ID_AV, &bta_av_reg);
- if ((p_buf = (tBTA_AV_API_ENABLE *) GKI_getbuf(sizeof(tBTA_AV_API_ENABLE))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_ENABLE *) osi_malloc(sizeof(tBTA_AV_API_ENABLE))) != NULL) {
p_buf->hdr.event = BTA_AV_API_ENABLE_EVT;
p_buf->p_cback = p_cback;
p_buf->features = features;
BT_HDR *p_buf;
bta_sys_deregister(BTA_ID_AV);
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_AV_API_DISABLE_EVT;
bta_sys_sendmsg(p_buf);
}
tBTA_AV_API_REG *p_buf;
- if ((p_buf = (tBTA_AV_API_REG *) GKI_getbuf(sizeof(tBTA_AV_API_REG))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_REG *) osi_malloc(sizeof(tBTA_AV_API_REG))) != NULL) {
p_buf->hdr.layer_specific = chnl;
p_buf->hdr.event = BTA_AV_API_REGISTER_EVT;
if (p_service_name) {
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->layer_specific = hndl;
p_buf->event = BTA_AV_API_DEREGISTER_EVT;
bta_sys_sendmsg(p_buf);
{
tBTA_AV_API_OPEN *p_buf;
- if ((p_buf = (tBTA_AV_API_OPEN *) GKI_getbuf(sizeof(tBTA_AV_API_OPEN))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_OPEN *) osi_malloc(sizeof(tBTA_AV_API_OPEN))) != NULL) {
p_buf->hdr.event = BTA_AV_API_OPEN_EVT;
p_buf->hdr.layer_specific = handle;
bdcpy(p_buf->bd_addr, bd_addr);
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_AV_API_CLOSE_EVT;
p_buf->layer_specific = handle;
bta_sys_sendmsg(p_buf);
{
tBTA_AV_API_DISCNT *p_buf;
- if ((p_buf = (tBTA_AV_API_DISCNT *) GKI_getbuf(sizeof(tBTA_AV_API_DISCNT))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_DISCNT *) osi_malloc(sizeof(tBTA_AV_API_DISCNT))) != NULL) {
p_buf->hdr.event = BTA_AV_API_DISCONNECT_EVT;
bdcpy(p_buf->bd_addr, bd_addr);
bta_sys_sendmsg(p_buf);
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_AV_API_START_EVT;
bta_sys_sendmsg(p_buf);
}
{
#if (BTA_AV_SINK_INCLUDED == TRUE)
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_AV_API_SINK_ENABLE_EVT;
p_buf->layer_specific = enable;
bta_sys_sendmsg(p_buf);
{
tBTA_AV_API_STOP *p_buf;
- if ((p_buf = (tBTA_AV_API_STOP *) GKI_getbuf(sizeof(tBTA_AV_API_STOP))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_STOP *) osi_malloc(sizeof(tBTA_AV_API_STOP))) != NULL) {
p_buf->hdr.event = BTA_AV_API_STOP_EVT;
p_buf->flush = TRUE;
p_buf->suspend = suspend;
{
tBTA_AV_API_RCFG *p_buf;
- if ((p_buf = (tBTA_AV_API_RCFG *) GKI_getbuf((UINT16) (sizeof(tBTA_AV_API_RCFG) + num_protect))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_RCFG *) osi_malloc((UINT16) (sizeof(tBTA_AV_API_RCFG) + num_protect))) != NULL) {
p_buf->hdr.layer_specific = hndl;
p_buf->hdr.event = BTA_AV_API_RECONFIG_EVT;
p_buf->num_protect = num_protect;
{
tBTA_AV_API_PROTECT_REQ *p_buf;
- if ((p_buf = (tBTA_AV_API_PROTECT_REQ *) GKI_getbuf((UINT16) (sizeof(tBTA_AV_API_PROTECT_REQ) + len))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_PROTECT_REQ *) osi_malloc((UINT16) (sizeof(tBTA_AV_API_PROTECT_REQ) + len))) != NULL) {
p_buf->hdr.layer_specific = hndl;
p_buf->hdr.event = BTA_AV_API_PROTECT_REQ_EVT;
p_buf->len = len;
{
tBTA_AV_API_PROTECT_RSP *p_buf;
- if ((p_buf = (tBTA_AV_API_PROTECT_RSP *) GKI_getbuf((UINT16) (sizeof(tBTA_AV_API_PROTECT_RSP) + len))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_PROTECT_RSP *) osi_malloc((UINT16) (sizeof(tBTA_AV_API_PROTECT_RSP) + len))) != NULL) {
p_buf->hdr.layer_specific = hndl;
p_buf->hdr.event = BTA_AV_API_PROTECT_RSP_EVT;
p_buf->len = len;
{
tBTA_AV_API_REMOTE_CMD *p_buf;
- if ((p_buf = (tBTA_AV_API_REMOTE_CMD *) GKI_getbuf(sizeof(tBTA_AV_API_REMOTE_CMD))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_REMOTE_CMD *) osi_malloc(sizeof(tBTA_AV_API_REMOTE_CMD))) != NULL) {
p_buf->hdr.event = BTA_AV_API_REMOTE_CMD_EVT;
p_buf->hdr.layer_specific = rc_handle;
p_buf->msg.op_id = rc_id;
{
tBTA_AV_API_VENDOR *p_buf;
- if ((p_buf = (tBTA_AV_API_VENDOR *) GKI_getbuf((UINT16) (sizeof(tBTA_AV_API_VENDOR) + len))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_VENDOR *) osi_malloc((UINT16) (sizeof(tBTA_AV_API_VENDOR) + len))) != NULL) {
p_buf->hdr.event = BTA_AV_API_VENDOR_CMD_EVT;
p_buf->hdr.layer_specific = rc_handle;
p_buf->msg.hdr.ctype = cmd_code;
{
tBTA_AV_API_VENDOR *p_buf;
- if ((p_buf = (tBTA_AV_API_VENDOR *) GKI_getbuf((UINT16) (sizeof(tBTA_AV_API_VENDOR) + len))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_VENDOR *) osi_malloc((UINT16) (sizeof(tBTA_AV_API_VENDOR) + len))) != NULL) {
p_buf->hdr.event = BTA_AV_API_VENDOR_RSP_EVT;
p_buf->hdr.layer_specific = rc_handle;
p_buf->msg.hdr.ctype = rsp_code;
{
tBTA_AV_API_OPEN_RC *p_buf;
- if ((p_buf = (tBTA_AV_API_OPEN_RC *) GKI_getbuf(sizeof(tBTA_AV_API_OPEN_RC))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_OPEN_RC *) osi_malloc(sizeof(tBTA_AV_API_OPEN_RC))) != NULL) {
p_buf->hdr.event = BTA_AV_API_RC_OPEN_EVT;
p_buf->hdr.layer_specific = handle;
bta_sys_sendmsg(p_buf);
{
tBTA_AV_API_CLOSE_RC *p_buf;
- if ((p_buf = (tBTA_AV_API_CLOSE_RC *) GKI_getbuf(sizeof(tBTA_AV_API_CLOSE_RC))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_CLOSE_RC *) osi_malloc(sizeof(tBTA_AV_API_CLOSE_RC))) != NULL) {
p_buf->hdr.event = BTA_AV_API_RC_CLOSE_EVT;
p_buf->hdr.layer_specific = rc_handle;
bta_sys_sendmsg(p_buf);
{
tBTA_AV_API_META_RSP *p_buf;
- if ((p_buf = (tBTA_AV_API_META_RSP *) GKI_getbuf((UINT16) (sizeof(tBTA_AV_API_META_RSP)))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_META_RSP *) osi_malloc((UINT16) (sizeof(tBTA_AV_API_META_RSP)))) != NULL) {
p_buf->hdr.event = BTA_AV_API_META_RSP_EVT;
p_buf->hdr.layer_specific = rc_handle;
p_buf->rsp_code = rsp_code;
bta_sys_sendmsg(p_buf);
} else if (p_pkt) {
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
}
}
{
tBTA_AV_API_META_RSP *p_buf;
- if ((p_buf = (tBTA_AV_API_META_RSP *) GKI_getbuf((UINT16) (sizeof(tBTA_AV_API_META_RSP)))) != NULL) {
+ if ((p_buf = (tBTA_AV_API_META_RSP *) osi_malloc((UINT16) (sizeof(tBTA_AV_API_META_RSP)))) != NULL) {
p_buf->hdr.event = BTA_AV_API_META_RSP_EVT;
p_buf->hdr.layer_specific = rc_handle;
p_buf->p_pkt = p_pkt;
#include <stddef.h>
#include "bt_target.h"
-#include "gki.h"
#include "bta_api.h"
#include "bta_av_int.h"
#include "bta_sys.h"
#include "bta_av_int.h"
#include "bta_av_ci.h"
+#include "allocator.h"
#include <string.h>
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->layer_specific = chnl;
p_buf->event = BTA_AV_CI_SRC_DATA_READY_EVT;
bta_sys_sendmsg(p_buf);
{
tBTA_AV_CI_SETCONFIG *p_buf;
- if ((p_buf = (tBTA_AV_CI_SETCONFIG *) GKI_getbuf(sizeof(tBTA_AV_CI_SETCONFIG))) != NULL) {
+ if ((p_buf = (tBTA_AV_CI_SETCONFIG *) osi_malloc(sizeof(tBTA_AV_CI_SETCONFIG))) != NULL) {
p_buf->hdr.layer_specific = hndl;
p_buf->hdr.event = (err_code == AVDT_SUCCESS) ?
BTA_AV_CI_SETCONFIG_OK_EVT : BTA_AV_CI_SETCONFIG_FAIL_EVT;
#include <string.h>
#include "bt_target.h"
-// #include "osi/include/log.h"
#include "bt_trace.h"
+#include "allocator.h"
#if defined(BTA_AV_INCLUDED) && (BTA_AV_INCLUDED == TRUE)
#include "bta_av_int.h"
}
}
- if (p_scb && (p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if (p_scb && (p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
/* send the event through the audio state machine.
* only when the audio SM is open, the main SM opens the RC connection as INT */
p_buf->event = p->event;
for (xx = 0; xx < BTA_AV_NUM_STRS; xx++) {
if (bta_av_cb.p_scb[xx] == NULL) {
/* found an empty spot */
- p_ret = (tBTA_AV_SCB *)GKI_getbuf(sizeof(tBTA_AV_SCB));
+ p_ret = (tBTA_AV_SCB *)osi_malloc(sizeof(tBTA_AV_SCB));
if (p_ret) {
memset(p_ret, 0, sizeof(tBTA_AV_SCB));
p_ret->rc_handle = BTA_AV_RC_HANDLE_NONE;
assert(p_scb != NULL);
list_free(p_scb->a2d_list);
- GKI_freebuf(p_scb);
+ osi_free(p_scb);
}
/*******************************************************************************
//(AVDT_CONNECT_IND_EVT == event && AVDT_ACP == p_data->hdr.err_param))
(AVDT_CONNECT_IND_EVT == event))&& */
- (p_msg = (tBTA_AV_STR_MSG *) GKI_getbuf((UINT16) (sizeof(tBTA_AV_STR_MSG)))) != NULL) {
+ (p_msg = (tBTA_AV_STR_MSG *) osi_malloc((UINT16) (sizeof(tBTA_AV_STR_MSG)))) != NULL) {
p_msg->hdr.event = evt;
p_msg->hdr.layer_specific = event;
p_msg->hdr.offset = p_data->hdr.err_param;
/* note that more than one SCB (a2dp & vdp) maybe waiting for this event */
p_scb = bta_av_cb.p_scb[i];
if (p_scb && (bdcmp (peer_addr, p_scb->peer_addr) == 0) &&
- (p_buf = (tBTA_AV_ROLE_RES *) GKI_getbuf(sizeof(tBTA_AV_ROLE_RES))) != NULL) {
+ (p_buf = (tBTA_AV_ROLE_RES *) osi_malloc(sizeof(tBTA_AV_ROLE_RES))) != NULL) {
APPL_TRACE_DEBUG("new_role:%d, hci_status:x%x hndl: x%x\n", id, app_id, p_scb->hndl);
/*
if ((id != BTM_ROLE_MASTER) && (app_id != HCI_SUCCESS))
{
tBTA_AV_SCB *p_scbi;
int i;
- UINT16 size, copy_size;
+ UINT16 copy_size;
BT_HDR *p_new;
if (!p_buf) {
}
if (bta_av_cb.audio_open_cnt >= 2) {
- size = GKI_get_buf_size(p_buf);
copy_size = BT_HDR_SIZE + p_buf->len + p_buf->offset;
/* more than one audio channel is connected */
for (i = 0; i < BTA_AV_NUM_STRS; i++) {
(bta_av_cb.conn_audio & BTA_AV_HNDL_TO_MSK(i)) && /* connected audio */
p_scbi && p_scbi->co_started ) { /* scb is used and started */
/* enqueue the data only when the stream is started */
- p_new = (BT_HDR *)GKI_getbuf(size);
+ p_new = (BT_HDR *)osi_malloc(copy_size);
if (p_new) {
memcpy(p_new, p_buf, copy_size);
list_append(p_scbi->a2d_list, p_new);
bta_av_co_audio_drop(p_scbi->hndl);
BT_HDR *p_buf = list_front(p_scbi->a2d_list);
list_remove(p_scbi->a2d_list, p_buf);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
}
#include "bt_target.h"
#include "bt_types.h"
-#include "gki.h"
#include "bta_sys.h"
#include "bta_api.h"
#include "bta_dm_int.h"
bta_dm_cb.is_bta_dm_active = TRUE;
/* send a message to BTA SYS */
- if ((sys_enable_event = (tBTA_SYS_HW_MSG *) GKI_getbuf(sizeof(tBTA_SYS_HW_MSG))) != NULL) {
+ if ((sys_enable_event = (tBTA_SYS_HW_MSG *) osi_malloc(sizeof(tBTA_SYS_HW_MSG))) != NULL) {
sys_enable_event->hdr.event = BTA_SYS_API_ENABLE_EVT;
sys_enable_event->hw_module = BTA_SYS_HW_BLUETOOTH;
if ((bta_dm_search_cb.num_uuid = p_data->search.num_uuid) != 0 &&
p_data->search.p_uuid != NULL) {
- if ((bta_dm_search_cb.p_srvc_uuid = (tBT_UUID *)GKI_getbuf(len)) == NULL) {
+ if ((bta_dm_search_cb.p_srvc_uuid = (tBT_UUID *)osi_malloc(len)) == NULL) {
APPL_TRACE_ERROR("%s no resources", __func__);
result.status = BTA_FAILURE;
if (BTM_IsInquiryActive()) {
if (BTM_CancelInquiry() != BTM_CMD_STARTED) {
bta_dm_search_cancel_notify(NULL);
- p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG));
+ p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG));
if (p_msg != NULL) {
p_msg->hdr.event = BTA_DM_SEARCH_CMPL_EVT;
p_msg->hdr.layer_specific = BTA_DM_API_DISCOVER_EVT;
else if (!bta_dm_search_cb.name_discover_done) {
BTM_CancelRemoteDeviceName();
- if ((p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ if ((p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_SEARCH_CMPL_EVT;
p_msg->hdr.layer_specific = BTA_DM_API_DISCOVER_EVT;
bta_sys_sendmsg(p_msg);
}
} else {
- if ((p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ if ((p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_INQUIRY_CMPL_EVT;
p_msg->hdr.layer_specific = BTA_DM_API_DISCOVER_EVT;
bta_sys_sendmsg(p_msg);
utl_freebuf((void **)&bta_dm_search_cb.p_srvc_uuid);
if ((bta_dm_search_cb.num_uuid = p_data->discover.num_uuid) != 0 &&
p_data->discover.p_uuid != NULL) {
- if ((bta_dm_search_cb.p_srvc_uuid = (tBT_UUID *)GKI_getbuf(len)) == NULL) {
+ if ((bta_dm_search_cb.p_srvc_uuid = (tBT_UUID *)osi_malloc(len)) == NULL) {
p_data->discover.p_cback(BTA_DM_DISC_CMPL_EVT, NULL);
return;
}
{
tBTA_DM_MSG *p_msg;
- if ((p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ if ((p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_SEARCH_CMPL_EVT;
p_msg->hdr.layer_specific = BTA_DM_API_DI_DISCOVER_EVT;
p_msg->hdr.offset = result;
bdcpy(bta_dm_search_cb.peer_bdaddr, p_data->di_disc.bd_addr);
bta_dm_di_cb.p_di_db = p_data->di_disc.p_sdp_db;
- if ((bta_dm_search_cb.p_sdp_db = (tSDP_DISCOVERY_DB *)GKI_getbuf(BTA_DM_SDP_DB_SIZE)) != NULL) {
+ if ((bta_dm_search_cb.p_sdp_db = (tSDP_DISCOVERY_DB *)osi_malloc(BTA_DM_SDP_DB_SIZE)) != NULL) {
if ( SDP_DiDiscover(bta_dm_search_cb.peer_bdaddr, p_data->di_disc.p_sdp_db,
p_data->di_disc.len, bta_dm_di_disc_callback) == SDP_SUCCESS) {
result = BTA_SUCCESS;
}
if ( result == BTA_FAILURE &&
- (p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ (p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_SEARCH_CMPL_EVT;
p_msg->hdr.layer_specific = BTA_DM_API_DI_DISCOVER_EVT;
p_data->hdr.offset = result;
/* no devices, search complete */
bta_dm_search_cb.services = 0;
- if ((p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ if ((p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_SEARCH_CMPL_EVT;
p_msg->hdr.layer_specific = BTA_DM_API_DISCOVER_EVT;
bta_sys_sendmsg(p_msg);
} while (bta_dm_search_cb.service_index <= BTA_MAX_SERVICE_ID);
-// GKI_freebuf(bta_dm_search_cb.p_sdp_db);
+// osi_free(bta_dm_search_cb.p_sdp_db);
// bta_dm_search_cb.p_sdp_db = NULL;
APPL_TRACE_DEBUG("%s services_found = %04x", __FUNCTION__,
bta_dm_search_cb.services_found);
BTM_SecDeleteRmtNameNotifyCallback(&bta_dm_service_search_remname_cback);
- if ((p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ if ((p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_DISCOVERY_RESULT_EVT;
p_msg->disc_result.result.disc_res.result = BTA_SUCCESS;
p_msg->disc_result.result.disc_res.p_raw_data = NULL;
p_msg->disc_result.result.disc_res.num_uuids = num_uuids;
p_msg->disc_result.result.disc_res.p_uuid_list = NULL;
if (num_uuids > 0) {
- p_msg->disc_result.result.disc_res.p_uuid_list = (UINT8 *)GKI_getbuf(num_uuids * MAX_UUID_SIZE);
+ p_msg->disc_result.result.disc_res.p_uuid_list = (UINT8 *)osi_malloc(num_uuids * MAX_UUID_SIZE);
if (p_msg->disc_result.result.disc_res.p_uuid_list) {
memcpy(p_msg->disc_result.result.disc_res.p_uuid_list, uuid_list,
num_uuids * MAX_UUID_SIZE);
bta_dm_search_cb.p_sdp_db->raw_used,
bta_dm_search_cb.p_sdp_db->raw_data);
- p_msg->disc_result.result.disc_res.p_raw_data = GKI_getbuf(bta_dm_search_cb.p_sdp_db->raw_used);
+ p_msg->disc_result.result.disc_res.p_raw_data = osi_malloc(bta_dm_search_cb.p_sdp_db->raw_used);
if ( NULL != p_msg->disc_result.result.disc_res.p_raw_data ) {
memcpy( p_msg->disc_result.result.disc_res.p_raw_data,
bta_dm_search_cb.p_sdp_db->raw_data,
bta_dm_search_cb.p_sdp_db->raw_used;
} else {
- APPL_TRACE_DEBUG("%s GKI Alloc failed to allocate %d bytes !!", __func__,
+ APPL_TRACE_DEBUG("%s Alloc failed to allocate %d bytes !!", __func__,
bta_dm_search_cb.p_sdp_db->raw_used);
}
}
/* not able to connect go to next device */
- GKI_freebuf(bta_dm_search_cb.p_sdp_db);
+ osi_free(bta_dm_search_cb.p_sdp_db);
bta_dm_search_cb.p_sdp_db = NULL;
BTM_SecDeleteRmtNameNotifyCallback(&bta_dm_service_search_remname_cback);
- if ((p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ if ((p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_DISCOVERY_RESULT_EVT;
p_msg->disc_result.result.disc_res.result = BTA_FAILURE;
p_msg->disc_result.result.disc_res.services = bta_dm_search_cb.services_found;
#endif
bta_dm_search_cb.p_search_cback(BTA_DM_DISC_RES_EVT, &p_data->disc_result.result);
- tBTA_DM_MSG *p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG));
+ tBTA_DM_MSG *p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG));
/* send a message to change state */
if (p_msg != NULL) {
{
UNUSED(p_data);
if (bta_dm_search_cb.p_sdp_db) {
- GKI_freebuf(bta_dm_search_cb.p_sdp_db);
+ osi_free(bta_dm_search_cb.p_sdp_db);
bta_dm_search_cb.p_sdp_db = NULL;
}
}
/*******************************************************************************
**
** Function         bta_dm_queue_search
**
** Description      Queues a search request to be serviced later: any pending
**                  queued request is released and replaced by a copy of the
**                  incoming tBTA_DM_API_SEARCH message. Only one request is
**                  held at a time.
**
** Parameters       p_data - incoming search request message (copied; caller
**                           retains ownership of its buffer)
**
** Returns          void
**
*******************************************************************************/
void bta_dm_queue_search (tBTA_DM_MSG *p_data)
{
    /* A newer request supersedes any previously queued one. */
    if (bta_dm_search_cb.p_search_queue) {
        osi_free(bta_dm_search_cb.p_search_queue);
    }

    bta_dm_search_cb.p_search_queue = (tBTA_DM_MSG *)osi_malloc(sizeof(tBTA_DM_API_SEARCH));

    /* osi_malloc can return NULL on OOM (checked at every other call site in
     * this module); copying through a NULL pointer would crash. */
    if (bta_dm_search_cb.p_search_queue != NULL) {
        memcpy(bta_dm_search_cb.p_search_queue, p_data, sizeof(tBTA_DM_API_SEARCH));
    } else {
        APPL_TRACE_ERROR("%s no resources", __func__);
    }
}
/*******************************************************************************
**
** Function         bta_dm_queue_disc
**
** Description      Queues a discovery request to be serviced later: any
**                  pending queued request is released and replaced by a copy
**                  of the incoming tBTA_DM_API_DISCOVER message. Only one
**                  request is held at a time.
**
** Parameters       p_data - incoming discovery request message (copied;
**                           caller retains ownership of its buffer)
**
** Returns          void
**
*******************************************************************************/
void bta_dm_queue_disc (tBTA_DM_MSG *p_data)
{
    /* A newer request supersedes any previously queued one. */
    if (bta_dm_search_cb.p_search_queue) {
        osi_free(bta_dm_search_cb.p_search_queue);
    }

    bta_dm_search_cb.p_search_queue = (tBTA_DM_MSG *)osi_malloc(sizeof(tBTA_DM_API_DISCOVER));

    /* osi_malloc can return NULL on OOM (checked at every other call site in
     * this module); copying through a NULL pointer would crash. */
    if (bta_dm_search_cb.p_search_queue != NULL) {
        memcpy(bta_dm_search_cb.p_search_queue, p_data, sizeof(tBTA_DM_API_DISCOVER));
    } else {
        APPL_TRACE_ERROR("%s no resources", __func__);
    }
}
#endif ///SDP_INCLUDED == TRUE
{
UNUSED(p_data);
if (bta_dm_search_cb.p_search_queue) {
- GKI_freebuf(bta_dm_search_cb.p_search_queue);
+ osi_free(bta_dm_search_cb.p_search_queue);
bta_dm_search_cb.p_search_queue = NULL;
}
}
{
UNUSED(p_data);
if (bta_dm_search_cb.p_sdp_db) {
- GKI_freebuf(bta_dm_search_cb.p_sdp_db);
+ osi_free(bta_dm_search_cb.p_sdp_db);
bta_dm_search_cb.p_sdp_db = NULL;
}
while (bta_dm_search_cb.service_index < BTA_MAX_SERVICE_ID) {
if ( bta_dm_search_cb.services_to_search
& (tBTA_SERVICE_MASK)(BTA_SERVICE_ID_TO_SERVICE_MASK(bta_dm_search_cb.service_index))) {
- if ((bta_dm_search_cb.p_sdp_db = (tSDP_DISCOVERY_DB *)GKI_getbuf(BTA_DM_SDP_DB_SIZE)) != NULL) {
+ if ((bta_dm_search_cb.p_sdp_db = (tSDP_DISCOVERY_DB *)osi_malloc(BTA_DM_SDP_DB_SIZE)) != NULL) {
APPL_TRACE_DEBUG("bta_dm_search_cb.services = %04x***********", bta_dm_search_cb.services);
/* try to search all services by search based on L2CAP UUID */
if (bta_dm_search_cb.services == BTA_ALL_SERVICE_MASK ) {
if (!SDP_ServiceSearchAttributeRequest (bd_addr, bta_dm_search_cb.p_sdp_db, &bta_dm_sdp_callback)) {
/* if discovery not successful with this device
proceed to next one */
- GKI_freebuf(bta_dm_search_cb.p_sdp_db);
+ osi_free(bta_dm_search_cb.p_sdp_db);
bta_dm_search_cb.p_sdp_db = NULL;
bta_dm_search_cb.service_index = BTA_MAX_SERVICE_ID;
/* no more services to be discovered */
if (bta_dm_search_cb.service_index >= BTA_MAX_SERVICE_ID) {
- if ((p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ if ((p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_DISCOVERY_RESULT_EVT;
p_msg->disc_result.result.disc_res.services = bta_dm_search_cb.services_found;
bdcpy (p_msg->disc_result.result.disc_res.bd_addr, bta_dm_search_cb.peer_bdaddr);
/* no devices, search complete */
bta_dm_search_cb.services = 0;
- if ((p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ if ((p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_SEARCH_CMPL_EVT;
p_msg->hdr.layer_specific = BTA_DM_API_DISCOVER_EVT;
bta_sys_sendmsg(p_msg);
}
/* name discovery and service discovery are done for this device */
- if ((p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ if ((p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_DISCOVERY_RESULT_EVT;
/* initialize the data structure - includes p_raw_data and raw_data_size */
memset(&(p_msg->disc_result.result), 0, sizeof(tBTA_DM_DISC_RES));
tBTA_DM_SDP_RESULT *p_msg;
- if ((p_msg = (tBTA_DM_SDP_RESULT *) GKI_getbuf(sizeof(tBTA_DM_SDP_RESULT))) != NULL) {
+ if ((p_msg = (tBTA_DM_SDP_RESULT *) osi_malloc(sizeof(tBTA_DM_SDP_RESULT))) != NULL) {
p_msg->hdr.event = BTA_DM_SDP_RESULT_EVT;
p_msg->sdp_result = sdp_status;
bta_sys_sendmsg(p_msg);
if (bta_dm_search_cb.cancel_pending == FALSE) {
APPL_TRACE_DEBUG("%s", __FUNCTION__);
- p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG));
+ p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG));
if (p_msg != NULL) {
p_msg->inq_cmpl.hdr.event = BTA_DM_INQUIRY_CMPL_EVT;
p_msg->inq_cmpl.num = ((tBTM_INQUIRY_CMPL *)p_result)->num_resp;
bta_dm_search_cb.cancel_pending = FALSE;
bta_dm_search_cancel_notify(NULL);
- p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG));
+ p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG));
if (p_msg != NULL) {
p_msg->hdr.event = BTA_DM_SEARCH_CMPL_EVT;
p_msg->hdr.layer_specific = BTA_DM_API_DISCOVER_EVT;
}
#endif
- if ((p_msg = (tBTA_DM_REM_NAME *) GKI_getbuf(sizeof(tBTA_DM_REM_NAME))) != NULL) {
+ if ((p_msg = (tBTA_DM_REM_NAME *) osi_malloc(sizeof(tBTA_DM_REM_NAME))) != NULL) {
bdcpy (p_msg->result.disc_res.bd_addr, bta_dm_search_cb.peer_bdaddr);
BCM_STRNCPY_S((char *)p_msg->result.disc_res.bd_name, sizeof(BD_NAME), (char *)p_remote_name->remote_bd_name, (BD_NAME_LEN));
{
tBTA_DM_ACL_CHANGE *p_msg;
- if ((p_msg = (tBTA_DM_ACL_CHANGE *) GKI_getbuf(sizeof(tBTA_DM_ACL_CHANGE))) != NULL) {
+ if ((p_msg = (tBTA_DM_ACL_CHANGE *) osi_malloc(sizeof(tBTA_DM_ACL_CHANGE))) != NULL) {
p_msg->event = p_data->event;
p_msg->is_new = FALSE;
bta_sys_hw_register( BTA_SYS_HW_BLUETOOTH, bta_dm_sys_hw_cback );
/* send a message to BTA SYS */
- if ((sys_enable_event = (tBTA_SYS_HW_MSG *) GKI_getbuf(sizeof(tBTA_SYS_HW_MSG))) != NULL) {
+ if ((sys_enable_event = (tBTA_SYS_HW_MSG *) osi_malloc(sizeof(tBTA_SYS_HW_MSG))) != NULL) {
sys_enable_event->hdr.event = BTA_SYS_API_DISABLE_EVT;
sys_enable_event->hw_module = BTA_SYS_HW_BLUETOOTH;
bta_sys_sendmsg(sys_enable_event);
#endif // BTA_EIR_CANNED_UUID_LIST
/* Allocate a buffer to hold HCI command */
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf(BTM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(BTM_CMD_BUF_SIZE)) == NULL) {
APPL_TRACE_ERROR("bta_dm_set_eir couldn't allocate buffer");
return;
}
bta_dm_search_cb.uuid_to_search = 0;
/* no more services to be discovered */
- if ((p_msg = (tBTA_DM_MSG *) GKI_getbuf(sizeof(tBTA_DM_MSG))) != NULL) {
+ if ((p_msg = (tBTA_DM_MSG *) osi_malloc(sizeof(tBTA_DM_MSG))) != NULL) {
p_msg->hdr.event = BTA_DM_DISCOVERY_RESULT_EVT;
p_msg->disc_result.result.disc_res.result = (status == BTA_GATT_OK) ? BTA_SUCCESS : BTA_FAILURE;
APPL_TRACE_DEBUG("%s service found: 0x%08x", __FUNCTION__,
p_msg->disc_result.result.disc_res.device_type |= BT_DEVICE_TYPE_BLE;
if ( bta_dm_search_cb.ble_raw_used > 0 ) {
- p_msg->disc_result.result.disc_res.p_raw_data = GKI_getbuf(bta_dm_search_cb.ble_raw_used);
+ p_msg->disc_result.result.disc_res.p_raw_data = osi_malloc(bta_dm_search_cb.ble_raw_used);
memcpy( p_msg->disc_result.result.disc_res.p_raw_data,
bta_dm_search_cb.p_ble_rawdata,
*
******************************************************************************/
-#include "gki.h"
#include "bta_sys.h"
#include "bta_api.h"
#include "bta_dm_int.h"
#include "btm_int.h"
#include <string.h>
#include "utl.h"
+#include "allocator.h"
/*****************************************************************************
** Constants
/* if UUID list is not provided as static data */
bta_sys_eir_register(bta_dm_eir_update_uuid);
- if ((p_msg = (tBTA_DM_API_ENABLE *) GKI_getbuf(sizeof(tBTA_DM_API_ENABLE))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_ENABLE *) osi_malloc(sizeof(tBTA_DM_API_ENABLE))) != NULL) {
p_msg->hdr.event = BTA_DM_API_ENABLE_EVT;
p_msg->p_sec_cback = p_cback;
bta_sys_sendmsg(p_msg);
BT_HDR *p_msg;
- if ((p_msg = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_msg = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_msg->event = BTA_DM_API_DISABLE_EVT;
bta_sys_sendmsg(p_msg);
} else {
APPL_TRACE_API("BTA_EnableTestMode");
- if ((p_msg = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_msg = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_msg->event = BTA_DM_API_ENABLE_TEST_MODE_EVT;
bta_sys_sendmsg(p_msg);
return BTA_SUCCESS;
APPL_TRACE_API("BTA_DisableTestMode");
- if ((p_msg = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_msg = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_msg->event = BTA_DM_API_DISABLE_TEST_MODE_EVT;
bta_sys_sendmsg(p_msg);
}
tBTA_DM_API_SET_NAME *p_msg;
- if ((p_msg = (tBTA_DM_API_SET_NAME *) GKI_getbuf(sizeof(tBTA_DM_API_SET_NAME))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_SET_NAME *) osi_malloc(sizeof(tBTA_DM_API_SET_NAME))) != NULL) {
p_msg->hdr.event = BTA_DM_API_SET_NAME_EVT;
/* truncate the name if needed */
BCM_STRNCPY_S((char *)p_msg->name, sizeof(p_msg->name), p_name, BD_NAME_LEN - 1);
tBTA_DM_API_SET_VISIBILITY *p_msg;
- if ((p_msg = (tBTA_DM_API_SET_VISIBILITY *) GKI_getbuf(sizeof(tBTA_DM_API_SET_VISIBILITY))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_SET_VISIBILITY *) osi_malloc(sizeof(tBTA_DM_API_SET_VISIBILITY))) != NULL) {
p_msg->hdr.event = BTA_DM_API_SET_VISIBILITY_EVT;
p_msg->disc_mode = disc_mode;
p_msg->conn_mode = conn_mode;
tBTA_DM_API_SEARCH *p_msg;
- if ((p_msg = (tBTA_DM_API_SEARCH *) GKI_getbuf(sizeof(tBTA_DM_API_SEARCH))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_SEARCH *) osi_malloc(sizeof(tBTA_DM_API_SEARCH))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_SEARCH));
p_msg->hdr.event = BTA_DM_API_SEARCH_EVT;
{
BT_HDR *p_msg;
- if ((p_msg = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_msg = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_msg->event = BTA_DM_API_SEARCH_CANCEL_EVT;
bta_sys_sendmsg(p_msg);
}
{
tBTA_DM_API_DISCOVER *p_msg;
- if ((p_msg = (tBTA_DM_API_DISCOVER *) GKI_getbuf(sizeof(tBTA_DM_API_DISCOVER))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_DISCOVER *) osi_malloc(sizeof(tBTA_DM_API_DISCOVER))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_DISCOVER));
p_msg->hdr.event = BTA_DM_API_DISCOVER_EVT;
{
tBTA_DM_API_DISCOVER *p_msg;
- if ((p_msg = (tBTA_DM_API_DISCOVER *) GKI_getbuf(sizeof(tBTA_DM_API_DISCOVER))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_DISCOVER *) osi_malloc(sizeof(tBTA_DM_API_DISCOVER))) != NULL) {
p_msg->hdr.event = BTA_DM_API_DISCOVER_EVT;
bdcpy(p_msg->bd_addr, bd_addr);
p_msg->services = BTA_USER_SERVICE_MASK; //Not exposed at API level
{
tBTA_DM_API_BOND *p_msg;
- p_msg = (tBTA_DM_API_BOND *) GKI_getbuf(sizeof(tBTA_DM_API_BOND));
+ p_msg = (tBTA_DM_API_BOND *) osi_malloc(sizeof(tBTA_DM_API_BOND));
if (p_msg != NULL) {
p_msg->hdr.event = BTA_DM_API_BOND_EVT;
bdcpy(p_msg->bd_addr, bd_addr);
{
tBTA_DM_API_BOND *p_msg;
- if ((p_msg = (tBTA_DM_API_BOND *) GKI_getbuf(sizeof(tBTA_DM_API_BOND))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BOND *) osi_malloc(sizeof(tBTA_DM_API_BOND))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BOND_EVT;
bdcpy(p_msg->bd_addr, bd_addr);
p_msg->transport = transport;
{
tBTA_DM_API_BOND_CANCEL *p_msg;
- if ((p_msg = (tBTA_DM_API_BOND_CANCEL *) GKI_getbuf(sizeof(tBTA_DM_API_BOND_CANCEL))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BOND_CANCEL *) osi_malloc(sizeof(tBTA_DM_API_BOND_CANCEL))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BOND_CANCEL_EVT;
bdcpy(p_msg->bd_addr, bd_addr);
bta_sys_sendmsg(p_msg);
{
tBTA_DM_API_PIN_REPLY *p_msg;
- if ((p_msg = (tBTA_DM_API_PIN_REPLY *) GKI_getbuf(sizeof(tBTA_DM_API_PIN_REPLY))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_PIN_REPLY *) osi_malloc(sizeof(tBTA_DM_API_PIN_REPLY))) != NULL) {
p_msg->hdr.event = BTA_DM_API_PIN_REPLY_EVT;
bdcpy(p_msg->bd_addr, bd_addr);
p_msg->accept = accept;
{
tBTA_DM_API_LOC_OOB *p_msg;
- if ((p_msg = (tBTA_DM_API_LOC_OOB *) GKI_getbuf(sizeof(tBTA_DM_API_LOC_OOB))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_LOC_OOB *) osi_malloc(sizeof(tBTA_DM_API_LOC_OOB))) != NULL) {
p_msg->hdr.event = BTA_DM_API_LOC_OOB_EVT;
bta_sys_sendmsg(p_msg);
}
{
tBTA_DM_API_CONFIRM *p_msg;
- if ((p_msg = (tBTA_DM_API_CONFIRM *) GKI_getbuf(sizeof(tBTA_DM_API_CONFIRM))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_CONFIRM *) osi_malloc(sizeof(tBTA_DM_API_CONFIRM))) != NULL) {
p_msg->hdr.event = BTA_DM_API_CONFIRM_EVT;
bdcpy(p_msg->bd_addr, bd_addr);
p_msg->accept = accept;
tBTA_DM_API_ADD_DEVICE *p_msg;
- if ((p_msg = (tBTA_DM_API_ADD_DEVICE *) GKI_getbuf(sizeof(tBTA_DM_API_ADD_DEVICE))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_ADD_DEVICE *) osi_malloc(sizeof(tBTA_DM_API_ADD_DEVICE))) != NULL) {
memset (p_msg, 0, sizeof(tBTA_DM_API_ADD_DEVICE));
p_msg->hdr.event = BTA_DM_API_ADD_DEVICE_EVT;
{
tBTA_DM_API_REMOVE_DEVICE *p_msg;
- if ((p_msg = (tBTA_DM_API_REMOVE_DEVICE *) GKI_getbuf(sizeof(tBTA_DM_API_REMOVE_DEVICE))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_REMOVE_DEVICE *) osi_malloc(sizeof(tBTA_DM_API_REMOVE_DEVICE))) != NULL) {
memset (p_msg, 0, sizeof(tBTA_DM_API_REMOVE_DEVICE));
p_msg->hdr.event = BTA_DM_API_REMOVE_DEVICE_EVT;
{
tBTA_DM_API_EXECUTE_CBACK *p_msg;
- if ((p_msg = (tBTA_DM_API_EXECUTE_CBACK *) GKI_getbuf(sizeof(tBTA_DM_API_EXECUTE_CBACK))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_EXECUTE_CBACK *) osi_malloc(sizeof(tBTA_DM_API_EXECUTE_CBACK))) != NULL) {
p_msg->hdr.event = BTA_DM_API_EXECUTE_CBACK_EVT;
p_msg->p_param = p_param;
p_msg->p_exec_cback = p_callback;
{
tBTA_DM_API_ADD_BLEKEY *p_msg;
- if ((p_msg = (tBTA_DM_API_ADD_BLEKEY *) GKI_getbuf(sizeof(tBTA_DM_API_ADD_BLEKEY))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_ADD_BLEKEY *) osi_malloc(sizeof(tBTA_DM_API_ADD_BLEKEY))) != NULL) {
memset (p_msg, 0, sizeof(tBTA_DM_API_ADD_BLEKEY));
p_msg->hdr.event = BTA_DM_API_ADD_BLEKEY_EVT;
{
tBTA_DM_API_ADD_BLE_DEVICE *p_msg;
- if ((p_msg = (tBTA_DM_API_ADD_BLE_DEVICE *) GKI_getbuf(sizeof(tBTA_DM_API_ADD_BLE_DEVICE))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_ADD_BLE_DEVICE *) osi_malloc(sizeof(tBTA_DM_API_ADD_BLE_DEVICE))) != NULL) {
memset (p_msg, 0, sizeof(tBTA_DM_API_ADD_BLE_DEVICE));
p_msg->hdr.event = BTA_DM_API_ADD_BLEDEVICE_EVT;
{
tBTA_DM_API_PASSKEY_REPLY *p_msg;
- if ((p_msg = (tBTA_DM_API_PASSKEY_REPLY *) GKI_getbuf(sizeof(tBTA_DM_API_PASSKEY_REPLY))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_PASSKEY_REPLY *) osi_malloc(sizeof(tBTA_DM_API_PASSKEY_REPLY))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_PASSKEY_REPLY));
p_msg->hdr.event = BTA_DM_API_BLE_PASSKEY_REPLY_EVT;
*******************************************************************************/
void BTA_DmBleConfirmReply(BD_ADDR bd_addr, BOOLEAN accept)
{
- tBTA_DM_API_CONFIRM *p_msg = (tBTA_DM_API_CONFIRM *)GKI_getbuf(sizeof(tBTA_DM_API_CONFIRM));
+ tBTA_DM_API_CONFIRM *p_msg = (tBTA_DM_API_CONFIRM *)osi_malloc(sizeof(tBTA_DM_API_CONFIRM));
if (p_msg != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_CONFIRM));
p_msg->hdr.event = BTA_DM_API_BLE_CONFIRM_REPLY_EVT;
{
tBTA_DM_API_BLE_SEC_GRANT *p_msg;
- if ((p_msg = (tBTA_DM_API_BLE_SEC_GRANT *) GKI_getbuf(sizeof(tBTA_DM_API_BLE_SEC_GRANT))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_SEC_GRANT *) osi_malloc(sizeof(tBTA_DM_API_BLE_SEC_GRANT))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_SEC_GRANT));
p_msg->hdr.event = BTA_DM_API_BLE_SEC_GRANT_EVT;
#if BLE_INCLUDED == TRUE
tBTA_DM_API_BLE_CONN_PARAMS *p_msg;
- if ((p_msg = (tBTA_DM_API_BLE_CONN_PARAMS *) GKI_getbuf(sizeof(tBTA_DM_API_BLE_CONN_PARAMS))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_CONN_PARAMS *) osi_malloc(sizeof(tBTA_DM_API_BLE_CONN_PARAMS))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_CONN_PARAMS));
p_msg->hdr.event = BTA_DM_API_BLE_CONN_PARAM_EVT;
void BTA_DmSetBleConnScanParams(UINT32 scan_interval, UINT32 scan_window)
{
tBTA_DM_API_BLE_SCAN_PARAMS *p_msg;
- if ((p_msg = (tBTA_DM_API_BLE_SCAN_PARAMS *)GKI_getbuf(sizeof(tBTA_DM_API_BLE_SCAN_PARAMS))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_SCAN_PARAMS *)osi_malloc(sizeof(tBTA_DM_API_BLE_SCAN_PARAMS))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_SCAN_PARAMS));
p_msg->hdr.event = BTA_DM_API_BLE_CONN_SCAN_PARAM_EVT;
p_msg->scan_int = scan_interval;
{
tBTA_DM_API_BLE_SCAN_PARAMS *p_msg;
- if ((p_msg = (tBTA_DM_API_BLE_SCAN_PARAMS *)GKI_getbuf(sizeof(tBTA_DM_API_BLE_SCAN_PARAMS))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_SCAN_PARAMS *)osi_malloc(sizeof(tBTA_DM_API_BLE_SCAN_PARAMS))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_SCAN_PARAMS));
p_msg->hdr.event = BTA_DM_API_BLE_SCAN_PARAM_EVT;
p_msg->client_if = client_if;
{
tBTA_DM_API_BLE_SCAN_FILTER_PARAMS *p_msg;
- if ((p_msg = (tBTA_DM_API_BLE_SCAN_FILTER_PARAMS *)GKI_getbuf(sizeof(tBTA_DM_API_BLE_SCAN_FILTER_PARAMS))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_SCAN_FILTER_PARAMS *)osi_malloc(sizeof(tBTA_DM_API_BLE_SCAN_FILTER_PARAMS))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_SCAN_FILTER_PARAMS));
p_msg->hdr.event = BTA_DM_API_BLE_SCAN_FIL_PARAM_EVT;
p_msg->client_if = client_if;
APPL_TRACE_API ("BTA_DmSetBleAdvParam: %d, %d\n", adv_int_min, adv_int_max);
- if ((p_msg = (tBTA_DM_API_BLE_ADV_PARAMS *) GKI_getbuf(sizeof(tBTA_DM_API_BLE_ADV_PARAMS)
+ if ((p_msg = (tBTA_DM_API_BLE_ADV_PARAMS *) osi_malloc(sizeof(tBTA_DM_API_BLE_ADV_PARAMS)
+ sizeof(tBLE_BD_ADDR))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_ADV_PARAMS) + sizeof(tBLE_BD_ADDR));
APPL_TRACE_API ("BTA_DmSetBleAdvParamsAll: %d, %d\n", adv_int_min, adv_int_max);
APPL_TRACE_API ("adv_type = %d, addr_type_own = %d, chnl_map = %d, adv_fil_pol = %d\n",
adv_type, addr_type_own, chnl_map, adv_fil_pol);
- if ((p_msg = (tBTA_DM_API_BLE_ADV_PARAMS_ALL *) GKI_getbuf(sizeof(tBTA_DM_API_BLE_ADV_PARAMS_ALL)
+ if ((p_msg = (tBTA_DM_API_BLE_ADV_PARAMS_ALL *) osi_malloc(sizeof(tBTA_DM_API_BLE_ADV_PARAMS_ALL)
+ sizeof(tBLE_BD_ADDR))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_ADV_PARAMS_ALL));
tBTA_DM_API_SET_ADV_CONFIG *p_msg;
if ((p_msg = (tBTA_DM_API_SET_ADV_CONFIG *)
- GKI_getbuf(sizeof(tBTA_DM_API_SET_ADV_CONFIG))) != NULL) {
+ osi_malloc(sizeof(tBTA_DM_API_SET_ADV_CONFIG))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BLE_SET_ADV_CONFIG_EVT;
p_msg->data_mask = data_mask;
p_msg->p_adv_data_cback = p_adv_data_cback;
tBTA_DM_API_SET_ADV_CONFIG_RAW *p_msg;
if ((p_msg = (tBTA_DM_API_SET_ADV_CONFIG_RAW *)
- GKI_getbuf(sizeof(tBTA_DM_API_SET_ADV_CONFIG_RAW))) != NULL) {
+ osi_malloc(sizeof(tBTA_DM_API_SET_ADV_CONFIG_RAW))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BLE_SET_ADV_CONFIG_RAW_EVT;
p_msg->p_adv_data_cback = p_adv_data_cback;
p_msg->p_raw_adv = p_raw_adv;
tBTA_DM_API_SET_ADV_CONFIG *p_msg;
if ((p_msg = (tBTA_DM_API_SET_ADV_CONFIG *)
- GKI_getbuf(sizeof(tBTA_DM_API_SET_ADV_CONFIG))) != NULL) {
+ osi_malloc(sizeof(tBTA_DM_API_SET_ADV_CONFIG))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BLE_SET_SCAN_RSP_EVT;
p_msg->data_mask = data_mask;
p_msg->p_adv_data_cback = p_adv_data_cback;
tBTA_DM_API_SET_ADV_CONFIG_RAW *p_msg;
if ((p_msg = (tBTA_DM_API_SET_ADV_CONFIG_RAW *)
- GKI_getbuf(sizeof(tBTA_DM_API_SET_ADV_CONFIG_RAW))) != NULL) {
+ osi_malloc(sizeof(tBTA_DM_API_SET_ADV_CONFIG_RAW))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BLE_SET_SCAN_RSP_RAW_EVT;
p_msg->p_adv_data_cback = p_scan_rsp_data_cback;
p_msg->p_raw_adv = p_raw_scan_rsp;
tBTA_DM_API_SET_STORAGE_CONFIG *p_msg;
bta_dm_cb.p_setup_cback = p_setup_cback;
if ((p_msg = (tBTA_DM_API_SET_STORAGE_CONFIG *)
- GKI_getbuf(sizeof(tBTA_DM_API_SET_STORAGE_CONFIG))) != NULL) {
+ osi_malloc(sizeof(tBTA_DM_API_SET_STORAGE_CONFIG))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BLE_SETUP_STORAGE_EVT;
p_msg->p_setup_cback = bta_ble_scan_setup_cb;
p_msg->p_thres_cback = p_thres_cback;
{
tBTA_DM_API_ENABLE_SCAN *p_msg;
- if ((p_msg = (tBTA_DM_API_ENABLE_SCAN *) GKI_getbuf(sizeof(tBTA_DM_API_ENABLE_SCAN))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_ENABLE_SCAN *) osi_malloc(sizeof(tBTA_DM_API_ENABLE_SCAN))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BLE_ENABLE_BATCH_SCAN_EVT;
p_msg->scan_mode = scan_mode;
p_msg->scan_int = scan_interval;
tBTA_DM_API_DISABLE_SCAN *p_msg;
if ((p_msg = (tBTA_DM_API_DISABLE_SCAN *)
- GKI_getbuf(sizeof(tBTA_DM_API_DISABLE_SCAN))) != NULL) {
+ osi_malloc(sizeof(tBTA_DM_API_DISABLE_SCAN))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BLE_DISABLE_BATCH_SCAN_EVT;
p_msg->ref_value = ref_value;
bta_sys_sendmsg(p_msg);
tBTA_DM_API_READ_SCAN_REPORTS *p_msg;
if ((p_msg = (tBTA_DM_API_READ_SCAN_REPORTS *)
- GKI_getbuf(sizeof(tBTA_DM_API_READ_SCAN_REPORTS))) != NULL) {
+ osi_malloc(sizeof(tBTA_DM_API_READ_SCAN_REPORTS))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BLE_READ_SCAN_REPORTS_EVT;
p_msg->scan_type = scan_type;
p_msg->ref_value = ref_value;
tBTA_DM_API_TRACK_ADVERTISER *p_msg;
if ((p_msg = (tBTA_DM_API_TRACK_ADVERTISER *)
- GKI_getbuf(sizeof(tBTA_DM_API_TRACK_ADVERTISER))) != NULL) {
+ osi_malloc(sizeof(tBTA_DM_API_TRACK_ADVERTISER))) != NULL) {
p_msg->hdr.event = BTA_DM_API_BLE_TRACK_ADVERTISER_EVT;
p_msg->p_track_adv_cback = p_track_adv_cback;
p_msg->ref_value = ref_value;
APPL_TRACE_API("BTA_DmBleBroadcast: start = %d \n", start);
- if ((p_msg = (tBTA_DM_API_BLE_OBSERVE *) GKI_getbuf(sizeof(tBTA_DM_API_BLE_OBSERVE))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_OBSERVE *) osi_malloc(sizeof(tBTA_DM_API_BLE_OBSERVE))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_OBSERVE));
p_msg->hdr.event = BTA_DM_API_BLE_BROADCAST_EVT;
#if BLE_INCLUDED == TRUE
tBTA_DM_API_BLE_SET_BG_CONN_TYPE *p_msg;
- if ((p_msg = (tBTA_DM_API_BLE_SET_BG_CONN_TYPE *) GKI_getbuf(sizeof(tBTA_DM_API_BLE_SET_BG_CONN_TYPE))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_SET_BG_CONN_TYPE *) osi_malloc(sizeof(tBTA_DM_API_BLE_SET_BG_CONN_TYPE))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_SET_BG_CONN_TYPE));
p_msg->hdr.event = BTA_DM_API_BLE_SET_BG_CONN_TYPE;
sizeof(tBT_UUID) * p_services->num_uuid) :
sizeof(tBTA_DM_API_DISCOVER);
- if ((p_msg = (tBTA_DM_API_DISCOVER *) GKI_getbuf(len)) != NULL) {
+ if ((p_msg = (tBTA_DM_API_DISCOVER *) osi_malloc(len)) != NULL) {
memset(p_msg, 0, len);
p_msg->hdr.event = BTA_DM_API_DISCOVER_EVT;
UINT16 len = p_services ? (sizeof(tBTA_DM_API_SEARCH) + sizeof(tBT_UUID) * p_services->num_uuid) :
sizeof(tBTA_DM_API_SEARCH);
- if ((p_msg = (tBTA_DM_API_SEARCH *) GKI_getbuf(len)) != NULL) {
+ if ((p_msg = (tBTA_DM_API_SEARCH *) osi_malloc(len)) != NULL) {
memset(p_msg, 0, len);
p_msg->hdr.event = BTA_DM_API_SEARCH_EVT;
#if BLE_INCLUDED == TRUE
tBTA_DM_API_UPDATE_CONN_PARAM *p_msg;
- p_msg = (tBTA_DM_API_UPDATE_CONN_PARAM *) GKI_getbuf(sizeof(tBTA_DM_API_UPDATE_CONN_PARAM));
+ p_msg = (tBTA_DM_API_UPDATE_CONN_PARAM *) osi_malloc(sizeof(tBTA_DM_API_UPDATE_CONN_PARAM));
if (p_msg != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_UPDATE_CONN_PARAM));
#if BLE_INCLUDED == TRUE && BLE_PRIVACY_SPT == TRUE
tBTA_DM_API_LOCAL_PRIVACY *p_msg;
- if ((p_msg = (tBTA_DM_API_LOCAL_PRIVACY *) GKI_getbuf(sizeof(tBTA_DM_API_ENABLE_PRIVACY))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_LOCAL_PRIVACY *) osi_malloc(sizeof(tBTA_DM_API_LOCAL_PRIVACY))) != NULL) {
memset (p_msg, 0, sizeof(tBTA_DM_API_LOCAL_PRIVACY));
p_msg->hdr.event = BTA_DM_API_LOCAL_PRIVACY_EVT;
APPL_TRACE_API ("BTA_BleEnableAdvInstance");
- if ((p_msg = (tBTA_DM_API_BLE_MULTI_ADV_ENB *) GKI_getbuf(len)) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_MULTI_ADV_ENB *) osi_malloc(len)) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_MULTI_ADV_ENB));
p_msg->hdr.event = BTA_DM_API_BLE_MULTI_ADV_ENB_EVT;
UINT16 len = sizeof(tBTA_BLE_ADV_PARAMS) + sizeof(tBTA_DM_API_BLE_MULTI_ADV_PARAM);
APPL_TRACE_API ("BTA_BleUpdateAdvInstParam");
- if ((p_msg = (tBTA_DM_API_BLE_MULTI_ADV_PARAM *) GKI_getbuf(len)) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_MULTI_ADV_PARAM *) osi_malloc(len)) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_MULTI_ADV_PARAM));
p_msg->hdr.event = BTA_DM_API_BLE_MULTI_ADV_PARAM_UPD_EVT;
p_msg->inst_id = inst_id;
APPL_TRACE_API ("BTA_BleCfgAdvInstData");
- if ((p_msg = (tBTA_DM_API_BLE_MULTI_ADV_DATA *) GKI_getbuf(len)) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_MULTI_ADV_DATA *) osi_malloc(len)) != NULL) {
memset(p_msg, 0, len);
p_msg->hdr.event = BTA_DM_API_BLE_MULTI_ADV_DATA_EVT;
p_msg->inst_id = inst_id;
APPL_TRACE_API ("BTA_BleDisableAdvInstance: %d", inst_id);
if ((p_msg = (tBTA_DM_API_BLE_MULTI_ADV_DISABLE *)
- GKI_getbuf(sizeof(tBTA_DM_API_BLE_MULTI_ADV_DISABLE))) != NULL) {
+ osi_malloc(sizeof(tBTA_DM_API_BLE_MULTI_ADV_DISABLE))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_MULTI_ADV_DISABLE));
p_msg->hdr.event = BTA_DM_API_BLE_MULTI_ADV_DISABLE_EVT;
p_msg->inst_id = inst_id;
}
}
- if ((p_msg = (tBTA_DM_API_CFG_FILTER_COND *) GKI_getbuf(len)) != NULL) {
+ if ((p_msg = (tBTA_DM_API_CFG_FILTER_COND *) osi_malloc(len)) != NULL) {
memset (p_msg, 0, len);
p_msg->hdr.event = BTA_DM_API_CFG_FILTER_COND_EVT;
UINT16 len = sizeof(tBTA_DM_API_SCAN_FILTER_PARAM_SETUP) + sizeof(tBLE_BD_ADDR);
- if ((p_msg = (tBTA_DM_API_SCAN_FILTER_PARAM_SETUP *) GKI_getbuf(len)) != NULL) {
+ if ((p_msg = (tBTA_DM_API_SCAN_FILTER_PARAM_SETUP *) osi_malloc(len)) != NULL) {
memset (p_msg, 0, len);
p_msg->hdr.event = BTA_DM_API_SCAN_FILTER_SETUP_EVT;
UINT16 len = sizeof(tBTA_DM_API_ENERGY_INFO) + sizeof(tBLE_BD_ADDR);
- if ((p_msg = (tBTA_DM_API_ENERGY_INFO *) GKI_getbuf(len)) != NULL) {
+ if ((p_msg = (tBTA_DM_API_ENERGY_INFO *) osi_malloc(len)) != NULL) {
memset (p_msg, 0, len);
p_msg->hdr.event = BTA_DM_API_BLE_ENERGY_INFO_EVT;
p_msg->p_energy_info_cback = p_cmpl_cback;
UINT16 len = sizeof(tBTA_DM_API_ENABLE_SCAN_FILTER) + sizeof(tBLE_BD_ADDR);
- if ((p_msg = (tBTA_DM_API_ENABLE_SCAN_FILTER *) GKI_getbuf(len)) != NULL) {
+ if ((p_msg = (tBTA_DM_API_ENABLE_SCAN_FILTER *) osi_malloc(len)) != NULL) {
memset (p_msg, 0, len);
p_msg->hdr.event = BTA_DM_API_SCAN_FILTER_ENABLE_EVT;
{
tBTA_DM_API_UPDATE_CONN_PARAM *p_msg;
- if ((p_msg = (tBTA_DM_API_UPDATE_CONN_PARAM *) GKI_getbuf(sizeof(tBTA_DM_API_UPDATE_CONN_PARAM))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_UPDATE_CONN_PARAM *) osi_malloc(sizeof(tBTA_DM_API_UPDATE_CONN_PARAM))) != NULL) {
memset (p_msg, 0, sizeof(tBTA_DM_API_UPDATE_CONN_PARAM));
p_msg->hdr.event = BTA_DM_API_UPDATE_CONN_PARAM_EVT;
{
tBTA_DM_API_BLE_DISCONNECT *p_msg;
- if ((p_msg = (tBTA_DM_API_BLE_DISCONNECT *) GKI_getbuf(sizeof(tBTA_DM_API_BLE_DISCONNECT))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_DISCONNECT *) osi_malloc(sizeof(tBTA_DM_API_BLE_DISCONNECT))) != NULL) {
memset (p_msg, 0, sizeof(tBTA_DM_API_BLE_DISCONNECT));
p_msg->hdr.event = BTA_DM_API_BLE_DISCONNECT_EVT;
{
tBTA_DM_API_BLE_SET_DATA_LENGTH *p_msg;
- if ((p_msg = (tBTA_DM_API_BLE_SET_DATA_LENGTH *)GKI_getbuf(sizeof(tBTA_DM_API_BLE_SET_DATA_LENGTH)))
+ if ((p_msg = (tBTA_DM_API_BLE_SET_DATA_LENGTH *)osi_malloc(sizeof(tBTA_DM_API_BLE_SET_DATA_LENGTH)))
!= NULL) {
bdcpy(p_msg->remote_bda, remote_device);
p_msg->hdr.event = BTA_DM_API_SET_DATA_LENGTH_EVT;
tBTA_DM_API_SET_ENCRYPTION *p_msg;
APPL_TRACE_API("BTA_DmSetEncryption"); //todo
- if ((p_msg = (tBTA_DM_API_SET_ENCRYPTION *) GKI_getbuf(sizeof(tBTA_DM_API_SET_ENCRYPTION))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_SET_ENCRYPTION *) osi_malloc(sizeof(tBTA_DM_API_SET_ENCRYPTION))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_SET_ENCRYPTION));
p_msg->hdr.event = BTA_DM_API_SET_ENCRYPTION_EVT;
APPL_TRACE_API("BTA_DmCloseACL");
- if ((p_msg = (tBTA_DM_API_REMOVE_ACL *) GKI_getbuf(sizeof(tBTA_DM_API_REMOVE_ACL))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_REMOVE_ACL *) osi_malloc(sizeof(tBTA_DM_API_REMOVE_ACL))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_REMOVE_ACL));
p_msg->hdr.event = BTA_DM_API_REMOVE_ACL_EVT;
APPL_TRACE_API("BTA_DmBleObserve:start = %d ", start);
- if ((p_msg = (tBTA_DM_API_BLE_OBSERVE *) GKI_getbuf(sizeof(tBTA_DM_API_BLE_OBSERVE))) != NULL) {
+ if ((p_msg = (tBTA_DM_API_BLE_OBSERVE *) osi_malloc(sizeof(tBTA_DM_API_BLE_OBSERVE))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_API_BLE_OBSERVE));
p_msg->hdr.event = BTA_DM_API_BLE_OBSERVE_EVT;
APPL_TRACE_API("BTA_DmBleStopAdvertising\n");
- if ((p_msg = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_msg = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
memset(p_msg, 0, sizeof(BT_HDR));
p_msg->event = BTA_DM_API_BLE_STOP_ADV_EVT;
bta_sys_sendmsg(p_msg);
{
tBTA_DM_APT_SET_DEV_ADDR *p_msg;
APPL_TRACE_API("set the random address ");
- if ((p_msg = (tBTA_DM_APT_SET_DEV_ADDR *) GKI_getbuf(sizeof(tBTA_DM_APT_SET_DEV_ADDR))) != NULL) {
+ if ((p_msg = (tBTA_DM_APT_SET_DEV_ADDR *) osi_malloc(sizeof(tBTA_DM_APT_SET_DEV_ADDR))) != NULL) {
memset(p_msg, 0, sizeof(tBTA_DM_APT_SET_DEV_ADDR));
memcpy(p_msg->address, rand_addr, BD_ADDR_LEN);
p_msg->hdr.event = BTA_DM_API_SET_RAND_ADDR_EVT;
*
******************************************************************************/
-#include "gki.h"
#include "bta_sys.h"
#include "bta_api.h"
#include "bta_dm_int.h"
#include <string.h>
#include "bta_dm_ci.h"
+#include "allocator.h"
#if (BTM_OOB_INCLUDED == TRUE && SMP_INCLUDED == TRUE)
{
tBTA_DM_CI_IO_REQ *p_msg;
- if ((p_msg = (tBTA_DM_CI_IO_REQ *) GKI_getbuf(sizeof(tBTA_DM_CI_IO_REQ))) != NULL) {
+ if ((p_msg = (tBTA_DM_CI_IO_REQ *) osi_malloc(sizeof(tBTA_DM_CI_IO_REQ))) != NULL) {
p_msg->hdr.event = BTA_DM_CI_IO_REQ_EVT;
bdcpy(p_msg->bd_addr, bd_addr);
p_msg->io_cap = io_cap;
{
tBTA_DM_CI_RMT_OOB *p_msg;
- if ((p_msg = (tBTA_DM_CI_RMT_OOB *) GKI_getbuf(sizeof(tBTA_DM_CI_RMT_OOB))) != NULL) {
+ if ((p_msg = (tBTA_DM_CI_RMT_OOB *) osi_malloc(sizeof(tBTA_DM_CI_RMT_OOB))) != NULL) {
p_msg->hdr.event = BTA_DM_CI_RMT_OOB_EVT;
bdcpy(p_msg->bd_addr, bd_addr);
p_msg->accept = accept;
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = event;
p_buf->layer_specific = sco_handle;
/* union of all data types */
typedef union {
- /* GKI event buffer header */
+ /* event buffer header */
BT_HDR hdr;
tBTA_DM_API_ENABLE enable;
// #include <assert.h>
#include <string.h>
-#include "gki.h"
#include "bta_sys.h"
#include "bta_api.h"
#include "bta_dm_int.h"
{
tBTA_DM_PM_BTM_STATUS *p_buf;
- if ((p_buf = (tBTA_DM_PM_BTM_STATUS *) GKI_getbuf(sizeof(tBTA_DM_PM_BTM_STATUS))) != NULL) {
+ if ((p_buf = (tBTA_DM_PM_BTM_STATUS *) osi_malloc(sizeof(tBTA_DM_PM_BTM_STATUS))) != NULL) {
p_buf->hdr.event = BTA_DM_PM_BTM_STATUS_EVT;
p_buf->status = status;
p_buf->value = value;
return;
}
- tBTA_DM_PM_TIMER *p_buf = (tBTA_DM_PM_TIMER *) GKI_getbuf(sizeof(tBTA_DM_PM_TIMER));
+ tBTA_DM_PM_TIMER *p_buf = (tBTA_DM_PM_TIMER *) osi_malloc(sizeof(tBTA_DM_PM_TIMER));
if (p_buf != NULL) {
p_buf->hdr.event = BTA_DM_PM_TIMER_EVT;
p_buf->pm_request = bta_dm_cb.pm_timer[i].pm_action[j];
#include "bt_target.h"
#include "utl.h"
-#include "gki.h"
#include "bta_sys.h"
#include "bta_gattc_int.h"
#include "l2c_api.h"
#include "l2c_int.h"
#include "gatt_int.h"
+#include "allocator.h"
#if (defined BTA_HH_LE_INCLUDED && BTA_HH_LE_INCLUDED == TRUE)
#include "bta_hh_int.h"
/* BTA use the same client interface as BTE GATT statck */
cb_data.reg_oper.client_if = p_cb->cl_rcb[i].client_if;
- if ((p_buf = (tBTA_GATTC_INT_START_IF *) GKI_getbuf(sizeof(tBTA_GATTC_INT_START_IF))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_INT_START_IF *) osi_malloc(sizeof(tBTA_GATTC_INT_START_IF))) != NULL) {
p_buf->hdr.event = BTA_GATTC_INT_START_IF_EVT;
p_buf->client_if = p_cb->cl_rcb[i].client_if;
APPL_TRACE_DEBUG("GATTC getbuf sucess.\n");
if (p_clcb->status != GATT_SUCCESS) {
/* clean up cache */
if (p_clcb->p_srcb && p_clcb->p_srcb->p_srvc_cache) {
- while (!GKI_queue_is_empty(&p_clcb->p_srcb->cache_buffer)) {
- GKI_freebuf (GKI_dequeue (&p_clcb->p_srcb->cache_buffer));
+ while (!fixed_queue_is_empty(p_clcb->p_srcb->cache_buffer)) {
+ osi_free(fixed_queue_try_dequeue(p_clcb->p_srcb->cache_buffer));
}
+ //fixed_queue_free(p_clcb->p_srcb->cache_buffer, NULL);
p_clcb->p_srcb->p_srvc_cache = NULL;
}
else if ((transport == BT_TRANSPORT_LE) && (connected == FALSE) && (p_conn != NULL)){
p_conn->service_change_ccc_written = FALSE;
if (p_conn->ccc_timer_used == TRUE){
- GKI_freebuf((void *)p_conn->service_change_ccc_timer.param);
+ osi_free((void *)p_conn->service_change_ccc_timer.param);
bta_sys_stop_timer(&(p_conn->service_change_ccc_timer));
p_conn->ccc_timer_used = FALSE;
}
bt_bdaddr_t bdaddr;
bdcpy(bdaddr.address, bda);
- if ((p_buf = (tBTA_GATTC_DATA *) GKI_getbuf(sizeof(tBTA_GATTC_DATA))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_DATA *) osi_malloc(sizeof(tBTA_GATTC_DATA))) != NULL) {
memset(p_buf, 0, sizeof(tBTA_GATTC_DATA));
p_buf->int_conn.hdr.event = connected ? BTA_GATTC_INT_CONN_EVT :
APPL_TRACE_DEBUG("bta_gattc_enc_cmpl_cback: cif = %d", gattc_if);
- if ((p_buf = (tBTA_GATTC_DATA *) GKI_getbuf(sizeof(tBTA_GATTC_DATA))) != NULL) {
- memset(p_buf, 0, sizeof(tBTA_GATTC_DATA));
-
+ if ((p_buf = (tBTA_GATTC_DATA *) osi_calloc(sizeof(tBTA_GATTC_DATA))) != NULL) {
p_buf->enc_cmpl.hdr.event = BTA_GATTC_ENC_CMPL_EVT;
p_buf->enc_cmpl.hdr.layer_specific = p_clcb->bta_conn_id;
p_buf->enc_cmpl.client_if = gattc_if;
}
/* in all other cases, mark it and delete the cache */
if (p_srvc_cb->p_srvc_cache != NULL) {
- while (!GKI_queue_is_empty(&p_srvc_cb->cache_buffer)) {
- GKI_freebuf (GKI_dequeue (&p_srvc_cb->cache_buffer));
+ while (!fixed_queue_is_empty(p_srvc_cb->cache_buffer)) {
+ osi_free(fixed_queue_try_dequeue(p_srvc_cb->cache_buffer));
}
-
+ //fixed_queue_free(p_srvc_cb->cache_buffer, NULL);
p_srvc_cb->p_srvc_cache = NULL;
}
}
tGATT_CL_COMPLETE *p_data)
{
const UINT16 len = sizeof(tBTA_GATTC_OP_CMPL) + sizeof(tGATT_CL_COMPLETE);
- tBTA_GATTC_OP_CMPL *p_buf = (tBTA_GATTC_OP_CMPL *) GKI_getbuf(len);
+ tBTA_GATTC_OP_CMPL *p_buf = (tBTA_GATTC_OP_CMPL *) osi_malloc(len);
if (p_buf != NULL) {
memset(p_buf, 0, len);
void bta_gattc_start_service_change_ccc_timer(UINT16 conn_id, BD_ADDR bda,UINT32 timeout_ms,
UINT8 timer_cnt, UINT8 last_status, TIMER_LIST_ENT *ccc_timer)
{
- tBTA_GATTC_WAIT_CCC_TIMER *p_timer_param = (tBTA_GATTC_WAIT_CCC_TIMER*) GKI_getbuf(sizeof(tBTA_GATTC_WAIT_CCC_TIMER));
+ tBTA_GATTC_WAIT_CCC_TIMER *p_timer_param = (tBTA_GATTC_WAIT_CCC_TIMER*) osi_malloc(sizeof(tBTA_GATTC_WAIT_CCC_TIMER));
if (p_timer_param != NULL){
p_timer_param->conn_id = conn_id;
memcpy(p_timer_param->remote_bda, bda, sizeof(BD_ADDR));
tBTA_GATTC_CONN *p_conn = bta_gattc_conn_find(p_timer_param->remote_bda);
if (p_conn == NULL){
APPL_TRACE_ERROR("p_conn is NULL in %s\n", __func__);
- GKI_freebuf(p_timer_param);
+ osi_free(p_timer_param);
return;
}
p_conn->service_change_ccc_written = TRUE;
}
- GKI_freebuf(p_timer_param);
+ osi_free(p_timer_param);
}
#endif
******************************************************************************/
#include "bt_target.h"
+#include "allocator.h"
#if defined(GATTC_INCLUDED) && (GATTC_INCLUDED == TRUE)
#include <string.h>
-#include "gki.h"
#include "bta_sys.h"
#include "bta_gatt_api.h"
#include "bta_gattc_int.h"
APPL_TRACE_WARNING("GATTC Module not enabled/already disabled\n");
return;
}
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_GATTC_API_DISABLE_EVT;
bta_sys_sendmsg(p_buf);
}
bta_sys_register(BTA_ID_GATTC, &bta_gattc_reg);
}
- if ((p_buf = (tBTA_GATTC_API_REG *) GKI_getbuf(sizeof(tBTA_GATTC_API_REG))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_REG *) osi_malloc(sizeof(tBTA_GATTC_API_REG))) != NULL) {
p_buf->hdr.event = BTA_GATTC_API_REG_EVT;
if (p_app_uuid != NULL) {
memcpy(&p_buf->app_uuid, p_app_uuid, sizeof(tBT_UUID));
{
tBTA_GATTC_API_DEREG *p_buf;
- if ((p_buf = (tBTA_GATTC_API_DEREG *) GKI_getbuf(sizeof(tBTA_GATTC_API_DEREG))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_DEREG *) osi_malloc(sizeof(tBTA_GATTC_API_DEREG))) != NULL) {
p_buf->hdr.event = BTA_GATTC_API_DEREG_EVT;
p_buf->client_if = client_if;
bta_sys_sendmsg(p_buf);
{
tBTA_GATTC_API_OPEN *p_buf;
- if ((p_buf = (tBTA_GATTC_API_OPEN *) GKI_getbuf(sizeof(tBTA_GATTC_API_OPEN))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_OPEN *) osi_malloc(sizeof(tBTA_GATTC_API_OPEN))) != NULL) {
p_buf->hdr.event = BTA_GATTC_API_OPEN_EVT;
p_buf->client_if = client_if;
{
tBTA_GATTC_API_CANCEL_OPEN *p_buf;
- if ((p_buf = (tBTA_GATTC_API_CANCEL_OPEN *) GKI_getbuf(sizeof(tBTA_GATTC_API_CANCEL_OPEN))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_CANCEL_OPEN *) osi_malloc(sizeof(tBTA_GATTC_API_CANCEL_OPEN))) != NULL) {
p_buf->hdr.event = BTA_GATTC_API_CANCEL_OPEN_EVT;
p_buf->client_if = client_if;
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_GATTC_API_CLOSE_EVT;
p_buf->layer_specific = conn_id;
{
tBTA_GATTC_API_CFG_MTU *p_buf;
- if ((p_buf = (tBTA_GATTC_API_CFG_MTU *) GKI_getbuf(sizeof(tBTA_GATTC_API_CFG_MTU))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_CFG_MTU *) osi_malloc(sizeof(tBTA_GATTC_API_CFG_MTU))) != NULL) {
p_buf->hdr.event = BTA_GATTC_API_CFG_MTU_EVT;
p_buf->hdr.layer_specific = conn_id;
tBTA_GATTC_API_SEARCH *p_buf;
UINT16 len = sizeof(tBTA_GATTC_API_SEARCH) + sizeof(tBT_UUID);
- if ((p_buf = (tBTA_GATTC_API_SEARCH *) GKI_getbuf(len)) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_SEARCH *) osi_malloc(len)) != NULL) {
memset(p_buf, 0, len);
p_buf->hdr.event = BTA_GATTC_API_SEARCH_EVT;
{
tBTA_GATTC_API_READ *p_buf;
- if ((p_buf = (tBTA_GATTC_API_READ *) GKI_getbuf(sizeof(tBTA_GATTC_API_READ))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_READ *) osi_malloc(sizeof(tBTA_GATTC_API_READ))) != NULL) {
memset(p_buf, 0, sizeof(tBTA_GATTC_API_READ));
p_buf->hdr.event = BTA_GATTC_API_READ_EVT;
tBTA_GATTC_API_READ *p_buf;
UINT16 len = (UINT16)(sizeof(tBTA_GATT_ID) + sizeof(tBTA_GATTC_API_READ));
- if ((p_buf = (tBTA_GATTC_API_READ *) GKI_getbuf(len)) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_READ *) osi_malloc(len)) != NULL) {
memset(p_buf, 0, sizeof(tBTA_GATTC_API_READ));
p_buf->hdr.event = BTA_GATTC_API_READ_EVT;
p_read_multi->num_attr * sizeof(tBTA_GATTC_ATTR_ID));
UINT8 i;
- if ((p_buf = (tBTA_GATTC_API_READ_MULTI *) GKI_getbuf(len)) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_READ_MULTI *) osi_malloc(len)) != NULL) {
memset(p_buf, 0, len);
p_buf->hdr.event = BTA_GATTC_API_READ_MULTI_EVT;
{
tBTA_GATTC_API_WRITE *p_buf;
- if ((p_buf = (tBTA_GATTC_API_WRITE *) GKI_getbuf((UINT16)(sizeof(tBTA_GATTC_API_WRITE) + len))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_WRITE *) osi_malloc((UINT16)(sizeof(tBTA_GATTC_API_WRITE) + len))) != NULL) {
memset(p_buf, 0, sizeof(tBTA_GATTC_API_WRITE) + len);
p_buf->hdr.event = BTA_GATTC_API_WRITE_EVT;
len += p_data->len;
}
- if ((p_buf = (tBTA_GATTC_API_WRITE *) GKI_getbuf(len)) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_WRITE *) osi_malloc(len)) != NULL) {
memset(p_buf, 0, len);
p_buf->hdr.event = BTA_GATTC_API_WRITE_EVT;
{
tBTA_GATTC_API_WRITE *p_buf;
- if ((p_buf = (tBTA_GATTC_API_WRITE *) GKI_getbuf((UINT16)(sizeof(tBTA_GATTC_API_WRITE) + len))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_WRITE *) osi_malloc((UINT16)(sizeof(tBTA_GATTC_API_WRITE) + len))) != NULL) {
memset(p_buf, 0, sizeof(tBTA_GATTC_API_WRITE) + len);
p_buf->hdr.event = BTA_GATTC_API_WRITE_EVT;
len += p_data->len;
}
- if ((p_buf = (tBTA_GATTC_API_WRITE *) GKI_getbuf(len)) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_WRITE *) osi_malloc(len)) != NULL) {
memset(p_buf, 0, len);
p_buf->hdr.event = BTA_GATTC_API_WRITE_EVT;
{
tBTA_GATTC_API_EXEC *p_buf;
- if ((p_buf = (tBTA_GATTC_API_EXEC *) GKI_getbuf((UINT16)sizeof(tBTA_GATTC_API_EXEC))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_EXEC *) osi_malloc((UINT16)sizeof(tBTA_GATTC_API_EXEC))) != NULL) {
memset(p_buf, 0, sizeof(tBTA_GATTC_API_EXEC));
p_buf->hdr.event = BTA_GATTC_API_EXEC_EVT;
APPL_TRACE_API("BTA_GATTC_SendIndConfirm conn_id=%d service uuid1=0x%x char uuid=0x%x",
conn_id, p_char_id->srvc_id.id.uuid.uu.uuid16, p_char_id->char_id.uuid.uu.uuid16);
- if ((p_buf = (tBTA_GATTC_API_CONFIRM *) GKI_getbuf(sizeof(tBTA_GATTC_API_CONFIRM))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_CONFIRM *) osi_malloc(sizeof(tBTA_GATTC_API_CONFIRM))) != NULL) {
memset(p_buf, 0, sizeof(tBTA_GATTC_API_CONFIRM));
p_buf->hdr.event = BTA_GATTC_API_CONFIRM_EVT;
{
tBTA_GATTC_API_OPEN *p_buf;
- if ((p_buf = (tBTA_GATTC_API_OPEN *) GKI_getbuf(sizeof(tBTA_GATTC_API_OPEN))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_OPEN *) osi_malloc(sizeof(tBTA_GATTC_API_OPEN))) != NULL) {
p_buf->hdr.event = BTA_GATTC_API_REFRESH_EVT;
memcpy(p_buf->remote_bda, remote_bda, BD_ADDR_LEN);
{
tBTA_GATTC_API_LISTEN *p_buf;
- if ((p_buf = (tBTA_GATTC_API_LISTEN *) GKI_getbuf((UINT16)(sizeof(tBTA_GATTC_API_LISTEN) + BD_ADDR_LEN))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_LISTEN *) osi_malloc((UINT16)(sizeof(tBTA_GATTC_API_LISTEN) + BD_ADDR_LEN))) != NULL) {
p_buf->hdr.event = BTA_GATTC_API_LISTEN_EVT;
p_buf->client_if = client_if;
{
tBTA_GATTC_API_LISTEN *p_buf;
- if ((p_buf = (tBTA_GATTC_API_LISTEN *) GKI_getbuf((UINT16)(sizeof(tBTA_GATTC_API_LISTEN) + BD_ADDR_LEN))) != NULL) {
+ if ((p_buf = (tBTA_GATTC_API_LISTEN *) osi_malloc((UINT16)(sizeof(tBTA_GATTC_API_LISTEN) + BD_ADDR_LEN))) != NULL) {
p_buf->hdr.event = BTA_GATTC_API_BROADCAST_EVT;
p_buf->client_if = client_if;
p_buf->start = start;
#include <string.h>
#include "utl.h"
-#include "gki.h"
#include "bta_sys.h"
#include "sdp_api.h"
#include "sdpdefs.h"
#include "bta_gattc_int.h"
#include "btm_api.h"
#include "btm_ble_api.h"
+#include "allocator.h"
#define LOG_TAG "bt_bta_gattc"
// #include "osi/include/log.h"
**
** Function bta_gattc_alloc_cache_buf
**
-** Description Allocate a GKI buffer for database cache.
+** Description Allocate a buffer for database cache.
**
** Returns status
**
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf(GATT_DB_POOL_ID)) == NULL) {
- APPL_TRACE_DEBUG("No resources: GKI buffer allocation failed.");
+ if ((p_buf = (BT_HDR *)osi_calloc(GATT_DB_BUF_SIZE)) == NULL) {
+ APPL_TRACE_DEBUG("No resources: buffer allocation failed.");
utl_freebuf((void **)&p_srvc_cb->p_srvc_list);
p_srvc_cb->free_byte = 0;
} else {
- memset(p_buf, 0, GKI_get_buf_size(p_buf));
p_srvc_cb->p_free = (UINT8 *) p_buf;
- p_srvc_cb->free_byte = GKI_get_buf_size(p_buf);
+ p_srvc_cb->free_byte = GATT_DB_BUF_SIZE;
/* link into buffer queue */
- GKI_enqueue(&p_srvc_cb->cache_buffer, p_buf);
+ fixed_queue_enqueue(p_srvc_cb->cache_buffer, p_buf);
}
#if BTA_GATT_DEBUG== TRUE
APPL_TRACE_DEBUG("allocating new buffer: free byte = %d", p_srvc_cb->free_byte);
{
tBTA_GATT_STATUS status = BTA_GATT_OK;
- while (!GKI_queue_is_empty(&p_srvc_cb->cache_buffer)) {
- GKI_freebuf (GKI_dequeue (&p_srvc_cb->cache_buffer));
+ while (!fixed_queue_is_empty(p_srvc_cb->cache_buffer)) {
+ osi_free(fixed_queue_try_dequeue(p_srvc_cb->cache_buffer));
}
utl_freebuf((void **)&p_srvc_cb->p_srvc_list);
- if ((p_srvc_cb->p_srvc_list = (tBTA_GATTC_ATTR_REC *)GKI_getbuf(BTA_GATTC_ATTR_LIST_SIZE)) == NULL) {
- APPL_TRACE_DEBUG("No resources: GKI buffer allocation failed.");
+ if ((p_srvc_cb->p_srvc_list = (tBTA_GATTC_ATTR_REC *)osi_malloc(BTA_GATTC_ATTR_LIST_SIZE)) == NULL) {
+ APPL_TRACE_DEBUG("No resources: buffer allocation failed.");
status = GATT_NO_RESOURCES;
} else {
p_srvc_cb->total_srvc = 0;
APPL_TRACE_ERROR("GATT service discovery is done on unknown connection");
}
- GKI_freebuf(bta_gattc_cb.p_sdp_db);
+ osi_free(bta_gattc_cb.p_sdp_db);
bta_gattc_cb.p_sdp_db = NULL;
bta_gattc_cb.sdp_conn_id = 0;
}
uuid.len = LEN_UUID_16;
uuid.uu.uuid16 = UUID_PROTOCOL_ATT;
- if ((bta_gattc_cb.p_sdp_db = (tSDP_DISCOVERY_DB *)GKI_getbuf(BTA_GATT_SDP_DB_SIZE)) != NULL) {
+ if ((bta_gattc_cb.p_sdp_db = (tSDP_DISCOVERY_DB *)osi_malloc(BTA_GATT_SDP_DB_SIZE)) != NULL) {
attr_list[0] = ATTR_ID_SERVICE_CLASS_ID_LIST;
attr_list[1] = ATTR_ID_PROTOCOL_DESC_LIST;
if (!SDP_ServiceSearchAttributeRequest (p_server_cb->server_bda,
bta_gattc_cb.p_sdp_db, &bta_gattc_sdp_callback)) {
- GKI_freebuf(bta_gattc_cb.p_sdp_db);
+ osi_free(bta_gattc_cb.p_sdp_db);
bta_gattc_cb.p_sdp_db = NULL;
} else {
bta_gattc_cb.sdp_conn_id = conn_id;
/* first attribute loading, initialize buffer */
APPL_TRACE_ERROR("bta_gattc_rebuild_cache");
if (attr_index == 0) {
- while (!GKI_queue_is_empty(&p_srvc_cb->cache_buffer)) {
- GKI_freebuf (GKI_dequeue (&p_srvc_cb->cache_buffer));
+ while (!fixed_queue_is_empty(p_srvc_cb->cache_buffer)) {
+ osi_free(fixed_queue_try_dequeue(p_srvc_cb->cache_buffer));
}
if (bta_gattc_alloc_cache_buf(p_srvc_cb) == NULL) {
#include "bta_api.h"
#include "bta_sys.h"
#include "bta_gattc_ci.h"
-#include "gki.h"
#include "utl.h"
+#include "allocator.h"
+
/*******************************************************************************
**
tBTA_GATTC_CI_EVT *p_evt;
UNUSED(server_bda);
- if ((p_evt = (tBTA_GATTC_CI_EVT *) GKI_getbuf(sizeof(tBTA_GATTC_CI_EVT))) != NULL) {
+ if ((p_evt = (tBTA_GATTC_CI_EVT *) osi_malloc(sizeof(tBTA_GATTC_CI_EVT))) != NULL) {
p_evt->hdr.event = evt;
p_evt->hdr.layer_specific = conn_id;
tBTA_GATTC_CI_LOAD *p_evt;
UNUSED(server_bda);
- if ((p_evt = (tBTA_GATTC_CI_LOAD *) GKI_getbuf(sizeof(tBTA_GATTC_CI_LOAD))) != NULL) {
+ if ((p_evt = (tBTA_GATTC_CI_LOAD *) osi_malloc(sizeof(tBTA_GATTC_CI_LOAD))) != NULL) {
memset(p_evt, 0, sizeof(tBTA_GATTC_CI_LOAD));
p_evt->hdr.event = evt;
tBTA_GATTC_CI_EVT *p_evt;
UNUSED(server_bda);
- if ((p_evt = (tBTA_GATTC_CI_EVT *) GKI_getbuf(sizeof(tBTA_GATTC_CI_EVT))) != NULL) {
+ if ((p_evt = (tBTA_GATTC_CI_EVT *) osi_malloc(sizeof(tBTA_GATTC_CI_EVT))) != NULL) {
p_evt->hdr.event = evt;
p_evt->hdr.layer_specific = conn_id;
#include <string.h>
#include "bta_gattc_int.h"
-#include "gki.h"
/*****************************************************************************
#include "bdaddr.h"
// #include "btif/include/btif_util.h"
-#include "gki.h"
#include "utl.h"
#include "bta_sys.h"
#include "bta_gattc_int.h"
#include "l2c_api.h"
+#include "allocator.h"
#define LOG_TAG "bt_bta_gattc"
/*****************************************************************************
}
if (p_tcb != NULL) {
- while (!GKI_queue_is_empty(&p_tcb->cache_buffer)) {
- GKI_freebuf (GKI_dequeue (&p_tcb->cache_buffer));
+ if (p_tcb->cache_buffer) {
+ while (!fixed_queue_is_empty(p_tcb->cache_buffer)) {
+ osi_free(fixed_queue_dequeue(p_tcb->cache_buffer));
+ }
+ fixed_queue_free(p_tcb->cache_buffer, NULL);
}
utl_freebuf((void **)&p_tcb->p_srvc_list);
p_tcb->in_use = TRUE;
bdcpy(p_tcb->server_bda, bda);
+
+ p_tcb->cache_buffer = fixed_queue_new(SIZE_MAX);
}
return p_tcb;
}
#if defined(GATTS_INCLUDED) && (GATTS_INCLUDED == TRUE)
#include "utl.h"
-#include "gki.h"
#include "bta_sys.h"
#include "bta_gatts_int.h"
#include "bta_gatts_co.h"
#include "btm_ble_api.h"
#include <string.h>
+#include "allocator.h"
static void bta_gatts_nv_save_cback(BOOLEAN is_saved, tGATTS_HNDL_RANGE *p_hndl_range);
static BOOLEAN bta_gatts_nv_srv_chg_cback(tGATTS_SRV_CHG_CMD cmd, tGATTS_SRV_CHG_REQ *p_req,
status = BTA_GATT_NO_RESOURCES;
} else {
if ((p_buf =
- (tBTA_GATTS_INT_START_IF *) GKI_getbuf(sizeof(tBTA_GATTS_INT_START_IF))) != NULL) {
+ (tBTA_GATTS_INT_START_IF *) osi_malloc(sizeof(tBTA_GATTS_INT_START_IF))) != NULL) {
p_buf->hdr.event = BTA_GATTS_INT_START_IF_EVT;
p_buf->server_if = p_cb->rcb[first_unuse].gatt_if;
cb_data.add_result.status = BTA_GATT_ERROR;
}
if((p_attr_val != NULL) && (p_attr_val->attr_val != NULL)){
- GKI_freebuf(p_attr_val->attr_val);
+ osi_free(p_attr_val->attr_val);
}
if (p_rcb->p_cback) {
cb_data.add_result.status = BTA_GATT_ERROR;
}
if((p_attr_val != NULL) && (p_attr_val->attr_val != NULL)){
- GKI_freebuf(p_attr_val->attr_val);
+ osi_free(p_attr_val->attr_val);
}
if (p_rcb->p_cback) {
cb_data.attr_val.status = gatts_status;
if (p_msg->api_set_val.value != NULL){
- GKI_freebuf(p_msg->api_set_val.value);
+ osi_free(p_msg->api_set_val.value);
}
if (p_rcb->p_cback) {
#if defined(GATTS_INCLUDED) && (GATTS_INCLUDED == TRUE)
#include <string.h>
-#include "gki.h"
#include "bta_sys.h"
#include "bta_gatt_api.h"
#include "bta_gatts_int.h"
+#include "allocator.h"
/*****************************************************************************
** Constants
return;
}
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_GATTS_API_DISABLE_EVT;
bta_sys_sendmsg(p_buf);
}
bta_sys_register(BTA_ID_GATTS, &bta_gatts_reg);
}
- if ((p_buf = (tBTA_GATTS_API_REG *) GKI_getbuf(sizeof(tBTA_GATTS_API_REG))) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_REG *) osi_malloc(sizeof(tBTA_GATTS_API_REG))) != NULL) {
p_buf->hdr.event = BTA_GATTS_API_REG_EVT;
if (p_app_uuid != NULL) {
{
tBTA_GATTS_API_DEREG *p_buf;
- if ((p_buf = (tBTA_GATTS_API_DEREG *) GKI_getbuf(sizeof(tBTA_GATTS_API_DEREG))) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_DEREG *) osi_malloc(sizeof(tBTA_GATTS_API_DEREG))) != NULL) {
p_buf->hdr.event = BTA_GATTS_API_DEREG_EVT;
p_buf->server_if = server_if;
{
tBTA_GATTS_API_CREATE_SRVC *p_buf;
- if ((p_buf = (tBTA_GATTS_API_CREATE_SRVC *) GKI_getbuf(sizeof(tBTA_GATTS_API_CREATE_SRVC))) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_CREATE_SRVC *) osi_malloc(sizeof(tBTA_GATTS_API_CREATE_SRVC))) != NULL) {
p_buf->hdr.event = BTA_GATTS_API_CREATE_SRVC_EVT;
p_buf->server_if = server_if;
tBTA_GATTS_API_ADD_INCL_SRVC *p_buf;
if ((p_buf =
- (tBTA_GATTS_API_ADD_INCL_SRVC *) GKI_getbuf(sizeof(tBTA_GATTS_API_ADD_INCL_SRVC)))
+ (tBTA_GATTS_API_ADD_INCL_SRVC *) osi_malloc(sizeof(tBTA_GATTS_API_ADD_INCL_SRVC)))
!= NULL) {
p_buf->hdr.event = BTA_GATTS_API_ADD_INCL_SRVC_EVT;
if(attr_val != NULL){
len = attr_val->attr_len;
}
- if ((p_buf = (tBTA_GATTS_API_ADD_CHAR *) GKI_getbuf(sizeof(tBTA_GATTS_API_ADD_CHAR))) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_ADD_CHAR *) osi_malloc(sizeof(tBTA_GATTS_API_ADD_CHAR))) != NULL) {
memset(p_buf, 0, sizeof(tBTA_GATTS_API_ADD_CHAR));
p_buf->hdr.event = BTA_GATTS_API_ADD_CHAR_EVT;
APPL_TRACE_DEBUG("!!!!!!!attr_val->attr_max_len = %x\n",attr_val->attr_max_len);
p_buf->attr_val.attr_len = attr_val->attr_len;
p_buf->attr_val.attr_max_len = attr_val->attr_max_len;
- p_buf->attr_val.attr_val = (uint8_t *)GKI_getbuf(len);
+ p_buf->attr_val.attr_val = (uint8_t *)osi_malloc(len);
if(p_buf->attr_val.attr_val != NULL){
memcpy(p_buf->attr_val.attr_val, attr_val->attr_val, attr_val->attr_len);
}
tBTA_GATTS_API_ADD_DESCR *p_buf;
UINT16 value_len = 0;
- if ((p_buf = (tBTA_GATTS_API_ADD_DESCR *) GKI_getbuf(sizeof(tBTA_GATTS_API_ADD_DESCR))) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_ADD_DESCR *) osi_malloc(sizeof(tBTA_GATTS_API_ADD_DESCR))) != NULL) {
memset(p_buf, 0, sizeof(tBTA_GATTS_API_ADD_DESCR));
p_buf->hdr.event = BTA_GATTS_API_ADD_DESCR_EVT;
p_buf->attr_val.attr_max_len = attr_val->attr_max_len;
value_len = attr_val->attr_len;
if (value_len != 0){
- p_buf->attr_val.attr_val = (uint8_t*)GKI_getbuf(value_len);
+ p_buf->attr_val.attr_val = (uint8_t*)osi_malloc(value_len);
if(p_buf->attr_val.attr_val != NULL){
memcpy(p_buf->attr_val.attr_val, attr_val->attr_val, value_len);
}
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_GATTS_API_DEL_SRVC_EVT;
p_buf->layer_specific = service_id;
{
tBTA_GATTS_API_START *p_buf;
- if ((p_buf = (tBTA_GATTS_API_START *) GKI_getbuf(sizeof(tBTA_GATTS_API_START))) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_START *) osi_malloc(sizeof(tBTA_GATTS_API_START))) != NULL) {
p_buf->hdr.event = BTA_GATTS_API_START_SRVC_EVT;
p_buf->hdr.layer_specific = service_id;
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_GATTS_API_STOP_SRVC_EVT;
p_buf->layer_specific = service_id;
tBTA_GATTS_API_INDICATION *p_buf;
UINT16 len = sizeof(tBTA_GATTS_API_INDICATION);
- if ((p_buf = (tBTA_GATTS_API_INDICATION *) GKI_getbuf(len)) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_INDICATION *) osi_malloc(len)) != NULL) {
memset(p_buf, 0, len);
p_buf->hdr.event = BTA_GATTS_API_INDICATION_EVT;
tBTA_GATTS_API_RSP *p_buf;
UINT16 len = sizeof(tBTA_GATTS_API_RSP) + sizeof(tBTA_GATTS_RSP);
- if ((p_buf = (tBTA_GATTS_API_RSP *) GKI_getbuf(len)) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_RSP *) osi_malloc(len)) != NULL) {
memset(p_buf, 0, len);
p_buf->hdr.event = BTA_GATTS_API_RSP_EVT;
void BTA_SetAttributeValue(UINT16 attr_handle, UINT16 length, UINT8 *value)
{
tBTA_GATTS_API_SET_ATTR_VAL *p_buf;
- if((p_buf = (tBTA_GATTS_API_SET_ATTR_VAL *)GKI_getbuf(
+ if((p_buf = (tBTA_GATTS_API_SET_ATTR_VAL *)osi_malloc(
sizeof(tBTA_GATTS_API_SET_ATTR_VAL))) != NULL){
p_buf->hdr.event = BTA_GATTS_API_SET_ATTR_VAL_EVT;
p_buf->hdr.layer_specific = attr_handle;
p_buf->length = length;
if(value != NULL){
- if((p_buf->value = (UINT8 *)GKI_getbuf(length)) != NULL){
+ if((p_buf->value = (UINT8 *)osi_malloc(length)) != NULL){
memcpy(p_buf->value, value, length);
}
}
{
tBTA_GATTS_API_OPEN *p_buf;
- if ((p_buf = (tBTA_GATTS_API_OPEN *) GKI_getbuf(sizeof(tBTA_GATTS_API_OPEN))) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_OPEN *) osi_malloc(sizeof(tBTA_GATTS_API_OPEN))) != NULL) {
p_buf->hdr.event = BTA_GATTS_API_OPEN_EVT;
p_buf->server_if = server_if;
p_buf->is_direct = is_direct;
{
tBTA_GATTS_API_CANCEL_OPEN *p_buf;
- if ((p_buf = (tBTA_GATTS_API_CANCEL_OPEN *) GKI_getbuf(sizeof(tBTA_GATTS_API_CANCEL_OPEN))) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_CANCEL_OPEN *) osi_malloc(sizeof(tBTA_GATTS_API_CANCEL_OPEN))) != NULL) {
p_buf->hdr.event = BTA_GATTS_API_CANCEL_OPEN_EVT;
p_buf->server_if = server_if;
p_buf->is_direct = is_direct;
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_GATTS_API_CLOSE_EVT;
p_buf->layer_specific = conn_id;
bta_sys_sendmsg(p_buf);
{
tBTA_GATTS_API_LISTEN *p_buf;
- if ((p_buf = (tBTA_GATTS_API_LISTEN *) GKI_getbuf((UINT16)(sizeof(tBTA_GATTS_API_LISTEN) + BD_ADDR_LEN))) != NULL) {
+ if ((p_buf = (tBTA_GATTS_API_LISTEN *) osi_malloc((UINT16)(sizeof(tBTA_GATTS_API_LISTEN) + BD_ADDR_LEN))) != NULL) {
p_buf->hdr.event = BTA_GATTS_API_LISTEN_EVT;
p_buf->server_if = server_if;
#include <string.h>
#include "bta_gatts_int.h"
-#include "gki.h"
/* type for service building action functions */
typedef void (*tBTA_GATTS_SRVC_ACT)(tBTA_GATTS_SRVC_CB *p_rcb, tBTA_GATTS_DATA *p_data);
#include <string.h>
#include "utl.h"
-#include "gki.h"
#include "bta_sys.h"
#include "bta_gatts_int.h"
}
/* GetSDPRecord. at one time only one SDP precedure can be active */
else if (!bta_hh_cb.p_disc_db) {
- bta_hh_cb.p_disc_db = (tSDP_DISCOVERY_DB *) GKI_getbuf(p_bta_hh_cfg->sdp_db_size);
+ bta_hh_cb.p_disc_db = (tSDP_DISCOVERY_DB *) osi_malloc(p_bta_hh_cfg->sdp_db_size);
if (bta_hh_cb.p_disc_db == NULL) {
status = BTA_HH_ERR_NO_RES;
}
if (sm_event != BTA_HH_INVALID_EVT &&
- (p_buf = (tBTA_HH_CBACK_DATA *)GKI_getbuf(sizeof(tBTA_HH_CBACK_DATA) +
+ (p_buf = (tBTA_HH_CBACK_DATA *)osi_malloc(sizeof(tBTA_HH_CBACK_DATA) +
sizeof(BT_HDR))) != NULL) {
p_buf->hdr.event = sm_event;
p_buf->hdr.layer_specific = (UINT16)dev_handle;
bta_sys_register(BTA_ID_HH, &bta_hh_reg);
LOG_INFO("%s sec_mask:0x%x p_cback:%p", __func__, sec_mask, p_cback);
- p_buf = (tBTA_HH_API_ENABLE *)GKI_getbuf((UINT16)sizeof(tBTA_HH_API_ENABLE));
+ p_buf = (tBTA_HH_API_ENABLE *)osi_malloc((UINT16)sizeof(tBTA_HH_API_ENABLE));
if (p_buf != NULL) {
memset(p_buf, 0, sizeof(tBTA_HH_API_ENABLE));
BT_HDR *p_buf;
bta_sys_deregister(BTA_ID_HH);
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR))) != NULL) {
p_buf->event = BTA_HH_API_DISABLE_EVT;
bta_sys_sendmsg(p_buf);
}
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *)GKI_getbuf((UINT16)sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc((UINT16)sizeof(BT_HDR))) != NULL) {
memset(p_buf, 0, sizeof(BT_HDR));
p_buf->event = BTA_HH_API_CLOSE_EVT;
p_buf->layer_specific = (UINT16) dev_handle;
{
tBTA_HH_API_CONN *p_buf;
- p_buf = (tBTA_HH_API_CONN *)GKI_getbuf((UINT16)sizeof(tBTA_HH_API_CONN));
+ p_buf = (tBTA_HH_API_CONN *)osi_malloc((UINT16)sizeof(tBTA_HH_API_CONN));
if (p_buf != NULL) {
memset((void *)p_buf, 0, sizeof(tBTA_HH_API_CONN));
tBTA_HH_CMD_DATA *p_buf;
UINT16 len = (UINT16) (sizeof(tBTA_HH_CMD_DATA) );
- if ((p_buf = (tBTA_HH_CMD_DATA *)GKI_getbuf(len)) != NULL) {
+ if ((p_buf = (tBTA_HH_CMD_DATA *)osi_malloc(len)) != NULL) {
memset(p_buf, 0, sizeof(tBTA_HH_CMD_DATA));
p_buf->hdr.event = BTA_HH_API_WRITE_DEV_EVT;
{
BT_HDR *p_buf;
- if ((p_buf = (BT_HDR *)GKI_getbuf((UINT16)sizeof(BT_HDR))) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc((UINT16)sizeof(BT_HDR))) != NULL) {
memset(p_buf, 0, sizeof(BT_HDR));
p_buf->event = BTA_HH_API_GET_DSCP_EVT;
p_buf->layer_specific = (UINT16) dev_handle;
tBTA_HH_MAINT_DEV *p_buf;
UINT16 len = sizeof(tBTA_HH_MAINT_DEV) + dscp_info.descriptor.dl_len;
- p_buf = (tBTA_HH_MAINT_DEV *)GKI_getbuf(len);
+ p_buf = (tBTA_HH_MAINT_DEV *)osi_malloc(len);
if (p_buf != NULL) {
memset(p_buf, 0, sizeof(tBTA_HH_MAINT_DEV));
{
tBTA_HH_MAINT_DEV *p_buf;
- p_buf = (tBTA_HH_MAINT_DEV *)GKI_getbuf((UINT16)sizeof(tBTA_HH_MAINT_DEV));
+ p_buf = (tBTA_HH_MAINT_DEV *)osi_malloc((UINT16)sizeof(tBTA_HH_MAINT_DEV));
if (p_buf != NULL) {
memset(p_buf, 0, sizeof(tBTA_HH_MAINT_DEV));
{
tBTA_HH_SCPP_UPDATE *p_buf;
- p_buf = (tBTA_HH_SCPP_UPDATE *)GKI_getbuf((UINT16)sizeof(tBTA_HH_SCPP_UPDATE));
+ p_buf = (tBTA_HH_SCPP_UPDATE *)osi_malloc((UINT16)sizeof(tBTA_HH_SCPP_UPDATE));
if (p_buf != NULL) {
memset(p_buf, 0, sizeof(tBTA_HH_SCPP_UPDATE));
/* size of database for service discovery */
#ifndef BTA_HH_DISC_BUF_SIZE
-#define BTA_HH_DISC_BUF_SIZE GKI_MAX_BUF_SIZE
+#define BTA_HH_DISC_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
tBTA_HH_CFG *p_bta_hh_cfg = (tBTA_HH_CFG *) &bta_hh_cfg;
-#endif ///defined(BTA_HH_INCLUDED) && (BTA_HH_INCLUDED == TRUE)
\ No newline at end of file
+#endif ///defined(BTA_HH_INCLUDED) && (BTA_HH_INCLUDED == TRUE)
UINT16 sm_event = BTA_HH_GATT_CLOSE_EVT;
if (p_dev_cb != NULL &&
- (p_buf = (tBTA_HH_LE_CLOSE *)GKI_getbuf(sizeof(tBTA_HH_LE_CLOSE))) != NULL) {
+ (p_buf = (tBTA_HH_LE_CLOSE *)osi_malloc(sizeof(tBTA_HH_LE_CLOSE))) != NULL) {
p_buf->hdr.event = sm_event;
p_buf->hdr.layer_specific = (UINT16)p_dev_cb->hid_handle;
p_buf->conn_id = p_data->conn_id;
/* save report descriptor */
if (p_srvc->rpt_map != NULL) {
- GKI_freebuf((void *)p_srvc->rpt_map);
+ osi_free((void *)p_srvc->rpt_map);
}
if (p_data->p_value->unformat.len > 0) {
- p_srvc->rpt_map = (UINT8 *)GKI_getbuf(p_data->p_value->unformat.len);
+ p_srvc->rpt_map = (UINT8 *)osi_malloc(p_data->p_value->unformat.len);
}
if (p_srvc->rpt_map != NULL) {
if (p_rpt != NULL &&
p_data->p_value != NULL &&
- (p_buf = (BT_HDR *)GKI_getbuf((UINT16)(sizeof(BT_HDR) + p_data->p_value->unformat.len + 1))) != NULL) {
+ (p_buf = (BT_HDR *)osi_malloc((UINT16)(sizeof(BT_HDR) + p_data->p_value->unformat.len + 1))) != NULL) {
/* pack data send to app */
hs_data.status = BTA_HH_OK;
p_buf->len = p_data->p_value->unformat.len + 1;
/* need to append report ID to the head of data */
if (p_rpt->rpt_id != 0) {
- if ((p_buf = (UINT8 *)GKI_getbuf((UINT16)(p_data->len + 1))) == NULL) {
+ if ((p_buf = (UINT8 *)osi_malloc((UINT16)(p_data->len + 1))) == NULL) {
APPL_TRACE_ERROR("No resources to send report data");
return;
}
app_id);
if (p_buf != p_data->value) {
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
if (p_rpt == NULL) {
APPL_TRACE_ERROR("bta_hh_le_write_rpt: no matching report");
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
return;
}
#include "bta_hh_api.h"
#include "bta_hh_int.h"
-#include "gki.h"
/*****************************************************************************
** Constants and types
cback_event = (p_data->api_sndcmd.t_type - BTA_HH_FST_BTE_TRANS_EVT) +
BTA_HH_FST_TRANS_CB_EVT;
if (p_data->api_sndcmd.p_data != NULL) {
- GKI_freebuf(p_data->api_sndcmd.p_data);
+ osi_free(p_data->api_sndcmd.p_data);
}
if (p_data->api_sndcmd.t_type == HID_TRANS_SET_PROTOCOL ||
p_data->api_sndcmd.t_type == HID_TRANS_SET_REPORT ||
APPL_TRACE_ERROR("wrong device handle: [%d]", p_data->hdr.layer_specific);
/* Free the callback buffer now */
if (p_data != NULL && p_data->hid_cback.p_data != NULL) {
- GKI_freebuf(p_data->hid_cback.p_data);
+ osi_free(p_data->hid_cback.p_data);
p_data->hid_cback.p_data = NULL;
}
break;
if (p_dscp_info->dl_len &&
(p_cb->dscp_info.descriptor.dsc_list =
- (UINT8 *)GKI_getbuf(p_dscp_info->dl_len)) != NULL) {
+ (UINT8 *)osi_malloc(p_dscp_info->dl_len)) != NULL) {
p_cb->dscp_info.descriptor.dl_len = p_dscp_info->dl_len;
memcpy(p_cb->dscp_info.descriptor.dsc_list, p_dscp_info->dsc_list,
p_dscp_info->dl_len);
** the audio codec
**
** Returns NULL if data is not ready.
-** Otherwise, a GKI buffer (BT_HDR*) containing the audio data.
+** Otherwise, a buffer (BT_HDR*) containing the audio data.
**
*******************************************************************************/
extern void *bta_av_co_audio_src_data_path(tBTA_AV_CODEC codec_type,
#include "bta_gatt_api.h"
#include "bta_gattc_ci.h"
#include "bta_gattc_co.h"
-#include "gki.h"
+#include "fixed_queue.h"
/*****************************************************************************
** Constants and data types
tBTA_GATTC_CACHE *p_srvc_cache;
tBTA_GATTC_CACHE *p_cur_srvc;
- BUFFER_Q cache_buffer; /* buffer queue used for storing the cache data */
+ fixed_queue_t *cache_buffer; /* buffer queue used for storing the cache data */
UINT8 *p_free; /* starting point to next available byte */
UINT16 free_byte; /* number of available bytes in server cache buffer */
UINT8 update_count; /* indication received */
#include "bta_gatt_api.h"
#include "gatt_api.h"
-#include "gki.h"
/*****************************************************************************
** Constants and data types
#define BTA_SYS_H
#include "bt_target.h"
-#include "gki.h"
+#include "bt_defs.h"
/*****************************************************************************
** Constants and data types
**
** Function utl_freebuf
**
-** Description This function calls GKI_freebuf to free the buffer passed
+** Description This function calls osi_free to free the buffer passed
** in, if buffer pointer is not NULL, and also initializes
** buffer pointer to NULL.
**
#include "bt_target.h"
#include "allocator.h"
#include "bt_types.h"
-#include "gki.h"
#include "utl.h"
#include "bta_sys.h"
#include "bta_api.h"
#include "bta_sys.h"
#include "bta_sdp_api.h"
#include "bta_sdp_int.h"
-#include "gki.h"
#include <string.h>
-// #include "port_api.h"
+#include "allocator.h"
#include "sdp_api.h"
#if defined(BTA_SDP_INCLUDED) && (BTA_SDP_INCLUDED == TRUE)
bta_sys_register(BTA_ID_SDP, &bta_sdp_reg);
if (p_cback &&
- (p_buf = (tBTA_SDP_API_ENABLE *) GKI_getbuf(sizeof(tBTA_SDP_API_ENABLE))) != NULL) {
+ (p_buf = (tBTA_SDP_API_ENABLE *) osi_malloc(sizeof(tBTA_SDP_API_ENABLE))) != NULL) {
p_buf->hdr.event = BTA_SDP_API_ENABLE_EVT;
p_buf->p_cback = p_cback;
bta_sys_sendmsg(p_buf);
tBTA_SDP_API_SEARCH *p_msg;
APPL_TRACE_API("%s\n", __FUNCTION__);
- if ((p_msg = (tBTA_SDP_API_SEARCH *)GKI_getbuf(sizeof(tBTA_SDP_API_SEARCH))) != NULL) {
+ if ((p_msg = (tBTA_SDP_API_SEARCH *)osi_malloc(sizeof(tBTA_SDP_API_SEARCH))) != NULL) {
p_msg->hdr.event = BTA_SDP_API_SEARCH_EVT;
bdcpy(p_msg->bd_addr, bd_addr);
//p_msg->uuid = uuid;
tBTA_SDP_API_RECORD_USER *p_msg;
APPL_TRACE_API("%s\n", __FUNCTION__);
- if ((p_msg = (tBTA_SDP_API_RECORD_USER *)GKI_getbuf(sizeof(tBTA_SDP_API_RECORD_USER))) != NULL) {
+ if ((p_msg = (tBTA_SDP_API_RECORD_USER *)osi_malloc(sizeof(tBTA_SDP_API_RECORD_USER))) != NULL) {
p_msg->hdr.event = BTA_SDP_API_CREATE_RECORD_USER_EVT;
p_msg->user_data = user_data;
bta_sys_sendmsg(p_msg);
tBTA_SDP_API_RECORD_USER *p_msg;
APPL_TRACE_API("%s\n", __FUNCTION__);
- if ((p_msg = (tBTA_SDP_API_RECORD_USER *)GKI_getbuf(sizeof(tBTA_SDP_API_RECORD_USER))) != NULL) {
+ if ((p_msg = (tBTA_SDP_API_RECORD_USER *)osi_malloc(sizeof(tBTA_SDP_API_RECORD_USER))) != NULL) {
p_msg->hdr.event = BTA_SDP_API_REMOVE_RECORD_USER_EVT;
p_msg->user_data = user_data;
bta_sys_sendmsg(p_msg);
******************************************************************************/
#include "bt_target.h"
-#include "gki.h"
#include "bta_api.h"
#include "bta_sdp_api.h"
/* union of all data types */
typedef union {
- /* GKI event buffer header */
+ /* event buffer header */
BT_HDR hdr;
tBTA_SDP_API_ENABLE enable;
tBTA_SDP_API_SEARCH get_search;
#include "bta_api.h"
#include "bta_sys.h"
#include "bta_sys_int.h"
-#include "gki.h"
#include "utl.h"
/*******************************************************************************
#include "bta_sys_int.h"
#include "fixed_queue.h"
-#include "gki.h"
#include "hash_map.h"
#include "osi.h"
#include "hash_functions.h"
-// #include "osi/include/log.h"
-// #include "osi/include/thread.h"
#if( defined BTA_AR_INCLUDED ) && (BTA_AR_INCLUDED == TRUE)
#include "bta_ar_api.h"
#endif
#include "utl.h"
+#include "allocator.h"
+#include "mutex.h"
/* system manager control block definition */
static hash_map_t *bta_alarm_hash_map;
static const size_t BTA_ALARM_HASH_MAP_SIZE = 17;
-static pthread_mutex_t bta_alarm_lock;
+static osi_mutex_t bta_alarm_lock;
// extern thread_t *bt_workqueue_thread;
/* trace level */
{
memset(&bta_sys_cb, 0, sizeof(tBTA_SYS_CB));
- pthread_mutex_init(&bta_alarm_lock, NULL);
+ osi_mutex_new(&bta_alarm_lock);
bta_alarm_hash_map = hash_map_new(BTA_ALARM_HASH_MAP_SIZE,
hash_function_pointer, NULL, (data_free_fn)osi_alarm_free, NULL);
void bta_sys_free(void)
{
hash_map_free(bta_alarm_hash_map);
- pthread_mutex_destroy(&bta_alarm_lock);
+ osi_mutex_free(&bta_alarm_lock);
}
/*******************************************************************************
APPL_TRACE_DEBUG(" bta_sys_hw_btm_cback was called with parameter: %i" , status );
/* send a message to BTA SYS */
- if ((sys_event = (tBTA_SYS_HW_MSG *) GKI_getbuf(sizeof(tBTA_SYS_HW_MSG))) != NULL) {
+ if ((sys_event = (tBTA_SYS_HW_MSG *) osi_malloc(sizeof(tBTA_SYS_HW_MSG))) != NULL) {
if (status == BTM_DEV_STATUS_UP) {
sys_event->hdr.event = BTA_SYS_EVT_STACK_ENABLED_EVT;
} else if (status == BTM_DEV_STATUS_DOWN) {
sys_event->hdr.event = BTA_SYS_ERROR_EVT;
} else {
/* BTM_DEV_STATUS_CMD_TOUT is ignored for now. */
- GKI_freebuf (sys_event);
+ osi_free (sys_event);
sys_event = NULL;
}
bta_sys_cb.sys_hw_module_active |= ((UINT32)1 << p_sys_hw_msg->hw_module );
tBTA_SYS_HW_MSG *p_msg;
- if ((p_msg = (tBTA_SYS_HW_MSG *) GKI_getbuf(sizeof(tBTA_SYS_HW_MSG))) != NULL) {
+ if ((p_msg = (tBTA_SYS_HW_MSG *) osi_malloc(sizeof(tBTA_SYS_HW_MSG))) != NULL) {
p_msg->hdr.event = BTA_SYS_EVT_ENABLED_EVT;
p_msg->hw_module = p_sys_hw_msg->hw_module;
bta_sys_cb.state = BTA_SYS_HW_STOPPING;
tBTA_SYS_HW_MSG *p_msg;
- if ((p_msg = (tBTA_SYS_HW_MSG *) GKI_getbuf(sizeof(tBTA_SYS_HW_MSG))) != NULL) {
+ if ((p_msg = (tBTA_SYS_HW_MSG *) osi_malloc(sizeof(tBTA_SYS_HW_MSG))) != NULL) {
p_msg->hdr.event = BTA_SYS_EVT_DISABLED_EVT;
p_msg->hw_module = p_sys_hw_msg->hw_module;
}
if (freebuf) {
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
}
**
** Function bta_sys_sendmsg
**
-** Description Send a GKI message to BTA. This function is designed to
+** Description Send a message to BTA. This function is designed to
** optimize sending of messages to BTA. It is called by BTA
** API functions and call-in functions.
**
// message queue. This causes |btu_bta_msg_queue| to get cleaned up before
// it gets used here; hence we check for NULL before using it.
if (btu_task_post(SIG_BTU_BTA_MSG, p_msg, TASK_POST_BLOCKING) != TASK_POST_SUCCESS) {
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
}
assert(p_tle != NULL);
// Get the alarm for this p_tle.
- pthread_mutex_lock(&bta_alarm_lock);
+ osi_mutex_lock(&bta_alarm_lock, OSI_MUTEX_MAX_TIMEOUT);
if (!hash_map_has_key(bta_alarm_hash_map, p_tle)) {
hash_map_set(bta_alarm_hash_map, p_tle, osi_alarm_new("bta_sys", bta_alarm_cb, p_tle, 0));
}
- pthread_mutex_unlock(&bta_alarm_lock);
+ osi_mutex_unlock(&bta_alarm_lock);
osi_alarm_t *alarm = hash_map_get(bta_alarm_hash_map, p_tle);
if (alarm == NULL) {
UINT32 bta_sys_get_remaining_ticks(TIMER_LIST_ENT *p_target_tle)
{
period_ms_t remaining_ms = 0;
- pthread_mutex_lock(&bta_alarm_lock);
+ osi_mutex_lock(&bta_alarm_lock, OSI_MUTEX_MAX_TIMEOUT);
// Get the alarm for this p_tle
hash_map_foreach(bta_alarm_hash_map, hash_iter_ro_cb, &remaining_ms);
- pthread_mutex_unlock(&bta_alarm_lock);
+ osi_mutex_unlock(&bta_alarm_lock);
return remaining_ms;
}
******************************************************************************/
#include <stddef.h>
#include "utl.h"
-#include "gki.h"
#include "btm_api.h"
+#include "allocator.h"
/*******************************************************************************
**
**
** Function utl_freebuf
**
-** Description This function calls GKI_freebuf to free the buffer passed
+** Description This function calls osi_free to free the buffer passed
** in, if buffer pointer is not NULL, and also initializes
** buffer pointer to NULL.
**
void utl_freebuf(void **p)
{
if (*p != NULL) {
- GKI_freebuf(*p);
+ osi_free(*p);
*p = NULL;
}
}
#include "btc_util.h"
#include "config.h"
#include "osi.h"
+#include "mutex.h"
#include "bt_types.h"
return TRUE;
}
-static pthread_mutex_t lock; // protects operations on |config|.
+static osi_mutex_t lock; // protects operations on |config|.
static config_t *config;
bool btc_compare_address_key_value(const char *section, char *key_type, void *key_value, int key_length)
return false;
}
btc_key_value_to_string((uint8_t *)key_value, value_str, key_length);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
if ((status = config_has_key_in_section(config, key_type, value_str)) == true) {
config_remove_section(config, section);
}
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
return status;
}
bool btc_config_init(void)
{
- pthread_mutex_init(&lock, NULL);
+ osi_mutex_new(&lock);
config = config_new(CONFIG_FILE_PATH);
if (!config) {
LOG_WARN("%s unable to load config file; starting unconfigured.\n", __func__);
error:;
config_free(config);
- pthread_mutex_destroy(&lock);
+ osi_mutex_free(&lock);
config = NULL;
LOG_ERROR("%s failed\n", __func__);
return false;
btc_config_flush();
config_free(config);
- pthread_mutex_destroy(&lock);
+ osi_mutex_free(&lock);
config = NULL;
return true;
}
assert(config != NULL);
assert(section != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
bool ret = config_has_section(config, section);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
return ret;
}
assert(section != NULL);
assert(key != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
bool ret = config_has_key(config, section, key);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
return ret;
}
assert(key != NULL);
assert(value != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
bool ret = config_has_key(config, section, key);
if (ret) {
*value = config_get_int(config, section, key, *value);
}
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
return ret;
}
assert(section != NULL);
assert(key != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
config_set_int(config, section, key, value);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
return true;
}
assert(value != NULL);
assert(size_bytes != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
const char *stored_value = config_get_string(config, section, key, NULL);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
if (!stored_value) {
return false;
assert(key != NULL);
assert(value != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
config_set_string(config, section, key, value, false);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
return true;
}
assert(value != NULL);
assert(length != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
const char *value_str = config_get_string(config, section, key, NULL);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
if (!value_str) {
return false;
assert(section != NULL);
assert(key != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
const char *value_str = config_get_string(config, section, key, NULL);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
if (!value_str) {
return 0;
str[(i * 2) + 1] = lookup[value[i] & 0x0F];
}
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
config_set_string(config, section, key, str, false);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
osi_free(str);
return true;
assert(section != NULL);
assert(key != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
bool ret = config_remove_key(config, section, key);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
return ret;
}
assert(config != NULL);
assert(section != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
bool ret = config_remove_section(config, section);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
return ret;
}
size_t num_keys = 0;
size_t total_candidates = 0;
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
for (const config_section_node_t *snode = config_section_begin(config); snode != config_section_end(config); snode = config_section_next(snode)) {
const char *section = config_section_name(snode);
if (!string_is_bdaddr(section)) {
config_remove_section(config, keys[--num_keys]);
}
config_save(config, CONFIG_FILE_PATH);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
}
void btc_config_flush(void)
{
assert(config != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
config_save(config, CONFIG_FILE_PATH);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
}
int btc_config_clear(void)
assert(config != NULL);
- pthread_mutex_lock(&lock);
+ osi_mutex_lock(&lock, OSI_MUTEX_MAX_TIMEOUT);
config_free(config);
config = config_new_empty();
if (config == NULL) {
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
return false;
}
int ret = config_save(config, CONFIG_FILE_PATH);
- pthread_mutex_unlock(&lock);
+ osi_mutex_unlock(&lock);
return ret;
}
#include "btm_int.h"
#include "bta_api.h"
#include "bta_gatt_api.h"
+#include "allocator.h"
/******************************************************************************
#include "btc_task.h"
#include "bt_trace.h"
#include "thread.h"
-#include "gki.h"
#include "esp_bt_defs.h"
#include "esp_gatt_defs.h"
#include "bt_trace.h"
#include "bt_defs.h"
#include "btc_profile_queue.h"
-#include "gki.h"
#include "list.h"
#include "allocator.h"
#include "allocator.h"
#include "btc_common.h"
#include "btc_sm.h"
-#include "gki.h"
#if BTC_SM_INCLUDED
/*****************************************************************************
#include "btc_task.h"
#include "bt_trace.h"
#include "thread.h"
-#include "gki.h"
#include "bt_defs.h"
+#include "allocator.h"
#include "btc_main.h"
#include "btc_dev.h"
#include "btc_gatts.h"
break;
}
if (msg.arg) {
- GKI_freebuf(msg.arg);
+ osi_free(msg.arg);
}
}
}
memcpy(&lmsg, msg, sizeof(btc_msg_t));
if (arg) {
- lmsg.arg = (void *)GKI_getbuf(arg_len);
+ lmsg.arg = (void *)osi_malloc(arg_len);
if (lmsg.arg == NULL) {
return BT_STATUS_NOMEM;
}
#include "bt_target.h"
#include "bt_trace.h"
+#include "allocator.h"
#include "bt_types.h"
#include "gatt_api.h"
#include "bta_api.h"
tBTA_GATT_STATUS status = GATT_SUCCESS;
if (blufi_env.prepare_buf == NULL) {
- blufi_env.prepare_buf = GKI_getbuf(BLUFI_PREPAIR_BUF_MAX_SIZE);
+ blufi_env.prepare_buf = osi_malloc(BLUFI_PREPAIR_BUF_MAX_SIZE);
if (blufi_env.prepare_buf == NULL) {
LOG_ERROR("Blufi prep no mem\n");
status = GATT_NO_RESOURCES;
}
if (blufi_env.prepare_buf) {
- GKI_freebuf(blufi_env.prepare_buf);
+ osi_free(blufi_env.prepare_buf);
blufi_env.prepare_buf = NULL;
}
if (BLUFI_FC_IS_FRAG(hdr->fc)) {
if (blufi_env.offset == 0) {
blufi_env.total_len = hdr->data[0] | (((uint16_t) hdr->data[1]) << 8);
- blufi_env.aggr_buf = GKI_getbuf(blufi_env.total_len);
+ blufi_env.aggr_buf = osi_malloc(blufi_env.total_len);
if (blufi_env.aggr_buf == NULL) {
LOG_ERROR("%s no mem, len %d\n", __func__, blufi_env.total_len);
return;
btc_blufi_protocol_handler(hdr->type, blufi_env.aggr_buf, blufi_env.total_len);
blufi_env.offset = 0;
- GKI_freebuf(blufi_env.aggr_buf);
+ osi_free(blufi_env.aggr_buf);
blufi_env.aggr_buf = NULL;
} else {
btc_blufi_protocol_handler(hdr->type, hdr->data, hdr->data_len);
while (remain_len > 0) {
if (remain_len > blufi_env.frag_size) {
- hdr = GKI_getbuf(sizeof(struct blufi_hdr) + 2 + blufi_env.frag_size + 2);
+ hdr = osi_malloc(sizeof(struct blufi_hdr) + 2 + blufi_env.frag_size + 2);
if (hdr == NULL) {
LOG_ERROR("%s no mem\n", __func__);
return;
memcpy(hdr->data + 2, &data[total_data_len - remain_len], blufi_env.frag_size); //copy first, easy for check sum
hdr->fc |= BLUFI_FC_FRAG;
} else {
- hdr = GKI_getbuf(sizeof(struct blufi_hdr) + remain_len + 2);
+ hdr = osi_malloc(sizeof(struct blufi_hdr) + remain_len + 2);
if (hdr == NULL) {
LOG_ERROR("%s no mem\n", __func__);
return;
hdr->fc |= BLUFI_FC_ENC;
} else {
LOG_ERROR("%s encrypt error %d\n", __func__, ret);
- GKI_freebuf(hdr);
+ osi_free(hdr);
return;
}
}
hdr->data_len + sizeof(struct blufi_hdr) + 2 :
hdr->data_len + sizeof(struct blufi_hdr)));
- GKI_freebuf(hdr);
+ osi_free(hdr);
hdr = NULL;
}
}
uint8_t *p;
data_len = info_len + 3;
- p = data = GKI_getbuf(data_len);
+ p = data = osi_malloc(data_len);
if (data == NULL) {
return;
}
}
btc_blufi_send_encap(type, data, data_len);
- GKI_freebuf(data);
+ osi_free(data);
}
static void btc_blufi_send_ack(uint8_t seq)
switch (msg->act) {
case ESP_BLUFI_EVENT_RECV_STA_SSID:
- dst->sta_ssid.ssid = GKI_getbuf(src->sta_ssid.ssid_len);
+ dst->sta_ssid.ssid = osi_malloc(src->sta_ssid.ssid_len);
if (dst->sta_ssid.ssid == NULL) {
LOG_ERROR("%s %d no mem\n", __func__, msg->act);
+ break; /* allocation failed: do not memcpy() into a NULL destination */
}
memcpy(dst->sta_ssid.ssid, src->sta_ssid.ssid, src->sta_ssid.ssid_len);
break;
case ESP_BLUFI_EVENT_RECV_STA_PASSWD:
- dst->sta_passwd.passwd = GKI_getbuf(src->sta_passwd.passwd_len);
+ dst->sta_passwd.passwd = osi_malloc(src->sta_passwd.passwd_len);
if (dst->sta_passwd.passwd == NULL) {
LOG_ERROR("%s %d no mem\n", __func__, msg->act);
+ break; /* allocation failed: do not memcpy() into a NULL destination */
}
memcpy(dst->sta_passwd.passwd, src->sta_passwd.passwd, src->sta_passwd.passwd_len);
break;
case ESP_BLUFI_EVENT_RECV_SOFTAP_SSID:
- dst->softap_ssid.ssid = GKI_getbuf(src->softap_ssid.ssid_len);
+ dst->softap_ssid.ssid = osi_malloc(src->softap_ssid.ssid_len);
if (dst->softap_ssid.ssid == NULL) {
LOG_ERROR("%s %d no mem\n", __func__, msg->act);
+ break; /* allocation failed: do not memcpy() into a NULL destination */
}
memcpy(dst->softap_ssid.ssid, src->softap_ssid.ssid, src->softap_ssid.ssid_len);
break;
case ESP_BLUFI_EVENT_RECV_SOFTAP_PASSWD:
- dst->softap_passwd.passwd = GKI_getbuf(src->softap_passwd.passwd_len);
+ dst->softap_passwd.passwd = osi_malloc(src->softap_passwd.passwd_len);
if (dst->softap_passwd.passwd == NULL) {
LOG_ERROR("%s %d no mem\n", __func__, msg->act);
+ break; /* allocation failed: do not memcpy() into a NULL destination */
}
memcpy(dst->softap_passwd.passwd, src->softap_passwd.passwd, src->softap_passwd.passwd_len);
break;
case ESP_BLUFI_EVENT_RECV_USERNAME:
- dst->username.name = GKI_getbuf(src->username.name_len);
+ dst->username.name = osi_malloc(src->username.name_len);
if (dst->username.name == NULL) {
LOG_ERROR("%s %d no mem\n", __func__, msg->act);
+ break; /* allocation failed: do not memcpy() into a NULL destination */
}
memcpy(dst->username.name, src->username.name, src->username.name_len);
break;
case ESP_BLUFI_EVENT_RECV_CA_CERT:
- dst->ca.cert = GKI_getbuf(src->ca.cert_len);
+ dst->ca.cert = osi_malloc(src->ca.cert_len);
if (dst->ca.cert == NULL) {
LOG_ERROR("%s %d no mem\n", __func__, msg->act);
+ break; /* allocation failed: do not memcpy() into a NULL destination */
}
memcpy(dst->ca.cert, src->ca.cert, src->ca.cert_len);
break;
case ESP_BLUFI_EVENT_RECV_CLIENT_CERT:
- dst->client_cert.cert = GKI_getbuf(src->client_cert.cert_len);
+ dst->client_cert.cert = osi_malloc(src->client_cert.cert_len);
if (dst->client_cert.cert == NULL) {
LOG_ERROR("%s %d no mem\n", __func__, msg->act);
+ break; /* allocation failed: do not memcpy() into a NULL destination */
}
memcpy(dst->client_cert.cert, src->client_cert.cert, src->client_cert.cert_len);
break;
case ESP_BLUFI_EVENT_RECV_SERVER_CERT:
- dst->server_cert.cert = GKI_getbuf(src->server_cert.cert_len);
+ dst->server_cert.cert = osi_malloc(src->server_cert.cert_len);
if (dst->server_cert.cert == NULL) {
LOG_ERROR("%s %d no mem\n", __func__, msg->act);
+ break; /* allocation failed: do not memcpy() into a NULL destination */
}
memcpy(dst->server_cert.cert, src->server_cert.cert, src->server_cert.cert_len);
break;
case ESP_BLUFI_EVENT_RECV_CLIENT_PRIV_KEY:
- dst->client_pkey.pkey = GKI_getbuf(src->client_pkey.pkey_len);
+ dst->client_pkey.pkey = osi_malloc(src->client_pkey.pkey_len);
if (dst->client_pkey.pkey == NULL) {
LOG_ERROR("%s %d no mem\n", __func__, msg->act);
+ break; /* allocation failed: do not memcpy() into a NULL destination */
}
memcpy(dst->client_pkey.pkey, src->client_pkey.pkey, src->client_pkey.pkey_len);
break;
case ESP_BLUFI_EVENT_RECV_SERVER_PRIV_KEY:
- dst->server_pkey.pkey = GKI_getbuf(src->server_pkey.pkey_len);
+ dst->server_pkey.pkey = osi_malloc(src->server_pkey.pkey_len);
if (dst->server_pkey.pkey == NULL) {
LOG_ERROR("%s %d no mem\n", __func__, msg->act);
}
switch (msg->act) {
case ESP_BLUFI_EVENT_RECV_STA_SSID:
- GKI_freebuf(param->sta_ssid.ssid);
+ osi_free(param->sta_ssid.ssid);
break;
case ESP_BLUFI_EVENT_RECV_STA_PASSWD:
- GKI_freebuf(param->sta_passwd.passwd);
+ osi_free(param->sta_passwd.passwd);
break;
case ESP_BLUFI_EVENT_RECV_SOFTAP_SSID:
- GKI_freebuf(param->softap_ssid.ssid);
+ osi_free(param->softap_ssid.ssid);
break;
case ESP_BLUFI_EVENT_RECV_SOFTAP_PASSWD:
- GKI_freebuf(param->softap_passwd.passwd);
+ osi_free(param->softap_passwd.passwd);
break;
case ESP_BLUFI_EVENT_RECV_USERNAME:
- GKI_freebuf(param->username.name);
+ osi_free(param->username.name);
break;
case ESP_BLUFI_EVENT_RECV_CA_CERT:
- GKI_freebuf(param->ca.cert);
+ osi_free(param->ca.cert);
break;
case ESP_BLUFI_EVENT_RECV_CLIENT_CERT:
- GKI_freebuf(param->client_cert.cert);
+ osi_free(param->client_cert.cert);
break;
case ESP_BLUFI_EVENT_RECV_SERVER_CERT:
- GKI_freebuf(param->server_cert.cert);
+ osi_free(param->server_cert.cert);
break;
case ESP_BLUFI_EVENT_RECV_CLIENT_PRIV_KEY:
- GKI_freebuf(param->client_pkey.pkey);
+ osi_free(param->client_pkey.pkey);
break;
case ESP_BLUFI_EVENT_RECV_SERVER_PRIV_KEY:
- GKI_freebuf(param->server_pkey.pkey);
+ osi_free(param->server_pkey.pkey);
break;
default:
break;
return;
}
- dst->wifi_conn_report.extra_info = GKI_getbuf(sizeof(esp_blufi_extra_info_t));
+ dst->wifi_conn_report.extra_info = osi_malloc(sizeof(esp_blufi_extra_info_t));
if (dst->wifi_conn_report.extra_info == NULL) {
return;
}
dst->wifi_conn_report.extra_info_len += (6 + 2);
}
if (src_info->sta_ssid) {
- dst->wifi_conn_report.extra_info->sta_ssid = GKI_getbuf(src_info->sta_ssid_len);
+ dst->wifi_conn_report.extra_info->sta_ssid = osi_malloc(src_info->sta_ssid_len);
if (dst->wifi_conn_report.extra_info->sta_ssid) {
memcpy(dst->wifi_conn_report.extra_info->sta_ssid, src_info->sta_ssid, src_info->sta_ssid_len);
dst->wifi_conn_report.extra_info->sta_ssid_len = src_info->sta_ssid_len;
}
}
if (src_info->sta_passwd) {
- dst->wifi_conn_report.extra_info->sta_passwd = GKI_getbuf(src_info->sta_passwd_len);
+ dst->wifi_conn_report.extra_info->sta_passwd = osi_malloc(src_info->sta_passwd_len);
if (dst->wifi_conn_report.extra_info->sta_passwd) {
memcpy(dst->wifi_conn_report.extra_info->sta_passwd, src_info->sta_passwd, src_info->sta_passwd_len);
dst->wifi_conn_report.extra_info->sta_passwd_len = src_info->sta_passwd_len;
}
}
if (src_info->softap_ssid) {
- dst->wifi_conn_report.extra_info->softap_ssid = GKI_getbuf(src_info->softap_ssid_len);
+ dst->wifi_conn_report.extra_info->softap_ssid = osi_malloc(src_info->softap_ssid_len);
if (dst->wifi_conn_report.extra_info->softap_ssid) {
memcpy(dst->wifi_conn_report.extra_info->softap_ssid, src_info->softap_ssid, src_info->softap_ssid_len);
dst->wifi_conn_report.extra_info->softap_ssid_len = src_info->softap_ssid_len;
}
}
if (src_info->softap_passwd) {
- dst->wifi_conn_report.extra_info->softap_passwd = GKI_getbuf(src_info->softap_passwd_len);
+ dst->wifi_conn_report.extra_info->softap_passwd = osi_malloc(src_info->softap_passwd_len);
if (dst->wifi_conn_report.extra_info->softap_passwd) {
memcpy(dst->wifi_conn_report.extra_info->softap_passwd, src_info->softap_passwd, src_info->softap_passwd_len);
dst->wifi_conn_report.extra_info->softap_passwd_len = src_info->softap_passwd_len;
return;
}
if (info->sta_ssid) {
- GKI_freebuf(info->sta_ssid);
+ osi_free(info->sta_ssid);
}
if (info->sta_passwd) {
- GKI_freebuf(info->sta_passwd);
+ osi_free(info->sta_passwd);
}
if (info->softap_ssid) {
- GKI_freebuf(info->softap_ssid);
+ osi_free(info->softap_ssid);
}
if (info->softap_passwd) {
- GKI_freebuf(info->softap_passwd);
+ osi_free(info->softap_passwd);
}
- GKI_freebuf(info);
+ osi_free(info);
break;
}
default:
#include "btc_media.h"
#include "btc_av_co.h"
#include "btc_util.h"
+#include "mutex.h"
#if BTC_AV_INCLUDED
APPL_TRACE_DEBUG("bta_av_audio_sink_getconfig last SRC reached");
/* Protect access to bta_av_co_cb.codec_cfg */
- GKI_disable();
+ osi_mutex_global_lock();
/* Find a src that matches the codec config */
if (bta_av_co_audio_peer_src_supports_codec(p_peer, &index)) {
}
}
/* Protect access to bta_av_co_cb.codec_cfg */
- GKI_enable();
+ osi_mutex_global_unlock();
}
return result;
}
APPL_TRACE_DEBUG("bta_av_co_audio_getconfig last sink reached");
/* Protect access to bta_av_co_cb.codec_cfg */
- GKI_disable();
+ osi_mutex_global_lock();
/* Find a sink that matches the codec config */
if (bta_av_co_audio_peer_supports_codec(p_peer, &index)) {
}
}
/* Protect access to bta_av_co_cb.codec_cfg */
- GKI_enable();
+ osi_mutex_global_unlock();
}
return result;
}
if (codec_cfg_supported) {
/* Protect access to bta_av_co_cb.codec_cfg */
- GKI_disable();
+ osi_mutex_global_lock();
/* Check if the configuration matches the current codec config */
switch (bta_av_co_cb.codec_cfg.id) {
break;
}
/* Protect access to bta_av_co_cb.codec_cfg */
- GKI_enable();
+ osi_mutex_global_unlock();
} else {
category = AVDT_ASC_CODEC;
status = A2D_WRONG_CODEC;
** Description This function is called to manage data transfer from
** the audio codec to AVDTP.
**
- ** Returns Pointer to the GKI buffer to send, NULL if no buffer to send
+ ** Returns Pointer to the buffer to send, NULL if no buffer to send
**
*******************************************************************************/
void *bta_av_co_audio_src_data_path(tBTA_AV_CODEC codec_type, UINT32 *p_len,
*******************************************************************************/
void bta_av_co_audio_codec_reset(void)
{
- GKI_disable();
+ osi_mutex_global_lock();
FUNC_TRACE();
/* Reset the current configuration to SBC */
APPL_TRACE_ERROR("bta_av_co_audio_codec_reset A2D_BldSbcInfo failed");
}
- GKI_enable();
+ osi_mutex_global_unlock();
}
/*******************************************************************************
/* Minimum MTU is by default very large */
*p_minmtu = 0xFFFF;
- GKI_disable();
+ osi_mutex_global_lock();
if (bta_av_co_cb.codec_cfg.id == BTC_AV_CODEC_SBC) {
if (A2D_ParsSbcInfo(p_sbc_config, bta_av_co_cb.codec_cfg.info, FALSE) == A2D_SUCCESS) {
for (index = 0; index < BTA_AV_CO_NUM_ELEMENTS(bta_av_co_cb.peers); index++) {
/* Not SBC, still return the default values */
*p_sbc_config = btc_av_sbc_default_config;
}
- GKI_enable();
+ osi_mutex_global_unlock();
return result;
}
#include "bta_api.h"
#include "btc_media.h"
#include "bta_av_api.h"
-#include "gki.h"
#include "btu.h"
#include "bt_utils.h"
#include "btc_common.h"
#include <stdio.h>
#include <stdint.h>
#include "fixed_queue.h"
-#include "gki.h"
#include "bta_api.h"
#include "btu.h"
#include "bta_sys.h"
#include "allocator.h"
#include "bt_utils.h"
#include "esp_a2dp_api.h"
+#include "mutex.h"
// #if (BTA_AV_SINK_INCLUDED == TRUE)
#include "oi_codec_sbc.h"
} tBT_SBC_HDR;
typedef struct {
- BUFFER_Q RxSbcQ;
+ fixed_queue_t *RxSbcQ;
void *av_sm_hdl;
UINT8 peer_sep;
UINT8 busy_level;
OI_BOOL enhanced);
// #endif
-static void btc_media_flush_q(BUFFER_Q *p_q);
+static void btc_media_flush_q(fixed_queue_t *p_q);
static void btc_media_task_aa_rx_flush(void);
static const char *dump_media_event(UINT16 event);
static void btc_media_thread_handle_cmd(fixed_queue_t *queue);
APPL_TRACE_EVENT("## A2DP SETUP CODEC ##\n");
- GKI_disable();
+ osi_mutex_global_lock();
/* for now hardcode 44.1 khz 16 bit stereo PCM format */
media_feeding.cfg.pcm.sampling_freq = 44100;
bta_av_co_audio_set_codec(&media_feeding, &status);
- GKI_enable();
+ osi_mutex_global_unlock();
}
/*****************************************************************************
{
BT_HDR *p_buf;
- if (NULL == (p_buf = GKI_getbuf(sizeof(BT_HDR)))) {
+ if (NULL == (p_buf = osi_malloc(sizeof(BT_HDR)))) {
return FALSE;
}
p_av[4], p_av[5], p_av[6]);
tBTC_MEDIA_SINK_CFG_UPDATE *p_buf;
- if (NULL == (p_buf = GKI_getbuf(sizeof(tBTC_MEDIA_SINK_CFG_UPDATE)))) {
+ if (NULL == (p_buf = osi_malloc(sizeof(tBTC_MEDIA_SINK_CFG_UPDATE)))) {
APPL_TRACE_ERROR("btc_reset_decoder No Buffer ");
return;
}
static void btc_media_task_avk_data_ready(UNUSED_ATTR void *context)
{
- UINT8 count;
tBT_SBC_HDR *p_msg;
- count = btc_media_cb.RxSbcQ._count;
- if (0 == count) {
+ if (fixed_queue_is_empty(btc_media_cb.RxSbcQ)) {
APPL_TRACE_DEBUG(" QUE EMPTY ");
} else {
if (btc_media_cb.rx_flush == TRUE) {
- btc_media_flush_q(&(btc_media_cb.RxSbcQ));
+ btc_media_flush_q(btc_media_cb.RxSbcQ);
return;
}
- while ((p_msg = (tBT_SBC_HDR *)GKI_getfirst(&(btc_media_cb.RxSbcQ))) != NULL ) {
+ while ((p_msg = (tBT_SBC_HDR *)fixed_queue_try_peek_first(btc_media_cb.RxSbcQ)) != NULL ) {
btc_media_task_handle_inc_media(p_msg);
- p_msg = GKI_dequeue(&(btc_media_cb.RxSbcQ));
+ p_msg = (tBT_SBC_HDR *)fixed_queue_try_dequeue(btc_media_cb.RxSbcQ);
if ( p_msg == NULL ) {
APPL_TRACE_ERROR("Insufficient data in que ");
break;
}
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
APPL_TRACE_DEBUG(" Process Frames - ");
}
btc_media_cb.av_sm_hdl = btc_av_get_sm_handle();
raise_priority_a2dp(TASK_HIGH_MEDIA);
media_task_running = MEDIA_TASK_STATE_ON;
+
+ btc_media_cb.RxSbcQ = fixed_queue_new(SIZE_MAX);
}
static void btc_media_thread_cleanup(UNUSED_ATTR void *context)
btc_media_cb.data_channel_open = FALSE;
/* Clear media task flag */
media_task_running = MEDIA_TASK_STATE_OFF;
+
+ fixed_queue_free(btc_media_cb.RxSbcQ, osi_free_func);
+ btc_media_cb.RxSbcQ = NULL; /* clear handle so later queue ops cannot touch freed memory */
}
/*******************************************************************************
** Returns void
**
*******************************************************************************/
-static void btc_media_flush_q(BUFFER_Q *p_q)
+static void btc_media_flush_q(fixed_queue_t *p_q)
{
- while (!GKI_queue_is_empty(p_q)) {
- GKI_freebuf(GKI_dequeue(p_q));
+ while (!fixed_queue_is_empty(p_q)) {
+ osi_free(fixed_queue_try_dequeue(p_q));
}
}
default:
APPL_TRACE_ERROR("ERROR in %s unknown event %d\n", __func__, p_msg->event);
}
- GKI_freebuf(p_msg);
APPL_TRACE_VERBOSE("%s: %s DONE\n", __func__, dump_media_event(p_msg->event));
+ osi_free(p_msg); /* free only after the last read of p_msg (trace above reads p_msg->event) */
}
}
{
BT_HDR *p_buf;
- if (GKI_queue_is_empty(&(btc_media_cb.RxSbcQ)) == TRUE) { /* Que is already empty */
+ if (fixed_queue_is_empty(btc_media_cb.RxSbcQ) == TRUE) { /* Que is already empty */
return TRUE;
}
- if (NULL == (p_buf = GKI_getbuf(sizeof(BT_HDR)))) {
+ if (NULL == (p_buf = osi_malloc(sizeof(BT_HDR)))) {
return FALSE;
}
*******************************************************************************/
static void btc_media_task_aa_rx_flush(void)
{
- /* Flush all enqueued GKI SBC buffers (encoded) */
+ /* Flush all enqueued SBC buffers (encoded) */
APPL_TRACE_DEBUG("btc_media_task_aa_rx_flush");
- btc_media_flush_q(&(btc_media_cb.RxSbcQ));
+ btc_media_flush_q(btc_media_cb.RxSbcQ);
}
int btc_a2dp_get_track_frequency(UINT8 frequency)
tBT_SBC_HDR *p_msg;
if (btc_media_cb.rx_flush == TRUE) { /* Flush enabled, do not enque*/
- return GKI_queue_length(&btc_media_cb.RxSbcQ);
+ return fixed_queue_length(btc_media_cb.RxSbcQ);
}
- if (GKI_queue_length(&btc_media_cb.RxSbcQ) >= MAX_OUTPUT_A2DP_FRAME_QUEUE_SZ) {
+ if (fixed_queue_length(btc_media_cb.RxSbcQ) >= MAX_OUTPUT_A2DP_FRAME_QUEUE_SZ) {
APPL_TRACE_WARNING("Pkt dropped\n");
- return GKI_queue_length(&btc_media_cb.RxSbcQ);
+ return fixed_queue_length(btc_media_cb.RxSbcQ);
}
APPL_TRACE_DEBUG("btc_media_sink_enque_buf + ");
/* allocate and Queue this buffer */
- if ((p_msg = (tBT_SBC_HDR *) GKI_getbuf(sizeof(tBT_SBC_HDR) +
+ if ((p_msg = (tBT_SBC_HDR *) osi_malloc(sizeof(tBT_SBC_HDR) +
p_pkt->offset + p_pkt->len)) != NULL) {
memcpy(p_msg, p_pkt, (sizeof(BT_HDR) + p_pkt->offset + p_pkt->len));
p_msg->num_frames_to_be_processed = (*((UINT8 *)(p_msg + 1) + p_msg->offset)) & 0x0f;
APPL_TRACE_VERBOSE("btc_media_sink_enque_buf %d + \n", p_msg->num_frames_to_be_processed);
- GKI_enqueue(&(btc_media_cb.RxSbcQ), p_msg);
+ fixed_queue_enqueue(btc_media_cb.RxSbcQ, p_msg);
btc_media_data_post();
} else {
/* let caller deal with a failed allocation */
APPL_TRACE_WARNING("btc_media_sink_enque_buf No Buffer left - ");
}
- return GKI_queue_length(&btc_media_cb.RxSbcQ);
+ return fixed_queue_length(btc_media_cb.RxSbcQ);
}
/*******************************************************************************
#include "bta_api.h"
#include "bta_av_api.h"
#include "avrc_defs.h"
-#include "gki.h"
#include "btc_common.h"
#include "btc_util.h"
#include "btc_av.h"
#include "btc_avrc.h"
#include "btc_manage.h"
#include "esp_avrc_api.h"
+#include "mutex.h"
+
#if BTC_AV_INCLUDED
/*****************************************************************************
} rc_transaction_t;
typedef struct {
- pthread_mutex_t lbllock;
+ osi_mutex_t lbllock;
rc_transaction_t transaction[MAX_TRANSACTIONS_PER_SESSION];
} rc_device_t;
while (dis_attr_bit && i < (DIS_MAX_CHAR_NUM - 1 )) {
if (dis_attr_bit & (UINT16)(1 << i)) {
if (dis_cb.dis_value.data_string[i - 1] != NULL) {
- GKI_freebuf(dis_cb.dis_value.data_string[i - 1]);
+ osi_free(dis_cb.dis_value.data_string[i - 1]);
}
/* coverity[OVERRUN-STATIC] False-positive : when i = 8, (1 << i) == DIS_ATTR_PNP_ID_BIT, and it will never come down here
CID 49902: Out-of-bounds read (OVERRUN_STATIC)
Overrunning static array "dis_cb.dis_value.data_string", with 7 elements, at position 7 with index variable "i".
*/
- if ((dis_cb.dis_value.data_string[i - 1] = (UINT8 *)GKI_getbuf((UINT16)(p_info->data_str.len + 1))) != NULL) {
+ if ((dis_cb.dis_value.data_string[i - 1] = (UINT8 *)osi_malloc((UINT16)(p_info->data_str.len + 1))) != NULL) {
memcpy(dis_cb.dis_value.data_string[i - 1], p_info->data_str.p_data, p_info->data_str.len);
dis_cb.dis_value.data_string[i - 1][p_info->data_str.len] = 0; /* make sure null terminate */
#include <string.h>
+#include "allocator.h"
#include "bt_types.h"
#include "bt_defs.h"
#include "bta_api.h"
if (NULL == *buf) {
return;
}
- GKI_freebuf(*buf);
+ osi_free(*buf);
*buf = NULL;
}
}
if (p_adv_data->manufacturer_len > 0 && p_adv_data->p_manufacturer_data != NULL) {
- bta_adv_data->p_manu = GKI_getbuf(sizeof(tBTA_BLE_MANU));
+ bta_adv_data->p_manu = osi_malloc(sizeof(tBTA_BLE_MANU));
if (bta_adv_data->p_manu != NULL) {
- bta_adv_data->p_manu->p_val = GKI_getbuf(p_adv_data->manufacturer_len);
+ bta_adv_data->p_manu->p_val = osi_malloc(p_adv_data->manufacturer_len);
if (bta_adv_data->p_manu->p_val != NULL) {
mask |= BTM_BLE_AD_BIT_MANU;
bta_adv_data->p_manu->len = p_adv_data->manufacturer_len;
tBTA_BLE_PROP_ELEM *p_elem_service_data = NULL;
if (p_adv_data->service_data_len > 0 && p_adv_data->p_service_data != NULL) {
- p_elem_service_data = GKI_getbuf(sizeof(tBTA_BLE_PROP_ELEM));
+ p_elem_service_data = osi_malloc(sizeof(tBTA_BLE_PROP_ELEM));
if (p_elem_service_data != NULL) {
- p_elem_service_data->p_val = GKI_getbuf(p_adv_data->service_data_len);
+ p_elem_service_data->p_val = osi_malloc(p_adv_data->service_data_len);
if (p_elem_service_data->p_val != NULL) {
p_elem_service_data->adv_type = BTM_BLE_AD_TYPE_SERVICE_DATA;
p_elem_service_data->len = p_adv_data->service_data_len;
memcpy(p_elem_service_data->p_val, p_adv_data->p_service_data,
p_adv_data->service_data_len);
} else {
- GKI_freebuf(p_elem_service_data);
+ osi_free(p_elem_service_data);
p_elem_service_data = NULL;
}
}
}
if (NULL != p_elem_service_data) {
- bta_adv_data->p_proprietary = GKI_getbuf(sizeof(tBTA_BLE_PROPRIETARY));
+ bta_adv_data->p_proprietary = osi_malloc(sizeof(tBTA_BLE_PROPRIETARY));
if (NULL != bta_adv_data->p_proprietary) {
tBTA_BLE_PROP_ELEM *p_elem = NULL;
tBTA_BLE_PROPRIETARY *p_prop = bta_adv_data->p_proprietary;
p_prop->num_elem = 0;
mask |= BTM_BLE_AD_BIT_PROPRIETARY;
p_prop->num_elem = 1;
- p_prop->p_elem = GKI_getbuf(sizeof(tBTA_BLE_PROP_ELEM) * p_prop->num_elem);
+ p_prop->p_elem = osi_malloc(sizeof(tBTA_BLE_PROP_ELEM) * p_prop->num_elem);
p_elem = p_prop->p_elem;
if (NULL != p_elem) {
memcpy(p_elem++, p_elem_service_data, sizeof(tBTA_BLE_PROP_ELEM));
}
- GKI_freebuf(p_elem_service_data);
+ osi_free(p_elem_service_data);
}
}
switch (bt_uuid.len) {
case (LEN_UUID_16): {
if (NULL == bta_adv_data->p_services) {
- bta_adv_data->p_services = GKI_getbuf(sizeof(tBTA_BLE_SERVICE));
+ bta_adv_data->p_services = osi_malloc(sizeof(tBTA_BLE_SERVICE));
bta_adv_data->p_services->list_cmpl = FALSE;
bta_adv_data->p_services->num_service = 0;
- bta_adv_data->p_services->p_uuid = GKI_getbuf(p_adv_data->service_uuid_len / LEN_UUID_128 * LEN_UUID_16);
+ bta_adv_data->p_services->p_uuid = osi_malloc(p_adv_data->service_uuid_len / LEN_UUID_128 * LEN_UUID_16);
p_uuid_out16 = bta_adv_data->p_services->p_uuid;
}
case (LEN_UUID_32): {
if (NULL == bta_adv_data->p_service_32b) {
bta_adv_data->p_service_32b =
- GKI_getbuf(sizeof(tBTA_BLE_32SERVICE));
+ osi_malloc(sizeof(tBTA_BLE_32SERVICE));
bta_adv_data->p_service_32b->list_cmpl = FALSE;
bta_adv_data->p_service_32b->num_service = 0;
bta_adv_data->p_service_32b->p_uuid =
- GKI_getbuf(p_adv_data->service_uuid_len / LEN_UUID_128 * LEN_UUID_32);
+ osi_malloc(p_adv_data->service_uuid_len / LEN_UUID_128 * LEN_UUID_32);
p_uuid_out32 = bta_adv_data->p_service_32b->p_uuid;
}
/* Currently, only one 128-bit UUID is supported */
if (NULL == bta_adv_data->p_services_128b) {
bta_adv_data->p_services_128b =
- GKI_getbuf(sizeof(tBTA_BLE_128SERVICE));
+ osi_malloc(sizeof(tBTA_BLE_128SERVICE));
if (NULL != bta_adv_data->p_services_128b) {
LOG_ERROR("%s - In 128-UUID_data", __FUNCTION__);
mask |= BTM_BLE_AD_BIT_SERVICE_128;
esp_ble_bond_dev_t *bond_dev;
btc_msg_t msg;
int num_dev = btc_storage_get_num_ble_bond_devices();
- bond_dev = GKI_getbuf(sizeof(esp_ble_bond_dev_t)*num_dev);
+ bond_dev = (esp_ble_bond_dev_t *)osi_malloc(sizeof(esp_ble_bond_dev_t)*num_dev);
param.get_bond_dev_cmpl.status = btc_get_bonded_ble_devices_list(bond_dev);
param.get_bond_dev_cmpl.dev_num = num_dev;
LOG_ERROR("%s btc_transfer_context failed", __func__);
}
// release the buffer after used.
- GKI_freebuf((void *)bond_dev);
+ osi_free(bond_dev);
}
static void btc_ble_config_local_privacy(bool privacy_enable, tBTA_SET_LOCAL_PRIVACY_CBACK *set_local_privacy_cback)
{
btc_ble_gap_args_t *dst = (btc_ble_gap_args_t *) p_dest;
if (src->cfg_adv_data.adv_data.p_manufacturer_data) {
- dst->cfg_adv_data.adv_data.p_manufacturer_data = GKI_getbuf(src->cfg_adv_data.adv_data.manufacturer_len);
- memcpy(dst->cfg_adv_data.adv_data.p_manufacturer_data, src->cfg_adv_data.adv_data.p_manufacturer_data,
- src->cfg_adv_data.adv_data.manufacturer_len);
+ dst->cfg_adv_data.adv_data.p_manufacturer_data = osi_malloc(src->cfg_adv_data.adv_data.manufacturer_len);
+ if (dst->cfg_adv_data.adv_data.p_manufacturer_data) { /* avoid memcpy() into NULL on alloc failure */
+ memcpy(dst->cfg_adv_data.adv_data.p_manufacturer_data, src->cfg_adv_data.adv_data.p_manufacturer_data,
+ src->cfg_adv_data.adv_data.manufacturer_len);
+ }
}
if (src->cfg_adv_data.adv_data.p_service_data) {
- dst->cfg_adv_data.adv_data.p_service_data = GKI_getbuf(src->cfg_adv_data.adv_data.service_data_len);
- memcpy(dst->cfg_adv_data.adv_data.p_service_data, src->cfg_adv_data.adv_data.p_service_data, src->cfg_adv_data.adv_data.service_data_len);
+ dst->cfg_adv_data.adv_data.p_service_data = osi_malloc(src->cfg_adv_data.adv_data.service_data_len);
+ if (dst->cfg_adv_data.adv_data.p_service_data) { /* avoid memcpy() into NULL on alloc failure */
+ memcpy(dst->cfg_adv_data.adv_data.p_service_data, src->cfg_adv_data.adv_data.p_service_data, src->cfg_adv_data.adv_data.service_data_len);
+ }
}
if (src->cfg_adv_data.adv_data.p_service_uuid) {
- dst->cfg_adv_data.adv_data.p_service_uuid = GKI_getbuf(src->cfg_adv_data.adv_data.service_uuid_len);
- memcpy(dst->cfg_adv_data.adv_data.p_service_uuid, src->cfg_adv_data.adv_data.p_service_uuid, src->cfg_adv_data.adv_data.service_uuid_len);
+ dst->cfg_adv_data.adv_data.p_service_uuid = osi_malloc(src->cfg_adv_data.adv_data.service_uuid_len);
+ if (dst->cfg_adv_data.adv_data.p_service_uuid) { /* avoid memcpy() into NULL on alloc failure */
+ memcpy(dst->cfg_adv_data.adv_data.p_service_uuid, src->cfg_adv_data.adv_data.p_service_uuid, src->cfg_adv_data.adv_data.service_uuid_len);
+ }
}
break;
btc_ble_gap_args_t *dst = (btc_ble_gap_args_t *) p_dest;
if (src && src->cfg_adv_data_raw.raw_adv && src->cfg_adv_data_raw.raw_adv_len > 0) {
- dst->cfg_adv_data_raw.raw_adv = GKI_getbuf(src->cfg_adv_data_raw.raw_adv_len);
+ dst->cfg_adv_data_raw.raw_adv = osi_malloc(src->cfg_adv_data_raw.raw_adv_len);
if (dst->cfg_adv_data_raw.raw_adv) {
memcpy(dst->cfg_adv_data_raw.raw_adv, src->cfg_adv_data_raw.raw_adv, src->cfg_adv_data_raw.raw_adv_len);
}
btc_ble_gap_args_t *dst = (btc_ble_gap_args_t *) p_dest;
if (src && src->cfg_scan_rsp_data_raw.raw_scan_rsp && src->cfg_scan_rsp_data_raw.raw_scan_rsp_len > 0) {
- dst->cfg_scan_rsp_data_raw.raw_scan_rsp = GKI_getbuf(src->cfg_scan_rsp_data_raw.raw_scan_rsp_len);
+ dst->cfg_scan_rsp_data_raw.raw_scan_rsp = osi_malloc(src->cfg_scan_rsp_data_raw.raw_scan_rsp_len);
if (dst->cfg_scan_rsp_data_raw.raw_scan_rsp) {
memcpy(dst->cfg_scan_rsp_data_raw.raw_scan_rsp, src->cfg_scan_rsp_data_raw.raw_scan_rsp, src->cfg_scan_rsp_data_raw.raw_scan_rsp_len);
}
uint8_t length = 0;
if (src->set_security_param.value) {
length = dst->set_security_param.len;
- dst->set_security_param.value = GKI_getbuf(length);
+ dst->set_security_param.value = osi_malloc(length);
if (dst->set_security_param.value != NULL) {
memcpy(dst->set_security_param.value, src->set_security_param.value, length);
} else {
uint16_t length = 0;
if (src->get_bond_dev_cmpl.bond_dev) {
length = (src->get_bond_dev_cmpl.dev_num)*sizeof(esp_ble_bond_dev_t);
- dst->get_bond_dev_cmpl.bond_dev = GKI_getbuf(length);
+ dst->get_bond_dev_cmpl.bond_dev = (esp_ble_bond_dev_t *)osi_malloc(length);
if (dst->get_bond_dev_cmpl.bond_dev != NULL) {
memcpy(dst->get_bond_dev_cmpl.bond_dev, src->get_bond_dev_cmpl.bond_dev, length);
} else {
case BTC_GAP_BLE_ACT_CFG_ADV_DATA: {
esp_ble_adv_data_t *adv = &((btc_ble_gap_args_t *)msg->arg)->cfg_adv_data.adv_data;
if (adv->p_service_data) {
- GKI_freebuf(adv->p_service_data);
+ osi_free(adv->p_service_data);
}
if (adv->p_service_uuid) {
- GKI_freebuf(adv->p_service_uuid);
+ osi_free(adv->p_service_uuid);
}
if (adv->p_manufacturer_data) {
- GKI_freebuf(adv->p_manufacturer_data);
+ osi_free(adv->p_manufacturer_data);
}
break;
}
case BTC_GAP_BLE_ACT_CFG_ADV_DATA_RAW: {
uint8_t *raw_adv = ((btc_ble_gap_args_t *)msg->arg)->cfg_adv_data_raw.raw_adv;
if (raw_adv) {
- GKI_freebuf(raw_adv);
+ osi_free(raw_adv);
}
break;
}
case BTC_GAP_BLE_ACT_CFG_SCAN_RSP_DATA_RAW: {
uint8_t *raw_scan_rsp = ((btc_ble_gap_args_t *)msg->arg)->cfg_scan_rsp_data_raw.raw_scan_rsp;
if (raw_scan_rsp) {
- GKI_freebuf(raw_scan_rsp);
+ osi_free(raw_scan_rsp);
}
break;
}
case ESP_GAP_BLE_GET_BOND_DEV_COMPLETE_EVT: {
esp_ble_bond_dev_t *bond_dev = ((esp_ble_gap_cb_param_t *)msg->arg)->get_bond_dev_cmpl.bond_dev;
if (bond_dev) {
- GKI_freebuf((void *)bond_dev);
+ osi_free(bond_dev);
}
break;
}
#include "btc_manage.h"
#include "bta_gatt_api.h"
#include "bt_trace.h"
+#include "allocator.h"
#include "esp_gattc_api.h"
#if (GATTC_INCLUDED == TRUE)
switch (msg->act) {
case BTC_GATTC_ACT_WRITE_CHAR: {
- dst->write_char.value = (uint8_t *)GKI_getbuf(src->write_char.value_len);
+ dst->write_char.value = (uint8_t *)osi_malloc(src->write_char.value_len);
if (dst->write_char.value) {
memcpy(dst->write_char.value, src->write_char.value, src->write_char.value_len);
} else {
break;
}
case BTC_GATTC_ACT_WRITE_CHAR_DESCR: {
- dst->write_descr.value = (uint8_t *)GKI_getbuf(src->write_descr.value_len);
+ dst->write_descr.value = (uint8_t *)osi_malloc(src->write_descr.value_len);
if (dst->write_descr.value) {
memcpy(dst->write_descr.value, src->write_descr.value, src->write_descr.value_len);
} else {
break;
}
case BTC_GATTC_ACT_PREPARE_WRITE: {
- dst->prep_write.value = (uint8_t *)GKI_getbuf(src->prep_write.value_len);
+ dst->prep_write.value = (uint8_t *)osi_malloc(src->prep_write.value_len);
if (dst->prep_write.value) {
memcpy(dst->prep_write.value, src->prep_write.value, src->prep_write.value_len);
} else {
break;
}
case BTC_GATTC_ACT_PREPARE_WRITE_CHAR_DESCR: {
- dst->prep_write_descr.value = (uint8_t *)GKI_getbuf(src->prep_write_descr.value_len);
+ dst->prep_write_descr.value = (uint8_t *)osi_malloc(src->prep_write_descr.value_len);
if (dst->prep_write_descr.value) {
memcpy(dst->prep_write_descr.value, src->prep_write_descr.value, src->prep_write_descr.value_len);
} else {
switch (msg->act) {
case BTC_GATTC_ACT_WRITE_CHAR: {
if (arg->write_char.value) {
- GKI_freebuf(arg->write_char.value);
+ osi_free(arg->write_char.value);
}
break;
}
case BTC_GATTC_ACT_WRITE_CHAR_DESCR: {
if (arg->write_descr.value) {
- GKI_freebuf(arg->write_descr.value);
+ osi_free(arg->write_descr.value);
}
break;
}
case BTC_GATTC_ACT_PREPARE_WRITE: {
if (arg->prep_write.value) {
- GKI_freebuf(arg->prep_write.value);
+ osi_free(arg->prep_write.value);
}
break;
}
case BTC_GATTC_ACT_PREPARE_WRITE_CHAR_DESCR: {
if (arg->prep_write_descr.value) {
- GKI_freebuf(arg->prep_write_descr.value);
+ osi_free(arg->prep_write_descr.value);
}
break;
}
btc_gattc_free_req_data(msg);
}
-#endif ///GATTC_INCLUDED == TRUE
\ No newline at end of file
+#endif ///GATTC_INCLUDED == TRUE
#include "btc_gatts.h"
#include "btc_gatt_util.h"
#include "future.h"
+#include "allocator.h"
#include "btc_main.h"
#include "esp_gatts_api.h"
switch (msg->act) {
case BTC_GATTS_ACT_SEND_INDICATE: {
- dst->send_ind.value = (uint8_t *)GKI_getbuf(src->send_ind.value_len);
+ dst->send_ind.value = (uint8_t *)osi_malloc(src->send_ind.value_len);
if (dst->send_ind.value) {
memcpy(dst->send_ind.value, src->send_ind.value, src->send_ind.value_len);
} else {
}
case BTC_GATTS_ACT_SEND_RESPONSE: {
if (src->send_rsp.rsp) {
- dst->send_rsp.rsp = (esp_gatt_rsp_t *)GKI_getbuf(sizeof(esp_gatt_rsp_t));
+ dst->send_rsp.rsp = (esp_gatt_rsp_t *)osi_malloc(sizeof(esp_gatt_rsp_t));
if (dst->send_rsp.rsp) {
memcpy(dst->send_rsp.rsp, src->send_rsp.rsp, sizeof(esp_gatt_rsp_t));
} else {
}
case BTC_GATTS_ACT_ADD_CHAR:{
if (src->add_char.char_val.attr_value != NULL){
- dst->add_char.char_val.attr_value = (uint8_t *)GKI_getbuf(src->add_char.char_val.attr_len);
+ dst->add_char.char_val.attr_value = (uint8_t *)osi_malloc(src->add_char.char_val.attr_len);
if(dst->add_char.char_val.attr_value != NULL){
memcpy(dst->add_char.char_val.attr_value, src->add_char.char_val.attr_value,
src->add_char.char_val.attr_len);
}
case BTC_GATTS_ACT_ADD_CHAR_DESCR:{
if(src->add_descr.descr_val.attr_value != NULL){
- dst->add_descr.descr_val.attr_value = (uint8_t *)GKI_getbuf(src->add_descr.descr_val.attr_len);
+ dst->add_descr.descr_val.attr_value = (uint8_t *)osi_malloc(src->add_descr.descr_val.attr_len);
if(dst->add_descr.descr_val.attr_value != NULL){
memcpy(dst->add_descr.descr_val.attr_value, src->add_descr.descr_val.attr_value,
src->add_descr.descr_val.attr_len);
case BTC_GATTS_ACT_CREATE_ATTR_TAB:{
uint8_t num_attr = src->create_attr_tab.max_nb_attr;
if(src->create_attr_tab.gatts_attr_db != NULL){
- dst->create_attr_tab.gatts_attr_db = (esp_gatts_attr_db_t *)GKI_getbuf(sizeof(esp_gatts_attr_db_t)*num_attr);
+ dst->create_attr_tab.gatts_attr_db = (esp_gatts_attr_db_t *)osi_malloc(sizeof(esp_gatts_attr_db_t)*num_attr);
if(dst->create_attr_tab.gatts_attr_db != NULL){
memcpy(dst->create_attr_tab.gatts_attr_db, src->create_attr_tab.gatts_attr_db,
sizeof(esp_gatts_attr_db_t)*num_attr);
case BTC_GATTS_ACT_SET_ATTR_VALUE:{
uint16_t len = src->set_attr_val.length;
if(src->set_attr_val.value){
- dst->set_attr_val.value = (uint8_t *)GKI_getbuf(len);
+ dst->set_attr_val.value = (uint8_t *)osi_malloc(len);
if(dst->set_attr_val.value != NULL){
memcpy(dst->set_attr_val.value, src->set_attr_val.value, len);
}else{
switch (msg->act) {
case BTC_GATTS_ACT_SEND_INDICATE: {
if (arg->send_ind.value) {
- GKI_freebuf(arg->send_ind.value);
+ osi_free(arg->send_ind.value);
}
break;
}
case BTC_GATTS_ACT_SEND_RESPONSE: {
if (arg->send_rsp.rsp) {
- GKI_freebuf(arg->send_rsp.rsp);
+ osi_free(arg->send_rsp.rsp);
}
break;
}
case BTC_GATTS_ACT_ADD_CHAR:{
if (arg->add_char.char_val.attr_value != NULL) {
- GKI_freebuf(arg->add_char.char_val.attr_value);
+ osi_free(arg->add_char.char_val.attr_value);
}
break;
}
case BTC_GATTS_ACT_ADD_CHAR_DESCR:{
if (arg->add_descr.descr_val.attr_value != NULL){
- GKI_freebuf(arg->add_descr.descr_val.attr_value);
+ osi_free(arg->add_descr.descr_val.attr_value);
}
break;
}
case BTC_GATTS_ACT_CREATE_ATTR_TAB:{
if (arg->create_attr_tab.gatts_attr_db != NULL){
- GKI_freebuf(arg->create_attr_tab.gatts_attr_db);
+ osi_free(arg->create_attr_tab.gatts_attr_db);
}
break;
}
case BTC_GATTS_ACT_SET_ATTR_VALUE:{
if (arg->set_attr_val.value != NULL){
- GKI_freebuf(arg->set_attr_val.value);
+ osi_free(arg->set_attr_val.value);
}
}
break;
case BTA_GATTS_WRITE_EVT:
case BTA_GATTS_EXEC_WRITE_EVT:
case BTA_GATTS_MTU_EVT:
- p_dest_data->req_data.p_data = GKI_getbuf(sizeof(tBTA_GATTS_REQ_DATA));
+ p_dest_data->req_data.p_data = osi_malloc(sizeof(tBTA_GATTS_REQ_DATA));
if (p_dest_data->req_data.p_data != NULL) {
memcpy(p_dest_data->req_data.p_data, p_src_data->req_data.p_data,
sizeof(tBTA_GATTS_REQ_DATA));
case BTA_GATTS_EXEC_WRITE_EVT:
case BTA_GATTS_MTU_EVT:
if (p_data && p_data->req_data.p_data) {
- GKI_freebuf(p_data->req_data.p_data);
+ osi_free(p_data->req_data.p_data);
}
break;
default:
btc_gatts_cb_param_copy_free(msg, p_data);
}
-#endif ///GATTS_INCLUDED
\ No newline at end of file
+#endif ///GATTS_INCLUDED
#include <stdbool.h>
#include "bta_api.h"
-#include "gki.h"
#include "btc_av_api.h"
#if (BTA_AV_INCLUDED == TRUE)
**
** Function btc_media_aa_readbuf
**
- ** Description Read an audio GKI buffer from the BTC media TX queue
+ ** Description Read an audio buffer from the BTC media TX queue
**
- ** Returns pointer on a GKI aa buffer ready to send
+ ** Returns pointer on a aa buffer ready to send
**
*******************************************************************************/
extern BT_HDR *btc_media_aa_readbuf(void);
**
** Function btc_media_aa_writebuf
**
- ** Description Enqueue a Advance Audio media GKI buffer to be processed by btc media task.
+ ** Description Enqueue a Advance Audio media buffer to be processed by btc media task.
**
** Returns TRUE is success
**
**
** Function btc_media_av_writebuf
**
- ** Description Enqueue a video media GKI buffer to be processed by btc media task.
+ ** Description Enqueue a video media buffer to be processed by btc media task.
**
** Returns TRUE is success
**
if (btui_cfg.sco_use_mic) {
btui_sco_codec_inqdata (p_buf);
} else {
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
#include <unistd.h>
#endif /* BT_SUPPORT_NVM */
-#include "gki.h"
#include "bta_gattc_co.h"
#include "bta_gattc_ci.h"
// #include "btif_util.h"
#include <stdlib.h>
#include <string.h>
-#include "gki.h"
#include "bta_gatts_co.h"
// #include "btif_util.h"
+++ /dev/null
-/******************************************************************************
- *
- * Copyright (C) 1999-2012 Broadcom Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- ******************************************************************************/
-
-#include "bt_trace.h"
-#include "allocator.h"
-#include "gki_int.h"
-
-/*******************************************************************************
-**
-** Function gki_init_free_queue
-**
-** Description Internal function called at startup to initialize a free
-** queue. It is called once for each free queue.
-**
-** Returns void
-**
-*******************************************************************************/
-static void gki_init_free_queue (UINT8 id, UINT16 size, UINT16 total, void *p_mem)
-{
- UINT16 i;
- UINT16 act_size;
- BUFFER_HDR_T *hdr;
- BUFFER_HDR_T *hdr1 = NULL;
- UINT32 *magic;
- INT32 tempsize = size;
- tGKI_COM_CB *p_cb = &gki_cb.com;
-
- /* Ensure an even number of longwords */
- tempsize = (INT32)ALIGN_POOL(size);
- act_size = (UINT16)(tempsize + BUFFER_PADDING_SIZE);
-
- /* Remember pool start and end addresses */
- if (p_mem) {
- p_cb->pool_start[id] = (UINT8 *)p_mem;
- p_cb->pool_end[id] = (UINT8 *)p_mem + (act_size * total);
- }
-
- p_cb->pool_size[id] = act_size;
-
- p_cb->freeq[id].size = (UINT16) tempsize;
- p_cb->freeq[id].total = total;
- p_cb->freeq[id].cur_cnt = 0;
- p_cb->freeq[id].max_cnt = 0;
-
- /* Initialize index table */
- if (p_mem) {
- hdr = (BUFFER_HDR_T *)p_mem;
- p_cb->freeq[id]._p_first = hdr;
- for (i = 0; i < total; i++) {
- hdr->q_id = id;
- hdr->status = BUF_STATUS_FREE;
- magic = (UINT32 *)((UINT8 *)hdr + BUFFER_HDR_SIZE + tempsize);
- *magic = MAGIC_NO;
- hdr1 = hdr;
- hdr = (BUFFER_HDR_T *)((UINT8 *)hdr + act_size);
- hdr1->p_next = hdr;
- }
- hdr1->p_next = NULL;
- p_cb->freeq[id]._p_last = hdr1;
- }
-}
-
-void gki_buffer_cleanup(void)
-{
- UINT8 i;
- tGKI_COM_CB *p_cb = &gki_cb.com;
-
- for (i = 0; i < GKI_NUM_FIXED_BUF_POOLS; i++) {
- if ( 0 < p_cb->freeq[i].max_cnt ) {
- osi_free(p_cb->pool_start[i]);
-
- p_cb->freeq[i].cur_cnt = 0;
- p_cb->freeq[i].max_cnt = 0;
- p_cb->freeq[i]._p_first = NULL;
- p_cb->freeq[i]._p_last = NULL;
-
- p_cb->pool_start[i] = NULL;
- p_cb->pool_end[i] = NULL;
- p_cb->pool_size[i] = 0;
- }
- }
-}
-
-/*******************************************************************************
-**
-** Function gki_buffer_init
-**
-** Description Called once internally by GKI at startup to initialize all
-** buffers and free buffer pools.
-**
-** Returns void
-**
-*******************************************************************************/
-void gki_buffer_init(void)
-{
- static const struct {
- uint16_t size;
- uint16_t count;
- } buffer_info[GKI_NUM_FIXED_BUF_POOLS] = {
- { GKI_BUF0_SIZE, GKI_BUF0_MAX },
- { GKI_BUF1_SIZE, GKI_BUF1_MAX },
- { GKI_BUF2_SIZE, GKI_BUF2_MAX },
- { GKI_BUF3_SIZE, GKI_BUF3_MAX },
- { GKI_BUF4_SIZE, GKI_BUF4_MAX },
- { GKI_BUF5_SIZE, GKI_BUF5_MAX },
- { GKI_BUF6_SIZE, GKI_BUF6_MAX },
- { GKI_BUF7_SIZE, GKI_BUF7_MAX },
- { GKI_BUF8_SIZE, GKI_BUF8_MAX },
- { GKI_BUF9_SIZE, GKI_BUF9_MAX },
- };
-
- tGKI_COM_CB *p_cb = &gki_cb.com;
-
- for (int i = 0; i < GKI_NUM_TOTAL_BUF_POOLS; i++) {
- p_cb->pool_start[i] = NULL;
- p_cb->pool_end[i] = NULL;
- p_cb->pool_size[i] = 0;
-
- p_cb->freeq[i]._p_first = 0;
- p_cb->freeq[i]._p_last = 0;
- p_cb->freeq[i].size = 0;
- p_cb->freeq[i].total = 0;
- p_cb->freeq[i].cur_cnt = 0;
- p_cb->freeq[i].max_cnt = 0;
- }
-
- /* Use default from target.h */
- p_cb->pool_access_mask = GKI_DEF_BUFPOOL_PERM_MASK;
-
- for (int i = 0; i < GKI_NUM_FIXED_BUF_POOLS; ++i) {
- gki_init_free_queue(i, buffer_info[i].size, buffer_info[i].count, NULL);
- }
-}
-
-/*******************************************************************************
-**
-** Function GKI_init_q
-**
-** Description Called by an application to initialize a buffer queue.
-**
-** Returns void
-**
-*******************************************************************************/
-void GKI_init_q (BUFFER_Q *p_q)
-{
- p_q->_p_first = p_q->_p_last = NULL;
- p_q->_count = 0;
-}
-
-/*******************************************************************************
-**
-** Function GKI_getbuf_func
-**
-** Description Called by an application to get a free buffer which
-** is of size greater or equal to the requested size.
-**
-** Note: This routine only takes buffers from public pools.
-** It will not use any buffers from pools
-** marked GKI_RESTRICTED_POOL.
-**
-** Parameters size - (input) number of bytes needed.
-**
-** Returns A pointer to the buffer, or NULL if none available
-**
-*******************************************************************************/
-void *GKI_getbuf_func(UINT16 size)
-{
- BUFFER_HDR_T *header = osi_malloc(size + BUFFER_HDR_SIZE);
- if (header != NULL) {
- header->status = BUF_STATUS_UNLINKED;
- header->p_next = NULL;
- header->Type = 0;
- header->size = size;
-
- return header + 1;
- } else {
- return NULL;
- }
-}
-
-/*******************************************************************************
-**
-** Function GKI_getpoolbuf_func
-**
-** Description Called by an application to get a free buffer from
-** a specific buffer pool.
-**
-** Note: If there are no more buffers available from the pool,
-** the public buffers are searched for an available buffer.
-**
-** Parameters pool_id - (input) pool ID to get a buffer out of.
-**
-** Returns A pointer to the buffer, or NULL if none available
-**
-*******************************************************************************/
-void *GKI_getpoolbuf_func(UINT8 pool_id)
-{
- return GKI_getbuf_func(gki_cb.com.pool_size[pool_id]);
-}
-
-/*******************************************************************************
-**
-** Function GKI_freebuf
-**
-** Description Called by an application to return a buffer to the free pool.
-**
-** Parameters p_buf - (input) address of the beginning of a buffer.
-**
-** Returns void
-**
-*******************************************************************************/
-void GKI_freebuf (void *p_buf)
-{
- osi_free((BUFFER_HDR_T *)p_buf - 1);
-}
-
-/*******************************************************************************
-**
-** Function GKI_get_buf_size
-**
-** Description Called by an application to get the size of a buffer.
-**
-** Parameters p_buf - (input) address of the beginning of a buffer.
-**
-** Returns the size of the buffer
-**
-*******************************************************************************/
-UINT16 GKI_get_buf_size (void *p_buf)
-{
- BUFFER_HDR_T *header = (BUFFER_HDR_T *)p_buf - 1;
- return header->size;
-}
-
-/*******************************************************************************
-**
-** Function GKI_enqueue
-**
-** Description Enqueue a buffer at the tail of the queue
-**
-** Parameters: p_q - (input) pointer to a queue.
-** p_buf - (input) address of the buffer to enqueue
-**
-** Returns void
-**
-*******************************************************************************/
-void GKI_enqueue (BUFFER_Q *p_q, void *p_buf)
-{
- BUFFER_HDR_T *p_hdr = (BUFFER_HDR_T *) ((UINT8 *) p_buf - BUFFER_HDR_SIZE);
- assert(p_hdr->status == BUF_STATUS_UNLINKED);
-
- GKI_disable();
-
- /* Since the queue is exposed (C vs C++), keep the pointers in exposed format */
- if (p_q->_p_last) {
- BUFFER_HDR_T *_p_last_hdr = (BUFFER_HDR_T *)((UINT8 *)p_q->_p_last - BUFFER_HDR_SIZE);
- _p_last_hdr->p_next = p_hdr;
- } else {
- p_q->_p_first = p_buf;
- }
-
- p_q->_p_last = p_buf;
- p_q->_count++;
-
- p_hdr->p_next = NULL;
- p_hdr->status = BUF_STATUS_QUEUED;
-
- GKI_enable();
-}
-
-/*******************************************************************************
-**
-** Function GKI_dequeue
-**
-** Description Dequeues a buffer from the head of a queue
-**
-** Parameters: p_q - (input) pointer to a queue.
-**
-** Returns NULL if queue is empty, else buffer
-**
-*******************************************************************************/
-void *GKI_dequeue (BUFFER_Q *p_q)
-{
- BUFFER_HDR_T *p_hdr;
-
- GKI_disable();
-
- if (!p_q || !p_q->_count) {
- GKI_enable();
- return (NULL);
- }
-
- p_hdr = (BUFFER_HDR_T *)((UINT8 *)p_q->_p_first - BUFFER_HDR_SIZE);
-
- /* Keep buffers such that GKI header is invisible
- */
- if (p_hdr->p_next) {
- p_q->_p_first = ((UINT8 *)p_hdr->p_next + BUFFER_HDR_SIZE);
- } else {
- p_q->_p_first = NULL;
- p_q->_p_last = NULL;
- }
-
- p_q->_count--;
-
- p_hdr->p_next = NULL;
- p_hdr->status = BUF_STATUS_UNLINKED;
-
- GKI_enable();
-
- return ((UINT8 *)p_hdr + BUFFER_HDR_SIZE);
-}
-
-/*******************************************************************************
-**
-** Function GKI_remove_from_queue
-**
-** Description Dequeue a buffer from the middle of the queue
-**
-** Parameters: p_q - (input) pointer to a queue.
-** p_buf - (input) address of the buffer to enqueue
-**
-** Returns NULL if queue is empty, else buffer
-**
-*******************************************************************************/
-void *GKI_remove_from_queue (BUFFER_Q *p_q, void *p_buf)
-{
- BUFFER_HDR_T *p_prev;
- BUFFER_HDR_T *p_buf_hdr;
-
- GKI_disable();
-
- if (p_buf == p_q->_p_first) {
- GKI_enable();
- return (GKI_dequeue (p_q));
- }
-
- p_buf_hdr = (BUFFER_HDR_T *)((UINT8 *)p_buf - BUFFER_HDR_SIZE);
- p_prev = (BUFFER_HDR_T *)((UINT8 *)p_q->_p_first - BUFFER_HDR_SIZE);
-
- for ( ; p_prev; p_prev = p_prev->p_next) {
- /* If the previous points to this one, move the pointers around */
- if (p_prev->p_next == p_buf_hdr) {
- p_prev->p_next = p_buf_hdr->p_next;
-
- /* If we are removing the last guy in the queue, update _p_last */
- if (p_buf == p_q->_p_last) {
- p_q->_p_last = p_prev + 1;
- }
-
- /* One less in the queue */
- p_q->_count--;
-
- /* The buffer is now unlinked */
- p_buf_hdr->p_next = NULL;
- p_buf_hdr->status = BUF_STATUS_UNLINKED;
-
- GKI_enable();
- return (p_buf);
- }
- }
-
- GKI_enable();
- return (NULL);
-}
-
-/*******************************************************************************
-**
-** Function GKI_getfirst
-**
-** Description Return a pointer to the first buffer in a queue
-**
-** Parameters: p_q - (input) pointer to a queue.
-**
-** Returns NULL if queue is empty, else buffer address
-**
-*******************************************************************************/
-void *GKI_getfirst (BUFFER_Q *p_q)
-{
- return (p_q->_p_first);
-}
-
-/*******************************************************************************
-**
-** Function GKI_getlast
-**
-** Description Return a pointer to the last buffer in a queue
-**
-** Parameters: p_q - (input) pointer to a queue.
-**
-** Returns NULL if queue is empty, else buffer address
-**
-*******************************************************************************/
-void *GKI_getlast (BUFFER_Q *p_q)
-{
- return (p_q->_p_last);
-}
-
-/*******************************************************************************
-**
-** Function GKI_getnext
-**
-** Description Return a pointer to the next buffer in a queue
-**
-** Parameters: p_buf - (input) pointer to the buffer to find the next one from.
-**
-** Returns NULL if no more buffers in the queue, else next buffer address
-**
-*******************************************************************************/
-void *GKI_getnext (void *p_buf)
-{
- BUFFER_HDR_T *p_hdr;
-
- p_hdr = (BUFFER_HDR_T *) ((UINT8 *) p_buf - BUFFER_HDR_SIZE);
-
- if (p_hdr->p_next) {
- return ((UINT8 *)p_hdr->p_next + BUFFER_HDR_SIZE);
- } else {
- return (NULL);
- }
-}
-
-/*******************************************************************************
-**
-** Function GKI_queue_is_empty
-**
-** Description Check the status of a queue.
-**
-** Parameters: p_q - (input) pointer to a queue.
-**
-** Returns TRUE if queue is empty, else FALSE
-**
-*******************************************************************************/
-BOOLEAN GKI_queue_is_empty(BUFFER_Q *p_q)
-{
- return ((BOOLEAN) (p_q->_count == 0));
-}
-
-UINT16 GKI_queue_length(BUFFER_Q *p_q)
-{
- return p_q->_count;
-}
-
-/*******************************************************************************
-**
-** Function GKI_poolcount
-**
-** Description Called by an application to get the total number of buffers
-** in the specified buffer pool.
-**
-** Parameters pool_id - (input) pool ID to get the free count of.
-**
-** Returns the total number of buffers in the pool
-**
-*******************************************************************************/
-UINT16 GKI_poolcount (UINT8 pool_id)
-{
- if (pool_id >= GKI_NUM_TOTAL_BUF_POOLS) {
- return (0);
- }
-
- return (gki_cb.com.freeq[pool_id].total);
-}
-
-/*******************************************************************************
-**
-** Function GKI_poolfreecount
-**
-** Description Called by an application to get the number of free buffers
-** in the specified buffer pool.
-**
-** Parameters pool_id - (input) pool ID to get the free count of.
-**
-** Returns the number of free buffers in the pool
-**
-*******************************************************************************/
-UINT16 GKI_poolfreecount (UINT8 pool_id)
-{
- FREE_QUEUE_T *Q;
-
- if (pool_id >= GKI_NUM_TOTAL_BUF_POOLS) {
- return (0);
- }
-
- Q = &gki_cb.com.freeq[pool_id];
-
- return ((UINT16)(Q->total - Q->cur_cnt));
-}
-
-/*******************************************************************************
-**
-** Function GKI_get_pool_bufsize
-**
-** Description Called by an application to get the size of buffers in a pool
-**
-** Parameters Pool ID.
-**
-** Returns the size of buffers in the pool
-**
-*******************************************************************************/
-UINT16 GKI_get_pool_bufsize (UINT8 pool_id)
-{
- if (pool_id < GKI_NUM_TOTAL_BUF_POOLS) {
- return (gki_cb.com.freeq[pool_id].size);
- }
-
- return (0);
-}
-
-/*******************************************************************************
-**
-** Function GKI_poolutilization
-**
-** Description Called by an application to get the buffer utilization
-** in the specified buffer pool.
-**
-** Parameters pool_id - (input) pool ID to get the free count of.
-**
-** Returns % of buffers used from 0 to 100
-**
-*******************************************************************************/
-UINT16 GKI_poolutilization (UINT8 pool_id)
-{
- FREE_QUEUE_T *Q;
-
- if (pool_id >= GKI_NUM_TOTAL_BUF_POOLS) {
- return (100);
- }
-
- Q = &gki_cb.com.freeq[pool_id];
-
- if (Q->total == 0) {
- return (100);
- }
-
- return ((Q->cur_cnt * 100) / Q->total);
-}
+++ /dev/null
-/******************************************************************************
- *
- * Copyright (C) 2009-2012 Broadcom Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- ******************************************************************************/
-#include <string.h>
-#include "bt_trace.h"
-#include "gki_int.h"
-#include "osi.h"
-#include "osi_arch.h"
-#include "alarm.h"
-#include "bt_defs.h"
-
-tGKI_CB gki_cb;
-
-int gki_init(void)
-{
- memset(&gki_cb, 0, sizeof(gki_cb));
-
- //pthread_mutexattr_t attr;
- //pthread_mutexattr_init(&attr);
- //pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
- pthread_mutex_init(&gki_cb.lock, NULL);//&attr);
-
- gki_buffer_init();
- return 0;
-}
-
-void gki_clean_up(void)
-{
- gki_buffer_cleanup();
-
- pthread_mutex_destroy(&gki_cb.lock);
-}
-
-UINT32 GKI_get_os_tick_count(void)
-{
- return osi_alarm_now();
-}
-
-// Sleep the calling thread unconditionally for |timeout_ms| milliseconds.
-void GKI_delay(UINT32 timeout_ms)
-{
- osi_delay_ms(timeout_ms);
- /*TODO:*/
-}
-
-void GKI_enable(void)
-{
- pthread_mutex_unlock(&gki_cb.lock);
-}
-
-void GKI_disable(void)
-{
- pthread_mutex_lock(&gki_cb.lock);
-}
+++ /dev/null
-/******************************************************************************
- *
- * Copyright (C) 1999-2012 Broadcom Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- ******************************************************************************/
-
-#ifndef _GKI_H_
-#define _GKI_H_
-
-#include "bt_target.h"
-#include "bt_types.h"
-#include "gki_common.h"
-#include "gki_int.h"
-#include "allocator.h"
-
-#define ALIGN_POOL(pl_size) ( (((pl_size) + 3) / sizeof(UINT32)) * sizeof(UINT32))
-#define BUFFER_HDR_SIZE (sizeof(BUFFER_HDR_T)) /* Offset past header */
-#define BUFFER_PADDING_SIZE (sizeof(BUFFER_HDR_T) + sizeof(UINT32)) /* Header + Magic Number */
-#define MAGIC_NO 0xDDBADDBA
-
-#define BUF_STATUS_FREE 0
-#define BUF_STATUS_UNLINKED 1
-#define BUF_STATUS_QUEUED 2
-
-/* Timer list entry callback type
-*/
-typedef void (TIMER_CBACK)(void *p_tle);
-#ifndef TIMER_PARAM_TYPE
-#define TIMER_PARAM_TYPE UINT32
-#endif
-/* Define a timer list entry
-*/
-typedef struct _tle {
- struct _tle *p_next;
- struct _tle *p_prev;
- TIMER_CBACK *p_cback;
- INT32 ticks;
- INT32 ticks_initial;
- TIMER_PARAM_TYPE param;
- TIMER_PARAM_TYPE data;
- UINT16 event;
- UINT8 in_use;
-} TIMER_LIST_ENT;
-
-/***********************************************************************
-** This queue is a general purpose buffer queue, for application use.
-*/
-typedef struct {
- void *_p_first;
- void *_p_last;
- UINT16 _count;
-} BUFFER_Q;
-
-#define GKI_PUBLIC_POOL 0 /* General pool accessible to GKI_getbuf() */
-#define GKI_RESTRICTED_POOL 1 /* Inaccessible pool to GKI_getbuf() */
-
-/***********************************************************************
-** Function prototypes
-*/
-
-/* To get and release buffers, change owner and get size
-*/
-void *GKI_getbuf_func(UINT16);
-void *GKI_getpoolbuf_func(UINT8);
-void GKI_freebuf(void *);
-UINT16 GKI_get_buf_size (void *);
-void *GKI_getpoolbuf (UINT8);
-UINT16 GKI_poolcount (UINT8);
-UINT16 GKI_poolfreecount (UINT8);
-UINT16 GKI_poolutilization (UINT8);
-
-#ifdef CONFIG_BLUEDROID_MEM_DEBUG
-
-#define GKI_getbuf(_size) \
-({ \
- BUFFER_HDR_T *header = osi_malloc((_size) + BUFFER_HDR_SIZE); \
- header->status = BUF_STATUS_UNLINKED; \
- header->p_next = NULL; \
- header->Type = 0; \
- header->size = (_size); \
- (void *)(header + 1); \
-})
-
-#define GKI_getpoolbuf(_pool_id) \
-({ \
- (void *)GKI_getbuf(gki_cb.com.pool_size[(_pool_id)]); \
-})
-
-#else
-#define GKI_getbuf GKI_getbuf_func
-#define GKI_getpoolbuf GKI_getpoolbuf_func
-
-#endif /* CONFIG_BLUEDROID_MEM_DEBUG */
-
-/* User buffer queue management
-*/
-void *GKI_dequeue (BUFFER_Q *);
-void GKI_enqueue (BUFFER_Q *, void *);
-void *GKI_getfirst (BUFFER_Q *);
-void *GKI_getlast (BUFFER_Q *);
-void *GKI_getnext (void *);
-void GKI_init_q (BUFFER_Q *);
-UINT16 GKI_queue_length(BUFFER_Q *);
-BOOLEAN GKI_queue_is_empty(BUFFER_Q *);
-void *GKI_remove_from_queue (BUFFER_Q *, void *);
-UINT16 GKI_get_pool_bufsize (UINT8);
-
-/* Timer management
-*/
-void GKI_delay(UINT32);
-
-/* Disable Interrupts, Enable Interrupts
-*/
-void GKI_enable(void);
-void GKI_disable(void);
-
-/* os timer operation */
-UINT32 GKI_get_os_tick_count(void);
-
-#endif /*_GKI_H_*/
+++ /dev/null
-/******************************************************************************
- *
- * Copyright (C) 1999-2012 Broadcom Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- ******************************************************************************/
-
-#ifndef _GKI_COMMON_H_
-#define _GKI_COMMON_H_
-
-#include "gki.h"
-
-typedef struct _buffer_hdr {
- struct _buffer_hdr *p_next; /* next buffer in the queue */
- UINT8 q_id; /* id of the queue */
- UINT8 status; /* FREE, UNLINKED or QUEUED */
- UINT8 Type;
- UINT16 size;
-} BUFFER_HDR_T;
-
-typedef struct _free_queue {
- BUFFER_HDR_T *_p_first; /* first buffer in the queue */
- BUFFER_HDR_T *_p_last; /* last buffer in the queue */
- UINT16 size; /* size of the buffers in the pool */
- UINT16 total; /* toatal number of buffers */
- UINT16 cur_cnt; /* number of buffers currently allocated */
- UINT16 max_cnt; /* maximum number of buffers allocated at any time */
-} FREE_QUEUE_T;
-
-/* Put all GKI variables into one control block
-*/
-typedef struct {
- /* Define the buffer pool management variables
- */
- FREE_QUEUE_T freeq[GKI_NUM_TOTAL_BUF_POOLS];
-
- UINT16 pool_buf_size[GKI_NUM_TOTAL_BUF_POOLS];
-
- /* Define the buffer pool start addresses
- */
- UINT8 *pool_start[GKI_NUM_TOTAL_BUF_POOLS]; /* array of pointers to the start of each buffer pool */
- UINT8 *pool_end[GKI_NUM_TOTAL_BUF_POOLS]; /* array of pointers to the end of each buffer pool */
- UINT16 pool_size[GKI_NUM_TOTAL_BUF_POOLS]; /* actual size of the buffers in a pool */
-
- /* Define the buffer pool access control variables */
- UINT16 pool_access_mask; /* Bits are set if the corresponding buffer pool is a restricted pool */
-} tGKI_COM_CB;
-
-/* Internal GKI function prototypes
-*/
-void gki_buffer_init(void);
-void gki_buffer_cleanup(void);
-
-#endif /*_GKI_COMMON_H_*/
*
******************************************************************************/
#include "buffer_allocator.h"
-#include "gki.h"
+#include "allocator.h"
-// TODO(zachoverflow): move the assertion into GKI_getbuf in the future
+// TODO(zachoverflow): move the assertion into osi_malloc in the future
static void *buffer_alloc(size_t size)
{
- return GKI_getbuf((uint16_t)size);
+ return osi_malloc(size);
+}
+static void buffer_free(void *p)
+{
+ osi_free(p);
}
-
static const allocator_t interface = {
buffer_alloc,
- GKI_freebuf
+ buffer_free
};
const allocator_t *buffer_allocator_get_interface()
#include "list.h"
#include "alarm.h"
#include "thread.h"
+#include "mutex.h"
typedef struct {
uint16_t opcode;
bool timer_is_set;
osi_alarm_t *command_response_timer;
list_t *commands_pending_response;
- pthread_mutex_t commands_pending_response_lock;
+ osi_mutex_t commands_pending_response_lock;
} command_waiting_response_t;
typedef struct {
/*
non_repeating_timer_t *command_response_timer;
list_t *commands_pending_response;
- pthread_mutex_t commands_pending_response_lock;
+ osi_mutex_t commands_pending_response_lock;
*/
} hci_host_env_t;
LOG_ERROR("%s unable to create list for commands pending response.", __func__);
return -1;
}
- pthread_mutex_init(&cmd_wait_q->commands_pending_response_lock, NULL);
+ osi_mutex_new(&cmd_wait_q->commands_pending_response_lock);
cmd_wait_q->command_response_timer = osi_alarm_new("cmd_rsp_to", command_timed_out, cmd_wait_q, COMMAND_PENDING_TIMEOUT);
if (!cmd_wait_q->command_response_timer) {
LOG_ERROR("%s unable to create command response timer.", __func__);
cmd_wait_q = &hci_host_env.cmd_waiting_q;
list_free(cmd_wait_q->commands_pending_response);
- pthread_mutex_destroy(&cmd_wait_q->commands_pending_response_lock);
+ osi_mutex_free(&cmd_wait_q->commands_pending_response_lock);
osi_alarm_free(cmd_wait_q->command_response_timer);
cmd_wait_q->command_response_timer = NULL;
}
hci_host_env.command_credits--;
// Move it to the list of commands awaiting response
- pthread_mutex_lock(&cmd_wait_q->commands_pending_response_lock);
+ osi_mutex_lock(&cmd_wait_q->commands_pending_response_lock, OSI_MUTEX_MAX_TIMEOUT);
list_append(cmd_wait_q->commands_pending_response, wait_entry);
- pthread_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
+ osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
// Send it off
packet_fragmenter->fragment_and_dispatch(wait_entry->command);
cmd_wait_q->timer_is_set = false;
}
- pthread_mutex_lock(&cmd_wait_q->commands_pending_response_lock);
+ osi_mutex_lock(&cmd_wait_q->commands_pending_response_lock, OSI_MUTEX_MAX_TIMEOUT);
wait_entry = (list_is_empty(cmd_wait_q->commands_pending_response) ?
NULL : list_front(cmd_wait_q->commands_pending_response));
- pthread_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
+ osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
if (wait_entry == NULL) {
return;
command_waiting_response_t *cmd_wait_q = (command_waiting_response_t *)context;
waiting_command_t *wait_entry;
- pthread_mutex_lock(&cmd_wait_q->commands_pending_response_lock);
+ osi_mutex_lock(&cmd_wait_q->commands_pending_response_lock, OSI_MUTEX_MAX_TIMEOUT);
wait_entry = (list_is_empty(cmd_wait_q->commands_pending_response) ?
NULL : list_front(cmd_wait_q->commands_pending_response));
- pthread_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
+ osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
if (wait_entry == NULL) {
LOG_ERROR("%s with no commands pending response", __func__);
static waiting_command_t *get_waiting_command(command_opcode_t opcode)
{
command_waiting_response_t *cmd_wait_q = &hci_host_env.cmd_waiting_q;
- pthread_mutex_lock(&cmd_wait_q->commands_pending_response_lock);
+ osi_mutex_lock(&cmd_wait_q->commands_pending_response_lock, OSI_MUTEX_MAX_TIMEOUT);
for (const list_node_t *node = list_begin(cmd_wait_q->commands_pending_response);
node != list_end(cmd_wait_q->commands_pending_response);
list_remove(cmd_wait_q->commands_pending_response, wait_entry);
- pthread_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
+ osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
return wait_entry;
}
- pthread_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
+ osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
return NULL;
}
#include <stdlib.h>
#include <stdint.h>
#include "bt_trace.h"
-
-#include "osi_arch.h"
+#include "bt_target.h"
#define UNUSED(x) (void)(x)
#endif
/*Timer Related Defination*/
+//by Snake.T
+typedef void (TIMER_CBACK)(void *p_tle);
+typedef struct _tle {
+ struct _tle *p_next;
+ struct _tle *p_prev;
+ TIMER_CBACK *p_cback;
+ INT32 ticks;
+ INT32 ticks_initial;
+ TIMER_PARAM_TYPE param;
+ TIMER_PARAM_TYPE data;
+ UINT16 event;
+ UINT8 in_use;
+} TIMER_LIST_ENT;
+
#define alarm_timer_t uint32_t
#define alarm_timer_setfn(timer, cb, data) \
do { \
#define alarm_timer_now() (0)
-/*Thread and locker related defination*/
-#define RTOS_SUPPORT
-#ifdef RTOS_SUPPORT
-#define pthread_mutex_t osi_mutex_t
-#define pthread_mutex_init(mutex, a) osi_mutex_new(mutex)
-#define pthread_mutex_destroy(mutex) osi_mutex_free(mutex)
-#define pthread_mutex_lock osi_mutex_lock
-#define pthread_mutex_unlock osi_mutex_unlock
-#else
-#define pthread_mutex_t uint8_t
-#define pthread_mutex_init(x1, x2)
-#define pthread_mutex_destroy(mutex)
-#define pthread_mutex_lock(mutex)
-#define pthread_mutex_unlock(mutex)
-#endif
-
-
/*Bluetooth Address*/
typedef struct {
uint8_t address[6];
#include "sdkconfig.h"
#include "bt_types.h" /* This must be defined AFTER buildcfg.h */
-/* Include common GKI definitions used by this platform */
-#include "gki_target.h"
#include "dyn_mem.h" /* defines static and/or dynamic memory for components */
#if CONFIG_CLASSIC_BT_ENABLED
#define BTA_DISABLE_DELAY 200 /* in milliseconds */
#endif
-// If the next wakeup time is less than this threshold, we should acquire
-// a wakelock instead of setting a wake alarm so we're not bouncing in
-// and out of suspend frequently.
-// in millisecond
-// TODO(zachoverflow): reinstate in alarm code
-#ifndef GKI_TIMER_INTERVAL_FOR_WAKELOCK
-#define GKI_TIMER_INTERVAL_FOR_WAKELOCK 3000
-#endif
-
#ifndef BTA_SYS_TIMER_PERIOD
#define BTA_SYS_TIMER_PERIOD 100
#endif
/******************************************************************************
**
-** GKI Buffer Pools
+** Buffer Size
**
******************************************************************************/
-/* Receives HCI events from the lower-layer. */
-#ifndef HCI_CMD_POOL_ID
-#define HCI_CMD_POOL_ID GKI_POOL_ID_2
-#endif
-
-#ifndef HCI_CMD_POOL_BUF_SIZE
-#define HCI_CMD_POOL_BUF_SIZE GKI_BUF2_SIZE
-#endif
-
-/* Receives ACL data packets from thelower-layer. */
-#ifndef HCI_ACL_POOL_ID
-#define HCI_ACL_POOL_ID GKI_POOL_ID_3
+#ifndef BT_DEFAULT_BUFFER_SIZE
+#define BT_DEFAULT_BUFFER_SIZE (4096 + 16)
#endif
-/* Maximum number of buffers available for ACL receive data. */
-#ifndef HCI_ACL_BUF_MAX
-#define HCI_ACL_BUF_MAX GKI_BUF3_MAX
+#ifndef BT_SMALL_BUFFER_SIZE
+#define BT_SMALL_BUFFER_SIZE 660
#endif
-/* Receives SCO data packets from the lower-layer. */
-#ifndef HCI_SCO_POOL_ID
-#define HCI_SCO_POOL_ID GKI_POOL_ID_6
+/* Receives HCI events from the lower-layer. */
+#ifndef HCI_CMD_BUF_SIZE
+#define HCI_CMD_BUF_SIZE BT_SMALL_BUFFER_SIZE
#endif
/* Sends SDP data packets. */
-#ifndef SDP_POOL_ID
-#define SDP_POOL_ID 3
+#ifndef SDP_DATA_BUF_SIZE
+#define SDP_DATA_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
/* Sends RFCOMM command packets. */
-#ifndef RFCOMM_CMD_POOL_ID
-#define RFCOMM_CMD_POOL_ID GKI_POOL_ID_2
+#ifndef RFCOMM_CMD_BUF_SIZE
+#define RFCOMM_CMD_BUF_SIZE BT_SMALL_BUFFER_SIZE
#endif
/* Sends RFCOMM data packets. */
-#ifndef RFCOMM_DATA_POOL_ID
-#define RFCOMM_DATA_POOL_ID GKI_POOL_ID_3
+#ifndef RFCOMM_DATA_BUF_SIZE
+#define RFCOMM_DATA_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
-#ifndef RFCOMM_DATA_POOL_BUF_SIZE
-#define RFCOMM_DATA_POOL_BUF_SIZE GKI_BUF3_SIZE
+/* Sends L2CAP packets to the peer and HCI messages to the controller. */
+#ifndef L2CAP_CMD_BUF_SIZE
+#define L2CAP_CMD_BUF_SIZE BT_SMALL_BUFFER_SIZE
#endif
-/* Sends L2CAP packets to the peer and HCI messages to the controller. */
-#ifndef L2CAP_CMD_POOL_ID
-#define L2CAP_CMD_POOL_ID GKI_POOL_ID_2
+#ifndef L2CAP_USER_TX_BUF_SIZE
+#define L2CAP_USER_TX_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
+#endif
+
+#ifndef L2CAP_USER_RX_BUF_SIZE
+#define L2CAP_USER_RX_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
/* Sends L2CAP segmented packets in ERTM mode */
-#ifndef L2CAP_FCR_TX_POOL_ID
-#define L2CAP_FCR_TX_POOL_ID HCI_ACL_POOL_ID
+#ifndef L2CAP_FCR_TX_BUF_SIZE
+#define L2CAP_FCR_TX_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
/* Receives L2CAP segmented packets in ERTM mode */
-#ifndef L2CAP_FCR_RX_POOL_ID
-#define L2CAP_FCR_RX_POOL_ID HCI_ACL_POOL_ID
+#ifndef L2CAP_FCR_RX_BUF_SIZE
+#define L2CAP_FCR_RX_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
+#endif
+
+#ifndef L2CAP_FCR_ERTM_BUF_SIZE
+#define L2CAP_FCR_ERTM_BUF_SIZE (10240 + 24)
#endif
/* Number of ACL buffers to assign to LE
#endif
/* Used by BTM when it sends HCI commands to the controller. */
-#ifndef BTM_CMD_POOL_ID
-#define BTM_CMD_POOL_ID GKI_POOL_ID_2
+#ifndef BTM_CMD_BUF_SIZE
+#define BTM_CMD_BUF_SIZE BT_SMALL_BUFFER_SIZE
#endif
-#ifndef OBX_LRG_DATA_POOL_SIZE
-#define OBX_LRG_DATA_POOL_SIZE GKI_BUF4_SIZE
+#ifndef OBX_LRG_DATA_BUF_SIZE
+#define OBX_LRG_DATA_BUF_SIZE (8080 + 26)
#endif
-#ifndef OBX_LRG_DATA_POOL_ID
-#define OBX_LRG_DATA_POOL_ID GKI_POOL_ID_4
-#endif
/* Used to send data to L2CAP. */
-#ifndef GAP_DATA_POOL_ID
-#define GAP_DATA_POOL_ID GKI_POOL_ID_3
-#endif
-
-#ifndef SPP_DB_SIZE
-#define SPP_DB_SIZE GKI_BUF3_SIZE
+#ifndef GAP_DATA_BUF_SIZE
+#define GAP_DATA_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
/* BNEP data and protocol messages. */
-#ifndef BNEP_POOL_ID
-#define BNEP_POOL_ID GKI_POOL_ID_3
-#endif
-
-/* RPC pool for temporary trace message buffers. */
-#ifndef RPC_SCRATCH_POOL_ID
-#define RPC_SCRATCH_POOL_ID GKI_POOL_ID_2
-#endif
-
-/* AVDTP pool for protocol messages */
-#ifndef AVDT_CMD_POOL_ID
-#define AVDT_CMD_POOL_ID GKI_POOL_ID_2
-#endif
-
-/* AVDTP pool size for media packets in case of fragmentation */
-#ifndef AVDT_DATA_POOL_SIZE
-#define AVDT_DATA_POOL_SIZE GKI_BUF3_SIZE
-#endif
-
-#ifndef PAN_POOL_ID
-#define PAN_POOL_ID GKI_POOL_ID_3
-/* Maximum amount of the shared buffer to allocate for PAN */
-#define PAN_POOL_MAX (GKI_BUF3_MAX / 4)
+#ifndef BNEP_BUF_SIZE
+#define BNEP_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
-/* AVCTP pool for protocol messages */
-#ifndef AVCT_CMD_POOL_ID
-#define AVCT_CMD_POOL_ID GKI_POOL_ID_1
+/* AVDTP buffer size for protocol messages */
+#ifndef AVDT_CMD_BUF_SIZE
+#define AVDT_CMD_BUF_SIZE BT_SMALL_BUFFER_SIZE
#endif
-/* AVRCP pool for protocol messages */
-#ifndef AVRC_CMD_POOL_ID
-#define AVRC_CMD_POOL_ID GKI_POOL_ID_1
+/* AVDTP buffer size for media packets in case of fragmentation */
+#ifndef AVDT_DATA_BUF_SIZE
+#define AVDT_DATA_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
-/* AVRCP pool size for protocol messages */
-#ifndef AVRC_CMD_POOL_SIZE
-#define AVRC_CMD_POOL_SIZE GKI_BUF1_SIZE
+#ifndef PAN_BUF_SIZE
+#define PAN_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
-/* AVRCP Metadata pool for protocol messages */
-#ifndef AVRC_META_CMD_POOL_ID
-#define AVRC_META_CMD_POOL_ID GKI_POOL_ID_2
+/* Maximum number of buffers to allocate for PAN */
+#ifndef PAN_BUF_MAX
+#define PAN_BUF_MAX 100
#endif
-/* AVRCP Metadata pool size for protocol messages */
-#ifndef AVRC_META_CMD_POOL_SIZE
-#define AVRC_META_CMD_POOL_SIZE GKI_BUF2_SIZE
+/* AVCTP buffer size for protocol messages */
+#ifndef AVCT_CMD_BUF_SIZE
+#define AVCT_CMD_BUF_SIZE 288
#endif
-
-/* AVRCP buffer size for browsing channel messages */
-#ifndef AVRC_BROWSE_POOL_SIZE
-#define AVRC_BROWSE_POOL_SIZE GKI_MAX_BUF_SIZE
+/* AVRCP buffer size for protocol messages */
+#ifndef AVRC_CMD_BUF_SIZE
+#define AVRC_CMD_BUF_SIZE 288
#endif
-#ifndef BTA_HL_LRG_DATA_POOL_ID
-#define BTA_HL_LRG_DATA_POOL_ID GKI_POOL_ID_7
+/* AVRCP Metadata buffer size for protocol messages */
+#ifndef AVRC_META_CMD_BUF_SIZE
+#define AVRC_META_CMD_BUF_SIZE BT_SMALL_BUFFER_SIZE
#endif
-/* GATT Server Database pool ID */
-#ifndef GATT_DB_POOL_ID
-#define GATT_DB_POOL_ID GKI_POOL_ID_8
+#ifndef BTA_HL_LRG_DATA_BUF_SIZE
+#define BTA_HL_LRG_DATA_BUF_SIZE (10240 + 24)
#endif
-/* GATT Data sending buffer pool ID, use default ACL pool for fix channel data */
-#ifndef GATT_BUF_POOL_ID
-#define GATT_BUF_POOL_ID HCI_ACL_POOL_ID
+/* GATT Server Database buffer size */
+#ifndef GATT_DB_BUF_SIZE
+#define GATT_DB_BUF_SIZE 128
#endif
-/******************************************************************************
-**
-** Lower Layer Interface
-**
-******************************************************************************/
-
-/* Macro for allocating buffer for HCI commands */
-#ifndef HCI_GET_CMD_BUF
-#if (!defined(HCI_USE_VARIABLE_SIZE_CMD_BUF) || (HCI_USE_VARIABLE_SIZE_CMD_BUF == FALSE))
-/* Allocate fixed-size buffer from HCI_CMD_POOL (default case) */
-#define HCI_GET_CMD_BUF(paramlen) ((BT_HDR *)GKI_getpoolbuf (HCI_CMD_POOL_ID))
-#else
-/* Allocate smallest possible buffer (for platforms with limited RAM) */
-#define HCI_GET_CMD_BUF(paramlen) ((BT_HDR *)GKI_getbuf ((UINT16)(BT_HDR_SIZE + HCIC_PREAMBLE_SIZE + (paramlen))))
+/* GATT Data sending buffer size */
+#ifndef GATT_DATA_BUF_SIZE
+#define GATT_DATA_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
-#endif /* HCI_GET_CMD_BUF */
/******************************************************************************
**
#define PORT_CREDIT_RX_LOW 8
#endif
-/* if application like BTA, Java or script test engine is running on other than BTU thread, */
-/* PORT_SCHEDULE_LOCK shall be defined as GKI_sched_lock() or GKI_disable() */
-#ifndef PORT_SCHEDULE_LOCK
-#define PORT_SCHEDULE_LOCK GKI_disable()
-#endif
-
-/* if application like BTA, Java or script test engine is running on other than BTU thread, */
-/* PORT_SCHEDULE_LOCK shall be defined as GKI_sched_unlock() or GKI_enable() */
-#ifndef PORT_SCHEDULE_UNLOCK
-#define PORT_SCHEDULE_UNLOCK GKI_enable()
-#endif
-
/******************************************************************************
**
** OBEX
**
******************************************************************************/
-#define OBX_14_INCLUDED FALSE
-/* The maximum number of registered servers. */
-#ifndef OBX_NUM_SERVERS
-#define OBX_NUM_SERVERS 12
-#endif
-
-/* The maximum number of active clients. */
-#ifndef OBX_NUM_CLIENTS
-#define OBX_NUM_CLIENTS 8
-#endif
-
-/* This option is application when OBX_14_INCLUDED=TRUE
- Pool ID where to reassemble the SDU.
- This Pool will allow buffers to be used that are larger than
- the L2CAP_MAX_MTU. */
-#ifndef OBX_USER_RX_POOL_ID
-#define OBX_USER_RX_POOL_ID OBX_LRG_DATA_POOL_ID
+/*
+ * Buffer size to reassemble the SDU.
+ * It will allow buffers to be used that are larger than the L2CAP_MAX_MTU.
+ */
+#ifndef OBX_USER_RX_BUF_SIZE
+#define OBX_USER_RX_BUF_SIZE OBX_LRG_DATA_BUF_SIZE
#endif
-/* This option is application when OBX_14_INCLUDED=TRUE
- Pool ID where to hold the SDU.
- This Pool will allow buffers to be used that are larger than
- the L2CAP_MAX_MTU. */
-#ifndef OBX_USER_TX_POOL_ID
-#define OBX_USER_TX_POOL_ID OBX_LRG_DATA_POOL_ID
+/*
+ * Buffer size to hold the SDU.
+ * It will allow buffers to be used that are larger than the L2CAP_MAX_MTU.
+ */
+#ifndef OBX_USER_TX_BUF_SIZE
+#define OBX_USER_TX_BUF_SIZE OBX_LRG_DATA_BUF_SIZE
#endif
-/* This option is application when OBX_14_INCLUDED=TRUE
-GKI Buffer Pool ID used to hold MPS segments during SDU reassembly
-*/
-#ifndef OBX_FCR_RX_POOL_ID
-#define OBX_FCR_RX_POOL_ID HCI_ACL_POOL_ID
+/* Buffer size used to hold MPS segments during SDU reassembly. */
+#ifndef OBX_FCR_RX_BUF_SIZE
+#define OBX_FCR_RX_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
-/* This option is application when OBX_14_INCLUDED=TRUE
-GKI Buffer Pool ID used to hold MPS segments used in (re)transmissions.
-L2CAP_DEFAULT_ERM_POOL_ID is specified to use the HCI ACL data pool.
-Note: This pool needs to have enough buffers to hold two times the window size negotiated
- in the L2CA_SetFCROptions (2 * tx_win_size) to allow for retransmissions.
- The size of each buffer must be able to hold the maximum MPS segment size passed in
- L2CA_SetFCROptions plus BT_HDR (8) + HCI preamble (4) + L2CAP_MIN_OFFSET (11 - as of BT 2.1 + EDR Spec).
-*/
-#ifndef OBX_FCR_TX_POOL_ID
-#define OBX_FCR_TX_POOL_ID HCI_ACL_POOL_ID
+/*
+ * Buffer size used to hold MPS segments used in (re)transmissions.
+ * The size of each buffer must be able to hold the maximum MPS segment size
+ * passed in L2CA_SetFCROptions plus BT_HDR (8) + HCI preamble (4) +
+ * L2CAP_MIN_OFFSET (11 - as of BT 2.1 + EDR Spec).
+ */
+#ifndef OBX_FCR_TX_BUF_SIZE
+#define OBX_FCR_TX_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
/* This option is application when OBX_14_INCLUDED=TRUE
**
******************************************************************************/
-#ifndef HID_DEV_SUBCLASS
-#define HID_DEV_SUBCLASS COD_MINOR_POINTING
-#endif
-
-#ifndef HID_CONTROL_POOL_ID
-#define HID_CONTROL_POOL_ID 2
+#ifndef HID_CONTROL_BUF_SIZE
+#define HID_CONTROL_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
-#ifndef HID_INTERRUPT_POOL_ID
-#define HID_INTERRUPT_POOL_ID 2
+#ifndef HID_INTERRUPT_BUF_SIZE
+#define HID_INTERRUPT_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
/*************************************************************************
#define MCA_NUM_MDLS 4
#endif
-/* Pool ID where to reassemble the SDU. */
-#ifndef MCA_USER_RX_POOL_ID
-#define MCA_USER_RX_POOL_ID HCI_ACL_POOL_ID
+/* Buffer size to reassemble the SDU. */
+#ifndef MCA_USER_RX_BUF_SIZE
+#define MCA_USER_RX_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
-/* Pool ID where to hold the SDU. */
-#ifndef MCA_USER_TX_POOL_ID
-#define MCA_USER_TX_POOL_ID HCI_ACL_POOL_ID
+/* Buffer size to hold the SDU. */
+#ifndef MCA_USER_TX_BUF_SIZE
+#define MCA_USER_TX_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
/*
-GKI Buffer Pool ID used to hold MPS segments during SDU reassembly
-*/
-#ifndef MCA_FCR_RX_POOL_ID
-#define MCA_FCR_RX_POOL_ID HCI_ACL_POOL_ID
+ * Buffer size used to hold MPS segments during SDU reassembly
+ */
+#ifndef MCA_FCR_RX_BUF_SIZE
+#define MCA_FCR_RX_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
/*
-GKI Buffer Pool ID used to hold MPS segments used in (re)transmissions.
-L2CAP_DEFAULT_ERM_POOL_ID is specified to use the HCI ACL data pool.
-Note: This pool needs to have enough buffers to hold two times the window size negotiated
- in the tL2CAP_FCR_OPTIONS (2 * tx_win_size) to allow for retransmissions.
- The size of each buffer must be able to hold the maximum MPS segment size passed in
- tL2CAP_FCR_OPTIONS plus BT_HDR (8) + HCI preamble (4) + L2CAP_MIN_OFFSET (11 - as of BT 2.1 + EDR Spec).
-*/
-#ifndef MCA_FCR_TX_POOL_ID
-#define MCA_FCR_TX_POOL_ID HCI_ACL_POOL_ID
+ * Default buffer size used to hold MPS segments used in (re)transmissions.
+ * The size of each buffer must be able to hold the maximum MPS segment size
+ * passed in tL2CAP_FCR_OPTIONS plus BT_HDR (8) + HCI preamble (4) +
+ * L2CAP_MIN_OFFSET (11 - as of BT 2.1 + EDR Spec).
+ */
+#ifndef MCA_FCR_TX_BUF_SIZE
+#define MCA_FCR_TX_BUF_SIZE BT_DEFAULT_BUFFER_SIZE
#endif
/* MCAP control channel FCR Option:
UINT8 ExplicitBaudRate3;
} tBAUD_REG;
-#include "gki.h"
extern const tBAUD_REG baud_rate_regs[];
+++ /dev/null
-/******************************************************************************
- *
- * Copyright (C) 1999-2012 Broadcom Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- ******************************************************************************/
-
-#ifndef _GKI_TARGET_H_
-#define _GKI_TARGET_H_
-
-/******************************************************************************
-**
-** Buffer configuration
-**
-******************************************************************************/
-
-/* The size of the buffers in pool 0. */
-#ifndef GKI_BUF0_SIZE
-#define GKI_BUF0_SIZE 64
-#endif
-
-/* The number of buffers in buffer pool 0. */
-#ifndef GKI_BUF0_MAX
-#define GKI_BUF0_MAX 96
-#endif
-
-/* The ID of buffer pool 0. */
-#ifndef GKI_POOL_ID_0
-#define GKI_POOL_ID_0 0
-#endif
-
-/* The size of the buffers in pool 1. */
-#ifndef GKI_BUF1_SIZE
-#define GKI_BUF1_SIZE 288
-#endif
-
-/* The number of buffers in buffer pool 1. */
-#ifndef GKI_BUF1_MAX
-#define GKI_BUF1_MAX 52
-#endif
-
-/* The ID of buffer pool 1. */
-#ifndef GKI_POOL_ID_1
-#define GKI_POOL_ID_1 1
-#endif
-
-/* The size of the buffers in pool 2. */
-#ifndef GKI_BUF2_SIZE
-#define GKI_BUF2_SIZE 660
-#endif
-
-/* The number of buffers in buffer pool 2. */
-#ifndef GKI_BUF2_MAX
-#define GKI_BUF2_MAX 90
-#endif
-
-/* The ID of buffer pool 2. */
-#ifndef GKI_POOL_ID_2
-#define GKI_POOL_ID_2 2
-#endif
-
-/* The size of the buffers in pool 3. */
-#ifndef GKI_BUF3_SIZE
-#define GKI_BUF3_SIZE (4096+16)
-#endif
-
-/* The number of buffers in buffer pool 3. */
-#ifndef GKI_BUF3_MAX
-#define GKI_BUF3_MAX 400
-#endif
-
-/* The ID of buffer pool 3. */
-#ifndef GKI_POOL_ID_3
-#define GKI_POOL_ID_3 3
-#endif
-
-/* The size of the largest PUBLIC fixed buffer in system. */
-#ifndef GKI_MAX_BUF_SIZE
-#define GKI_MAX_BUF_SIZE GKI_BUF3_SIZE
-#endif
-
-/* The pool ID of the largest PUBLIC fixed buffer in system. */
-#ifndef GKI_MAX_BUF_SIZE_POOL_ID
-#define GKI_MAX_BUF_SIZE_POOL_ID GKI_POOL_ID_3
-#endif
-
-/* Pool 4 is used for BluetoothSocket L2CAP connections */
-/* The size of the buffers in pool 4. */
-#ifndef GKI_BUF4_SIZE
-#define GKI_BUF4_SIZE (8080+26)
-#endif
-
-/* The number of buffers in buffer pool 4. */
-#ifndef GKI_BUF4_MAX
-#define GKI_BUF4_MAX (OBX_NUM_SERVERS + OBX_NUM_CLIENTS)
-#endif
-
-/* The ID of buffer pool 4. */
-#ifndef GKI_POOL_ID_4
-#define GKI_POOL_ID_4 4
-#endif
-
-/* The number of fixed GKI buffer pools.
-eL2CAP requires Pool ID 5
-If BTM_SCO_HCI_INCLUDED is FALSE, Pool ID 6 is unnecessary, otherwise set to 7
-If BTA_HL_INCLUDED is FALSE then Pool ID 7 is uncessary and set the following to 7, otherwise set to 8
-If BLE_INCLUDED is FALSE then Pool ID 8 is uncessary and set the following to 8, otherwise set to 9
-POOL_ID 9 is a public pool meant for large buffer needs such as SDP_DB
-*/
-#ifndef GKI_NUM_FIXED_BUF_POOLS
-#define GKI_NUM_FIXED_BUF_POOLS 10
-#endif
-
-/* The buffer pool usage mask. */
-#ifndef GKI_DEF_BUFPOOL_PERM_MASK
-/* Setting POOL_ID 9 as a public pool meant for large buffers such as SDP_DB */
-#define GKI_DEF_BUFPOOL_PERM_MASK 0xfdf0
-#endif
-
-/* The following is intended to be a reserved pool for L2CAP
-Flow control and retransmissions and intentionally kept out
-of order */
-
-/* The number of buffers in buffer pool 5. */
-#ifndef GKI_BUF5_MAX
-#define GKI_BUF5_MAX 64
-#endif
-
-/* The ID of buffer pool 5. */
-#ifndef GKI_POOL_ID_5
-#define GKI_POOL_ID_5 5
-#endif
-
-/* The size of the buffers in pool 5
-** Special pool used by L2CAP retransmissions only. This size based on segment
-** that will fit into both DH5 and 2-DH3 packet types after accounting for GKI
-** header. 13 bytes of max headers allows us a 339 payload max. (in btui_app.txt)
-** Note: 748 used for insight scriptwrapper with CAT-2 scripts.
-*/
-#ifndef GKI_BUF5_SIZE
-#define GKI_BUF5_SIZE 748
-#endif
-
-/* The following is intended to be a reserved pool for SCO
-over HCI data and intentionally kept out of order */
-
-/* The ID of buffer pool 6. */
-#ifndef GKI_POOL_ID_6
-#define GKI_POOL_ID_6 6
-#endif
-
-/* The size of the buffers in pool 6,
- BUF_SIZE = max SCO data 255 + sizeof(BT_HDR) = 8 + SCO packet header 3 + padding 2 = 268 */
-#ifndef GKI_BUF6_SIZE
-#define GKI_BUF6_SIZE 268
-#endif
-
-/* The number of buffers in buffer pool 6. */
-#ifndef GKI_BUF6_MAX
-#define GKI_BUF6_MAX 60
-#endif
-
-
-/* The following pool is a dedicated pool for HDP
- If a shared pool is more desirable then
- 1. set BTA_HL_LRG_DATA_POOL_ID to the desired Gki Pool ID
- 2. make sure that the shared pool size is larger than 9472
- 3. adjust GKI_NUM_FIXED_BUF_POOLS accordingly since
- POOL ID 7 is not needed
-*/
-
-/* The ID of buffer pool 7. */
-#ifndef GKI_POOL_ID_7
-#define GKI_POOL_ID_7 7
-#endif
-
-/* The size of the buffers in pool 7 */
-#ifndef GKI_BUF7_SIZE
-#define GKI_BUF7_SIZE (10240 + 24)
-#endif
-
-/* The number of buffers in buffer pool 7. */
-#ifndef GKI_BUF7_MAX
-#define GKI_BUF7_MAX 2
-#endif
-
-/* The following pool is a dedicated pool for GATT
- If a shared pool is more desirable then
- 1. set GATT_DB_POOL_ID to the desired Gki Pool ID
- 2. make sure that the shared pool size fit a common GATT database needs
- 3. adjust GKI_NUM_FIXED_BUF_POOLS accordingly since
- POOL ID 8 is not needed
-*/
-
-/* The ID of buffer pool 8. */
-#ifndef GKI_POOL_ID_8
-#define GKI_POOL_ID_8 8
-#endif
-
-/* The size of the buffers in pool 8 */
-#ifndef GKI_BUF8_SIZE
-#define GKI_BUF8_SIZE 128
-#endif
-
-/* The number of buffers in buffer pool 8. */
-#ifndef GKI_BUF8_MAX
-#define GKI_BUF8_MAX 30
-#endif
-
-/* The following pool is meant for large allocations such as SDP_DB */
-#ifndef GKI_POOL_ID_9
-#define GKI_POOL_ID_9 9
-#endif
-
-#ifndef GKI_BUF9_SIZE
-#define GKI_BUF9_SIZE 8192
-#endif
-
-#ifndef GKI_BUF9_MAX
-#define GKI_BUF9_MAX 5
-#endif
-
-/* The number of fixed and dynamic buffer pools */
-#ifndef GKI_NUM_TOTAL_BUF_POOLS
-#define GKI_NUM_TOTAL_BUF_POOLS 10
-#endif
-
-int gki_init(void);
-void gki_clean_up(void);
-
-//void LogMsg (UINT32 trace_set_mask, const char *fmt_str, ...);
-
-#endif /*_GKI_TARGET_H_*/
******************************************************************************/
int bte_main_boot_entry(bluedroid_init_done_cb_t cb)
{
- if (gki_init()) {
- LOG_ERROR("%s: Init GKI Module Failure.\n", __func__);
- return -1;
- }
-
hci = hci_layer_get_interface();
if (!hci) {
LOG_ERROR("%s could not get hci layer interface.\n", __func__);
bluedroid_init_done_cb = cb;
+ osi_init();
+
//Enbale HCI
bte_main_enable();
BTA_VendorCleanup();
#endif
bte_main_disable();
- gki_clean_up();
+
+ osi_deinit();
}
/******************************************************************************
hci->transmit_downward(event, p_msg);
} else {
//APPL_TRACE_ERROR("Invalid Controller ID. Discarding message.");
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
}
#include "rom/ets_sys.h"
#include "btc_task.h"
#include "btc_alarm.h"
+#include "mutex.h"
+#include "time.h"
#define RTC_TIMER_TICKS_TO_MS(ticks) (((ticks/625)<<1) + (ticks-(ticks/625)*625)/312)
{
assert(alarm_mutex != NULL);
- osi_mutex_lock(&alarm_mutex);
+ osi_mutex_lock(&alarm_mutex, OSI_MUTEX_MAX_TIMEOUT);
if (alarm_state != ALARM_STATE_IDLE) {
LOG_WARN("%s, invalid state %d\n", __func__, alarm_state);
goto end;
{
assert(alarm_mutex != NULL);
- osi_mutex_lock(&alarm_mutex);
+ osi_mutex_lock(&alarm_mutex, OSI_MUTEX_MAX_TIMEOUT);
if (alarm_state != ALARM_STATE_OPEN) {
LOG_WARN("%s, invalid state %d\n", __func__, alarm_state);
goto end;
struct alarm_t *timer_id = NULL;
- osi_mutex_lock(&alarm_mutex);
+ osi_mutex_lock(&alarm_mutex, OSI_MUTEX_MAX_TIMEOUT);
if (alarm_state != ALARM_STATE_OPEN) {
LOG_ERROR("%s, invalid state %d\n", __func__, alarm_state);
timer_id = NULL;
assert(alarm_mutex != NULL);
int ret = 0;
- osi_mutex_lock(&alarm_mutex);
+ osi_mutex_lock(&alarm_mutex, OSI_MUTEX_MAX_TIMEOUT);
if (alarm_state != ALARM_STATE_OPEN) {
LOG_ERROR("%s, invalid state %d\n", __func__, alarm_state);
ret = -3;
assert(alarm_mutex != NULL);
int ret = 0;
- osi_mutex_lock(&alarm_mutex);
+ osi_mutex_lock(&alarm_mutex, OSI_MUTEX_MAX_TIMEOUT);
if (alarm_state != ALARM_STATE_OPEN) {
LOG_ERROR("%s, invalid state %d\n", __func__, alarm_state);
ret = -3;
int osi_alarm_cancel(osi_alarm_t *alarm)
{
int ret = 0;
- osi_mutex_lock(&alarm_mutex);
+ osi_mutex_lock(&alarm_mutex, OSI_MUTEX_MAX_TIMEOUT);
if (alarm_state != ALARM_STATE_OPEN) {
LOG_ERROR("%s, invalid state %d\n", __func__, alarm_state);
ret = -3;
}
return (period_ms_t)diff;
}
+
+uint32_t osi_time_get_os_boottime_ms(void)
+{
+ return RTC_TIMER_TICKS_TO_MS((alarm_current_tick()));
+}
+
+void osi_delay_ms(uint32_t ms)
+{
+ vTaskDelay(ms / portTICK_PERIOD_MS);
+}
+
#ifdef CONFIG_BLUEDROID_MEM_DEBUG
void *p;
- p = calloc(1, size);
+ p = malloc(size);
osi_mem_dbg_record(p, size, __func__, __LINE__);
return p;
#else
- return calloc(1, size);
+ return malloc(size);
#endif
}
#include "fixed_queue.h"
#include "list.h"
#include "osi.h"
-#include "osi_arch.h"
#include "bt_trace.h"
+#include "mutex.h"
+#include "semaphore.h"
typedef struct fixed_queue_t {
list_t *list;
osi_sem_t enqueue_sem;
osi_sem_t dequeue_sem;
- pthread_mutex_t lock;
+ osi_mutex_t lock;
size_t capacity;
fixed_queue_cb dequeue_ready;
- /*
- reactor_object_t *dequeue_object;
- fixed_queue_cb dequeue_ready;
- void *dequeue_context;
- */
} fixed_queue_t;
-//static void internal_dequeue_ready(void *context);
fixed_queue_t *fixed_queue_new(size_t capacity)
{
goto error;
}
- pthread_mutex_init(&ret->lock, NULL);
+ osi_mutex_new(&ret->lock);
ret->capacity = capacity;
ret->list = list_new(NULL);
void fixed_queue_free(fixed_queue_t *queue, fixed_queue_free_cb free_cb)
{
const list_node_t *node;
- if (!queue) {
- return;
- }
-// fixed_queue_unregister_dequeue(queue);
+ if (queue == NULL) {
+ return;
+ }
+
+ fixed_queue_unregister_dequeue(queue);
- if (free_cb)
+ if (free_cb) {
for (node = list_begin(queue->list); node != list_end(queue->list); node = list_next(node)) {
free_cb(list_node(node));
}
+ }
list_free(queue->list);
osi_sem_free(&queue->enqueue_sem);
osi_sem_free(&queue->dequeue_sem);
- pthread_mutex_destroy(&queue->lock);
+ osi_mutex_free(&queue->lock);
osi_free(queue);
}
bool fixed_queue_is_empty(fixed_queue_t *queue)
{
bool is_empty = false;
- assert(queue != NULL);
- pthread_mutex_lock(&queue->lock);
+ if (queue == NULL) {
+ return true;
+ }
+
+ osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
is_empty = list_is_empty(queue->list);
- pthread_mutex_unlock(&queue->lock);
+ osi_mutex_unlock(&queue->lock);
return is_empty;
}
+size_t fixed_queue_length(fixed_queue_t *queue)
+{
+ size_t length;
+
+ if (queue == NULL) {
+ return 0;
+ }
+
+ osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
+ length = list_length(queue->list);
+ osi_mutex_unlock(&queue->lock);
+
+ return length;
+}
size_t fixed_queue_capacity(fixed_queue_t *queue)
{
assert(queue != NULL);
assert(queue != NULL);
assert(data != NULL);
- osi_sem_wait(&queue->enqueue_sem, 0);
+ osi_sem_take(&queue->enqueue_sem, OSI_SEM_MAX_TIMEOUT);
- pthread_mutex_lock(&queue->lock);
+ osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
list_append(queue->list, data);
- pthread_mutex_unlock(&queue->lock);
+ osi_mutex_unlock(&queue->lock);
- osi_sem_signal(&queue->dequeue_sem);
+ osi_sem_give(&queue->dequeue_sem);
}
void *fixed_queue_dequeue(fixed_queue_t *queue)
{
void *ret = NULL;
+
assert(queue != NULL);
- osi_sem_wait(&queue->dequeue_sem, 0);
+ osi_sem_take(&queue->dequeue_sem, OSI_SEM_MAX_TIMEOUT);
- pthread_mutex_lock(&queue->lock);
+ osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
ret = list_front(queue->list);
list_remove(queue->list, ret);
- pthread_mutex_unlock(&queue->lock);
+ osi_mutex_unlock(&queue->lock);
- osi_sem_signal(&queue->enqueue_sem);
+ osi_sem_give(&queue->enqueue_sem);
return ret;
}
-/*
-void *fixed_queue_try_dequeue(fixed_queue_t *queue) {
- void *ret = NULL;
- assert(queue != NULL);
+bool fixed_queue_try_enqueue(fixed_queue_t *queue, void *data)
+{
+ assert(queue != NULL);
+ assert(data != NULL);
- if (!semaphore_try_wait(queue->dequeue_sem))
- return NULL;
+ if (osi_sem_take(&queue->enqueue_sem, 0) != 0) {
+ return false;
+ }
+
+ osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
+
+ list_append(queue->list, data);
+ osi_mutex_unlock(&queue->lock);
+
+ osi_sem_give(&queue->dequeue_sem);
+
+ return true;
+}
+
+void *fixed_queue_try_dequeue(fixed_queue_t *queue)
+{
+ void *ret = NULL;
+
+ if (queue == NULL) {
+ return NULL;
+ }
+
+    if (osi_sem_take(&queue->dequeue_sem, 0) != 0) {
+ return NULL;
+ }
+
+ osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
+ ret = list_front(queue->list);
+ list_remove(queue->list, ret);
+ osi_mutex_unlock(&queue->lock);
+
+ osi_sem_give(&queue->enqueue_sem);
- pthread_mutex_lock(&queue->lock);
- ret = list_front(queue->list);
- list_remove(queue->list, ret);
- pthread_mutex_unlock(&queue->lock);
+ return ret;
+}
+
+void *fixed_queue_try_peek_first(fixed_queue_t *queue)
+{
+ void *ret = NULL;
- semaphore_post(queue->enqueue_sem);
+ if (queue == NULL) {
+ return NULL;
+ }
+
+ osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
+ ret = list_is_empty(queue->list) ? NULL : list_front(queue->list);
+ osi_mutex_unlock(&queue->lock);
- return ret;
+ return ret;
}
-int fixed_queue_get_dequeue_fd(const fixed_queue_t *queue) {
- assert(queue != NULL);
- return semaphore_get_fd(queue->dequeue_sem);
+void *fixed_queue_try_peek_last(fixed_queue_t *queue)
+{
+ void *ret = NULL;
+
+ if (queue == NULL) {
+ return NULL;
+ }
+
+ osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
+ ret = list_is_empty(queue->list) ? NULL : list_back(queue->list);
+ osi_mutex_unlock(&queue->lock);
+
+ return ret;
}
-*/
+
+void *fixed_queue_try_remove_from_queue(fixed_queue_t *queue, void *data)
+{
+ bool removed = false;
+
+ if (queue == NULL) {
+ return NULL;
+ }
+
+ osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
+ if (list_contains(queue->list, data) &&
+        osi_sem_take(&queue->dequeue_sem, 0) == 0) {
+ removed = list_remove(queue->list, data);
+ assert(removed);
+ }
+ osi_mutex_unlock(&queue->lock);
+
+ if (removed) {
+        osi_sem_give(&queue->enqueue_sem);
+ return data;
+ }
+
+ return NULL;
+}
+
+list_t *fixed_queue_get_list(fixed_queue_t *queue)
+{
+ assert(queue != NULL);
+
+ // NOTE: This function is not thread safe, and there is no point for
+ // calling osi_mutex_lock() / osi_mutex_unlock()
+ return queue->list;
+}
+
void fixed_queue_register_dequeue(fixed_queue_t *queue, fixed_queue_cb ready_cb)
{
assert(queue != NULL);
void fixed_queue_process(fixed_queue_t *queue)
{
- if (queue == NULL) {
- return;
- }
+ assert(queue != NULL);
if (queue->dequeue_ready) {
queue->dequeue_ready(queue);
}
}
-/*
-static void internal_dequeue_ready(void *context) {
- assert(context != NULL);
- fixed_queue_t *queue = context;
- queue->dequeue_ready(queue, queue->dequeue_context);
-}
-*/
#include "allocator.h"
#include "future.h"
#include "osi.h"
-#include "osi_arch.h"
void future_free(future_t *future);
future->ready_can_be_called = false;
future->result = value;
- osi_sem_signal(&future->semaphore);
+ osi_sem_give(&future->semaphore);
}
void *future_await(future_t *future)
// If the future is immediate, it will not have a semaphore
if (future->semaphore) {
- osi_sem_wait(&future->semaphore, 0);
+ osi_sem_take(&future->semaphore, OSI_SEM_MAX_TIMEOUT);
}
void *result = future->result;
// t1 and t2 should be no greater than the time of MAX ticks
period_ms_t osi_alarm_time_diff(period_ms_t t1, period_ms_t t2);
+uint32_t osi_time_get_os_boottime_ms(void);
+
#endif /*_ALARM_H_*/
#define _ALLOCATOR_H_
#include <stddef.h>
-//#include <stdlib.h>
+#include <stdlib.h>
#include "sdkconfig.h"
typedef void *(*alloc_fn)(size_t size);
({ \
void *p; \
\
- p = calloc(1, (size)); \
+ p = malloc((size)); \
osi_mem_dbg_record(p, size, __func__, __LINE__); \
(void *)p; \
})
})
#define osi_free(ptr) \
-({ \
- osi_mem_dbg_clean(ptr, __func__, __LINE__); \
- free((ptr)); \
-})
+do { \
+ void *tmp_point = (void *)(ptr); \
+ osi_mem_dbg_clean(tmp_point, __func__, __LINE__); \
+ free(tmp_point); \
+} while (0)
#else
-#define osi_malloc(size) calloc(1, (size))
+#define osi_malloc(size) malloc((size))
#define osi_calloc(size) calloc(1, (size))
#define osi_free(p) free((p))
#define _FIXED_QUEUE_H_
#include <stdbool.h>
+#include "list.h"
struct fixed_queue_t;
+
typedef struct fixed_queue_t fixed_queue_t;
//typedef struct reactor_t reactor_t;
// blocked on it) results in undefined behaviour.
void fixed_queue_free(fixed_queue_t *queue, fixed_queue_free_cb free_cb);
-// Returns a value indicating whether the given |queue| is empty. |queue| may
-// not be NULL.
+// Returns a value indicating whether the given |queue| is empty. If |queue|
+// is NULL, the return value is true.
bool fixed_queue_is_empty(fixed_queue_t *queue);
+// Returns the length of the |queue|. If |queue| is NULL, the return value
+// is 0.
+size_t fixed_queue_length(fixed_queue_t *queue);
+
// Returns the maximum number of elements this queue may hold. |queue| may
// not be NULL.
size_t fixed_queue_capacity(fixed_queue_t *queue);
// the caller. If the queue is empty, this function returns NULL immediately.
// Otherwise, the next element in the queue is returned. |queue| may not be
// NULL.
-//void *fixed_queue_try_dequeue(fixed_queue_t *queue);
+void *fixed_queue_try_dequeue(fixed_queue_t *queue);
// Returns the first element from |queue|, if present, without dequeuing it.
-// This function will never block the caller. Returns NULL if there are no elements
-// in the queue. |queue| may not be NULL.
-//void *fixed_queue_try_peek(fixed_queue_t *queue);
+// This function will never block the caller. Returns NULL if there are no
+// elements in the queue or |queue| is NULL.
+void *fixed_queue_try_peek_first(fixed_queue_t *queue);
+
+// Returns the last element from |queue|, if present, without dequeuing it.
+// This function will never block the caller. Returns NULL if there are no
+// elements in the queue or |queue| is NULL.
+void *fixed_queue_try_peek_last(fixed_queue_t *queue);
+
+// Tries to remove a |data| element from the middle of the |queue|. This
+// function will never block the caller. If the queue is empty or NULL, this
+// function returns NULL immediately. |data| may not be NULL. If the |data|
+// element is found in the queue, a pointer to the removed data is returned,
+// otherwise NULL.
+void *fixed_queue_try_remove_from_queue(fixed_queue_t *queue, void *data);
+
+// Returns the iterateable list with all entries in the |queue|. This function
+// will never block the caller. |queue| may not be NULL.
+//
+// NOTE: The return result of this function is not thread safe: the list could
+// be modified by another thread, and the result would be unpredictable.
+// TODO: The usage of this function should be refactored, and the function
+// itself should be removed.
+list_t *fixed_queue_get_list(fixed_queue_t *queue);
// This function returns a valid file descriptor. Callers may perform one
// operation on the fd: select(2). If |select| indicates that the file
void fixed_queue_process(fixed_queue_t *queue);
+list_t *fixed_queue_get_list(fixed_queue_t *queue);
+
#endif
#ifndef __FUTURE_H__
#define __FUTURE_H__
-// #pragma once
-#include "osi_arch.h"
+#include "semaphore.h"
struct future {
bool ready_can_be_called;
typedef struct list_t list_t;
typedef void (*list_free_cb)(void *data);
-typedef bool (*list_iter_cb)(void *data);
+typedef bool (*list_iter_cb)(void *data, void *context);
// Returns a new, empty list. Returns NULL if not enough memory could be allocated
// for the list structure. The returned list must be freed with |list_free|. The
list_node_t *list_free_node(list_t *list, list_node_t *node);
-//list_node_t *list_free_node(list_t *list, list_node_t *node);
// Frees the list. This function accepts NULL as an argument, in which case it
// behaves like a no-op.
void list_free(list_t *list);
// Returns the last element in the list without removing it. |list| may not
// be NULL or empty.
-//void *list_back(const list_t *list);
+void *list_back(const list_t *list);
+list_node_t *list_back_node(const list_t *list);
// Inserts |data| after |prev_node| in |list|. |data|, |list|, and |prev_node|
// may not be NULL. This function does not make a copy of |data| so the pointer
// list inside the callback. If an element is added before the node being visited,
// there will be no callback for the newly-inserted node. Neither |list| nor
// |callback| may be NULL.
-void list_foreach(const list_t *list, list_iter_cb callback);
+list_node_t *list_foreach(const list_t *list, list_iter_cb callback, void *context);
// Returns an iterator to the first element in |list|. |list| may not be NULL.
// The returned iterator is valid as long as it does not equal the value returned
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ ******************************************************************************/
+
+#ifndef __MUTEX_H__
+#define __MUTEX_H__
+
+#include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
+#include "freertos/queue.h"
+#include "freertos/semphr.h"
+
+
+#define OSI_MUTEX_MAX_TIMEOUT 0xffffffffUL
+
+#define osi_mutex_valid( x ) ( ( ( *x ) == NULL) ? pdFALSE : pdTRUE )
+#define osi_mutex_set_invalid( x ) ( ( *x ) = NULL )
+
+typedef xSemaphoreHandle osi_mutex_t;
+
+int osi_mutex_new(osi_mutex_t *mutex);
+
+int osi_mutex_lock(osi_mutex_t *mutex, uint32_t timeout);
+
+void osi_mutex_unlock(osi_mutex_t *mutex);
+
+void osi_mutex_free(osi_mutex_t *mutex);
+
+/* Just for a global mutex */
+int osi_mutex_global_init(void);
+
+void osi_mutex_global_deinit(void);
+
+void osi_mutex_global_lock(void);
+
+void osi_mutex_global_unlock(void);
+
+#endif /* __MUTEX_H__ */
+
#include <stdint.h>
#define UNUSED_ATTR __attribute__((unused))
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#define INVALID_FD (-1)
#define CONCAT(a, b) a##b
+#define COMPILE_ASSERT(x)
-// Use during compile time to check conditional values
-// NOTE: The the failures will present as a generic error
-// "error: initialization makes pointer from integer without a cast"
-// but the file and line number will present the condition that
-// failed.
-#define DUMMY_COUNTER(c) CONCAT(__osi_dummy_, c)
-#define DUMMY_PTR DUMMY_COUNTER(__COUNTER__)
-
-#define COMPILE_ASSERT(x) char * DUMMY_PTR = !(x)
-
-typedef uint32_t timeout_t;
+int osi_init(void);
+void osi_deinit(void);
#endif /*_OSI_H_*/
+++ /dev/null
-#ifndef __os_ARCH_H__
-#define __os_ARCH_H__
-
-#include "freertos/FreeRTOS.h"
-#include "freertos/task.h"
-#include "freertos/queue.h"
-#include "freertos/semphr.h"
-
-#define OSI_ARCH_TIMEOUT 0xffffffffUL
-
-typedef xSemaphoreHandle osi_sem_t;
-typedef xSemaphoreHandle osi_mutex_t;
-
-#define osi_mutex_valid( x ) ( ( ( *x ) == NULL) ? pdFALSE : pdTRUE )
-#define osi_mutex_set_invalid( x ) ( ( *x ) = NULL )
-#define osi_sem_valid( x ) ( ( ( *x ) == NULL) ? pdFALSE : pdTRUE )
-#define osi_sem_set_invalid( x ) ( ( *x ) = NULL )
-
-int osi_mutex_new(osi_mutex_t *pxMutex);
-
-void osi_mutex_lock(osi_mutex_t *pxMutex);
-
-int osi_mutex_trylock(osi_mutex_t *pxMutex);
-
-void osi_mutex_unlock(osi_mutex_t *pxMutex);
-
-void osi_mutex_free(osi_mutex_t *pxMutex);
-
-int osi_sem_new(osi_sem_t *sem, uint32_t max_count, uint32_t init_count);
-
-void osi_sem_signal(osi_sem_t *sem);
-
-uint32_t osi_sem_wait(osi_sem_t *sem, uint32_t timeout);
-
-void osi_sem_free(osi_sem_t *sem);
-
-void osi_arch_init(void);
-
-uint32_t osi_now(void);
-
-void osi_delay_ms(uint32_t ms);
-
-
-#endif /* __os_ARCH_H__ */
-
/******************************************************************************
*
- * Copyright (C) 1999-2012 Broadcom Corporation
+ * Copyright (C) 2015 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
******************************************************************************/
-#ifndef _GKI_INT_H_
-#define _GKI_INT_H_
+#ifndef __SEMAPHORE_H__
+#define __SEMAPHORE_H__
-//#include <pthread.h>
-#include "bt_defs.h"
+#include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
+#include "freertos/queue.h"
+#include "freertos/semphr.h"
-#include "gki_common.h"
+#define OSI_SEM_MAX_TIMEOUT 0xffffffffUL
-typedef struct {
- pthread_mutex_t lock;
- tGKI_COM_CB com;
-} tGKI_CB;
+typedef xSemaphoreHandle osi_sem_t;
-extern tGKI_CB gki_cb;
+#define osi_sem_valid( x ) ( ( ( *x ) == NULL) ? pdFALSE : pdTRUE )
+#define osi_sem_set_invalid( x ) ( ( *x ) = NULL )
-#endif /*_GKI_INT_H_*/
+int osi_sem_new(osi_sem_t *sem, uint32_t max_count, uint32_t init_count);
+
+void osi_sem_free(osi_sem_t *sem);
+
+int osi_sem_take(osi_sem_t *sem, uint32_t timeout);
+
+void osi_sem_give(osi_sem_t *sem);
+
+
+#endif /* __SEMAPHORE_H__ */
return (list->length == 0);
}
-/*
-bool list_contains(const list_t *list, const void *data) {
- const list_node_t *node;
+bool list_contains(const list_t *list, const void *data)
+{
assert(list != NULL);
assert(data != NULL);
- for (node = list_begin(list); node != list_end(list); node = list_next(node)) {
+ for (const list_node_t *node = list_begin(list); node != list_end(list); node = list_next(node)) {
if (list_node(node) == data)
return true;
}
return false;
}
-*/
size_t list_length(const list_t *list)
{
return list->head->data;
}
-/*
void *list_back(const list_t *list) {
assert(list != NULL);
assert(!list_is_empty(list));
return list->tail->data;
}
-*/
-bool list_insert_after(list_t *list, list_node_t *prev_node, void *data)
-{
- list_node_t *node;
- assert(list != NULL);
- assert(prev_node != NULL);
- assert(data != NULL);
+list_node_t *list_back_node(const list_t *list) {
+ assert(list != NULL);
+ assert(!list_is_empty(list));
- node = (list_node_t *)list->allocator->alloc(sizeof(list_node_t));
- if (!node) {
- return false;
- }
+ return list->tail;
+}
+
+bool list_insert_after(list_t *list, list_node_t *prev_node, void *data) {
+ assert(list != NULL);
+ assert(prev_node != NULL);
+ assert(data != NULL);
+
+ list_node_t *node = (list_node_t *)list->allocator->alloc(sizeof(list_node_t));
+ if (!node)
+ return false;
node->next = prev_node->next;
node->data = data;
bool list_prepend(list_t *list, void *data)
{
- list_node_t *node;
assert(list != NULL);
assert(data != NULL);
- node = (list_node_t *)list->allocator->alloc(sizeof(list_node_t));
+ list_node_t *node = (list_node_t *)list->allocator->alloc(sizeof(list_node_t));
if (!node) {
return false;
}
bool list_append(list_t *list, void *data)
{
- list_node_t *node;
assert(list != NULL);
assert(data != NULL);
- node = (list_node_t *)list->allocator->alloc(sizeof(list_node_t));
+ list_node_t *node = (list_node_t *)list->allocator->alloc(sizeof(list_node_t));
if (!node) {
return false;
}
list->length = 0;
}
-void list_foreach(const list_t *list, list_iter_cb callback)
+list_node_t *list_foreach(const list_t *list, list_iter_cb callback, void *context)
{
- assert(list != NULL);
- assert(callback != NULL);
+ assert(list != NULL);
+ assert(callback != NULL);
- for (list_node_t *node = list->head; node; ) {
- list_node_t *next = node->next;
- callback(node->data);
- node = next;
- }
+ for (list_node_t *node = list->head; node; ) {
+ list_node_t *next = node->next;
+ if (!callback(node->data, context))
+ return node;
+ node = next;
+ }
+ return NULL;
}
list_node_t *list_begin(const list_t *list)
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ ******************************************************************************/
+
+#include "mutex.h"
+
+
+/* static section */
+static osi_mutex_t gl_mutex; /* Recursive Type */
+
+
+/** Create a new mutex
+ * @param mutex pointer to the mutex to create
+ * @return a new mutex */
+int osi_mutex_new(osi_mutex_t *mutex)
+{
+ int xReturn = -1;
+
+ *mutex = xSemaphoreCreateMutex();
+
+ if (*mutex != NULL) {
+ xReturn = 0;
+ }
+
+ return xReturn;
+}
+
+/** Lock a mutex
+ * @param mutex the mutex to lock */
+int osi_mutex_lock(osi_mutex_t *mutex, uint32_t timeout)
+{
+ int ret = 0;
+
+ if (timeout == OSI_MUTEX_MAX_TIMEOUT) {
+ if (xSemaphoreTake(*mutex, portMAX_DELAY) != pdTRUE) {
+ ret = -1;
+ }
+ } else {
+ if (xSemaphoreTake(*mutex, timeout / portTICK_PERIOD_MS) != pdTRUE) {
+ ret = -2;
+ }
+ }
+
+ return ret;
+}
+
/** Unlock a mutex.
 * @param mutex the mutex to unlock; must be a valid handle, presumably held
 *              by the calling task (no ownership check is performed here —
 *              NOTE(review): FreeRTOS behavior when giving an unheld mutex
 *              should be confirmed by the caller) */
void osi_mutex_unlock(osi_mutex_t *mutex)
{
    xSemaphoreGive(*mutex);
}
+
/** Delete a mutex and invalidate its handle.
 * @param mutex the mutex to delete; *mutex is set to NULL afterwards so
 *              osi_mutex_valid() reports the handle as invalid */
void osi_mutex_free(osi_mutex_t *mutex)
{
    vSemaphoreDelete(*mutex);
    *mutex = NULL;
}
+
+int osi_mutex_global_init(void)
+{
+ gl_mutex = xSemaphoreCreateRecursiveMutex();
+ if (gl_mutex == NULL) {
+ return -1;
+ }
+
+ return 0;
+}
+
/** Delete the global recursive mutex created by osi_mutex_global_init().
 *  NOTE(review): gl_mutex is not reset to NULL here, so the global lock
 *  functions must not be called after deinit. */
void osi_mutex_global_deinit(void)
{
    vSemaphoreDelete(gl_mutex);
}
+
/** Acquire the global mutex, blocking indefinitely. The mutex is recursive,
 *  so the holding task may nest lock/unlock pairs. */
void osi_mutex_global_lock(void)
{
    xSemaphoreTakeRecursive(gl_mutex, portMAX_DELAY);
}
+
/** Release one level of the global recursive mutex; must balance a prior
 *  osi_mutex_global_lock() by the same task. */
void osi_mutex_global_unlock(void)
{
    xSemaphoreGiveRecursive(gl_mutex);
}
--- /dev/null
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+#include "osi.h"
+#include "mutex.h"
+
// Initializes the OSI module. Currently the only global state is the
// OSI-wide recursive mutex. Returns 0 on success, -1 on failure.
int osi_init(void)
{
    return (osi_mutex_global_init() == 0) ? 0 : -1;
}
+
// Tears down OSI module state created by osi_init() (currently just the
// global recursive mutex).
void osi_deinit(void)
{
    osi_mutex_global_deinit();
}
+++ /dev/null
-/*
- * Copyright (c) 2001-2003 Swedish Institute of Computer Science.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without modification,
- * are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
- * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
- * OF SUCH DAMAGE.
- *
- * This file is part of the bluedroid stack.
- *
- * Author: Adam Dunkels <adam@sics.se>
- *
- */
-
-#include "osi_arch.h"
-
-/** Create a new mutex
- * @param mutex pointer to the mutex to create
- * @return a new mutex */
-int
-osi_mutex_new(osi_mutex_t *pxMutex)
-{
- int xReturn = -1;
-
- *pxMutex = xSemaphoreCreateMutex();
-
- if (*pxMutex != NULL) {
- xReturn = 0;
- }
-
- //LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("osi_mutex_new: m=%p\n", *pxMutex));
-
- return xReturn;
-}
-
-/** Lock a mutex
- * @param mutex the mutex to lock */
-void
-osi_mutex_lock(osi_mutex_t *pxMutex)
-{
- while (xSemaphoreTake(*pxMutex, portMAX_DELAY) != pdPASS);
-}
-
-int
-osi_mutex_trylock(osi_mutex_t *pxMutex)
-{
- if (xSemaphoreTake(*pxMutex, 0) == pdPASS) {
- return 0;
- } else {
- return -1;
- }
-}
-
-/** Unlock a mutex
- * @param mutex the mutex to unlock */
-void
-osi_mutex_unlock(osi_mutex_t *pxMutex)
-{
- xSemaphoreGive(*pxMutex);
-}
-
-/** Delete a semaphore
- * @param mutex the mutex to delete */
-void
-osi_mutex_free(osi_mutex_t *pxMutex)
-{
- //LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("osi_mutex_free: m=%p\n", *pxMutex));
- vQueueDelete(*pxMutex);
-}
-
-/*-----------------------------------------------------------------------------------*/
-// Creates and returns a new semaphore. The "init_count" argument specifies
-// the initial state of the semaphore, "max_count" specifies the maximum value
-// that can be reached.
-int
-osi_sem_new(osi_sem_t *sem, uint32_t max_count, uint32_t init_count)
-{
- int xReturn = -1;
- if (sem) {
- *sem = xSemaphoreCreateCounting(max_count, init_count);
- if ((*sem) != NULL) {
- xReturn = 0;
- }
- }
-
- return xReturn;
-}
-
-/*-----------------------------------------------------------------------------------*/
-// Signals a semaphore
-void
-osi_sem_signal(osi_sem_t *sem)
-{
- xSemaphoreGive(*sem);
-}
-
-/*-----------------------------------------------------------------------------------*/
-/*
- Blocks the thread while waiting for the semaphore to be
- signaled. If the "timeout" argument is non-zero, the thread should
- only be blocked for the specified time (measured in
- milliseconds).
-
- If the timeout argument is non-zero, the return value is the number of
- milliseconds spent waiting for the semaphore to be signaled. If the
- semaphore wasn't signaled within the specified time, the return value is
- OSI_ARCH_TIMEOUT. If the thread didn't have to wait for the semaphore
- (i.e., it was already signaled), the function may return zero.
-
- Notice that lwIP implements a function with a similar name,
- osi_sem_wait(), that uses the osi_arch_sem_wait() function.
-*/
-uint32_t
-osi_sem_wait(osi_sem_t *sem, uint32_t timeout)
-{
- portTickType StartTime, EndTime, Elapsed;
- unsigned long ulReturn;
-
- StartTime = xTaskGetTickCount();
-
- if (timeout != 0) {
- if (xSemaphoreTake(*sem, timeout / portTICK_PERIOD_MS) == pdTRUE) {
- EndTime = xTaskGetTickCount();
- Elapsed = (EndTime - StartTime) * portTICK_PERIOD_MS;
-
- if (Elapsed == 0) {
- Elapsed = 1;
- }
-
- ulReturn = Elapsed;
- } else {
- ulReturn = OSI_ARCH_TIMEOUT;
- }
- } else { // must block without a timeout
- while (xSemaphoreTake(*sem, portMAX_DELAY) != pdTRUE);
-
- EndTime = xTaskGetTickCount();
- Elapsed = (EndTime - StartTime) * portTICK_PERIOD_MS;
-
- if (Elapsed == 0) {
- Elapsed = 1;
- }
-
- ulReturn = Elapsed;
- }
-
- return ulReturn ; // return time blocked
-}
-
-/*-----------------------------------------------------------------------------------*/
-// Deallocates a semaphore
-void
-osi_sem_free(osi_sem_t *sem)
-{
- vSemaphoreDelete(*sem);
-}
-
-/*-----------------------------------------------------------------------------------*/
-// Initialize osi arch
-void
-osi_arch_init(void)
-{
-}
-
-/*-----------------------------------------------------------------------------------*/
-uint32_t
-osi_now(void)
-{
- return xTaskGetTickCount();
-}
-
-
-void osi_delay_ms(uint32_t ms)
-{
- vTaskDelay(ms / portTICK_PERIOD_MS);
-}
-
-
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ ******************************************************************************/
+
+
+#include "semaphore.h"
+
+/*-----------------------------------------------------------------------------------*/
+// Creates and returns a new semaphore. The "init_count" argument specifies
+// the initial state of the semaphore, "max_count" specifies the maximum value
+// that can be reached.
+int osi_sem_new(osi_sem_t *sem, uint32_t max_count, uint32_t init_count)
+{
+ int ret = -1;
+
+ if (sem) {
+ *sem = xSemaphoreCreateCounting(max_count, init_count);
+ if ((*sem) != NULL) {
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
/*-----------------------------------------------------------------------------------*/
// Gives (signals) the semaphore, incrementing its count up to the max_count
// supplied at creation. |sem| must be a valid handle created by
// osi_sem_new(); it is not NULL-checked here.
void osi_sem_give(osi_sem_t *sem)
{
    xSemaphoreGive(*sem);
}
+
+/*
+ Blocks the thread while waiting for the semaphore to be
+ signaled. If the "timeout" argument is non-zero, the thread should
+ only be blocked for the specified time (measured in
+ milliseconds).
+
+*/
+int
+osi_sem_take(osi_sem_t *sem, uint32_t timeout)
+{
+ int ret = 0;
+
+ if (timeout == OSI_SEM_MAX_TIMEOUT) {
+ if (xSemaphoreTake(*sem, portMAX_DELAY) != pdTRUE) {
+ ret = -1;
+ }
+ } else {
+ if (xSemaphoreTake(*sem, timeout / portTICK_PERIOD_MS) != pdTRUE) {
+ ret = -2;
+ }
+ }
+
+ return ret;
+}
+
// Deallocates the semaphore and invalidates the handle: *sem is set to NULL
// afterwards so osi_sem_valid() in semaphore.h reports it as invalid.
void osi_sem_free(osi_sem_t *sem)
{
    vSemaphoreDelete(*sem);
    *sem = NULL;
}
#include "bt_types.h"
#include "bt_target.h"
#include "bt_utils.h"
-#include "gki.h"
#include "l2c_api.h"
#include "l2cdefs.h"
#include "btm_api.h"
#include "avct_api.h"
#include "avct_int.h"
+#include "allocator.h"
#if (defined(AVCT_INCLUDED) && AVCT_INCLUDED == TRUE)
/* map handle to ccb */
if ((p_ccb = avct_ccb_by_idx(handle)) == NULL) {
result = AVCT_BAD_HANDLE;
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
/* verify channel is bound to link */
else if (p_ccb->p_lcb == NULL) {
result = AVCT_NOT_OPEN;
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
if (result == AVCT_SUCCESS) {
if (p_ccb->p_bcb == NULL && (p_ccb->allocated & AVCT_ALOC_BCB) == 0) {
/* BCB channel is not open and not allocated */
result = AVCT_BAD_HANDLE;
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
} else {
p_ccb->p_bcb = avct_bcb_by_lcb(p_ccb->p_lcb);
avct_bcb_event(p_ccb->p_bcb, AVCT_LCB_UL_MSG_EVT, (tAVCT_LCB_EVT *) &ul_msg);
#include "avct_int.h"
#include "l2c_api.h"
#include "l2cdefs.h"
+#include "allocator.h"
#if (defined(AVCT_INCLUDED) && AVCT_INCLUDED == TRUE)
avct_lcb_event(p_lcb, AVCT_LCB_LL_MSG_EVT, (tAVCT_LCB_EVT *) &p_buf);
} else { /* prevent buffer leak */
AVCT_TRACE_WARNING("ERROR -> avct_l2c_data_ind_cback drop buffer");
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
#include "bt_utils.h"
#include "avct_api.h"
#include "avct_int.h"
-#include "gki.h"
+#include "allocator.h"
#if (defined(AVCT_INCLUDED) && AVCT_INCLUDED == TRUE)
p_lcb->allocated = (UINT8)(i + 1);
memcpy(p_lcb->peer_addr, bd_addr, BD_ADDR_LEN);
AVCT_TRACE_DEBUG("avct_lcb_alloc %d", p_lcb->allocated);
+ p_lcb->tx_q = fixed_queue_new(SIZE_MAX);
break;
}
}
*******************************************************************************/
void avct_lcb_dealloc(tAVCT_LCB *p_lcb, tAVCT_LCB_EVT *p_data)
{
    UNUSED(p_data);

    AVCT_TRACE_DEBUG("%s allocated: %d", __func__, p_lcb->allocated);

    /* The LCB may only be freed once no allocated CCB still points at it. */
    tAVCT_CCB *p_ccb = &avct_cb.ccb[0];
    for (size_t i = 0; i < AVCT_NUM_CONN; i++, p_ccb++)
    {
        if (p_ccb->allocated && p_ccb->p_lcb == p_lcb)
        {
            /* Cast to int: the format string uses %d but |i| is size_t;
             * passing size_t to %d is undefined behavior on LP64 targets. */
            AVCT_TRACE_DEBUG("%s LCB in use; lcb index: %d", __func__, (int)i);
            return;
        }
    }

    /* No references remain: release any partially reassembled message and
     * the transmit queue, then clear the control block for reuse. */
    AVCT_TRACE_DEBUG("%s Freeing LCB", __func__);
    osi_free(p_lcb->p_rx_msg);
    fixed_queue_free(p_lcb->tx_q, NULL);
    memset(p_lcb, 0, sizeof(tAVCT_LCB));
}
/*******************************************************************************
#include "bt_utils.h"
#include "avct_api.h"
#include "avct_int.h"
-#include "gki.h"
#include "btm_api.h"
+#include "allocator.h"
#if (defined(AVCT_INCLUDED) && AVCT_INCLUDED == TRUE)
/* quick sanity check on length */
if (p_buf->len < avct_lcb_pkt_type_len[pkt_type]) {
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
AVCT_TRACE_WARNING("Bad length during reassembly");
p_ret = NULL;
}
else if (pkt_type == AVCT_PKT_TYPE_SINGLE) {
/* if reassembly in progress drop message and process new single */
if (p_lcb->p_rx_msg != NULL) {
- GKI_freebuf(p_lcb->p_rx_msg);
- p_lcb->p_rx_msg = NULL;
AVCT_TRACE_WARNING("Got single during reassembly");
}
+ osi_free(p_lcb->p_rx_msg);
+ p_lcb->p_rx_msg = NULL;
p_ret = p_buf;
}
/* start packet */
else if (pkt_type == AVCT_PKT_TYPE_START) {
/* if reassembly in progress drop message and process new start */
if (p_lcb->p_rx_msg != NULL) {
- GKI_freebuf(p_lcb->p_rx_msg);
AVCT_TRACE_WARNING("Got start during reassembly");
}
+ osi_free(p_lcb->p_rx_msg);
/* Allocate bigger buffer for reassembly. As lower layers are
* not aware of possible packet size after reassembly they
* would have allocated smaller buffer.
*/
- p_lcb->p_rx_msg = (BT_HDR *)GKI_getbuf(GKI_MAX_BUF_SIZE);
+ p_lcb->p_rx_msg = (BT_HDR *)osi_malloc(BT_DEFAULT_BUFFER_SIZE);
if (p_lcb->p_rx_msg == NULL) {
AVCT_TRACE_ERROR ("Cannot alloc buffer for reassembly !!");
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
} else {
memcpy (p_lcb->p_rx_msg, p_buf,
sizeof(BT_HDR) + p_buf->offset + p_buf->len);
/* Free original buffer */
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
/* update p to point to new buffer */
p = (UINT8 *)(p_lcb->p_rx_msg + 1) + p_lcb->p_rx_msg->offset;
else {
/* if no reassembly in progress drop message */
if (p_lcb->p_rx_msg == NULL) {
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
AVCT_TRACE_WARNING("Pkt type=%d out of order", pkt_type);
p_ret = NULL;
} else {
/* get size of buffer holding assembled message */
- buf_len = GKI_get_buf_size(p_lcb->p_rx_msg) - sizeof(BT_HDR);
+ /*
+ * NOTE: The buffer is allocated above at the beginning of the
+ * reassembly, and is always of size BT_DEFAULT_BUFFER_SIZE.
+ */
+ buf_len = BT_DEFAULT_BUFFER_SIZE - sizeof(BT_HDR);
/* adjust offset and len of fragment for header byte */
p_buf->offset += AVCT_HDR_LEN_CONT;
/* verify length */
if ((p_lcb->p_rx_msg->offset + p_buf->len) > buf_len) {
/* won't fit; free everything */
- GKI_freebuf(p_lcb->p_rx_msg);
+ AVCT_TRACE_WARNING("%s: Fragmented message too big!", __func__);
+ osi_free(p_lcb->p_rx_msg);
p_lcb->p_rx_msg = NULL;
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
p_ret = NULL;
- AVCT_TRACE_WARNING("Fragmented message to big!");
} else {
/* copy contents of p_buf to p_rx_msg */
memcpy((UINT8 *)(p_lcb->p_rx_msg + 1) + p_lcb->p_rx_msg->offset,
p_lcb->p_rx_msg->len += p_buf->len;
p_ret = NULL;
}
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
}
/* set event */
event = (p_data->cong) ? AVCT_CONG_IND_EVT : AVCT_UNCONG_IND_EVT;
p_lcb->cong = p_data->cong;
- if (p_lcb->cong == FALSE && GKI_getfirst(&p_lcb->tx_q)) {
- while ( !p_lcb->cong && (p_buf = (BT_HDR *)GKI_dequeue(&p_lcb->tx_q)) != NULL) {
- if (L2CA_DataWrite(p_lcb->ch_lcid, p_buf) == L2CAP_DW_CONGESTED) {
+ if (p_lcb->cong == FALSE && !fixed_queue_is_empty(p_lcb->tx_q))
+ {
+ while (!p_lcb->cong &&
+ (p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_lcb->tx_q)) != NULL)
+ {
+ if (L2CA_DataWrite(p_lcb->ch_lcid, p_buf) == L2CAP_DW_CONGESTED)
+ {
p_lcb->cong = TRUE;
}
}
AVCT_TRACE_WARNING("Dropping msg");
- GKI_freebuf(p_data->ul_msg.p_buf);
+ osi_free(p_data->ul_msg.p_buf);
+ p_data->ul_msg.p_buf = NULL;
}
/*******************************************************************************
UINT16 curr_msg_len;
UINT8 pkt_type;
UINT8 hdr_len;
- BT_HDR *p_buf;
UINT8 *p;
UINT8 nosp = 0; /* number of subsequent packets */
UINT16 temp;
/* while we haven't sent all packets */
while (curr_msg_len != 0) {
+ BT_HDR *p_buf;
+
/* set header len */
hdr_len = avct_lcb_pkt_type_len[pkt_type];
/* if remaining msg must be fragmented */
if (p_data->ul_msg.p_buf->len > (p_lcb->peer_mtu - hdr_len)) {
/* get a new buffer for fragment we are sending */
- if ((p_buf = (BT_HDR *) GKI_getbuf(buf_size)) == NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(buf_size)) == NULL) {
/* whoops; free original msg buf and bail */
AVCT_TRACE_ERROR ("avct_lcb_send_msg cannot alloc buffer!!");
- GKI_freebuf(p_data->ul_msg.p_buf);
+ osi_free(p_data->ul_msg.p_buf);
break;
}
}
if (p_lcb->cong == TRUE) {
- GKI_enqueue (&p_lcb->tx_q, p_buf);
+ fixed_queue_enqueue(p_lcb->tx_q, p_buf);
}
/* send message to L2CAP */
pkt_type = AVCT_PKT_TYPE_END;
}
}
- AVCT_TRACE_DEBUG ("avct_lcb_send_msg tx_q_count:%d", GKI_queue_length(&p_lcb->tx_q));
+ AVCT_TRACE_DEBUG ("avct_lcb_send_msg tx_q_count:%d",
+ fixed_queue_length(p_lcb->tx_q));
return;
}
{
UNUSED(p_lcb);
- if (p_data) {
- GKI_freebuf(p_data->p_buf);
- }
- return;
+ if (p_data == NULL)
+ return;
+
+ osi_free(p_data->p_buf);
+ p_data->p_buf = NULL;
}
/*******************************************************************************
/* check for invalid cr_ipid */
if (cr_ipid == AVCT_CR_IPID_INVALID) {
AVCT_TRACE_WARNING("Invalid cr_ipid %d", cr_ipid);
- GKI_freebuf(p_data->p_buf);
+ osi_free(p_data->p_buf);
+ p_data->p_buf = NULL;
return;
}
} else {
/* PID not found; drop message */
AVCT_TRACE_WARNING("No ccb for PID=%x", pid);
- GKI_freebuf(p_data->p_buf);
+ osi_free(p_data->p_buf);
+ p_data->p_buf = NULL;
/* if command send reject */
if (cr_ipid == AVCT_CMD) {
- if ((p_buf = (BT_HDR *) GKI_getpoolbuf(AVCT_CMD_POOL_ID)) != NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(AVCT_CMD_BUF_SIZE)) != NULL) {
p_buf->len = AVCT_HDR_LEN_SINGLE;
p_buf->offset = AVCT_MSG_OFFSET - AVCT_HDR_LEN_SINGLE;
p = (UINT8 *)(p_buf + 1) + p_buf->offset;
#ifndef AVCT_INT_H
#define AVCT_INT_H
-#include "gki.h"
#include "avct_api.h"
#include "avct_defs.h"
#include "l2c_api.h"
+#include "fixed_queue.h"
/*****************************************************************************
** constants
BT_HDR *p_rx_msg; /* Message being reassembled */
UINT16 conflict_lcid; /* L2CAP channel LCID */
BD_ADDR peer_addr; /* BD address of peer */
- BUFFER_Q tx_q; /* Transmit data buffer queue */
+ fixed_queue_t *tx_q; /* Transmit data buffer queue */
BOOLEAN cong; /* TRUE, if congested */
} tAVCT_LCB;
#include "avdt_int.h"
#include "l2c_api.h"
#include "l2cdefs.h"
+#include "allocator.h"
#if (defined(AVDT_INCLUDED) && AVDT_INCLUDED == TRUE)
/*******************************************************************************
if (p_scb != NULL) {
avdt_scb_event(p_scb, AVDT_SCB_TC_DATA_EVT, (tAVDT_SCB_EVT *) &p_buf);
} else {
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
AVDT_TRACE_ERROR(" avdt_ad_tc_data_ind buffer freed");
}
}
#include "l2c_api.h"
#include "btm_api.h"
#include "btu.h"
+#include "allocator.h"
#if (defined(AVDT_INCLUDED) && AVDT_INCLUDED == TRUE)
evt.apiwrite.time_stamp = time_stamp;
evt.apiwrite.m_pt = m_pt;
evt.apiwrite.opt = opt;
-#if AVDT_MULTIPLEXING == TRUE
- GKI_init_q (&evt.apiwrite.frag_q);
-#endif
avdt_scb_event(p_scb, AVDT_SCB_API_WRITE_REQ_EVT, &evt);
}
return (lcid);
}
-#if AVDT_MULTIPLEXING == TRUE
-/*******************************************************************************
-**
-** Function AVDT_WriteDataReq
-**
-** Description Send a media packet to the peer device. The stream must
-** be started before this function is called. Also, this
-** function can only be called if the stream is a SRC.
-**
-** When AVDTP has sent the media packet and is ready for the
-** next packet, an AVDT_WRITE_CFM_EVT is sent to the
-** application via the control callback. The application must
-** wait for the AVDT_WRITE_CFM_EVT before it makes the next
-** call to AVDT_WriteDataReq(). If the applications calls
-** AVDT_WriteDataReq() before it receives the event the packet
-** will not be sent. The application may make its first call
-** to AVDT_WriteDataReq() after it receives an
-** AVDT_START_CFM_EVT or AVDT_START_IND_EVT.
-**
-** Returns AVDT_SUCCESS if successful, otherwise error.
-**
-*******************************************************************************/
-extern UINT16 AVDT_WriteDataReq(UINT8 handle, UINT8 *p_data, UINT32 data_len,
- UINT32 time_stamp, UINT8 m_pt, UINT8 marker)
-{
-
- tAVDT_SCB *p_scb;
- tAVDT_SCB_EVT evt;
- UINT16 result = AVDT_SUCCESS;
-
- do {
- /* check length of media frame */
- if (data_len > AVDT_MAX_MEDIA_SIZE) {
- result = AVDT_BAD_PARAMS;
- break;
- }
- /* map handle to scb */
- if ((p_scb = avdt_scb_by_hdl(handle)) == NULL) {
- result = AVDT_BAD_HANDLE;
- break;
- }
- AVDT_TRACE_WARNING("mux_tsid_media:%d\n", p_scb->curr_cfg.mux_tsid_media);
-
- if (p_scb->p_pkt != NULL
- || p_scb->p_ccb == NULL
- || !GKI_queue_is_empty(&p_scb->frag_q)
- || p_scb->frag_off != 0
- || p_scb->curr_cfg.mux_tsid_media == 0) {
- result = AVDT_ERR_BAD_STATE;
- AVDT_TRACE_WARNING("p_scb->p_pkt=%p, p_scb->p_ccb=%p, IsQueueEmpty=%x, p_scb->frag_off=%x\n",
- p_scb->p_pkt, p_scb->p_ccb, GKI_queue_is_empty(&p_scb->frag_q), p_scb->frag_off);
- break;
- }
- evt.apiwrite.p_buf = 0; /* it will indicate using of fragments queue frag_q */
- /* create queue of media fragments */
- GKI_init_q (&evt.apiwrite.frag_q);
-
- /* compose fragments from media payload and put fragments into gueue */
- avdt_scb_queue_frags(p_scb, &p_data, &data_len, &evt.apiwrite.frag_q);
- if (GKI_queue_is_empty(&evt.apiwrite.frag_q)) {
- AVDT_TRACE_WARNING("AVDT_WriteDataReq out of GKI buffers");
- result = AVDT_ERR_RESOURCE;
- break;
- }
- evt.apiwrite.data_len = data_len;
- evt.apiwrite.p_data = p_data;
-
- /* process the fragments queue */
- evt.apiwrite.time_stamp = time_stamp;
- evt.apiwrite.m_pt = m_pt | (marker << 7);
- avdt_scb_event(p_scb, AVDT_SCB_API_WRITE_REQ_EVT, &evt);
- } while (0);
-
-#if (BT_USE_TRACES == TRUE)
- if (result != AVDT_SUCCESS) {
- AVDT_TRACE_WARNING("*** AVDT_WriteDataReq failed result=%d\n", result);
- }
-#endif
- return result;
-}
-#endif
#if AVDT_MULTIPLEXING == TRUE
/*******************************************************************************
/* build SR - assume fit in one packet */
p_tbl = avdt_ad_tc_tbl_by_type(AVDT_CHAN_REPORT, p_scb->p_ccb, p_scb);
if ((p_tbl->state == AVDT_AD_ST_OPEN) &&
- (p_pkt = (BT_HDR *)GKI_getbuf(p_tbl->peer_mtu)) != NULL) {
+ (p_pkt = (BT_HDR *)osi_malloc(p_tbl->peer_mtu)) != NULL) {
p_pkt->offset = L2CAP_MIN_OFFSET;
p = (UINT8 *)(p_pkt + 1) + p_pkt->offset;
#if AVDT_MULTIPLEXING == TRUE
#include "avdt_api.h"
#include "avdtc_api.h"
#include "avdt_int.h"
-#include "gki.h"
#include "btu.h"
#if (defined(AVDT_INCLUDED) && AVDT_INCLUDED == TRUE)
if (!p_ccb->allocated) {
p_ccb->allocated = TRUE;
memcpy(p_ccb->peer_addr, bd_addr, BD_ADDR_LEN);
- GKI_init_q(&p_ccb->cmd_q);
- GKI_init_q(&p_ccb->rsp_q);
+ p_ccb->cmd_q = fixed_queue_new(SIZE_MAX);
+ p_ccb->rsp_q = fixed_queue_new(SIZE_MAX);
p_ccb->timer_entry.param = (UINT32) p_ccb;
AVDT_TRACE_DEBUG("avdt_ccb_alloc %d\n", i);
break;
AVDT_TRACE_DEBUG("avdt_ccb_dealloc %d\n", avdt_ccb_to_idx(p_ccb));
btu_stop_timer(&p_ccb->timer_entry);
+ fixed_queue_free(p_ccb->cmd_q, NULL);
+ fixed_queue_free(p_ccb->rsp_q, NULL);
memset(p_ccb, 0, sizeof(tAVDT_CCB));
}
#include "avdt_api.h"
#include "avdtc_api.h"
#include "avdt_int.h"
-#include "gki.h"
#include "btu.h"
#include "btm_api.h"
+#include "allocator.h"
#if (defined(AVDT_INCLUDED) && AVDT_INCLUDED == TRUE)
/* free message being fragmented */
if (p_ccb->p_curr_msg != NULL) {
- GKI_freebuf(p_ccb->p_curr_msg);
+ osi_free(p_ccb->p_curr_msg);
p_ccb->p_curr_msg = NULL;
}
/* free message being reassembled */
if (p_ccb->p_rx_msg != NULL) {
- GKI_freebuf(p_ccb->p_rx_msg);
+ osi_free(p_ccb->p_rx_msg);
p_ccb->p_rx_msg = NULL;
}
/* clear out response queue */
- while ((p_buf = (BT_HDR *) GKI_dequeue(&p_ccb->rsp_q)) != NULL) {
- GKI_freebuf(p_buf);
+ while ((p_buf = (BT_HDR *) fixed_queue_try_dequeue(p_ccb->rsp_q)) != NULL) {
+ osi_free(p_buf);
}
}
avdt_ccb_cmd_fail(p_ccb, (tAVDT_CCB_EVT *) &err_code);
/* set up next message */
- p_ccb->p_curr_cmd = (BT_HDR *) GKI_dequeue(&p_ccb->cmd_q);
+ p_ccb->p_curr_cmd = (BT_HDR *) fixed_queue_try_dequeue(p_ccb->cmd_q);
} while (p_ccb->p_curr_cmd != NULL);
}
}
- GKI_freebuf(p_ccb->p_curr_cmd);
+ osi_free(p_ccb->p_curr_cmd);
p_ccb->p_curr_cmd = NULL;
}
}
UNUSED(p_data);
if (p_ccb->p_curr_cmd != NULL) {
- GKI_freebuf(p_ccb->p_curr_cmd);
+ osi_free(p_ccb->p_curr_cmd);
p_ccb->p_curr_cmd = NULL;
}
}
/* if command pending and we're not congested and not sending a fragment */
if ((!p_ccb->cong) && (p_ccb->p_curr_msg == NULL) && (p_ccb->p_curr_cmd != NULL)) {
/* make copy of message in p_curr_cmd and send it */
- if ((p_msg = (BT_HDR *) GKI_getpoolbuf(AVDT_CMD_POOL_ID)) != NULL) {
+ if ((p_msg = (BT_HDR *) osi_malloc(AVDT_CMD_BUF_SIZE)) != NULL) {
memcpy(p_msg, p_ccb->p_curr_cmd,
(sizeof(BT_HDR) + p_ccb->p_curr_cmd->offset + p_ccb->p_curr_cmd->len));
avdt_msg_send(p_ccb, p_msg);
** not congested, not sending fragment, not waiting for response
*/
if ((!p_ccb->cong) && (p_ccb->p_curr_msg == NULL) && (p_ccb->p_curr_cmd == NULL)) {
- if ((p_msg = (BT_HDR *) GKI_dequeue(&p_ccb->cmd_q)) != NULL) {
+ if ((p_msg = (BT_HDR *) fixed_queue_try_dequeue(p_ccb->cmd_q)) != NULL) {
/* make a copy of buffer in p_curr_cmd */
- if ((p_ccb->p_curr_cmd = (BT_HDR *) GKI_getpoolbuf(AVDT_CMD_POOL_ID)) != NULL) {
+ if ((p_ccb->p_curr_cmd = (BT_HDR *) osi_malloc(AVDT_CMD_BUF_SIZE)) != NULL) {
memcpy(p_ccb->p_curr_cmd, p_msg, (sizeof(BT_HDR) + p_msg->offset + p_msg->len));
avdt_msg_send(p_ccb, p_msg);
avdt_msg_send(p_ccb, NULL);
}
/* do we have responses to send? send them */
- else if (!GKI_queue_is_empty(&p_ccb->rsp_q)) {
- while ((p_msg = (BT_HDR *) GKI_dequeue(&p_ccb->rsp_q)) != NULL) {
+ else if (!fixed_queue_is_empty(p_ccb->rsp_q)) {
+ while ((p_msg = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->rsp_q)) != NULL) {
if (avdt_msg_send(p_ccb, p_msg) == TRUE) {
/* break out if congested */
break;
#include "l2cdefs.h"
#include "btm_api.h"
#include "btm_int.h"
+#include "allocator.h"
#if (defined(AVDT_INCLUDED) && AVDT_INCLUDED == TRUE)
if ((p_tbl = avdt_ad_tc_tbl_by_lcid(lcid)) != NULL) {
avdt_ad_tc_data_ind(p_tbl, p_buf);
} else { /* prevent buffer leak */
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
#include "avdt_api.h"
#include "avdtc_api.h"
#include "avdt_int.h"
-#include "gki.h"
#include "btu.h"
+#include "allocator.h"
#if (defined(AVDT_INCLUDED) && AVDT_INCLUDED == TRUE)
/*****************************************************************************
(p_tbl->peer_mtu - 1) + 2;
/* get a new buffer for fragment we are sending */
- if ((p_buf = (BT_HDR *) GKI_getpoolbuf(AVDT_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(AVDT_CMD_BUF_SIZE)) == NULL) {
/* do we even want to try and recover from this? could do so
by setting retransmission timer */
return TRUE;
hdr_len = AVDT_LEN_TYPE_CONT;
/* get a new buffer for fragment we are sending */
- if ((p_buf = (BT_HDR *) GKI_getpoolbuf(AVDT_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *) osi_malloc(AVDT_CMD_BUF_SIZE)) == NULL) {
/* do we even want to try and recover from this? could do so
by setting retransmission timer */
return TRUE;
/* quick sanity check on length */
if (p_buf->len < avdt_msg_pkt_type_len[pkt_type]) {
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
AVDT_TRACE_WARNING("Bad length during reassembly");
p_ret = NULL;
}
else if (pkt_type == AVDT_PKT_TYPE_SINGLE) {
/* if reassembly in progress drop message and process new single */
if (p_ccb->p_rx_msg != NULL) {
- GKI_freebuf(p_ccb->p_rx_msg);
+ osi_free(p_ccb->p_rx_msg);
p_ccb->p_rx_msg = NULL;
AVDT_TRACE_WARNING("Got single during reassembly");
}
else if (pkt_type == AVDT_PKT_TYPE_START) {
/* if reassembly in progress drop message and process new single */
if (p_ccb->p_rx_msg != NULL) {
- GKI_freebuf(p_ccb->p_rx_msg);
+ osi_free(p_ccb->p_rx_msg);
+ p_ccb->p_rx_msg = NULL;
AVDT_TRACE_WARNING("Got start during reassembly");
}
- p_ccb->p_rx_msg = p_buf;
+ /*
+ * Allocate bigger buffer for reassembly. As lower layers are
+ * not aware of possible packet size after reassembly, they
+ * would have allocated smaller buffer.
+ */
+ p_ccb->p_rx_msg = (BT_HDR *)osi_malloc(BT_DEFAULT_BUFFER_SIZE);
+ memcpy(p_ccb->p_rx_msg, p_buf,
+ sizeof(BT_HDR) + p_buf->offset + p_buf->len);
+
+ /* Free original buffer */
+ osi_free(p_buf);
+
+ /* update p to point to new buffer */
+ p = (UINT8 *)(p_ccb->p_rx_msg + 1) + p_ccb->p_rx_msg->offset;
/* copy first header byte over nosp */
*(p + 1) = *p;
else {
/* if no reassembly in progress drop message */
if (p_ccb->p_rx_msg == NULL) {
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
AVDT_TRACE_WARNING("Pkt type=%d out of order\n", pkt_type);
p_ret = NULL;
} else {
/* get size of buffer holding assembled message */
- buf_len = GKI_get_buf_size(p_ccb->p_rx_msg) - sizeof(BT_HDR);
+ buf_len = BT_DEFAULT_BUFFER_SIZE - sizeof(BT_HDR);
/* adjust offset and len of fragment for header byte */
p_buf->offset += AVDT_LEN_TYPE_CONT;
/* verify length */
if ((p_ccb->p_rx_msg->offset + p_buf->len) > buf_len) {
/* won't fit; free everything */
- GKI_freebuf(p_ccb->p_rx_msg);
+ AVDT_TRACE_WARNING("%s: Fragmented message too big!", __func__);
+ osi_free(p_ccb->p_rx_msg);
p_ccb->p_rx_msg = NULL;
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
p_ret = NULL;
} else {
/* copy contents of p_buf to p_rx_msg */
p_ccb->p_rx_msg->len += p_buf->len;
p_ret = NULL;
}
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
}
UINT8 *p_start;
/* get a buffer */
- p_buf = (BT_HDR *) GKI_getpoolbuf(AVDT_CMD_POOL_ID);
+ p_buf = (BT_HDR *) osi_malloc(AVDT_CMD_BUF_SIZE);
if (p_buf == NULL) {
AVDT_TRACE_ERROR("avdt_msg_send_cmd out of buffer!!");
return;
p_ccb->label = (p_ccb->label + 1) % 16;
/* queue message and trigger ccb to send it */
- GKI_enqueue(&p_ccb->cmd_q, p_buf);
+ fixed_queue_enqueue(p_ccb->cmd_q, p_buf);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
UINT8 *p_start;
/* get a buffer */
- p_buf = (BT_HDR *) GKI_getpoolbuf(AVDT_CMD_POOL_ID);
+ p_buf = (BT_HDR *) osi_malloc(AVDT_CMD_BUF_SIZE);
if (p_buf == NULL) {
return;
}
AVDT_BLD_LAYERSPEC(p_buf->layer_specific, AVDT_MSG_TYPE_RSP, p_params->hdr.label);
/* queue message and trigger ccb to send it */
- GKI_enqueue(&p_ccb->rsp_q, p_buf);
+ fixed_queue_enqueue(p_ccb->rsp_q, p_buf);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
UINT8 *p_start;
/* get a buffer */
- p_buf = (BT_HDR *) GKI_getpoolbuf(AVDT_CMD_POOL_ID);
+ p_buf = (BT_HDR *) osi_malloc(AVDT_CMD_BUF_SIZE);
if (p_buf == NULL) {
return;
}
AVDT_BLD_LAYERSPEC(p_buf->layer_specific, AVDT_MSG_TYPE_REJ, p_params->hdr.label);
/* queue message and trigger ccb to send it */
- GKI_enqueue(&p_ccb->rsp_q, p_buf);
+ fixed_queue_enqueue(p_ccb->rsp_q, p_buf);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
UINT8 *p_start;
/* get a buffer */
- p_buf = (BT_HDR *) GKI_getpoolbuf(AVDT_CMD_POOL_ID);
+ p_buf = (BT_HDR *) osi_malloc(AVDT_CMD_BUF_SIZE);
if (p_buf == NULL) {
return;
}
AVDT_TRACE_DEBUG("avdt_msg_send_grej");
/* queue message and trigger ccb to send it */
- GKI_enqueue(&p_ccb->rsp_q, p_buf);
+ fixed_queue_enqueue(p_ccb->rsp_q, p_buf);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
}
/* free message buffer */
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
/* if its a rsp or rej, send event to ccb to free associated
** cmd msg buffer and handle cmd queue
#include "avdt_api.h"
#include "avdtc_api.h"
#include "avdt_int.h"
-#include "gki.h"
#include "btu.h"
+#include "allocator.h"
+#include "fixed_queue.h"
#if (defined(AVDT_INCLUDED) && AVDT_INCLUDED == TRUE)
memcpy(&p_scb->cs, p_cs, sizeof(tAVDT_CS));
#if AVDT_MULTIPLEXING == TRUE
/* initialize fragments queue */
- GKI_init_q(&p_scb->frag_q);
+ p_scb->frag_q = fixed_queue_new(SIZE_MAX);
if (p_cs->cfg.psc_mask & AVDT_PSC_MUX) {
p_scb->cs.cfg.mux_tcid_media = avdt_ad_type_to_tcid(AVDT_CHAN_MEDIA, p_scb);
*******************************************************************************/
void avdt_scb_dealloc(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data)
{
-#if AVDT_MULTIPLEXING == TRUE
- void *p_buf;
-#endif
UNUSED(p_data);
AVDT_TRACE_DEBUG("avdt_scb_dealloc hdl=%d\n", avdt_scb_to_hdl(p_scb));
#if AVDT_MULTIPLEXING == TRUE
/* free fragments we're holding, if any; it shouldn't happen */
- while ((p_buf = GKI_dequeue (&p_scb->frag_q)) != NULL) {
- GKI_freebuf(p_buf);
- }
+ fixed_queue_free(p_scb->frag_q, osi_free_func);
#endif
memset(p_scb, 0, sizeof(tAVDT_SCB));
#include "avdt_api.h"
#include "avdtc_api.h"
#include "avdt_int.h"
-#include "gki.h"
#include "btu.h"
+#include "allocator.h"
#if (defined(AVDT_INCLUDED) && AVDT_INCLUDED == TRUE)
/* do sanity check */
if ((offset > p_data->p_pkt->len) || ((pad_len + offset) > p_data->p_pkt->len)) {
AVDT_TRACE_WARNING("Got bad media packet");
- GKI_freebuf(p_data->p_pkt);
+ osi_free(p_data->p_pkt);
+ p_data->p_pkt = NULL;
}
/* adjust offset and length and send it up */
else {
p_scb->media_buf_len, time_stamp, seq, m_pt, marker);
}
#endif
- GKI_freebuf(p_data->p_pkt);
+ osi_free(p_data->p_pkt);
+ p_data->p_pkt = NULL;
}
}
}
if (p < p_end) {
AVDT_TRACE_WARNING("*** Got bad media packet");
}
- GKI_freebuf(p_data->p_pkt);
+ osi_free(p_data->p_pkt);
+ p_data->p_pkt = NULL;
}
#endif
if (p_data->p_pkt->layer_specific == AVDT_CHAN_REPORT) {
p = (UINT8 *)(p_data->p_pkt + 1) + p_data->p_pkt->offset;
avdt_scb_hdl_report(p_scb, p, p_data->p_pkt->len);
- GKI_freebuf(p_data->p_pkt);
+ osi_free(p_data->p_pkt);
+ p_data->p_pkt = NULL;
} else
#endif
avdt_scb_hdl_pkt_no_frag(p_scb, p_data);
{
UNUSED(p_scb);
- GKI_freebuf(p_data->p_pkt);
- AVDT_TRACE_ERROR(" avdt_scb_drop_pkt Dropped incoming media packet");
+ AVDT_TRACE_ERROR("%s dropped incoming media packet", __func__);
+ osi_free(p_data->p_pkt);
+ p_data->p_pkt = NULL;
}
/*******************************************************************************
/* free pkt we're holding, if any */
if (p_scb->p_pkt != NULL) {
- GKI_freebuf(p_scb->p_pkt);
+ osi_free(p_scb->p_pkt);
p_scb->p_pkt = NULL;
}
/* free packet we're holding, if any; to be replaced with new */
if (p_scb->p_pkt != NULL) {
- GKI_freebuf(p_scb->p_pkt);
+ osi_free(p_scb->p_pkt);
+ p_scb->p_pkt = NULL;
/* this shouldn't be happening */
AVDT_TRACE_WARNING("Dropped media packet; congested");
{
UINT8 *p;
UINT32 ssrc;
- BT_HDR *p_frag;
/* free fragments we're holding, if any; it shouldn't happen */
- if (!GKI_queue_is_empty(&p_scb->frag_q)) {
- while ((p_frag = (BT_HDR *)GKI_dequeue (&p_scb->frag_q)) != NULL) {
- GKI_freebuf(p_frag);
- }
-
+ if (!fixed_queue_is_empty(p_scb->frag_q))
+ {
/* this shouldn't be happening */
AVDT_TRACE_WARNING("*** Dropped media packet; congested");
+ BT_HDR *p_frag;
+ while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL)
+ osi_free(p_frag);
}
/* build media fragments */
ssrc = avdt_scb_gen_ssrc(p_scb);
- /* get first packet */
- p_frag = (BT_HDR *)GKI_getfirst (&p_data->apiwrite.frag_q);
- /* posit on Adaptation Layer header */
- p_frag->len += AVDT_AL_HDR_SIZE + AVDT_MEDIA_HDR_SIZE;
- p_frag->offset -= AVDT_AL_HDR_SIZE + AVDT_MEDIA_HDR_SIZE;
- p = (UINT8 *)(p_frag + 1) + p_frag->offset;
-
- /* Adaptation Layer header */
- /* TSID, no-fragment bit and coding of length(in 2 length octets following) */
- *p++ = (p_scb->curr_cfg.mux_tsid_media << 3) | AVDT_ALH_LCODE_16BIT;
-
- /* length of all remaining transport packet */
- UINT16_TO_BE_STREAM(p, p_frag->layer_specific + AVDT_MEDIA_HDR_SIZE );
- /* media header */
- UINT8_TO_BE_STREAM(p, AVDT_MEDIA_OCTET1);
- UINT8_TO_BE_STREAM(p, p_data->apiwrite.m_pt);
- UINT16_TO_BE_STREAM(p, p_scb->media_seq);
- UINT32_TO_BE_STREAM(p, p_data->apiwrite.time_stamp);
- UINT32_TO_BE_STREAM(p, ssrc);
- p_scb->media_seq++;
-
- while ((p_frag = (BT_HDR *)GKI_getnext (p_frag)) != NULL) {
- /* posit on Adaptation Layer header */
- p_frag->len += AVDT_AL_HDR_SIZE;
- p_frag->offset -= AVDT_AL_HDR_SIZE;
- p = (UINT8 *)(p_frag + 1) + p_frag->offset;
- /* Adaptation Layer header */
- /* TSID, fragment bit and coding of length(in 2 length octets following) */
- *p++ = (p_scb->curr_cfg.mux_tsid_media << 3) | (AVDT_ALH_FRAG_MASK | AVDT_ALH_LCODE_16BIT);
-
- /* length of all remaining transport packet */
- UINT16_TO_BE_STREAM(p, p_frag->layer_specific );
- }
+ if (! fixed_queue_is_empty(p_scb->frag_q)) {
+ list_t *list = fixed_queue_get_list(p_scb->frag_q);
+ const list_node_t *node = list_begin(list);
+ if (node != list_end(list)) {
+ BT_HDR *p_frag = (BT_HDR *)list_node(node);
+ node = list_next(node);
+
+ /* get first packet */
+ /* posit on Adaptation Layer header */
+ p_frag->len += AVDT_AL_HDR_SIZE + AVDT_MEDIA_HDR_SIZE;
+ p_frag->offset -= AVDT_AL_HDR_SIZE + AVDT_MEDIA_HDR_SIZE;
+ p = (UINT8 *)(p_frag + 1) + p_frag->offset;
- /* store it */
- p_scb->frag_q = p_data->apiwrite.frag_q;
+ /* Adaptation Layer header */
+ /* TSID, no-fragment bit and coding of length (in 2 length octets
+ * following)
+ */
+ *p++ = (p_scb->curr_cfg.mux_tsid_media<<3) | AVDT_ALH_LCODE_16BIT;
+
+ /* length of all remaining transport packet */
+ UINT16_TO_BE_STREAM(p, p_frag->layer_specific + AVDT_MEDIA_HDR_SIZE );
+ /* media header */
+ UINT8_TO_BE_STREAM(p, AVDT_MEDIA_OCTET1);
+ UINT8_TO_BE_STREAM(p, p_data->apiwrite.m_pt);
+ UINT16_TO_BE_STREAM(p, p_scb->media_seq);
+ UINT32_TO_BE_STREAM(p, p_data->apiwrite.time_stamp);
+ UINT32_TO_BE_STREAM(p, ssrc);
+ p_scb->media_seq++;
+ }
+
+ for ( ; node != list_end(list); node = list_next(node)) {
+ BT_HDR *p_frag = (BT_HDR *)list_node(node);
+
+ /* posit on Adaptation Layer header */
+ p_frag->len += AVDT_AL_HDR_SIZE;
+ p_frag->offset -= AVDT_AL_HDR_SIZE;
+ p = (UINT8 *)(p_frag + 1) + p_frag->offset;
+ /* Adaptation Layer header */
+ /* TSID, fragment bit and coding of length (in 2 length octets
+ * following)
+ */
+ *p++ = (p_scb->curr_cfg.mux_tsid_media << 3) |
+ (AVDT_ALH_FRAG_MASK | AVDT_ALH_LCODE_16BIT);
+
+ /* length of all remaining transport packet */
+ UINT16_TO_BE_STREAM(p, p_frag->layer_specific);
+ }
+ }
}
#endif
void avdt_scb_hdl_write_req(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data)
{
#if AVDT_MULTIPLEXING == TRUE
- if (GKI_queue_is_empty(&p_data->apiwrite.frag_q))
+ if (fixed_queue_is_empty(p_scb->frag_q))
#endif
avdt_scb_hdl_write_req_no_frag(p_scb, p_data);
#if AVDT_MULTIPLEXING == TRUE
void avdt_scb_snd_stream_close(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data)
{
#if AVDT_MULTIPLEXING == TRUE
- BT_HDR *p_frag;
+ AVDT_TRACE_WARNING("%s c:%d, off:%d", __func__,
+ fixed_queue_length(p_scb->frag_q), p_scb->frag_off);
- AVDT_TRACE_WARNING("avdt_scb_snd_stream_close c:%d, off:%d\n",
- GKI_queue_length(&p_scb->frag_q), p_scb->frag_off);
/* clean fragments queue */
- while ((p_frag = (BT_HDR *)GKI_dequeue (&p_scb->frag_q)) != NULL) {
- GKI_freebuf(p_frag);
+ BT_HDR *p_frag;
+ while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
+ osi_free(p_frag);
}
p_scb->frag_off = 0;
#endif
if (p_scb->p_pkt) {
- GKI_freebuf(p_scb->p_pkt);
+ osi_free(p_scb->p_pkt);
p_scb->p_pkt = NULL;
}
-#if 0
- if (p_scb->cong) {
- p_scb->cong = FALSE;
- }
-
- /* p_scb->curr_cfg.mux_tsid_media == 0 */
-#endif
avdt_scb_snd_close_req(p_scb, p_data);
}
void avdt_scb_free_pkt(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data)
{
tAVDT_CTRL avdt_ctrl;
-#if AVDT_MULTIPLEXING == TRUE
- BT_HDR *p_frag;
-#endif
/* set error code and parameter */
avdt_ctrl.hdr.err_code = AVDT_ERR_BAD_STATE;
/* p_buf can be NULL in case using of fragments queue frag_q */
if (p_data->apiwrite.p_buf) {
- GKI_freebuf(p_data->apiwrite.p_buf);
+ osi_free(p_data->apiwrite.p_buf);
+ p_data->apiwrite.p_buf = NULL;
}
#if AVDT_MULTIPLEXING == TRUE
/* clean fragments queue */
- while ((p_frag = (BT_HDR *)GKI_dequeue (&p_data->apiwrite.frag_q)) != NULL) {
- GKI_freebuf(p_frag);
- }
+ BT_HDR *p_frag;
+ while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
+ osi_free(p_frag);
+ }
#endif
AVDT_TRACE_WARNING("Dropped media packet");
tAVDT_CCB *p_ccb;
UINT8 tcid;
UINT16 lcid;
-#if AVDT_MULTIPLEXING == TRUE
- BT_HDR *p_frag;
-#endif
UNUSED(p_data);
/* set error code and parameter */
}
if (p_scb->p_pkt != NULL) {
- GKI_freebuf(p_scb->p_pkt);
+ osi_free(p_scb->p_pkt);
p_scb->p_pkt = NULL;
AVDT_TRACE_DEBUG("Dropped stored media packet");
&avdt_ctrl);
}
#if AVDT_MULTIPLEXING == TRUE
- else if (!GKI_queue_is_empty (&p_scb->frag_q)) {
+ else if (!fixed_queue_is_empty(p_scb->frag_q)) {
AVDT_TRACE_DEBUG("Dropped fragments queue");
/* clean fragments queue */
- while ((p_frag = (BT_HDR *)GKI_dequeue (&p_scb->frag_q)) != NULL) {
- GKI_freebuf(p_frag);
- }
-
+ BT_HDR *p_frag;
+ while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
+ osi_free(p_frag);
+ }
p_scb->frag_off = 0;
/* we need to call callback to keep data flow going */
L2CA_FlushChannel(avdt_cb.ad.rt_tbl[avdt_ccb_to_idx(p_scb->p_ccb)][avdt_ad_type_to_tcid(AVDT_CHAN_MEDIA, p_scb)].lcid),
L2CAP_FLUSH_CHANS_GET);
#endif
- while ((p_pkt = (BT_HDR *)GKI_dequeue (&p_scb->frag_q)) != NULL) {
+ while ((p_pkt = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
sent = TRUE;
AVDT_TRACE_DEBUG("Send fragment len=%d\n", p_pkt->len);
/* fragments queue contains fragment to send */
AVDT_TRACE_DEBUG("res=%d left=%d\n", res, p_scb->frag_off);
if (p_scb->frag_off) {
- if (AVDT_AD_SUCCESS == res || GKI_queue_is_empty (&p_scb->frag_q)) {
+ if (AVDT_AD_SUCCESS == res || fixed_queue_is_empty(p_scb->frag_q)) {
/* all buffers were sent to L2CAP, compose more to queue */
- avdt_scb_queue_frags(p_scb, &p_scb->p_next_frag, &p_scb->frag_off, &p_scb->frag_q);
- if (!GKI_queue_is_empty (&p_scb->frag_q)) {
+ avdt_scb_queue_frags(p_scb, &p_scb->p_next_frag, &p_scb->frag_off, p_scb->frag_q);
+ if (!fixed_queue_is_empty(p_scb->frag_q)) {
data.llcong = p_scb->cong;
avdt_scb_event(p_scb, AVDT_SCB_TC_CONG_EVT, &data);
}
}
/* Send event AVDT_WRITE_CFM_EVT if it was last fragment */
- else if (sent && GKI_queue_is_empty (&p_scb->frag_q)) {
+ else if (sent && fixed_queue_is_empty(p_scb->frag_q)) {
(*p_scb->cs.p_ctrl_cback)(avdt_scb_to_hdl(p_scb), NULL, AVDT_WRITE_CFM_EVT, &avdt_ctrl);
}
}
** Returns Nothing.
**
*******************************************************************************/
-void avdt_scb_queue_frags(tAVDT_SCB *p_scb, UINT8 **pp_data, UINT32 *p_data_len, BUFFER_Q *pq)
+void avdt_scb_queue_frags(tAVDT_SCB *p_scb, UINT8 **pp_data, UINT32 *p_data_len, fixed_queue_t *pq)
{
UINT16 lcid;
UINT16 num_frag;
AVDT_TRACE_DEBUG("peer_mtu: %d, buf_size: %d num_frag=%d\n",
p_tbl->peer_mtu, buf_size, num_frag);
- if (buf_size > AVDT_DATA_POOL_SIZE) {
- buf_size = AVDT_DATA_POOL_SIZE;
+ if (buf_size > AVDT_DATA_BUF_SIZE) {
+ buf_size = AVDT_DATA_BUF_SIZE;
}
mtu_used = buf_size - BT_HDR_SIZE;
while (*p_data_len && num_frag) {
/* allocate buffer for fragment */
- if (NULL == (p_frag = (BT_HDR *)GKI_getbuf(buf_size))) {
+ if (NULL == (p_frag = (BT_HDR *)osi_malloc(buf_size))) {
AVDT_TRACE_WARNING("avdt_scb_queue_frags len=%d(out of GKI buffers)\n", *p_data_len);
break;
}
UINT16_TO_BE_STREAM(p, p_frag->layer_specific );
}
/* put fragment into queue */
- GKI_enqueue(pq, p_frag);
+ fixed_queue_enqueue(p_scb->frag_q, p_frag);
num_frag--;
}
}
#ifndef AVDT_INT_H
#define AVDT_INT_H
-#include "gki.h"
#include "avdt_api.h"
#include "avdtc_api.h"
#include "avdt_defs.h"
#include "l2c_api.h"
#include "btm_api.h"
+#include "fixed_queue.h"
#if (AVRC_INCLUDED == TRUE)
typedef struct {
BD_ADDR peer_addr; /* BD address of peer */
TIMER_LIST_ENT timer_entry; /* CCB timer list entry */
- BUFFER_Q cmd_q; /* Queue for outgoing command messages */
- BUFFER_Q rsp_q; /* Queue for outgoing response and reject messages */
+ fixed_queue_t *cmd_q; /* Queue for outgoing command messages */
+ fixed_queue_t *rsp_q; /* Queue for outgoing response and reject messages */
tAVDT_CTRL_CBACK *proc_cback; /* Procedure callback function */
tAVDT_CTRL_CBACK *p_conn_cback; /* Connection/disconnection callback function */
void *p_proc_data; /* Pointer to data storage for procedure */
BT_HDR *p_buf;
UINT32 time_stamp;
#if AVDT_MULTIPLEXING == TRUE
- BUFFER_Q frag_q; /* Queue for outgoing media fragments. p_buf should be 0 */
+ fixed_queue_t *frag_q; /* Queue for outgoing media fragments. p_buf should be 0 */
UINT8 *p_data;
UINT32 data_len;
#endif
BOOLEAN cong; /* Whether media transport channel is congested */
UINT8 close_code; /* Error code received in close response */
#if AVDT_MULTIPLEXING == TRUE
- BUFFER_Q frag_q; /* Queue for outgoing media fragments */
+ fixed_queue_t *frag_q; /* Queue for outgoing media fragments */
UINT32 frag_off; /* length of already received media fragments */
UINT32 frag_org_len; /* original length before fragmentation of receiving media packet */
UINT8 *p_next_frag; /* next fragment to send */
extern void avdt_scb_clr_pkt(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data);
extern void avdt_scb_tc_timer(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data);
extern void avdt_scb_clr_vars(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data);
-extern void avdt_scb_queue_frags(tAVDT_SCB *p_scb, UINT8 **pp_data, UINT32 *p_data_len, BUFFER_Q *pq);
+extern void avdt_scb_queue_frags(tAVDT_SCB *p_scb, UINT8 **pp_data, UINT32 *p_data_len, fixed_queue_t *pq);
/* msg function declarations */
extern BOOLEAN avdt_msg_send(tAVDT_CCB *p_ccb, BT_HDR *p_msg);
#include "bt_trace.h"
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "avrc_api.h"
#include "avrc_int.h"
+#include "allocator.h"
#if (defined(AVRC_INCLUDED) && AVRC_INCLUDED == TRUE)
const int offset = MAX(AVCT_MSG_OFFSET, p_pkt->offset);
const int pkt_len = MAX(rsp_pkt_len, p_pkt->len);
BT_HDR *p_pkt_copy =
- (BT_HDR *)GKI_getbuf((UINT16)(BT_HDR_SIZE + offset + pkt_len));
+ (BT_HDR *)osi_malloc((UINT16)(BT_HDR_SIZE + offset + pkt_len));
/* Copy the packet header, set the new offset, and copy the payload */
if (p_pkt_copy != NULL) {
if (p_pkt->len > AVRC_MAX_CTRL_DATA_LEN) {
int offset_len = MAX(AVCT_MSG_OFFSET, p_pkt->offset);
p_pkt_old = p_fcb->p_fmsg;
- p_pkt = (BT_HDR *)GKI_getbuf((UINT16)(AVRC_PACKET_LEN + offset_len + BT_HDR_SIZE));
+ p_pkt = (BT_HDR *)osi_malloc((UINT16)(AVRC_PACKET_LEN + offset_len + BT_HDR_SIZE));
if (p_pkt) {
p_pkt->len = AVRC_MAX_CTRL_DATA_LEN;
p_pkt->offset = AVCT_MSG_OFFSET;
if (abort_frag) {
if (p_fcb->p_fmsg) {
- GKI_freebuf(p_fcb->p_fmsg);
+ osi_free(p_fcb->p_fmsg);
+ p_fcb->p_fmsg = NULL;
}
- p_fcb->p_fmsg = NULL;
p_fcb->frag_enabled = FALSE;
}
}
BOOLEAN req_continue = FALSE;
BT_HDR *p_pkt_new = NULL;
UINT8 pkt_type;
- UINT16 buf_len;
tAVRC_RASM_CB *p_rcb;
tAVRC_NEXT_CMD avrc_cmd;
/* previous fragments need to be dropped, when received another new message */
p_rcb->rasm_offset = 0;
if (p_rcb->p_rmsg) {
- GKI_freebuf(p_rcb->p_rmsg);
+ osi_free(p_rcb->p_rmsg);
p_rcb->p_rmsg = NULL;
}
}
if (pkt_type == AVRC_PKT_START) {
/* Allocate buffer for re-assembly */
p_rcb->rasm_pdu = *p_data;
- if ((p_rcb->p_rmsg = (BT_HDR *)GKI_getbuf(GKI_MAX_BUF_SIZE)) != NULL) {
+ if ((p_rcb->p_rmsg = (BT_HDR *)osi_malloc(BT_DEFAULT_BUFFER_SIZE)) != NULL) {
/* Copy START packet to buffer for re-assembling fragments*/
memcpy(p_rcb->p_rmsg, p_pkt, sizeof(BT_HDR)); /* Copy bt hdr */
p_rcb->p_rmsg->offset = p_rcb->rasm_offset = 0;
/* Free original START packet, replace with pointer to reassembly buffer */
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
*pp_pkt = p_rcb->p_rmsg;
} else {
/* Unable to allocate buffer for fragmented avrc message. Reuse START
AVRC_TRACE_DEBUG ("Received a CONTINUE/END without no corresponding START \
(or previous fragmented response was dropped)");
drop_code = 5;
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
*pp_pkt = NULL;
} else {
/* get size of buffer holding assembled message */
- buf_len = GKI_get_buf_size (p_rcb->p_rmsg) - sizeof(BT_HDR);
+ /*
+ * NOTE: The buffer is allocated above at the beginning of the
+ * reassembly, and is always of size BT_DEFAULT_BUFFER_SIZE.
+ */
+ UINT16 buf_len = BT_DEFAULT_BUFFER_SIZE - sizeof(BT_HDR);
/* adjust offset and len of fragment for header byte */
p_pkt->offset += (AVRC_VENDOR_HDR_SIZE + AVRC_MIN_META_HDR_SIZE);
p_pkt->len -= (AVRC_VENDOR_HDR_SIZE + AVRC_MIN_META_HDR_SIZE);
p_pkt_new = NULL;
req_continue = TRUE;
}
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
*pp_pkt = p_pkt_new;
}
}
#if (BT_USE_TRACES == TRUE)
p_drop_msg = "dropped - too long AV/C cmd frame size";
#endif
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
return;
}
if (cr == AVCT_REJ) {
/* The peer thinks that this PID is no longer open - remove this handle */
/* */
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
AVCT_RemoveConn(handle);
return;
}
if (do_free) {
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
}
}
UINT8 *p_data;
assert(p_msg != NULL);
- assert(AVRC_CMD_POOL_SIZE > (AVRC_MIN_CMD_LEN + p_msg->pass_len));
+ assert(AVRC_CMD_BUF_SIZE > (AVRC_MIN_CMD_LEN+p_msg->pass_len));
- if ((p_cmd = (BT_HDR *) GKI_getpoolbuf(AVRC_CMD_POOL_ID)) != NULL) {
+ if ((p_cmd = (BT_HDR *) osi_malloc(AVRC_CMD_BUF_SIZE)) != NULL) {
p_cmd->offset = AVCT_MSG_OFFSET;
p_cmd->layer_specific = AVCT_DATA_CTRL;
p_data = (UINT8 *)(p_cmd + 1) + p_cmd->offset;
}
if (p_fcb->p_fmsg) {
- GKI_freebuf(p_fcb->p_fmsg);
+ osi_free(p_fcb->p_fmsg);
p_fcb->p_fmsg = NULL;
}
if ((cr == AVCT_RSP) && (chk_frag == TRUE)) {
if (p_pkt->len > AVRC_MAX_CTRL_DATA_LEN) {
int offset_len = MAX(AVCT_MSG_OFFSET, p_pkt->offset);
- p_pkt_new = (BT_HDR *)GKI_getbuf((UINT16)(AVRC_PACKET_LEN + offset_len
+ p_pkt_new = (BT_HDR *)osi_malloc((UINT16)(AVRC_PACKET_LEN + offset_len
+ BT_HDR_SIZE));
if (p_pkt_new && (p_start != NULL)) {
p_fcb->frag_enabled = TRUE;
p_pkt->len, len, p_fcb->p_fmsg->len );
} else {
AVRC_TRACE_ERROR ("AVRC_MsgReq no buffers for fragmentation" );
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
return AVRC_NO_RESOURCES;
}
}
******************************************************************************/
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "avrc_api.h"
#include "avrc_defs.h"
#include "avrc_int.h"
+#include "allocator.h"
#if (defined(AVRC_INCLUDED) && AVRC_INCLUDED == TRUE)
*******************************************************************************/
static BT_HDR *avrc_bld_init_cmd_buffer(tAVRC_COMMAND *p_cmd)
{
- UINT16 offset = 0, chnl = AVCT_DATA_CTRL, len = AVRC_META_CMD_POOL_SIZE;
- BT_HDR *p_pkt = NULL;
- UINT8 opcode;
-
- opcode = avrc_opcode_from_pdu(p_cmd->pdu);
+ UINT8 opcode = avrc_opcode_from_pdu(p_cmd->pdu);
AVRC_TRACE_API("avrc_bld_init_cmd_buffer: pdu=%x, opcode=%x", p_cmd->pdu, opcode);
- switch (opcode) {
+ UINT16 offset = 0;
+ switch (opcode)
+ {
case AVRC_OP_PASS_THRU:
offset = AVRC_MSG_PASS_THRU_OFFSET;
break;
}
/* allocate and initialize the buffer */
- p_pkt = (BT_HDR *)GKI_getbuf(len);
+ BT_HDR *p_pkt = (BT_HDR *)osi_malloc(AVRC_META_CMD_BUF_SIZE);
if (p_pkt) {
UINT8 *p_data, *p_start;
- p_pkt->layer_specific = chnl;
+ p_pkt->layer_specific = AVCT_DATA_CTRL;
p_pkt->event = opcode;
p_pkt->offset = offset;
p_data = (UINT8 *)(p_pkt + 1) + p_pkt->offset;
}
if (alloc && (status != AVRC_STS_NO_ERROR) ) {
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
*pp_pkt = NULL;
}
AVRC_TRACE_API("AVRC_BldCommand: returning %d", status);
******************************************************************************/
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "avrc_api.h"
#include "avrc_defs.h"
#include "avrc_int.h"
#include "bt_utils.h"
+#include "allocator.h"
#if (defined(AVRC_INCLUDED) && AVRC_INCLUDED == TRUE)
/* get the existing length, if any, and also the num attributes */
p_start = (UINT8 *)(p_pkt + 1) + p_pkt->offset;
p_data = p_len = p_start + 2; /* pdu + rsvd */
- len_left = GKI_get_buf_size(p_pkt) - BT_HDR_SIZE - p_pkt->offset - p_pkt->len;
+
+ /*
+ * NOTE: The buffer is allocated within avrc_bld_init_rsp_buffer(), and is
+ * always of size BT_DEFAULT_BUFFER_SIZE.
+ */
+ len_left = BT_DEFAULT_BUFFER_SIZE - BT_HDR_SIZE - p_pkt->offset - p_pkt->len;
BE_STREAM_TO_UINT16(len, p_data);
p_count = p_data;
*******************************************************************************/
static BT_HDR *avrc_bld_init_rsp_buffer(tAVRC_RESPONSE *p_rsp)
{
- UINT16 offset = AVRC_MSG_PASS_THRU_OFFSET, chnl = AVCT_DATA_CTRL, len = AVRC_META_CMD_POOL_SIZE;
- BT_HDR *p_pkt = NULL;
+ UINT16 offset = AVRC_MSG_PASS_THRU_OFFSET;
+ UINT16 chnl = AVCT_DATA_CTRL;
UINT8 opcode = avrc_opcode_from_pdu(p_rsp->pdu);
AVRC_TRACE_API("avrc_bld_init_rsp_buffer: pdu=%x, opcode=%x/%x", p_rsp->pdu, opcode,
case AVRC_OP_VENDOR:
offset = AVRC_MSG_VENDOR_OFFSET;
- if (p_rsp->pdu == AVRC_PDU_GET_ELEMENT_ATTR) {
- len = AVRC_BROWSE_POOL_SIZE;
- }
break;
}
/* allocate and initialize the buffer */
- p_pkt = (BT_HDR *)GKI_getbuf(len);
+ BT_HDR *p_pkt = (BT_HDR *)osi_malloc(BT_DEFAULT_BUFFER_SIZE);
if (p_pkt) {
UINT8 *p_data, *p_start;
}
if (alloc && (status != AVRC_STS_NO_ERROR) ) {
- GKI_freebuf(p_pkt);
+ osi_free(p_pkt);
*pp_pkt = NULL;
}
AVRC_TRACE_API("AVRC_BldResponse: returning %d", status);
// #include <assert.h>
#include "bt_target.h"
#include <string.h>
-#include "gki.h"
#include "avrc_api.h"
#include "avrc_int.h"
+#include "allocator.h"
#if (defined(AVRC_INCLUDED) && AVRC_INCLUDED == TRUE)
assert(p_msg != NULL);
#if AVRC_METADATA_INCLUDED == TRUE
- assert(AVRC_META_CMD_POOL_SIZE > (AVRC_MIN_CMD_LEN + p_msg->vendor_len));
- if ((p_cmd = (BT_HDR *) GKI_getpoolbuf(AVRC_META_CMD_POOL_ID)) != NULL)
+ assert(AVRC_META_CMD_BUF_SIZE > (AVRC_MIN_CMD_LEN + p_msg->vendor_len));
+ if ((p_cmd = (BT_HDR *) osi_malloc(AVRC_META_CMD_BUF_SIZE)) != NULL)
#else
- assert(AVRC_CMD_POOL_SIZE > (AVRC_MIN_CMD_LEN + p_msg->vendor_len));
- if ((p_cmd = (BT_HDR *) GKI_getpoolbuf(AVRC_CMD_POOL_ID)) != NULL)
+ assert(AVRC_CMD_BUF_SIZE > (AVRC_MIN_CMD_LEN + p_msg->vendor_len));
+ if ((p_cmd = (BT_HDR *) osi_malloc(AVRC_CMD_BUF_SIZE)) != NULL)
#endif
{
p_cmd->offset = AVCT_MSG_OFFSET;
BT_HDR *p_cmd;
UINT8 *p_data;
- if ((p_cmd = (BT_HDR *) GKI_getpoolbuf(AVRC_CMD_POOL_ID)) != NULL) {
+ if ((p_cmd = (BT_HDR *) osi_malloc(AVRC_CMD_BUF_SIZE)) != NULL) {
p_cmd->offset = AVCT_MSG_OFFSET;
p_data = (UINT8 *)(p_cmd + 1) + p_cmd->offset;
*p_data++ = AVRC_CMD_STATUS;
BT_HDR *p_cmd;
UINT8 *p_data;
- if ((p_cmd = (BT_HDR *) GKI_getpoolbuf(AVRC_CMD_POOL_ID)) != NULL) {
+ if ((p_cmd = (BT_HDR *) osi_malloc(AVRC_CMD_BUF_SIZE)) != NULL) {
p_cmd->offset = AVCT_MSG_OFFSET;
p_data = (UINT8 *)(p_cmd + 1) + p_cmd->offset;
*p_data++ = AVRC_CMD_STATUS;
******************************************************************************/
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "avrc_api.h"
#include "avrc_defs.h"
#include "avrc_int.h"
******************************************************************************/
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "avrc_api.h"
#include "avrc_defs.h"
#include "avrc_int.h"
******************************************************************************/
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "avrc_api.h"
#include "avrc_int.h"
******************************************************************************/
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "avrc_api.h"
#include "avrc_int.h"
#include "bt_types.h"
#include "bt_target.h"
#include "controller.h"
-#include "gki.h"
#include "hcimsgs.h"
#include "btu.h"
#include "btm_api.h"
BD_ADDR bda;
BTM_TRACE_DEBUG ("btm_acl_resubmit_page\n");
/* If there were other page request schedule can start the next one */
- if ((p_buf = (BT_HDR *)GKI_dequeue (&btm_cb.page_queue)) != NULL) {
+ if ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(btm_cb.page_queue)) != NULL) {
/* skip 3 (2 bytes opcode and 1 byte len) to get to the bd_addr
* for both create_conn and rmt_name */
pp = (UINT8 *)(p_buf + 1) + p_buf->offset + 3;
BT_HDR *p;
BTM_TRACE_DEBUG ("btm_acl_reset_paging\n");
/* If we sent reset we are definitely not paging any more */
- while ((p = (BT_HDR *)GKI_dequeue(&btm_cb.page_queue)) != NULL) {
- GKI_freebuf (p);
+ while ((p = (BT_HDR *)fixed_queue_try_dequeue(btm_cb.page_queue)) != NULL) {
+ osi_free (p);
}
btm_cb.paging = FALSE;
(bda[0] << 16) + (bda[1] << 8) + bda[2], (bda[3] << 16) + (bda[4] << 8) + bda[5]);
if (btm_cb.discing) {
btm_cb.paging = TRUE;
- GKI_enqueue (&btm_cb.page_queue, p);
+ fixed_queue_enqueue(btm_cb.page_queue, p);
} else {
if (!BTM_ACL_IS_CONNECTED (bda)) {
BTM_TRACE_DEBUG ("connecting_bda: %06x%06x\n",
btm_cb.connecting_bda[5]);
if (btm_cb.paging &&
memcmp (bda, btm_cb.connecting_bda, BD_ADDR_LEN) != 0) {
- GKI_enqueue (&btm_cb.page_queue, p);
+ fixed_queue_enqueue(btm_cb.page_queue, p);
} else {
p_dev_rec = btm_find_or_alloc_dev (bda);
memcpy (btm_cb.connecting_bda, p_dev_rec->bd_addr, BD_ADDR_LEN);
} else {
UINT8 *p_mac = (UINT8 *)signature;
UINT8 *p_buf, *pp;
- if ((p_buf = (UINT8 *)GKI_getbuf((UINT16)(len + 4))) != NULL) {
+ if ((p_buf = (UINT8 *)osi_malloc((UINT16)(len + 4))) != NULL) {
BTM_TRACE_DEBUG("%s-Start to generate Local CSRK", __func__);
pp = p_buf;
/* prepare plain text */
*p_mac, *(p_mac + 1), *(p_mac + 2), *(p_mac + 3));
BTM_TRACE_DEBUG("p_mac[4] = 0x%02x p_mac[5] = 0x%02x p_mac[6] = 0x%02x p_mac[7] = 0x%02x",
*(p_mac + 4), *(p_mac + 5), *(p_mac + 6), *(p_mac + 7));
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
return ret;
#include "hcimsgs.h"
#include "btu.h"
#include "btm_int.h"
-//#include "bt_utils.h"
+#include "allocator.h"
#include "hcidefs.h"
#include "btm_ble_api.h"
#include "controller.h"
if (cmn_ble_vsc_cb.max_filter > 0) {
btm_ble_adv_filt_cb.p_addr_filter_count =
- (tBTM_BLE_PF_COUNT *) GKI_getbuf( sizeof(tBTM_BLE_PF_COUNT) * cmn_ble_vsc_cb.max_filter);
+ (tBTM_BLE_PF_COUNT *) osi_malloc( sizeof(tBTM_BLE_PF_COUNT) * cmn_ble_vsc_cb.max_filter);
}
}
void btm_ble_adv_filter_cleanup(void)
{
if (btm_ble_adv_filt_cb.p_addr_filter_count) {
- GKI_freebuf (btm_ble_adv_filt_cb.p_addr_filter_count);
+ osi_free(btm_ble_adv_filt_cb.p_addr_filter_count);
+ btm_ble_adv_filt_cb.p_addr_filter_count = NULL;
}
}
STREAM_TO_UINT8(adv_data.adv_pkt_len, p);
if (adv_data.adv_pkt_len > 0) {
- adv_data.p_adv_pkt_data = GKI_getbuf(adv_data.adv_pkt_len);
+ adv_data.p_adv_pkt_data = osi_malloc(adv_data.adv_pkt_len);
memcpy(adv_data.p_adv_pkt_data, p, adv_data.adv_pkt_len);
}
STREAM_TO_UINT8(adv_data.scan_rsp_len, p);
if (adv_data.scan_rsp_len > 0) {
- adv_data.p_scan_rsp_data = GKI_getbuf(adv_data.scan_rsp_len);
+ adv_data.p_scan_rsp_data = osi_malloc(adv_data.scan_rsp_len);
memcpy(adv_data.p_scan_rsp_data, p, adv_data.scan_rsp_len);
}
}
len = ble_batchscan_cb.main_rep_q.data_len[index];
p_orig_data = ble_batchscan_cb.main_rep_q.p_data[index];
if (NULL != p_orig_data) {
- p_app_data = GKI_getbuf(len + data_len);
+ p_app_data = osi_malloc(len + data_len);
memcpy(p_app_data, p_orig_data, len);
memcpy(p_app_data + len, p_data, data_len);
- GKI_freebuf(p_orig_data);
+ osi_free(p_orig_data);
ble_batchscan_cb.main_rep_q.p_data[index] = p_app_data;
ble_batchscan_cb.main_rep_q.num_records[index] += num_records;
ble_batchscan_cb.main_rep_q.data_len[index] += data_len;
} else {
- p_app_data = GKI_getbuf(data_len);
+ p_app_data = osi_malloc(data_len);
memcpy(p_app_data, p_data, data_len);
ble_batchscan_cb.main_rep_q.p_data[index] = p_app_data;
ble_batchscan_cb.main_rep_q.num_records[index] = num_records;
for (index = 0; index < BTM_BLE_BATCH_REP_MAIN_Q_SIZE; index++) {
if (NULL != ble_batchscan_cb.main_rep_q.p_data[index]) {
- GKI_freebuf(ble_batchscan_cb.main_rep_q.p_data[index]);
+ osi_free(ble_batchscan_cb.main_rep_q.p_data[index]);
+ ble_batchscan_cb.main_rep_q.p_data[index] = NULL;
}
- ble_batchscan_cb.main_rep_q.p_data[index] = NULL;
}
memset(&ble_batchscan_cb, 0, sizeof(tBTM_BLE_BATCH_SCAN_CB));
{
if (!background_connections) {
background_connections = hash_map_new(background_connection_buckets,
- hash_function_bdaddr, NULL, allocator_calloc.free, bdaddr_equality_fn);
+ hash_function_bdaddr, NULL, osi_free_func, bdaddr_equality_fn);
assert(background_connections);
}
}
*******************************************************************************/
void btm_ble_enqueue_direct_conn_req(void *p_param)
{
- tBTM_BLE_CONN_REQ *p = (tBTM_BLE_CONN_REQ *)GKI_getbuf(sizeof(tBTM_BLE_CONN_REQ));
+ tBTM_BLE_CONN_REQ *p = (tBTM_BLE_CONN_REQ *)osi_malloc(sizeof(tBTM_BLE_CONN_REQ));
p->p_param = p_param;
- GKI_enqueue (&btm_cb.ble_ctr_cb.conn_pending_q, p);
+ fixed_queue_enqueue(btm_cb.ble_ctr_cb.conn_pending_q, p);
}
/*******************************************************************************
**
tBTM_BLE_CONN_REQ *p_req;
BOOLEAN rt = FALSE;
- if (!GKI_queue_is_empty(&btm_cb.ble_ctr_cb.conn_pending_q)) {
- p_req = (tBTM_BLE_CONN_REQ *)GKI_dequeue (&btm_cb.ble_ctr_cb.conn_pending_q);
-
+ p_req = (tBTM_BLE_CONN_REQ*)fixed_queue_try_dequeue(btm_cb.ble_ctr_cb.conn_pending_q);
+ if (p_req != NULL) {
rt = l2cble_init_direct_conn((tL2C_LCB *)(p_req->p_param));
- GKI_freebuf((void *)p_req);
+ osi_free((void *)p_req);
}
return rt;
btm_cb.cmn_ble_vsc_cb.values_read = FALSE;
p_cb->cur_states = 0;
+ p_cb->conn_pending_q = fixed_queue_new(SIZE_MAX);
+
p_cb->inq_var.adv_mode = BTM_BLE_ADV_DISABLE;
p_cb->inq_var.scan_type = BTM_BLE_SCAN_MODE_NONE;
p_cb->inq_var.adv_chnl_map = BTM_BLE_DEFAULT_ADV_CHNL_MAP;
#endif
}
+/*******************************************************************************
+**
+** Function btm_ble_free
+**
+** Description free the control block variable values.
+**
+** Returns void
+**
+*******************************************************************************/
+void btm_ble_free (void)
+{
+ tBTM_BLE_CB *p_cb = &btm_cb.ble_ctr_cb;
+
+ BTM_TRACE_DEBUG("%s", __func__);
+
+ fixed_queue_free(p_cb->conn_pending_q, osi_free_func);
+}
+
/*******************************************************************************
**
** Function btm_ble_topology_check
btm_multi_adv_idx_q.rear = -1;
if (btm_cb.cmn_ble_vsc_cb.adv_inst_max > 0) {
- btm_multi_adv_cb.p_adv_inst = GKI_getbuf( sizeof(tBTM_BLE_MULTI_ADV_INST) *
+ btm_multi_adv_cb.p_adv_inst = osi_malloc( sizeof(tBTM_BLE_MULTI_ADV_INST) *
(btm_cb.cmn_ble_vsc_cb.adv_inst_max));
memset(btm_multi_adv_cb.p_adv_inst, 0, sizeof(tBTM_BLE_MULTI_ADV_INST) *
(btm_cb.cmn_ble_vsc_cb.adv_inst_max));
- btm_multi_adv_cb.op_q.p_sub_code = GKI_getbuf( sizeof(UINT8) *
+ btm_multi_adv_cb.op_q.p_sub_code = osi_malloc( sizeof(UINT8) *
(btm_cb.cmn_ble_vsc_cb.adv_inst_max));
memset(btm_multi_adv_cb.op_q.p_sub_code, 0,
sizeof(UINT8) * (btm_cb.cmn_ble_vsc_cb.adv_inst_max));
- btm_multi_adv_cb.op_q.p_inst_id = GKI_getbuf( sizeof(UINT8) *
+ btm_multi_adv_cb.op_q.p_inst_id = osi_malloc( sizeof(UINT8) *
(btm_cb.cmn_ble_vsc_cb.adv_inst_max));
memset(btm_multi_adv_cb.op_q.p_inst_id, 0,
sizeof(UINT8) * (btm_cb.cmn_ble_vsc_cb.adv_inst_max));
void btm_ble_multi_adv_cleanup(void)
{
if (btm_multi_adv_cb.p_adv_inst) {
- GKI_freebuf(btm_multi_adv_cb.p_adv_inst);
+ osi_free(btm_multi_adv_cb.p_adv_inst);
+ btm_multi_adv_cb.p_adv_inst = NULL;
}
if (btm_multi_adv_cb.op_q.p_sub_code) {
- GKI_freebuf(btm_multi_adv_cb.op_q.p_sub_code);
+ osi_free(btm_multi_adv_cb.op_q.p_sub_code);
+ btm_multi_adv_cb.op_q.p_sub_code = NULL;
}
if (btm_multi_adv_cb.op_q.p_inst_id) {
- GKI_freebuf(btm_multi_adv_cb.op_q.p_inst_id);
+ osi_free(btm_multi_adv_cb.op_q.p_inst_id);
+ btm_multi_adv_cb.op_q.p_inst_id = NULL;
}
}
(max_irk_list_sz / 8 + 1) : (max_irk_list_sz / 8);
if (max_irk_list_sz > 0) {
- p_q->resolve_q_random_pseudo = (BD_ADDR *)GKI_getbuf(sizeof(BD_ADDR) * max_irk_list_sz);
- p_q->resolve_q_action = (UINT8 *)GKI_getbuf(max_irk_list_sz);
+ p_q->resolve_q_random_pseudo = (BD_ADDR *)osi_malloc(sizeof(BD_ADDR) * max_irk_list_sz);
+ p_q->resolve_q_action = (UINT8 *)osi_malloc(max_irk_list_sz);
/* RPA offloading feature */
if (btm_cb.ble_ctr_cb.irk_list_mask == NULL) {
- btm_cb.ble_ctr_cb.irk_list_mask = (UINT8 *)GKI_getbuf(irk_mask_size);
+ btm_cb.ble_ctr_cb.irk_list_mask = (UINT8 *)osi_malloc(irk_mask_size);
}
BTM_TRACE_DEBUG ("%s max_irk_list_sz = %d", __func__, max_irk_list_sz);
tBTM_BLE_RESOLVE_Q *p_q = &btm_cb.ble_ctr_cb.resolving_list_pend_q;
if (p_q->resolve_q_random_pseudo) {
- GKI_freebuf(p_q->resolve_q_random_pseudo);
+ osi_free(p_q->resolve_q_random_pseudo);
+ p_q->resolve_q_random_pseudo = NULL;
}
if (p_q->resolve_q_action) {
- GKI_freebuf(p_q->resolve_q_action);
+ osi_free(p_q->resolve_q_action);
+ p_q->resolve_q_action = NULL;
}
controller_get_interface()->set_ble_resolving_list_max_size(0);
if (btm_cb.ble_ctr_cb.irk_list_mask) {
- GKI_freebuf(btm_cb.ble_ctr_cb.irk_list_mask);
+ osi_free(btm_cb.ble_ctr_cb.irk_list_mask);
+ btm_cb.ble_ctr_cb.irk_list_mask = NULL;
}
- btm_cb.ble_ctr_cb.irk_list_mask = NULL;
}
#endif
#include "bt_types.h"
#include "controller.h"
-#include "gki.h"
#include "hcimsgs.h"
#include "btu.h"
#include "btm_api.h"
opcode, param_len);
/* Allocate a buffer to hold HCI command plus the callback function */
- if ((p_buf = GKI_getbuf((UINT16)(sizeof(BT_HDR) + sizeof (tBTM_CMPL_CB *) +
+ if ((p_buf = osi_malloc((UINT16)(sizeof(BT_HDR) + sizeof (tBTM_CMPL_CB *) +
param_len + HCIC_PREAMBLE_SIZE))) != NULL) {
/* Send the HCI command (opcode will be OR'd with HCI_GRP_VENDOR_SPECIFIC) */
btsnd_hcic_vendor_spec_cmd (p_buf, opcode, param_len, p_param_buf, (void *)p_cb);
#include <stdio.h>
#include <stddef.h>
+#include "alarm.h"
#include "bt_types.h"
#include "controller.h"
-#include "gki.h"
#include "hcimsgs.h"
#include "btu.h"
#include "btm_api.h"
tBTM_INQUIRY_VAR_ST *p_inq = &btm_cb.btm_inq_vars;
if (p_inq->p_bd_db) {
- GKI_freebuf(p_inq->p_bd_db);
+ osi_free(p_inq->p_bd_db);
p_inq->p_bd_db = NULL;
}
p_inq->num_bd_entries = 0;
btm_clr_inq_result_flt();
/* Allocate memory to hold bd_addrs responding */
- if ((p_inq->p_bd_db = (tINQ_BDADDR *)GKI_getbuf(GKI_MAX_BUF_SIZE)) != NULL) {
- p_inq->max_bd_entries = (UINT16)(GKI_MAX_BUF_SIZE / sizeof(tINQ_BDADDR));
- memset(p_inq->p_bd_db, 0, GKI_MAX_BUF_SIZE);
+ if ((p_inq->p_bd_db = (tINQ_BDADDR *)osi_calloc(BT_DEFAULT_BUFFER_SIZE)) != NULL) {
+ p_inq->max_bd_entries = (UINT16)(BT_DEFAULT_BUFFER_SIZE / sizeof(tINQ_BDADDR));
/* BTM_TRACE_DEBUG("btm_initiate_inquiry: memory allocated for %d bdaddrs",
p_inq->max_bd_entries); */
}
p_cur->dev_class[2] = dc[2];
p_cur->clock_offset = clock_offset | BTM_CLOCK_OFFSET_VALID;
- p_i->time_of_resp = GKI_get_os_tick_count();
+ p_i->time_of_resp = osi_time_get_os_boottime_ms();
if (p_i->inq_count != p_inq->inq_counter) {
p_inq->inq_cmpl_info.num_resp++; /* A new response was found */
num_resp = (btm_cb.btm_inq_vars.inq_cmpl_info.num_resp < BTM_INQ_DB_SIZE) ?
btm_cb.btm_inq_vars.inq_cmpl_info.num_resp : BTM_INQ_DB_SIZE;
- if ((p_tmp = (tINQ_DB_ENT *)GKI_getbuf(sizeof(tINQ_DB_ENT))) != NULL) {
+ if ((p_tmp = (tINQ_DB_ENT *)osi_malloc(sizeof(tINQ_DB_ENT))) != NULL) {
size = sizeof(tINQ_DB_ENT);
for (xx = 0; xx < num_resp - 1; xx++, p_ent++) {
for (yy = xx + 1, p_next = p_ent + 1; yy < num_resp; yy++, p_next++) {
}
}
- GKI_freebuf(p_tmp);
+ osi_free(p_tmp);
}
}
btsnd_hcic_write_ext_inquiry_response (p_buff, BTM_EIR_DEFAULT_FEC_REQUIRED);
return BTM_SUCCESS;
} else {
- GKI_freebuf(p_buff);
+ osi_free(p_buff);
return BTM_MODE_UNSUPPORTED;
}
}
#include "bt_target.h"
#include <string.h>
#include "btm_int.h"
+#include "allocator.h"
/* Global BTM control block structure
*/
/* All fields are cleared; nonzero fields are reinitialized in appropriate function */
memset(&btm_cb, 0, sizeof(tBTM_CB));
+ btm_cb.page_queue = fixed_queue_new(SIZE_MAX);
+ btm_cb.sec_pending_q = fixed_queue_new(SIZE_MAX);
+
#if defined(BTM_INITIAL_TRACE_LEVEL)
btm_cb.trace_level = BTM_INITIAL_TRACE_LEVEL;
#else
}
+/*******************************************************************************
+**
+** Function btm_free
+**
+** Description This function is called at btu core free the fixed queue
+**
+** Returns void
+**
+*******************************************************************************/
+void btm_free(void)
+{
+ fixed_queue_free(btm_cb.page_queue, osi_free_func);
+ fixed_queue_free(btm_cb.sec_pending_q, osi_free_func);
+}
#include <stddef.h>
#include "bt_types.h"
-#include "gki.h"
#include "hcimsgs.h"
#include "btu.h"
#include "btm_api.h"
/* Scan state-paging, inquiry, and trying to connect */
/* Check for paging */
- if (btm_cb.is_paging || GKI_queue_length(&btm_cb.page_queue) > 0 ||
+ if (btm_cb.is_paging || (!fixed_queue_is_empty(btm_cb.page_queue)) ||
BTM_BL_PAGING_STARTED == btm_cb.busy_level) {
BTM_TRACE_DEBUG("btm_pm_device_in_scan_state- paging");
return TRUE;
#include <string.h>
#include "bt_types.h"
#include "bt_target.h"
-#include "gki.h"
#include "bt_types.h"
#include "hcimsgs.h"
#include "btu.h"
if (sco_inx < BTM_MAX_SCO_LINKS) {
p = &btm_cb.sco_cb.sco_db[sco_inx];
- while (p->xmit_data_q.p_first) {
- if ((p_buf = (BT_HDR *)GKI_dequeue (&p->xmit_data_q)) != NULL) {
- GKI_freebuf (p_buf);
- }
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p->xmit_data_q)) != NULL) {
+ osi_free(p_buf);
}
}
#else
*******************************************************************************/
void btm_sco_check_send_pkts (UINT16 sco_inx)
{
- BT_HDR *p_buf;
tSCO_CB *p_cb = &btm_cb.sco_cb;
tSCO_CONN *p_ccb = &p_cb->sco_db[sco_inx];
/* If there is data to send, send it now */
- while (p_ccb->xmit_data_q.p_first != NULL) {
- p_buf = NULL;
-
+ BT_HDR *p_buf;
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_data_q)) != NULL)
+ {
#if BTM_SCO_HCI_DEBUG
- BTM_TRACE_DEBUG ("btm: [%d] buf in xmit_data_q", p_ccb->xmit_data_q.count );
+ BTM_TRACE_DEBUG("btm: [%d] buf in xmit_data_q",
+ fixed_queue_length(p_ccb->xmit_data_q) + 1);
#endif
- p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_data_q);
- HCI_SCO_DATA_TO_LOWER (p_buf);
+ HCI_SCO_DATA_TO_LOWER(p_buf);
}
}
#endif /* BTM_SCO_HCI_INCLUDED == TRUE */
if (!btm_cb.sco_cb.p_data_cb )
/* if no data callback registered, just free the buffer */
{
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
} else {
(*btm_cb.sco_cb.p_data_cb)(sco_inx, p_msg, (tBTM_SCO_DATA_FLAG) pkt_status);
}
} else { /* no mapping handle SCO connection is active, free the buffer */
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
}
#else
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
#endif
}
/* Ensure we have enough space in the buffer for the SCO and HCI headers */
if (p_buf->offset < HCI_SCO_PREAMBLE_SIZE) {
BTM_TRACE_ERROR ("BTM SCO - cannot send buffer, offset: %d", p_buf->offset);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
status = BTM_ILLEGAL_VALUE;
} else { /* write HCI header */
/* Step back 3 bytes to add the headers */
UINT8_TO_STREAM (p, (UINT8)p_buf->len);
p_buf->len += HCI_SCO_PREAMBLE_SIZE;
- GKI_enqueue (&p_ccb->xmit_data_q, p_buf);
+ fixed_queue_enqueue(p_ccb->xmit_data_q, p_buf);
btm_sco_check_send_pkts (sco_inx);
}
} else {
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
BTM_TRACE_WARNING ("BTM_WriteScoData, invalid sco index: %d at state [%d]",
sco_inx, btm_cb.sco_cb.sco_db[sco_inx].state);
#include "btu.h"
#include "btm_int.h"
#include "l2c_int.h"
-//#include "bt_utils.h"
-//#include "osi/include/log.h"
+#include "fixed_queue.h"
+#include "alarm.h"
#if (BT_USE_TRACES == TRUE && BT_TRACE_VERBOSE == FALSE)
/* needed for sprintf() */
void btm_sec_check_pending_reqs (void)
{
tBTM_SEC_QUEUE_ENTRY *p_e;
- BUFFER_Q bq;
+ fixed_queue_t *bq;
if (btm_cb.pairing_state == BTM_PAIR_STATE_IDLE) {
/* First, resubmit L2CAP requests */
/* Now, re-submit anything in the mux queue */
bq = btm_cb.sec_pending_q;
- GKI_init_q (&btm_cb.sec_pending_q);
+ btm_cb.sec_pending_q = fixed_queue_new(SIZE_MAX);
- while ((p_e = (tBTM_SEC_QUEUE_ENTRY *)GKI_dequeue (&bq)) != NULL) {
+ while ((p_e = (tBTM_SEC_QUEUE_ENTRY *)fixed_queue_try_dequeue(bq)) != NULL) {
/* Check that the ACL is still up before starting security procedures */
if (btm_bda_to_acl(p_e->bd_addr, p_e->transport) != NULL) {
if (p_e->psm != 0) {
}
}
- GKI_freebuf (p_e);
+ osi_free (p_e);
}
+ fixed_queue_free(bq, NULL);
}
}
tBTM_SEC_DEV_REC *p_dev_rec;
if (!btm_cb.collision_start_time) {
- btm_cb.collision_start_time = GKI_get_os_tick_count();
+ btm_cb.collision_start_time = osi_time_get_os_boottime_ms();
}
- if ((GKI_get_os_tick_count() - btm_cb.collision_start_time) < btm_cb.max_collision_delay) {
- if (handle == BTM_SEC_INVALID_HANDLE) {
+ if ((osi_time_get_os_boottime_ms() - btm_cb.collision_start_time) < btm_cb.max_collision_delay)
+ {
+ if (handle == BTM_SEC_INVALID_HANDLE)
+ {
if ((p_dev_rec = btm_sec_find_dev_by_sec_state (BTM_SEC_STATE_AUTHENTICATING)) == NULL) {
p_dev_rec = btm_sec_find_dev_by_sec_state (BTM_SEC_STATE_ENCRYPTING);
}
UINT32 mx_proto_id, UINT32 mx_chan_id,
tBTM_SEC_CALLBACK *p_callback, void *p_ref_data)
{
- tBTM_SEC_QUEUE_ENTRY *p_e = (tBTM_SEC_QUEUE_ENTRY *)GKI_getbuf (sizeof(tBTM_SEC_QUEUE_ENTRY));
+ tBTM_SEC_QUEUE_ENTRY *p_e = (tBTM_SEC_QUEUE_ENTRY *)osi_malloc (sizeof(tBTM_SEC_QUEUE_ENTRY));
if (p_e) {
p_e->psm = psm;
BTM_TRACE_EVENT ("%s() PSM: 0x%04x Is_Orig: %u mx_proto_id: %u mx_chan_id: %u\n",
__func__, psm, is_orig, mx_proto_id, mx_chan_id);
- GKI_enqueue (&btm_cb.sec_pending_q, p_e);
+ fixed_queue_enqueue(btm_cb.sec_pending_q, p_e);
return (TRUE);
}
tBTM_SEC_CALLBACK *p_callback, void *p_ref_data)
{
tBTM_SEC_QUEUE_ENTRY *p_e;
- p_e = (tBTM_SEC_QUEUE_ENTRY *)GKI_getbuf(sizeof(tBTM_SEC_QUEUE_ENTRY) + 1);
+ p_e = (tBTM_SEC_QUEUE_ENTRY *)osi_malloc(sizeof(tBTM_SEC_QUEUE_ENTRY) + 1);
if (p_e) {
p_e->psm = 0; /* if PSM 0, encryption request */
*(UINT8 *)p_e->p_ref_data = *(UINT8 *)(p_ref_data);
p_e->transport = transport;
memcpy(p_e->bd_addr, bd_addr, BD_ADDR_LEN);
- GKI_enqueue(&btm_cb.sec_pending_q, p_e);
+ fixed_queue_enqueue(btm_cb.sec_pending_q, p_e);
return TRUE;
}
static void btm_sec_check_pending_enc_req (tBTM_SEC_DEV_REC *p_dev_rec, tBT_TRANSPORT transport,
UINT8 encr_enable)
{
- tBTM_SEC_QUEUE_ENTRY *p_e;
- BUFFER_Q *bq = &btm_cb.sec_pending_q;
- UINT8 res = encr_enable ? BTM_SUCCESS : BTM_ERR_PROCESSING;
+ if (fixed_queue_is_empty(btm_cb.sec_pending_q))
+ return;
- p_e = (tBTM_SEC_QUEUE_ENTRY *)GKI_getfirst(bq);
+ UINT8 res = encr_enable ? BTM_SUCCESS : BTM_ERR_PROCESSING;
+ list_t *list = fixed_queue_get_list(btm_cb.sec_pending_q);
+ for (const list_node_t *node = list_begin(list); node != list_end(list); ) {
+ tBTM_SEC_QUEUE_ENTRY *p_e = (tBTM_SEC_QUEUE_ENTRY *)list_node(node);
+ node = list_next(node);
- while (p_e != NULL) {
if (memcmp(p_e->bd_addr, p_dev_rec->bd_addr, BD_ADDR_LEN) == 0 && p_e->psm == 0
#if BLE_INCLUDED == TRUE
- && p_e->transport == transport
+ && p_e->transport == transport
#endif
) {
#if BLE_INCLUDED == TRUE
#if BLE_INCLUDED == TRUE
|| (sec_act == BTM_BLE_SEC_ENCRYPT || sec_act == BTM_BLE_SEC_ENCRYPT_NO_MITM)
|| (sec_act == BTM_BLE_SEC_ENCRYPT_MITM && p_dev_rec->sec_flags
- & BTM_SEC_LE_AUTHENTICATED)
+ & BTM_SEC_LE_AUTHENTICATED)
#endif
) {
- (*p_e->p_callback) (p_dev_rec->bd_addr, transport, p_e->p_ref_data, res);
- GKI_remove_from_queue(bq, (void *)p_e);
+ if (p_e->p_callback) {
+ (*p_e->p_callback) (p_dev_rec->bd_addr, transport, p_e->p_ref_data, res);
+ }
+
+ fixed_queue_try_remove_from_queue(btm_cb.sec_pending_q, (void *)p_e);
+ osi_free(p_e);
}
}
- p_e = (tBTM_SEC_QUEUE_ENTRY *) GKI_getnext ((void *)p_e);
}
}
#endif ///SMP_INCLUDED == TRUE
//#include <stdlib.h>
#include <string.h>
-#include "gki.h"
#include "bt_types.h"
#include "hcimsgs.h"
#include "btu.h"
hack->response->len - 5, // 3 for the command complete headers, 2 for the event headers
hack->context);
- GKI_freebuf(hack->response);
+ osi_free(hack->response);
osi_free(event);
}
stream,
hack->context);
- GKI_freebuf(hack->command);
+ osi_free(hack->command);
osi_free(event);
}
#include "hash_map.h"
#include "hash_functions.h"
#include "thread.h"
+#include "mutex.h"
#include "l2c_int.h"
#include "dyn_mem.h"
#endif
hash_map_t *btu_general_alarm_hash_map;
-pthread_mutex_t btu_general_alarm_lock;
+osi_mutex_t btu_general_alarm_lock;
static const size_t BTU_GENERAL_ALARM_HASH_MAP_SIZE = 34;
hash_map_t *btu_oneshot_alarm_hash_map;
-pthread_mutex_t btu_oneshot_alarm_lock;
+osi_mutex_t btu_oneshot_alarm_lock;
static const size_t BTU_ONESHOT_ALARM_HASH_MAP_SIZE = 34;
hash_map_t *btu_l2cap_alarm_hash_map;
-pthread_mutex_t btu_l2cap_alarm_lock;
+osi_mutex_t btu_l2cap_alarm_lock;
static const size_t BTU_L2CAP_ALARM_HASH_MAP_SIZE = 34;
//thread_t *bt_workqueue_thread;
#if (defined(GATTS_INCLUDED) && GATTS_INCLUDED == true)
gatt_free();
#endif
+ btm_ble_free();
#endif
+ btm_free();
}
/*****************************************************************************
goto error_exit;
}
- pthread_mutex_init(&btu_general_alarm_lock, NULL);
+ osi_mutex_new(&btu_general_alarm_lock);
btu_oneshot_alarm_hash_map = hash_map_new(BTU_ONESHOT_ALARM_HASH_MAP_SIZE,
hash_function_pointer, NULL, (data_free_fn)osi_alarm_free, NULL);
goto error_exit;
}
- pthread_mutex_init(&btu_oneshot_alarm_lock, NULL);
+ osi_mutex_new(&btu_oneshot_alarm_lock);
btu_l2cap_alarm_hash_map = hash_map_new(BTU_L2CAP_ALARM_HASH_MAP_SIZE,
hash_function_pointer, NULL, (data_free_fn)osi_alarm_free, NULL);
goto error_exit;
}
- pthread_mutex_init(&btu_l2cap_alarm_lock, NULL);
+ osi_mutex_new(&btu_l2cap_alarm_lock);
xBtuQueue = xQueueCreate(BTU_QUEUE_NUM, sizeof(BtTaskEvt_t));
xTaskCreatePinnedToCore(btu_task_thread_handler, BTU_TASK_NAME, BTU_TASK_STACK_SIZE, NULL, BTU_TASK_PRIO, &xBtuTaskHandle, 0);
btu_task_shut_down();
hash_map_free(btu_general_alarm_hash_map);
- pthread_mutex_destroy(&btu_general_alarm_lock);
+ osi_mutex_free(&btu_general_alarm_lock);
hash_map_free(btu_oneshot_alarm_hash_map);
- pthread_mutex_destroy(&btu_oneshot_alarm_lock);
+ osi_mutex_free(&btu_oneshot_alarm_lock);
hash_map_free(btu_l2cap_alarm_hash_map);
- pthread_mutex_destroy(&btu_l2cap_alarm_lock);
+ osi_mutex_free(&btu_l2cap_alarm_lock);
vTaskDelete(xBtuTaskHandle);
vQueueDelete(xBtuQueue);
#include "bt_trace.h"
#include "bt_types.h"
#include "allocator.h"
+#include "mutex.h"
#include "btm_api.h"
#include "btm_int.h"
#include "btu.h"
-#include "gki.h"
#include "hash_map.h"
#include "hcimsgs.h"
#include "l2c_int.h"
#endif
extern hash_map_t *btu_general_alarm_hash_map;
-extern pthread_mutex_t btu_general_alarm_lock;
+extern osi_mutex_t btu_general_alarm_lock;
// Oneshot timer queue.
extern hash_map_t *btu_oneshot_alarm_hash_map;
-extern pthread_mutex_t btu_oneshot_alarm_lock;
+extern osi_mutex_t btu_oneshot_alarm_lock;
// l2cap timer queue.
extern hash_map_t *btu_l2cap_alarm_hash_map;
-extern pthread_mutex_t btu_l2cap_alarm_lock;
+extern osi_mutex_t btu_l2cap_alarm_lock;
extern xTaskHandle xBtuTaskHandle;
extern xQueueHandle xBtuQueue;
case BT_EVT_TO_BTU_HCI_EVT:
btu_hcif_process_event ((UINT8)(p_msg->event & BT_SUB_EVT_MASK), p_msg);
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
#if (defined(HCILP_INCLUDED) && HCILP_INCLUDED == TRUE)
/* If host receives events which it doesn't response to, */
}
if (handled == FALSE) {
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
}
break;
(*p_tle->p_cback)(p_tle);
} else if (p_tle->event) {
BT_HDR *p_msg;
- if ((p_msg = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL) {
+ if ((p_msg = (BT_HDR *) osi_malloc(sizeof(BT_HDR))) != NULL) {
p_msg->event = p_tle->event;
p_msg->layer_specific = 0;
- //GKI_freebuf(p_msg);
+ //osi_free(p_msg);
bta_sys_sendmsg(p_msg);
}
}
assert(p_tle != NULL);
// Get the alarm for the timer list entry.
- pthread_mutex_lock(&btu_general_alarm_lock);
+ osi_mutex_lock(&btu_general_alarm_lock, OSI_MUTEX_MAX_TIMEOUT);
if (!hash_map_has_key(btu_general_alarm_hash_map, p_tle)) {
alarm = osi_alarm_new("btu_gen", btu_general_alarm_cb, (void *)p_tle, 0);
hash_map_set(btu_general_alarm_hash_map, p_tle, alarm);
}
- pthread_mutex_unlock(&btu_general_alarm_lock);
+ osi_mutex_unlock(&btu_general_alarm_lock);
alarm = hash_map_get(btu_general_alarm_hash_map, p_tle);
if (alarm == NULL) {
assert(p_tle != NULL);
// Get the alarm for the timer list entry.
- pthread_mutex_lock(&btu_l2cap_alarm_lock);
+ osi_mutex_lock(&btu_l2cap_alarm_lock, OSI_MUTEX_MAX_TIMEOUT);
if (!hash_map_has_key(btu_l2cap_alarm_hash_map, p_tle)) {
alarm = osi_alarm_new("btu_l2cap", btu_l2cap_alarm_cb, (void *)p_tle, 0);
hash_map_set(btu_l2cap_alarm_hash_map, p_tle, (void *)alarm);
}
- pthread_mutex_unlock(&btu_l2cap_alarm_lock);
+ osi_mutex_unlock(&btu_l2cap_alarm_lock);
alarm = hash_map_get(btu_l2cap_alarm_hash_map, p_tle);
if (alarm == NULL) {
assert(p_tle != NULL);
// Get the alarm for the timer list entry.
- pthread_mutex_lock(&btu_oneshot_alarm_lock);
+ osi_mutex_lock(&btu_oneshot_alarm_lock, OSI_MUTEX_MAX_TIMEOUT);
if (!hash_map_has_key(btu_oneshot_alarm_hash_map, p_tle)) {
alarm = osi_alarm_new("btu_oneshot", btu_oneshot_alarm_cb, (void *)p_tle, 0);
hash_map_set(btu_oneshot_alarm_hash_map, p_tle, alarm);
}
- pthread_mutex_unlock(&btu_oneshot_alarm_lock);
+ osi_mutex_unlock(&btu_oneshot_alarm_lock);
alarm = hash_map_get(btu_oneshot_alarm_hash_map, p_tle);
if (alarm == NULL) {
#if (defined BLE_INCLUDED && BLE_INCLUDED == TRUE && GATTS_INCLUDED == TRUE)
#include "bt_defs.h"
+#include "allocator.h"
#include <string.h>
#include "gap_int.h"
#include "gap_api.h"
{
tGAP_BLE_REQ *p_q;
- while ((p_q = (tGAP_BLE_REQ *)GKI_dequeue(&p_clcb->pending_req_q)) != NULL) {
+ while ((p_q = (tGAP_BLE_REQ *)fixed_queue_try_dequeue(p_clcb->pending_req_q)) != NULL) {
/* send callback to all pending requests if being removed*/
if (p_q->p_cback != NULL) {
(*p_q->p_cback)(FALSE, p_clcb->bda, 0, NULL);
}
- GKI_freebuf (p_q);
+ osi_free (p_q);
}
memset(p_clcb, 0, sizeof(tGAP_CLCB));
*******************************************************************************/
BOOLEAN gap_ble_enqueue_request (tGAP_CLCB *p_clcb, UINT16 uuid, tGAP_BLE_CMPL_CBACK *p_cback)
{
- tGAP_BLE_REQ *p_q = (tGAP_BLE_REQ *)GKI_getbuf(sizeof(tGAP_BLE_REQ));
+ tGAP_BLE_REQ *p_q = (tGAP_BLE_REQ *)osi_malloc(sizeof(tGAP_BLE_REQ));
if (p_q != NULL) {
p_q->p_cback = p_cback;
p_q->uuid = uuid;
- GKI_enqueue(&p_clcb->pending_req_q, p_q);
+ fixed_queue_enqueue(p_clcb->pending_req_q, p_q);
return TRUE;
}
*******************************************************************************/
BOOLEAN gap_ble_dequeue_request (tGAP_CLCB *p_clcb, UINT16 *p_uuid, tGAP_BLE_CMPL_CBACK **p_cback)
{
- tGAP_BLE_REQ *p_q = (tGAP_BLE_REQ *)GKI_dequeue(&p_clcb->pending_req_q);;
+ tGAP_BLE_REQ *p_q = (tGAP_BLE_REQ *)fixed_queue_try_dequeue(p_clcb->pending_req_q);
if (p_q != NULL) {
*p_cback = p_q->p_cback;
*p_uuid = p_q->uuid;
- GKI_freebuf((void *)p_q);
+ osi_free((void *)p_q);
return TRUE;
}
#include "l2cdefs.h"
#include "l2c_int.h"
#include <string.h>
+#include "mutex.h"
+#include "allocator.h"
+
#if GAP_CONN_INCLUDED == TRUE
#include "btm_int.h"
if ( p_ccb->cfg.fcr_present ) {
if (ertm_info == NULL) {
p_ccb->ertm_info.preferred_mode = p_ccb->cfg.fcr.mode;
- p_ccb->ertm_info.user_rx_pool_id = GAP_DATA_POOL_ID;
- p_ccb->ertm_info.user_tx_pool_id = GAP_DATA_POOL_ID;
- p_ccb->ertm_info.fcr_rx_pool_id = L2CAP_DEFAULT_ERM_POOL_ID;
- p_ccb->ertm_info.fcr_tx_pool_id = L2CAP_DEFAULT_ERM_POOL_ID;
+ p_ccb->ertm_info.user_rx_buf_size = GAP_DATA_BUF_SIZE;
+ p_ccb->ertm_info.user_tx_buf_size = GAP_DATA_BUF_SIZE;
+ p_ccb->ertm_info.fcr_rx_buf_size = L2CAP_INVALID_ERM_BUF_SIZE;
+ p_ccb->ertm_info.fcr_tx_buf_size = L2CAP_INVALID_ERM_BUF_SIZE;
} else {
p_ccb->ertm_info = *ertm_info;
}
UINT16 GAP_ConnReadData (UINT16 gap_handle, UINT8 *p_data, UINT16 max_len, UINT16 *p_len)
{
tGAP_CCB *p_ccb = gap_find_ccb_by_handle (gap_handle);
- BT_HDR *p_buf;
UINT16 copy_len;
if (!p_ccb) {
*p_len = 0;
- p_buf = (BT_HDR *)GKI_getfirst (&p_ccb->rx_queue);
- if (!p_buf) {
+ if (fixed_queue_is_empty(p_ccb->rx_queue)) {
return (GAP_NO_DATA_AVAIL);
- }
+ }
- GKI_disable();
+ osi_mutex_global_lock();
+
+ while (max_len) {
+ BT_HDR *p_buf = fixed_queue_try_peek_first(p_ccb->rx_queue);
+ if (p_buf == NULL) {
+ break;
+ }
- while (max_len && p_buf) {
- copy_len = (p_buf->len > max_len) ? max_len : p_buf->len;
+ copy_len = (p_buf->len > max_len)?max_len:p_buf->len;
max_len -= copy_len;
*p_len += copy_len;
if (p_data) {
p_buf->offset += copy_len;
p_buf->len -= copy_len;
break;
- } else {
- if (max_len) {
- p_buf = (BT_HDR *)GKI_getnext (p_buf);
- }
- GKI_freebuf (GKI_dequeue (&p_ccb->rx_queue));
}
+ osi_free(fixed_queue_try_dequeue(p_ccb->rx_queue));
}
p_ccb->rx_queue_size -= *p_len;
- GKI_enable();
+ osi_mutex_global_unlock();
GAP_TRACE_EVENT ("GAP_ConnReadData - rx_queue_size left=%d, *p_len=%d",
p_ccb->rx_queue_size, *p_len);
return (GAP_ERR_BAD_HANDLE);
}
- p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->rx_queue);
+ p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->rx_queue);
if (p_buf) {
*pp_buf = p_buf;
tGAP_CCB *p_ccb = gap_find_ccb_by_handle (gap_handle);
if (!p_ccb) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (GAP_ERR_BAD_HANDLE);
}
if (p_ccb->con_state != GAP_CCB_STATE_CONNECTED) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (GAP_ERR_BAD_STATE);
}
if (p_buf->offset < L2CAP_MIN_OFFSET) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (GAP_ERR_BUF_OFFSET);
}
- GKI_enqueue (&p_ccb->tx_queue, p_buf);
+ fixed_queue_enqueue(p_ccb->tx_queue, p_buf);
if (p_ccb->is_congested) {
return (BT_PASS);
#if (GAP_CONN_POST_EVT_INCLUDED == TRUE)
gap_send_event (gap_handle);
#else
- while ((p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->tx_queue)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->tx_queue)) != NULL) {
UINT8 status = L2CA_DATA_WRITE (p_ccb->connection_id, p_buf);
if (status == L2CAP_DW_CONGESTED) {
while (max_len) {
if (p_ccb->cfg.fcr.mode == L2CAP_FCR_ERTM_MODE) {
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (p_ccb->ertm_info.user_tx_pool_id)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(L2CAP_FCR_ERTM_BUF_SIZE)) == NULL) {
return (GAP_ERR_CONGESTED);
}
} else {
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (GAP_DATA_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(GAP_DATA_BUF_SIZE)) == NULL) {
return (GAP_ERR_CONGESTED);
}
}
GAP_TRACE_EVENT ("GAP_WriteData %d bytes", p_buf->len);
- GKI_enqueue (&p_ccb->tx_queue, p_buf);
+ fixed_queue_enqueue(p_ccb->tx_queue, p_buf);
}
if (p_ccb->is_congested) {
#if (GAP_CONN_POST_EVT_INCLUDED == TRUE)
gap_send_event (gap_handle);
#else
- while ((p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->tx_queue)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->tx_queue)) != NULL) {
UINT8 status = L2CA_DATA_WRITE (p_ccb->connection_id, p_buf);
if (status == L2CAP_DW_CONGESTED) {
return (BT_PASS);
}
-
/*******************************************************************************
**
** Function GAP_ConnReconfig
/* Remember the remote MTU size */
if (p_ccb->cfg.fcr.mode == L2CAP_FCR_ERTM_MODE) {
- local_mtu_size = GKI_get_pool_bufsize (p_ccb->ertm_info.user_tx_pool_id)
+ local_mtu_size = p_ccb->ertm_info.user_tx_buf_size
- sizeof(BT_HDR) - L2CAP_MIN_OFFSET;
} else {
local_mtu_size = L2CAP_MTU_SIZE;
/* Find CCB based on CID */
if ((p_ccb = gap_find_ccb_by_cid (l2cap_cid)) == NULL) {
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
return;
}
if (p_ccb->con_state == GAP_CCB_STATE_CONNECTED) {
- GKI_enqueue (&p_ccb->rx_queue, p_msg);
+ fixed_queue_enqueue(p_ccb->rx_queue, p_msg);
p_ccb->rx_queue_size += p_msg->len;
/*
p_ccb->p_callback (p_ccb->gap_handle, GAP_EVT_CONN_DATA_AVAIL);
} else {
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
}
}
p_ccb->p_callback (p_ccb->gap_handle, event);
if (!is_congested) {
- while ((p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->tx_queue)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->tx_queue)) != NULL) {
status = L2CA_DATA_WRITE (p_ccb->connection_id, p_buf);
if (status == L2CAP_DW_CONGESTED) {
for (xx = 0, p_ccb = gap_cb.conn.ccb_pool; xx < GAP_MAX_CONNECTIONS; xx++, p_ccb++) {
if (p_ccb->con_state == GAP_CCB_STATE_IDLE) {
memset (p_ccb, 0, sizeof (tGAP_CCB));
+ p_ccb->tx_queue = fixed_queue_new(SIZE_MAX);
+ p_ccb->rx_queue = fixed_queue_new(SIZE_MAX);
p_ccb->gap_handle = xx;
p_ccb->rem_mtu_size = L2CAP_MTU_SIZE;
/* Drop any buffers we may be holding */
p_ccb->rx_queue_size = 0;
- while (p_ccb->rx_queue._p_first) {
- GKI_freebuf (GKI_dequeue (&p_ccb->rx_queue));
- }
+ while (!fixed_queue_is_empty(p_ccb->rx_queue)) {
+ osi_free(fixed_queue_try_dequeue(p_ccb->rx_queue));
+ }
+ fixed_queue_free(p_ccb->rx_queue, NULL);
+ p_ccb->rx_queue = NULL;
- while (p_ccb->tx_queue._p_first) {
- GKI_freebuf (GKI_dequeue (&p_ccb->tx_queue));
- }
+ while (!fixed_queue_is_empty(p_ccb->tx_queue)) {
+ osi_free(fixed_queue_try_dequeue(p_ccb->tx_queue));
+ }
+ fixed_queue_free(p_ccb->tx_queue, NULL);
+ p_ccb->tx_queue = NULL;
p_ccb->con_state = GAP_CCB_STATE_IDLE;
{
BT_HDR *p_msg;
- if ((p_msg = (BT_HDR *)GKI_getbuf(BT_HDR_SIZE)) != NULL) {
+ if ((p_msg = (BT_HDR *)osi_malloc(BT_HDR_SIZE)) != NULL) {
p_msg->event = BT_EVT_TO_GAP_MSG;
p_msg->len = 0;
p_msg->offset = 0;
}
}
-/*******************************************************************************
-**
-** Function gap_proc_btu_event
-**
-** Description Event handler for BT_EVT_TO_GAP_MSG event from BTU task
-**
-** Returns None
-**
-*******************************************************************************/
-void gap_proc_btu_event(BT_HDR *p_msg)
-{
- tGAP_CCB *p_ccb = gap_find_ccb_by_handle (p_msg->layer_specific);
- UINT8 status;
- BT_HDR *p_buf;
-
- if (!p_ccb) {
- return;
- }
-
- if (p_ccb->con_state != GAP_CCB_STATE_CONNECTED) {
- return;
- }
-
- if (p_ccb->is_congested) {
- return;
- }
-
- /* Send the buffer through L2CAP */
-
- while ((p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->tx_queue)) != NULL) {
- status = L2CA_DATA_WRITE (p_ccb->connection_id, p_buf);
-
- if (status == L2CAP_DW_CONGESTED) {
- p_ccb->is_congested = TRUE;
- break;
- } else if (status != L2CAP_DW_SUCCESS) {
- break;
- }
- }
-
-}
#endif /* (GAP_CONN_POST_EVT_INCLUDED == TRUE) */
#endif /* GAP_CONN_INCLUDED */
#define GAP_INT_H
#include "bt_target.h"
+#include "fixed_queue.h"
#include "gap_api.h"
-#include "gki.h"
#include "gatt_api.h"
#define GAP_MAX_BLOCKS 2 /* Concurrent GAP commands pending at a time*/
/* Define the Generic Access Profile control structure */
UINT16 rem_mtu_size;
BOOLEAN is_congested;
- BUFFER_Q tx_queue; /* Queue of buffers waiting to be sent */
- BUFFER_Q rx_queue; /* Queue of buffers waiting to be read */
+ fixed_queue_t *tx_queue; /* Queue of buffers waiting to be sent */
+ fixed_queue_t *rx_queue; /* Queue of buffers waiting to be read */
UINT32 rx_queue_size; /* Total data count in rx_queue */
UINT16 cl_op_uuid;
BOOLEAN in_use;
BOOLEAN connected;
- BUFFER_Q pending_req_q;
+ fixed_queue_t *pending_req_q;
} tGAP_CLCB;
******************************************************************************/
#include "bt_target.h"
+#include "allocator.h"
#if BLE_INCLUDED == TRUE
BT_HDR *p_buf = NULL;
UINT8 *p;
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + GATT_HDR_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + GATT_HDR_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, op_code);
BT_HDR *p_buf = NULL;
UINT8 *p;
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf(GATT_BUF_POOL_ID)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(GATT_DATA_BUF_SIZE)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
p_buf->offset = L2CAP_MIN_OFFSET;
BT_HDR *p_buf = NULL;
UINT8 *p;
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + L2CAP_MIN_OFFSET + 5)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + L2CAP_MIN_OFFSET + 5)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, GATT_RSP_ERROR);
BT_HDR *p_buf = NULL;
UINT8 *p;
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + 8 + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + 8 + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
/* Describe the built message location and size */
p_buf->offset = L2CAP_MIN_OFFSET;
UINT8 *p;
UINT16 len = p_value_type->value_len;
- if ((p_buf = (BT_HDR *)GKI_getbuf((UINT16)(sizeof(BT_HDR) + payload_size + L2CAP_MIN_OFFSET))) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc((UINT16)(sizeof(BT_HDR) + payload_size + L2CAP_MIN_OFFSET))) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
p_buf->offset = L2CAP_MIN_OFFSET;
BT_HDR *p_buf = NULL;
UINT8 *p, i = 0;
- if ((p_buf = (BT_HDR *)GKI_getbuf((UINT16)(sizeof(BT_HDR) + num_handle * 2 + 1 + L2CAP_MIN_OFFSET))) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc((UINT16)(sizeof(BT_HDR) + num_handle * 2 + 1 + L2CAP_MIN_OFFSET))) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
p_buf->offset = L2CAP_MIN_OFFSET;
BT_HDR *p_buf = NULL;
UINT8 *p;
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + 5 + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + 5 + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
p_buf->offset = L2CAP_MIN_OFFSET;
BT_HDR *p_buf = NULL;
UINT8 *p;
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + 1 + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + 1 + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
p_buf->offset = L2CAP_MIN_OFFSET;
BT_HDR *p_buf = NULL;
UINT8 *p, *pp, pair_len, *p_pair_len;
- if ((p_buf = (BT_HDR *)GKI_getbuf((UINT16)(sizeof(BT_HDR) + payload_size + L2CAP_MIN_OFFSET))) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc((UINT16)(sizeof(BT_HDR) + payload_size + L2CAP_MIN_OFFSET))) != NULL) {
p = pp = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, op_code);
#if defined(BTA_GATT_INCLUDED) && (BTA_GATT_INCLUDED == TRUE)
-#include "gki.h"
-//#include <stdio.h>
+#include "allocator.h"
#include <string.h>
#include "gatt_api.h"
#include "gatt_int.h"
}
if (p_buf) {
- GKI_freebuf (GKI_remove_from_queue (&gatt_cb.pending_new_srv_start_q, p_buf));
+ osi_free(fixed_queue_try_remove_from_queue(gatt_cb.pending_new_srv_start_q, p_buf));
}
return (0);
}
&p_list->asgn_range.svc_uuid,
p_list->asgn_range.svc_inst)) != NULL) {
GATT_TRACE_DEBUG ("Delete a new service changed item - the service has not yet started");
- GKI_freebuf (GKI_remove_from_queue (&gatt_cb.pending_new_srv_start_q, p_buf));
+ osi_free(fixed_queue_try_remove_from_queue(gatt_cb.pending_new_srv_start_q, p_buf));
} else {
gatt_proc_srv_chg();
}
gatt_proc_srv_chg();
/* remove the new service element after the srv changed processing is completed*/
- GKI_freebuf (GKI_remove_from_queue (&gatt_cb.pending_new_srv_start_q, p_buf));
+ osi_free(fixed_queue_try_remove_from_queue(gatt_cb.pending_new_srv_start_q, p_buf));
}
return GATT_SUCCESS;
}
case GATT_READ_MULTIPLE:
p_clcb->s_handle = 0;
/* copy multiple handles in CB */
- p_read_multi = (tGATT_READ_MULTI *)GKI_getbuf(sizeof(tGATT_READ_MULTI));
+ p_read_multi = (tGATT_READ_MULTI *)osi_malloc(sizeof(tGATT_READ_MULTI));
p_clcb->p_attr_buf = (UINT8 *)p_read_multi;
memcpy (p_read_multi, &p_read->read_multiple, sizeof(tGATT_READ_MULTI));
case GATT_READ_BY_HANDLE:
p_clcb->op_subtype = type;
p_clcb->auth_req = p_write->auth_req;
- if (( p_clcb->p_attr_buf = (UINT8 *)GKI_getbuf((UINT16)sizeof(tGATT_VALUE))) != NULL) {
+ if (( p_clcb->p_attr_buf = (UINT8 *)osi_malloc((UINT16)sizeof(tGATT_VALUE))) != NULL) {
memcpy(p_clcb->p_attr_buf, (void *)p_write, sizeof(tGATT_VALUE));
p = (tGATT_VALUE *)p_clcb->p_attr_buf;
*
******************************************************************************/
#include "bt_target.h"
-//#include "bt_utils.h"
+#include "allocator.h"
#if BLE_INCLUDED == TRUE
#include <string.h>
-#include "gki.h"
#include "gatt_int.h"
#include "gatt_api.h"
/* do not need to mark channel securoty activity for data signing */
gatt_set_sec_act(p_clcb->p_tcb, GATT_SEC_OK);
- p_data = (UINT8 *)GKI_getbuf((UINT16)(p_attr->len + 3)); /* 3 = 2 byte handle + opcode */
+ p_data = (UINT8 *)osi_malloc((UINT16)(p_attr->len + 3)); /* 3 = 2 byte handle + opcode */
if (p_data != NULL) {
p = p_data;
gatt_end_operation(p_clcb, GATT_INTERNAL_ERROR, NULL);
}
- GKI_freebuf(p_data);
+ osi_free(p_data);
}
return status;
*******************************************************************************/
void gatt_sec_check_complete(BOOLEAN sec_check_ok, tGATT_CLCB *p_clcb, UINT8 sec_act)
{
- if (p_clcb && p_clcb->p_tcb && GKI_queue_is_empty(&p_clcb->p_tcb->pending_enc_clcb)) {
+ if (p_clcb && p_clcb->p_tcb &&
+ fixed_queue_is_empty(p_clcb->p_tcb->pending_enc_clcb)) {
gatt_set_sec_act(p_clcb->p_tcb, GATT_SEC_NONE);
}
#if (GATTC_INCLUDED == TRUE)
tGATT_TCB *p_tcb;
UINT8 sec_flag;
BOOLEAN status = FALSE;
- tGATT_PENDING_ENC_CLCB *p_buf;
- UINT16 count;
UNUSED(p_ref_data);
GATT_TRACE_DEBUG("gatt_enc_cmpl_cback");
if (gatt_get_sec_act(p_tcb) == GATT_SEC_ENC_PENDING) {
return;
}
-
- if ((p_buf = (tGATT_PENDING_ENC_CLCB *)GKI_dequeue (&p_tcb->pending_enc_clcb)) != NULL) {
+ tGATT_PENDING_ENC_CLCB *p_buf =
+ (tGATT_PENDING_ENC_CLCB *)fixed_queue_try_dequeue(p_tcb->pending_enc_clcb);
+ if (p_buf != NULL) {
if (result == BTM_SUCCESS) {
if (gatt_get_sec_act(p_tcb) == GATT_SEC_ENCRYPT_MITM ) {
BTM_GetSecurityFlagsByTransport(bd_addr, &sec_flag, transport);
}
}
gatt_sec_check_complete(status , p_buf->p_clcb, p_tcb->sec_act);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
/* start all other pending operation in queue */
- count = GKI_queue_length(&p_tcb->pending_enc_clcb);
- for (; count > 0; count --) {
- if ((p_buf = (tGATT_PENDING_ENC_CLCB *)GKI_dequeue (&p_tcb->pending_enc_clcb)) != NULL) {
+ for (size_t count = fixed_queue_length(p_tcb->pending_enc_clcb);
+ count > 0; count--) {
+ p_buf = (tGATT_PENDING_ENC_CLCB *)fixed_queue_try_dequeue(p_tcb->pending_enc_clcb);
+ if (p_buf != NULL) {
gatt_security_check_start(p_buf->p_clcb);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
} else {
break;
}
void gatt_notify_enc_cmpl(BD_ADDR bd_addr)
{
tGATT_TCB *p_tcb;
- tGATT_PENDING_ENC_CLCB *p_buf;
- UINT16 count;
UINT8 i = 0;
if ((p_tcb = gatt_find_tcb_by_addr(bd_addr, BT_TRANSPORT_LE)) != NULL) {
if (gatt_get_sec_act(p_tcb) == GATT_SEC_ENC_PENDING) {
gatt_set_sec_act(p_tcb, GATT_SEC_NONE);
- count = GKI_queue_length(&p_tcb->pending_enc_clcb);
-
- for (; count > 0; count --) {
- if ((p_buf = (tGATT_PENDING_ENC_CLCB *)GKI_dequeue (&p_tcb->pending_enc_clcb)) != NULL) {
+ size_t count = fixed_queue_length(p_tcb->pending_enc_clcb);
+ for (; count > 0; count--) {
+ tGATT_PENDING_ENC_CLCB *p_buf =
+ (tGATT_PENDING_ENC_CLCB *)fixed_queue_try_dequeue(p_tcb->pending_enc_clcb);
+ if (p_buf != NULL) {
gatt_security_check_start(p_buf->p_clcb);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
} else {
break;
}
#if BLE_INCLUDED == TRUE && GATTC_INCLUDED == TRUE
#include <string.h>
-//#include "bt_utils.h"
-#include "gki.h"
+#include "allocator.h"
#include "gatt_int.h"
#include "l2c_int.h"
if ( p_clcb->counter == (p_clcb->p_tcb->payload_size - 4)) {
p_clcb->op_subtype = GATT_READ_BY_HANDLE;
if (!p_clcb->p_attr_buf) {
- p_clcb->p_attr_buf = (UINT8 *)GKI_getbuf(GATT_MAX_ATTR_LEN);
+ p_clcb->p_attr_buf = (UINT8 *)osi_malloc(GATT_MAX_ATTR_LEN);
}
if (p_clcb->p_attr_buf && p_clcb->counter <= GATT_MAX_ATTR_LEN) {
memcpy(p_clcb->p_attr_buf, p, p_clcb->counter);
/* allocate GKI buffer holding up long attribute value */
if (!p_clcb->p_attr_buf) {
- p_clcb->p_attr_buf = (UINT8 *)GKI_getbuf(GATT_MAX_ATTR_LEN);
+ p_clcb->p_attr_buf = (UINT8 *)osi_malloc(GATT_MAX_ATTR_LEN);
}
/* copy attrobute value into cb buffer */
#if BLE_INCLUDED == TRUE && GATTS_INCLUDED == TRUE
#include "bt_trace.h"
-//#include "bt_utils.h"
+#include "allocator.h"
//#include <stdio.h>
#include <string.h>
BOOLEAN gatts_init_service_db (tGATT_SVC_DB *p_db, tBT_UUID *p_service, BOOLEAN is_pri,
UINT16 s_hdl, UINT16 num_handle)
{
- GKI_init_q(&p_db->svc_buffer);
+ if (p_db->svc_buffer == NULL) { //in case already alloc
+ p_db->svc_buffer = fixed_queue_new(SIZE_MAX);
+ }
if (!allocate_svc_db_buf(p_db)) {
GATT_TRACE_ERROR("gatts_init_service_db failed, no resources\n");
GATT_TRACE_DEBUG("attribute handle = %x\n", p_char_val->handle);
p_char_val->p_value->attr_val.attr_len = attr_val->attr_len;
p_char_val->p_value->attr_val.attr_max_len = attr_val->attr_max_len;
- p_char_val->p_value->attr_val.attr_val = GKI_getbuf(attr_val->attr_max_len);
+ p_char_val->p_value->attr_val.attr_val = osi_malloc(attr_val->attr_max_len);
if (p_char_val->p_value->attr_val.attr_val == NULL) {
deallocate_attr_in_db(p_db, p_char_decl);
deallocate_attr_in_db(p_db, p_char_val);
p_char_dscptr->p_value->attr_val.attr_len = attr_val->attr_len;
p_char_dscptr->p_value->attr_val.attr_max_len = attr_val->attr_max_len;
if (attr_val->attr_max_len != 0) {
- p_char_dscptr->p_value->attr_val.attr_val = GKI_getbuf(attr_val->attr_max_len);
+ p_char_dscptr->p_value->attr_val.attr_val = osi_malloc(attr_val->attr_max_len);
if (p_char_dscptr->p_value->attr_val.attr_val == NULL) {
deallocate_attr_in_db(p_db, p_char_dscptr);
GATT_TRACE_WARNING("Warning in %s, line=%d, insufficient resource to allocate for descriptor value\n", __func__, __LINE__);
GATT_TRACE_DEBUG("allocate_svc_db_buf allocating extra buffer");
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf(GATT_DB_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_calloc(GATT_DB_BUF_SIZE)) == NULL) {
GATT_TRACE_ERROR("allocate_svc_db_buf failed, no resources");
return FALSE;
}
- memset(p_buf, 0, GKI_get_buf_size(p_buf));
p_db->p_free_mem = (UINT8 *) p_buf;
- p_db->mem_free = GKI_get_buf_size(p_buf);
+ p_db->mem_free = GATT_DB_BUF_SIZE;
- GKI_enqueue(&p_db->svc_buffer, p_buf);
+ fixed_queue_enqueue(p_db->svc_buffer, p_buf);
return TRUE;
#if BLE_INCLUDED == TRUE
-#include "gki.h"
#include "gatt_int.h"
#include "l2c_api.h"
#include "btm_int.h"
#include "btm_ble_int.h"
-//#include "bt_utils.h"
+#include "allocator.h"
/* Configuration flags. */
#define GATT_L2C_CFG_IND_DONE (1<<0)
gatt_cb.trace_level = BT_TRACE_LEVEL_NONE; /* No traces */
#endif
gatt_cb.def_mtu_size = GATT_DEF_BLE_MTU_SIZE;
- GKI_init_q (&gatt_cb.sign_op_queue);
- GKI_init_q (&gatt_cb.srv_chg_clt_q);
- GKI_init_q (&gatt_cb.pending_new_srv_start_q);
+ gatt_cb.sign_op_queue = fixed_queue_new(SIZE_MAX);
+ gatt_cb.srv_chg_clt_q = fixed_queue_new(SIZE_MAX);
+ gatt_cb.pending_new_srv_start_q = fixed_queue_new(SIZE_MAX);
/* First, register fixed L2CAP channel for ATT over BLE */
fixed_reg.fixed_chnl_opts.mode = L2CAP_FCR_BASIC_MODE;
fixed_reg.fixed_chnl_opts.max_transmit = 0xFF;
{
int i;
GATT_TRACE_DEBUG("gatt_free()");
+ fixed_queue_free(gatt_cb.sign_op_queue, NULL);
+ gatt_cb.sign_op_queue = NULL;
+ fixed_queue_free(gatt_cb.srv_chg_clt_q, NULL);
+ gatt_cb.srv_chg_clt_q = NULL;
+ fixed_queue_free(gatt_cb.pending_new_srv_start_q, NULL);
+ gatt_cb.pending_new_srv_start_q = NULL;
+
+ for (i = 0; i < GATT_MAX_PHY_CHANNEL; i++)
+ {
+ fixed_queue_free(gatt_cb.tcb[i].pending_enc_clcb, NULL);
+ gatt_cb.tcb[i].pending_enc_clcb = NULL;
+
+ fixed_queue_free(gatt_cb.tcb[i].pending_ind_q, NULL);
+ gatt_cb.tcb[i].pending_ind_q = NULL;
+
+ fixed_queue_free(gatt_cb.tcb[i].sr_cmd.multi_rsp_q, NULL);
+ gatt_cb.tcb[i].sr_cmd.multi_rsp_q = NULL;
+ }
+
for (i = 0; i < GATT_MAX_SR_PROFILES; i++) {
gatt_free_hdl_buffer(&gatt_cb.hdl_list[i]);
}
if ((p_tcb = gatt_allocate_tcb_by_bdaddr(bd_addr, transport)) != NULL) {
if (!gatt_connect(bd_addr, p_tcb, transport)) {
GATT_TRACE_ERROR("gatt_connect failed");
+ fixed_queue_free(p_tcb->pending_enc_clcb, NULL);
+ fixed_queue_free(p_tcb->pending_ind_q, NULL);
memset(p_tcb, 0, sizeof(tGATT_TCB));
} else {
ret = TRUE;
gatt_get_ch_state(p_tcb) >= GATT_CH_OPEN) {
gatt_data_process(p_tcb, p_buf);
} else {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
if (p_tcb != NULL) {
GATT_TRACE_WARNING ("ATT - Ignored L2CAP data while in state: %d\n",
/* process the data */
gatt_data_process(p_tcb, p_buf);
} else { /* prevent buffer leak */
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
GATT_TRACE_ERROR ("invalid data length, ignore\n");
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
/*******************************************************************************
******************************************************************************/
#include "bt_target.h"
-//#include "bt_utils.h"
+#include "allocator.h"
#if BLE_INCLUDED == TRUE && GATTS_INCLUDED == TRUE
#include <string.h>
}
buf_len = (UINT16)(sizeof(BT_HDR) + p_tcb->payload_size + L2CAP_MIN_OFFSET);
- if ((p_msg = (BT_HDR *)GKI_getbuf(buf_len)) == NULL) {
+ if ((p_msg = (BT_HDR *)osi_malloc(buf_len)) == NULL) {
return GATT_NO_RESOURCES;
}
if (p_tcb->sr_cmd.p_rsp_msg) {
GATT_TRACE_ERROR("%s free msg %p", __func__, p_tcb->sr_cmd.p_rsp_msg);
- GKI_freebuf (p_tcb->sr_cmd.p_rsp_msg);
+ osi_free(p_tcb->sr_cmd.p_rsp_msg);
+ p_tcb->sr_cmd.p_rsp_msg = NULL;
}
- while (GKI_getfirst(&p_tcb->sr_cmd.multi_rsp_q)) {
- GKI_freebuf (GKI_dequeue (&p_tcb->sr_cmd.multi_rsp_q));
+ if (p_tcb->sr_cmd.multi_rsp_q) {
+ while (!fixed_queue_is_empty(p_tcb->sr_cmd.multi_rsp_q)) {
+ osi_free(fixed_queue_try_dequeue(p_tcb->sr_cmd.multi_rsp_q));
+ }
+ fixed_queue_free(p_tcb->sr_cmd.multi_rsp_q, NULL);
}
+
memset( &p_tcb->sr_cmd, 0, sizeof(tGATT_SR_CMD));
}
static BOOLEAN process_read_multi_rsp (tGATT_SR_CMD *p_cmd, tGATT_STATUS status,
tGATTS_RSP *p_msg, UINT16 mtu)
{
- tGATTS_RSP *p_rsp = NULL;
UINT16 ii, total_len, len;
- BT_HDR *p_buf = (BT_HDR *)GKI_getbuf((UINT16)sizeof(tGATTS_RSP));
UINT8 *p;
BOOLEAN is_overflow = FALSE;
GATT_TRACE_DEBUG ("process_read_multi_rsp status=%d mtu=%d", status, mtu);
+ if (p_cmd->multi_rsp_q == NULL) {
+ p_cmd->multi_rsp_q = fixed_queue_new(SIZE_MAX);
+ }
+
+ /* Enqueue the response */
+ BT_HDR *p_buf = (BT_HDR *)osi_malloc(sizeof(tGATTS_RSP));
if (p_buf == NULL) {
p_cmd->status = GATT_INSUF_RESOURCE;
return FALSE;
}
-
- /* Enqueue the response */
memcpy((void *)p_buf, (const void *)p_msg, sizeof(tGATTS_RSP));
- GKI_enqueue (&p_cmd->multi_rsp_q, p_buf);
+
+ fixed_queue_enqueue(p_cmd->multi_rsp_q, p_buf);
p_cmd->status = status;
if (status == GATT_SUCCESS) {
GATT_TRACE_DEBUG ("Multi read count=%d num_hdls=%d",
- GKI_queue_length(&p_cmd->multi_rsp_q), p_cmd->multi_req.num_handles);
+ fixed_queue_length(p_cmd->multi_rsp_q),
+ p_cmd->multi_req.num_handles);
/* Wait till we get all the responses */
- if (GKI_queue_length(&p_cmd->multi_rsp_q) == p_cmd->multi_req.num_handles) {
+ if (fixed_queue_length(p_cmd->multi_rsp_q) == p_cmd->multi_req.num_handles) {
len = sizeof(BT_HDR) + L2CAP_MIN_OFFSET + mtu;
- if ((p_buf = (BT_HDR *)GKI_getbuf(len)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_calloc(len)) == NULL) {
p_cmd->status = GATT_INSUF_RESOURCE;
return (TRUE);
}
- memset(p_buf, 0, len);
p_buf->offset = L2CAP_MIN_OFFSET;
p = (UINT8 *)(p_buf + 1) + p_buf->offset;
p_buf->len = 1;
/* Now walk through the buffers puting the data into the response in order */
+ list_t *list = NULL;
+ const list_node_t *node = NULL;
+ if (! fixed_queue_is_empty(p_cmd->multi_rsp_q)) {
+ list = fixed_queue_get_list(p_cmd->multi_rsp_q);
+ }
for (ii = 0; ii < p_cmd->multi_req.num_handles; ii++) {
- if (ii == 0) {
- p_rsp = (tGATTS_RSP *)GKI_getfirst (&p_cmd->multi_rsp_q);
- } else {
- p_rsp = (tGATTS_RSP *)GKI_getnext (p_rsp);
+ tGATTS_RSP *p_rsp = NULL;
+ if (list != NULL) {
+ if (ii == 0) {
+ node = list_begin(list);
+ } else {
+ node = list_next(node);
+ }
+ if (node != list_end(list)) {
+ p_rsp = (tGATTS_RSP *)list_node(node);
+ }
}
if (p_rsp != NULL) {
if (p_buf->len == 0) {
GATT_TRACE_ERROR("process_read_multi_rsp - nothing found!!");
p_cmd->status = GATT_NOT_FOUND;
- GKI_freebuf (p_buf);
- GATT_TRACE_DEBUG(" GKI_freebuf (p_buf)");
+ osi_free (p_buf);
+ GATT_TRACE_DEBUG(" osi_free (p_buf)");
} else if (p_cmd->p_rsp_msg != NULL) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
} else {
p_cmd->p_rsp_msg = p_buf;
}
flag &= GATT_PREP_WRITE_EXEC;
prepare_record = &(p_tcb->prepare_write_record);
- queue_num = prepare_record->queue._count;
-
+ queue_num = fixed_queue_length(prepare_record->queue);
//if received prepare_write packets include stack_rsp and app_rsp,
//stack respond to execute_write only when stack_rsp handle has invalid_offset
}
//dequeue prepare write data
- while(GKI_getfirst(&(prepare_record->queue))) {
- queue_data = GKI_dequeue(&(prepare_record->queue));
+ while(fixed_queue_try_peek_first(prepare_record->queue)) {
queue_data = fixed_queue_try_dequeue(prepare_record->queue);
if (is_prepare_write_valid){
if((queue_data->p_attr->p_value != NULL) && (queue_data->p_attr->p_value->attr_val.attr_val != NULL)){
memcpy(queue_data->p_attr->p_value->attr_val.attr_val+queue_data->offset, queue_data->value, queue_data->len);
}
}
- GKI_freebuf(queue_data);
- }
+ osi_free(queue_data);
+ }
+ fixed_queue_free(prepare_record->queue, NULL);
+ prepare_record->queue = NULL;
/* according to ble spec, even if there is no prep write queued,
* need to respond execute_write_response
gatt_sr_reset_cback_cnt(p_tcb); /* read multiple use multi_rsp_q's count*/
for (ll = 0; ll < p_tcb->sr_cmd.multi_req.num_handles; ll ++) {
- if ((p_msg = (tGATTS_RSP *)GKI_getbuf(sizeof(tGATTS_RSP))) != NULL) {
+ if ((p_msg = (tGATTS_RSP *)osi_malloc(sizeof(tGATTS_RSP))) != NULL) {
memset(p_msg, 0, sizeof(tGATTS_RSP))
;
handle = p_tcb->sr_cmd.multi_req.handles[ll];
gatt_sr_process_app_rsp(p_tcb, gatt_cb.sr_reg[i_rcb].gatt_if , trans_id, op_code, GATT_SUCCESS, p_msg);
}
/* either not using or done using the buffer, release it now */
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
} else {
err = GATT_NO_RESOURCES;
gatt_dequeue_sr_cmd(p_tcb);
}
if (reason == GATT_SUCCESS) {
- if ((p_msg = (BT_HDR *)GKI_getbuf(msg_len)) == NULL) {
+ if ((p_msg = (BT_HDR *)osi_calloc(msg_len)) == NULL) {
GATT_TRACE_ERROR("gatts_process_primary_service_req failed. no resources.");
reason = GATT_NO_RESOURCES;
} else {
- memset(p_msg, 0, msg_len);
reason = gatt_build_primary_service_rsp (p_msg, p_tcb, op_code, s_hdl, e_hdl, p_data, value);
}
}
if (reason != GATT_SUCCESS) {
if (p_msg) {
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
gatt_send_error_rsp (p_tcb, reason, op_code, s_hdl, FALSE);
} else {
if (reason == GATT_SUCCESS) {
buf_len = (UINT16)(sizeof(BT_HDR) + p_tcb->payload_size + L2CAP_MIN_OFFSET);
- if ((p_msg = (BT_HDR *)GKI_getbuf(buf_len)) == NULL) {
+ if ((p_msg = (BT_HDR *)osi_calloc(buf_len)) == NULL) {
reason = GATT_NO_RESOURCES;
} else {
reason = GATT_NOT_FOUND;
- memset(p_msg, 0, buf_len);
p = (UINT8 *)(p_msg + 1) + L2CAP_MIN_OFFSET;
*p ++ = op_code + 1;
p_msg->len = 2;
if (reason != GATT_SUCCESS) {
if (p_msg) {
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
gatt_send_error_rsp (p_tcb, reason, op_code, s_hdl, FALSE);
} else {
#endif
if (reason == GATT_SUCCESS) {
- if ((p_msg = (BT_HDR *)GKI_getbuf(msg_len)) == NULL) {
+ if ((p_msg = (BT_HDR *)osi_calloc(msg_len)) == NULL) {
GATT_TRACE_ERROR("gatts_process_find_info failed. no resources.\n");
reason = GATT_NO_RESOURCES;
} else {
- memset(p_msg, 0, msg_len);
p = (UINT8 *)(p_msg + 1) + L2CAP_MIN_OFFSET;
*p ++ = op_code + 1;
}
if (reason != GATT_SUCCESS && reason != GATT_STACK_RSP) {
if (p_msg) {
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
/* in theroy BUSY is not possible(should already been checked), protected check */
}
if (is_need_queue_data){
- queue_data = (tGATT_PREPARE_WRITE_QUEUE_DATA *)GKI_getbuf(len + sizeof(tGATT_PREPARE_WRITE_QUEUE_DATA));
+ queue_data = (tGATT_PREPARE_WRITE_QUEUE_DATA *)osi_malloc(len + sizeof(tGATT_PREPARE_WRITE_QUEUE_DATA));
if (queue_data == NULL){
status = GATT_PREPARE_Q_FULL;
} else {
queue_data->handle = handle;
queue_data->offset = offset;
memcpy(queue_data->value, p, len);
- GKI_enqueue(&(prepare_record->queue), queue_data);
+ if (prepare_record->queue == NULL) {
+ prepare_record->queue = fixed_queue_new(SIZE_MAX);
+ }
+ fixed_queue_enqueue(prepare_record->queue, queue_data);
}
}
UINT16 offset = 0, value_len = 0;
UNUSED (len);
- if ((p_msg = (BT_HDR *)GKI_getbuf(buf_len)) == NULL) {
+ if ((p_msg = (BT_HDR *)osi_calloc(buf_len)) == NULL) {
GATT_TRACE_ERROR("gatts_process_find_info failed. no resources.\n");
reason = GATT_NO_RESOURCES;
STREAM_TO_UINT16(offset, p_data);
}
- memset(p_msg, 0, buf_len);
p = (UINT8 *)(p_msg + 1) + L2CAP_MIN_OFFSET;
*p ++ = op_code + 1;
p_msg->len = 1;
if (reason != GATT_SUCCESS && reason != GATT_PENDING && reason != GATT_STACK_RSP) {
if (p_msg) {
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
/* in theroy BUSY is not possible(should already been checked), protected check */
gatt_dequeue_sr_cmd(p_tcb);
} else {
if (p_msg) {
- GKI_freebuf(p_msg);
+ osi_free(p_msg);
}
}
static void gatts_chk_pending_ind(tGATT_TCB *p_tcb )
{
#if (GATTS_INCLUDED == TRUE)
- tGATT_VALUE *p_buf = (tGATT_VALUE *)GKI_getfirst(&p_tcb->pending_ind_q);
+ tGATT_VALUE *p_buf = (tGATT_VALUE *)fixed_queue_try_peek_first(p_tcb->pending_ind_q);
GATT_TRACE_DEBUG("gatts_chk_pending_ind");
if (p_buf ) {
p_buf->handle,
p_buf->len,
p_buf->value);
- GKI_freebuf(GKI_remove_from_queue (&p_tcb->pending_ind_q, p_buf));
+ osi_free(fixed_queue_try_remove_from_queue(p_tcb->pending_ind_q,
+ p_buf));
}
#endif ///GATTS_INCLUDED == TRUE
}
*
******************************************************************************/
#include "bt_target.h"
-//#include "bt_utils.h"
+#include "allocator.h"
#if BLE_INCLUDED == TRUE
#include <string.h>
#include <stdio.h>
-#include "gki.h"
#include "l2cdefs.h"
#include "gatt_int.h"
void gatt_free_pending_ind(tGATT_TCB *p_tcb)
{
GATT_TRACE_DEBUG("gatt_free_pending_ind");
- /* release all queued indications */
- while (!GKI_queue_is_empty(&p_tcb->pending_ind_q)) {
- GKI_freebuf (GKI_dequeue (&p_tcb->pending_ind_q));
+ if (p_tcb->pending_ind_q == NULL) {
+ return;
}
+
+ /* release all queued indications */
+ while (!fixed_queue_is_empty(p_tcb->pending_ind_q)) {
+ osi_free(fixed_queue_try_dequeue(p_tcb->pending_ind_q));
+ }
+ fixed_queue_free(p_tcb->pending_ind_q, NULL);
+ p_tcb->pending_ind_q = NULL;
}
/*******************************************************************************
void gatt_free_pending_enc_queue(tGATT_TCB *p_tcb)
{
GATT_TRACE_DEBUG("gatt_free_pending_enc_queue");
+ if (p_tcb->pending_enc_clcb == NULL) {
+ return;
+ }
+
/* release all queued indications */
- while (!GKI_queue_is_empty(&p_tcb->pending_enc_clcb)) {
- GKI_freebuf (GKI_dequeue (&p_tcb->pending_enc_clcb));
+ while (!fixed_queue_is_empty(p_tcb->pending_enc_clcb)) {
+ osi_free(fixed_queue_try_dequeue(p_tcb->pending_enc_clcb));
}
+ fixed_queue_free(p_tcb->pending_enc_clcb, NULL);
+ p_tcb->pending_enc_clcb = NULL;
}
/*******************************************************************************
void gatt_free_pending_prepare_write_queue(tGATT_TCB *p_tcb)
{
GATT_TRACE_DEBUG("gatt_free_pending_prepare_write_queue");
- /* release all queued prepare write packets */
- while (!GKI_queue_is_empty(&(p_tcb->prepare_write_record.queue))) {
- GKI_freebuf (GKI_dequeue (&(p_tcb->prepare_write_record.queue)));
+
+ if (p_tcb->prepare_write_record.queue) {
+ /* release all queued prepare write packets */
+ while (!fixed_queue_is_empty(p_tcb->prepare_write_record.queue)) {
osi_free(fixed_queue_try_dequeue(p_tcb->prepare_write_record.queue));
+ }
+ fixed_queue_free(p_tcb->prepare_write_record.queue, NULL);
+ p_tcb->prepare_write_record.queue = NULL;
}
+
p_tcb->prepare_write_record.total_num = 0;
p_tcb->prepare_write_record.error_code_app = GATT_SUCCESS;
}
memcpy(req.srv_chg.bda, bd_addr, BD_ADDR_LEN);
(*gatt_cb.cb_info.p_srv_chg_callback)(GATTS_SRV_CHG_CMD_REMOVE_CLIENT, &req, NULL);
}
- GKI_freebuf (GKI_remove_from_queue (&gatt_cb.srv_chg_clt_q, p_buf));
+ osi_free(fixed_queue_try_remove_from_queue(gatt_cb.srv_chg_clt_q,
+ p_buf));
}
}
*******************************************************************************/
void gatt_set_srv_chg(void)
{
- tGATTS_SRV_CHG *p_buf = (tGATTS_SRV_CHG *)GKI_getfirst(&gatt_cb.srv_chg_clt_q);
- tGATTS_SRV_CHG_REQ req;
-
GATT_TRACE_DEBUG ("gatt_set_srv_chg");
- while (p_buf) {
+
+ if (fixed_queue_is_empty(gatt_cb.srv_chg_clt_q)) {
+ return;
+ }
+
+ list_t *list = fixed_queue_get_list(gatt_cb.srv_chg_clt_q);
+ for (const list_node_t *node = list_begin(list); node != list_end(list);
+ node = list_next(node)) {
GATT_TRACE_DEBUG ("found a srv_chg clt");
+
+ tGATTS_SRV_CHG *p_buf = (tGATTS_SRV_CHG *)list_node(node);
if (!p_buf->srv_changed) {
- GATT_TRACE_DEBUG ("set srv_changed to TRUE");
+ GATT_TRACE_DEBUG("set srv_changed to TRUE");
p_buf->srv_changed = TRUE;
+ tGATTS_SRV_CHG_REQ req;
memcpy(&req.srv_chg, p_buf, sizeof(tGATTS_SRV_CHG));
if (gatt_cb.cb_info.p_srv_chg_callback) {
- (*gatt_cb.cb_info.p_srv_chg_callback)(GATTS_SRV_CHG_CMD_UPDATE_CLIENT, &req, NULL);
- }
+ (*gatt_cb.cb_info.p_srv_chg_callback)(GATTS_SRV_CHG_CMD_UPDATE_CLIENT,&req, NULL);
+ }
}
- p_buf = (tGATTS_SRV_CHG *)GKI_getnext(p_buf);
}
}
*******************************************************************************/
tGATTS_PENDING_NEW_SRV_START *gatt_sr_is_new_srv_chg(tBT_UUID *p_app_uuid128, tBT_UUID *p_svc_uuid, UINT16 svc_inst)
{
-    tGATTS_HNDL_RANGE *p;
-    tGATTS_PENDING_NEW_SRV_START *p_buf = (tGATTS_PENDING_NEW_SRV_START *)GKI_getfirst(&gatt_cb.pending_new_srv_start_q);
-    while (p_buf != NULL) {
-        p = p_buf->p_new_srv_start;
-        if ( gatt_uuid_compare (*p_app_uuid128, p->app_uuid128)
-             && gatt_uuid_compare (*p_svc_uuid, p->svc_uuid)
-             && (svc_inst == p->svc_inst) ) {
-            GATT_TRACE_DEBUG ("gatt_sr_is_new_srv_chg: Yes");
-            break;
-        }
-        p_buf = (tGATTS_PENDING_NEW_SRV_START *)GKI_getnext(p_buf);
-    }
+    tGATTS_PENDING_NEW_SRV_START *p_buf = NULL;
+
+    if (fixed_queue_is_empty(gatt_cb.pending_new_srv_start_q)) {
+        return NULL;
+    }
+
+    list_t *list = fixed_queue_get_list(gatt_cb.pending_new_srv_start_q);
+    for (const list_node_t *node = list_begin(list); node != list_end(list);
+         node = list_next(node)) {
+        tGATTS_PENDING_NEW_SRV_START *p_entry =
+            (tGATTS_PENDING_NEW_SRV_START *)list_node(node);
+        tGATTS_HNDL_RANGE *p = p_entry->p_new_srv_start;
+        if (gatt_uuid_compare(*p_app_uuid128, p->app_uuid128)
+            && gatt_uuid_compare(*p_svc_uuid, p->svc_uuid)
+            && (svc_inst == p->svc_inst)) {
+            GATT_TRACE_DEBUG("gatt_sr_is_new_srv_chg: Yes");
+            /* Remember the entry only on a match, so an unmatched walk
+             * returns NULL instead of the last list element. */
+            p_buf = p_entry;
+            break;
+        }
+    }
return p_buf;
{
tGATT_VALUE *p_buf;
GATT_TRACE_DEBUG ("gatt_add_pending_ind");
- if ((p_buf = (tGATT_VALUE *)GKI_getbuf((UINT16)sizeof(tGATT_VALUE))) != NULL) {
+ if ((p_buf = (tGATT_VALUE *)osi_malloc((UINT16)sizeof(tGATT_VALUE))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a pending indication");
memcpy(p_buf, p_ind, sizeof(tGATT_VALUE));
- GKI_enqueue (&p_tcb->pending_ind_q, p_buf);
+ fixed_queue_enqueue(p_tcb->pending_ind_q, p_buf);
}
return p_buf;
}
tGATTS_PENDING_NEW_SRV_START *p_buf;
GATT_TRACE_DEBUG ("gatt_add_pending_new_srv_start");
- if ((p_buf = (tGATTS_PENDING_NEW_SRV_START *)GKI_getbuf((UINT16)sizeof(tGATTS_PENDING_NEW_SRV_START))) != NULL) {
+ if ((p_buf = (tGATTS_PENDING_NEW_SRV_START *)osi_malloc((UINT16)sizeof(tGATTS_PENDING_NEW_SRV_START))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a new pending new srv start");
p_buf->p_new_srv_start = p_new_srv_start;
- GKI_enqueue (&gatt_cb.pending_new_srv_start_q, p_buf);
+ fixed_queue_enqueue(gatt_cb.pending_new_srv_start_q, p_buf);
}
return p_buf;
}
{
tGATTS_SRV_CHG *p_buf;
GATT_TRACE_DEBUG ("gatt_add_srv_chg_clt");
- if ((p_buf = (tGATTS_SRV_CHG *)GKI_getbuf((UINT16)sizeof(tGATTS_SRV_CHG))) != NULL) {
+ if ((p_buf = (tGATTS_SRV_CHG *)osi_malloc((UINT16)sizeof(tGATTS_SRV_CHG))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a srv chg client");
memcpy(p_buf, p_srv_chg, sizeof(tGATTS_SRV_CHG));
- GKI_enqueue (&gatt_cb.srv_chg_clt_q, p_buf);
+ fixed_queue_enqueue(gatt_cb.srv_chg_clt_q, p_buf);
}
return p_buf;
if (!p_cb->hdl_list[i].in_use) {
memset(p_elem, 0, sizeof(tGATT_HDL_LIST_ELEM));
p_elem->in_use = TRUE;
+ p_elem->svc_db.svc_buffer = fixed_queue_new(SIZE_MAX);
return p_elem;
}
}
if (p_attr->mask & GATT_ATTR_VALUE_ALLOCATED){
p_value = p_attr->p_value;
if ((p_value != NULL) && (p_value->attr_val.attr_val != NULL)){
- GKI_freebuf(p_value->attr_val.attr_val);
+ osi_free(p_value->attr_val.attr_val);
}
}
p_attr = p_attr->p_next;
{
if (p) {
- while (!GKI_queue_is_empty(&p->svc_db.svc_buffer)) {
- GKI_freebuf (GKI_dequeue (&p->svc_db.svc_buffer));
- }
+ while (!fixed_queue_is_empty(p->svc_db.svc_buffer)) {
+ osi_free(fixed_queue_try_dequeue(p->svc_db.svc_buffer));
+ }
+ fixed_queue_free(p->svc_db.svc_buffer, NULL);
memset(p, 0, sizeof(tGATT_HDL_LIST_ELEM));
}
}
for (i = 0; i < GATT_MAX_SR_PROFILES; i ++, p_elem ++) {
if (memcmp(p_app_id, &p_elem->asgn_range.app_uuid128, sizeof(tBT_UUID)) == 0) {
gatt_free_attr_value_buffer(p_elem);
- while (!GKI_queue_is_empty(&p_elem->svc_db.svc_buffer)) {
- GKI_freebuf (GKI_dequeue (&p_elem->svc_db.svc_buffer));
- }
+ while (!fixed_queue_is_empty(p_elem->svc_db.svc_buffer)) {
+ osi_free(fixed_queue_try_dequeue(p_elem->svc_db.svc_buffer));
+ }
+ fixed_queue_free(p_elem->svc_db.svc_buffer, NULL);
+ p_elem->svc_db.svc_buffer = NULL;
p_elem->svc_db.mem_free = 0;
p_elem->svc_db.p_attr_list = p_elem->svc_db.p_free_mem = NULL;
*******************************************************************************/
BOOLEAN gatt_is_srv_chg_ind_pending (tGATT_TCB *p_tcb)
{
- tGATT_VALUE *p_buf = (tGATT_VALUE *)GKI_getfirst(&p_tcb->pending_ind_q);
BOOLEAN srv_chg_ind_pending = FALSE;
- GATT_TRACE_DEBUG("gatt_is_srv_chg_ind_pending is_queue_empty=%d", GKI_queue_is_empty(&p_tcb->pending_ind_q) );
+ GATT_TRACE_DEBUG("gatt_is_srv_chg_ind_pending is_queue_empty=%d",
+ fixed_queue_is_empty(p_tcb->pending_ind_q));
if (p_tcb->indicate_handle == gatt_cb.handle_of_h_r) {
srv_chg_ind_pending = TRUE;
- } else {
- while (p_buf) {
- if (p_buf->handle == gatt_cb.handle_of_h_r) {
+ } else if (! fixed_queue_is_empty(p_tcb->pending_ind_q)) {
+ list_t *list = fixed_queue_get_list(p_tcb->pending_ind_q);
+ for (const list_node_t *node = list_begin(list);
+ node != list_end(list);
+ node = list_next(node)) {
+ tGATT_VALUE *p_buf = (tGATT_VALUE *)list_node(node);
+ if (p_buf->handle == gatt_cb.handle_of_h_r)
+ {
srv_chg_ind_pending = TRUE;
break;
}
- p_buf = (tGATT_VALUE *)GKI_getnext(p_buf);
}
}
*******************************************************************************/
tGATTS_SRV_CHG *gatt_is_bda_in_the_srv_chg_clt_list (BD_ADDR bda)
{
- tGATTS_SRV_CHG *p_buf = (tGATTS_SRV_CHG *)GKI_getfirst(&gatt_cb.srv_chg_clt_q);
+ tGATTS_SRV_CHG *p_buf = NULL;
GATT_TRACE_DEBUG("gatt_is_bda_in_the_srv_chg_clt_list :%02x-%02x-%02x-%02x-%02x-%02x",
bda[0], bda[1], bda[2], bda[3], bda[4], bda[5]);
-    while (p_buf != NULL) {
-        if (!memcmp( bda, p_buf->bda, BD_ADDR_LEN)) {
-            GATT_TRACE_DEBUG("bda is in the srv chg clt list");
-            break;
-        }
-        p_buf = (tGATTS_SRV_CHG *)GKI_getnext(p_buf);
-    }
+    if (fixed_queue_is_empty(gatt_cb.srv_chg_clt_q)) {
+        return NULL;
+    }
+
+    list_t *list = fixed_queue_get_list(gatt_cb.srv_chg_clt_q);
+    for (const list_node_t *node = list_begin(list); node != list_end(list);
+         node = list_next(node)) {
+        tGATTS_SRV_CHG *p_entry = (tGATTS_SRV_CHG *)list_node(node);
+        if (!memcmp(bda, p_entry->bda, BD_ADDR_LEN)) {
+            GATT_TRACE_DEBUG("bda is in the srv chg clt list");
+            /* Assign the OUTER p_buf only on a match; a shadowing local
+             * declaration here previously made this always return NULL. */
+            p_buf = p_entry;
+            break;
+        }
+    }
return p_buf;
if (allocated) {
memset(p_tcb, 0, sizeof(tGATT_TCB));
- GKI_init_q (&p_tcb->pending_enc_clcb);
- GKI_init_q (&p_tcb->pending_ind_q);
+ p_tcb->pending_enc_clcb = fixed_queue_new(SIZE_MAX);
+ p_tcb->pending_ind_q = fixed_queue_new(SIZE_MAX);
p_tcb->in_use = TRUE;
p_tcb->tcb_idx = i;
p_tcb->transport = transport;
p_sreg->e_hdl = p_list->asgn_range.e_handle;
p_sreg->p_db = &p_list->svc_db;
- GATT_TRACE_DEBUG ("total GKI buffer in db [%d]", GKI_queue_length(&p_sreg->p_db->svc_buffer));
+ GATT_TRACE_DEBUG ("total buffer in db [%d]", fixed_queue_length(p_sreg->p_db->svc_buffer));
break;
}
}
}
if (p_clcb->p_attr_buf) {
- GKI_freebuf(p_clcb->p_attr_buf);
+ osi_free(p_clcb->p_attr_buf);
}
operation = p_clcb->operation;
gatt_free_pending_ind(p_tcb);
gatt_free_pending_enc_queue(p_tcb);
gatt_free_pending_prepare_write_queue(p_tcb);
+ fixed_queue_free(p_tcb->sr_cmd.multi_rsp_q, osi_free_func);
+ p_tcb->sr_cmd.multi_rsp_q = NULL;
for (i = 0; i < GATT_MAX_APPS; i ++) {
p_reg = &gatt_cb.cl_rcb[i];
tGATT_PENDING_ENC_CLCB *p_buf;
GATT_TRACE_DEBUG ("gatt_add_pending_new_srv_start");
- if ((p_buf = (tGATT_PENDING_ENC_CLCB *)GKI_getbuf((UINT16)sizeof(tGATT_PENDING_ENC_CLCB))) != NULL) {
+ if ((p_buf = (tGATT_PENDING_ENC_CLCB *)osi_malloc((UINT16)sizeof(tGATT_PENDING_ENC_CLCB))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a new pending encryption channel clcb");
p_buf->p_clcb = p_clcb;
- GKI_enqueue (&p_tcb->pending_enc_clcb, p_buf);
+ fixed_queue_enqueue(p_tcb->pending_enc_clcb, p_buf);
}
return p_buf;
}
#define GATT_INT_H
#include "bt_target.h"
-
-
#include "bt_trace.h"
#include "gatt_api.h"
#include "btm_ble_api.h"
#include "btu.h"
+#include "fixed_queue.h"
#include <string.h>
typedef struct {
void *p_attr_list; /* pointer to the first attribute, either tGATT_ATTR16 or tGATT_ATTR128 */
UINT8 *p_free_mem; /* Pointer to free memory */
- BUFFER_Q svc_buffer; /* buffer queue used for service database */
+ fixed_queue_t *svc_buffer; /* buffer queue used for service database */
UINT32 mem_free; /* Memory still available */
UINT16 end_handle; /* Last handle number */
UINT16 next_handle; /* Next usable handle value */
BT_HDR *p_rsp_msg;
UINT32 trans_id;
tGATT_READ_MULTI multi_req;
- BUFFER_Q multi_rsp_q;
+ fixed_queue_t *multi_rsp_q;
UINT16 handle;
UINT8 op_code;
UINT8 status;
typedef struct{
//only store prepare write packets which need
//to be responded by stack (not by application)
- BUFFER_Q queue;
+ fixed_queue_t *queue;
//store the total number of prepare write packets
//including that should be responded by stack or by application
}tGATT_PREPARE_WRITE_RECORD;
typedef struct {
- BUFFER_Q pending_enc_clcb; /* pending encryption channel q */
+ fixed_queue_t *pending_enc_clcb; /* pending encryption channel q */
tGATT_SEC_ACTION sec_act;
BD_ADDR peer_bda;
tBT_TRANSPORT transport;
tGATT_SR_CMD sr_cmd;
#endif ///GATTS_INCLUDED == TRUE
UINT16 indicate_handle;
- BUFFER_Q pending_ind_q;
+ fixed_queue_t *pending_ind_q;
TIMER_LIST_ENT conf_timer_ent; /* peer confirm to indication timer */
typedef struct {
tGATT_TCB tcb[GATT_MAX_PHY_CHANNEL];
- BUFFER_Q sign_op_queue;
+ fixed_queue_t *sign_op_queue;
tGATT_SR_REG sr_reg[GATT_MAX_SR_PROFILES];
UINT16 next_handle; /* next available handle */
tGATT_SRV_LIST_INFO srv_list_info;
tGATT_SRV_LIST_ELEM srv_list[GATT_MAX_SR_PROFILES];
#endif ///GATTS_INCLUDED == TRUE
- BUFFER_Q srv_chg_clt_q; /* service change clients queue */
- BUFFER_Q pending_new_srv_start_q; /* pending new service start queue */
+ fixed_queue_t *srv_chg_clt_q; /* service change clients queue */
+ fixed_queue_t *pending_new_srv_start_q; /* pending new service start queue */
tGATT_REG cl_rcb[GATT_MAX_APPS];
tGATT_CLCB clcb[GATT_CL_MAX_LCB]; /* connection link control block*/
tGATT_SCCB sccb[GATT_MAX_SCCB]; /* sign complete callback function GATT_MAX_SCCB <= GATT_CL_MAX_LCB */
******************************************************************************/
#include "bt_target.h"
-#include "gki.h"
+#include "allocator.h"
#include "hcidefs.h"
#include "hcimsgs.h"
#include "hcidefs.h"
#include <stddef.h>
#include <string.h>
+#define HCI_GET_CMD_BUF(paramlen) ((BT_HDR *)osi_malloc(HCI_CMD_BUF_SIZE))
+
#if (defined BLE_INCLUDED) && (BLE_INCLUDED == TRUE)
BOOLEAN btsnd_hcic_ble_set_local_used_feat (UINT8 feat_set[8])
******************************************************************************/
#include "bt_target.h"
-//#include "btcore/include/counter.h"
-#include "gki.h"
+#include "allocator.h"
#include "hcidefs.h"
#include "hcimsgs.h"
#include "hcidefs.h"
#include "btm_int.h" /* Included for UIPC_* macro definitions */
+#define HCI_GET_CMD_BUF(paramlen) ((BT_HDR *)osi_malloc(HCI_CMD_BUF_SIZE))
+
BOOLEAN btsnd_hcic_inquiry(const LAP inq_lap, UINT8 duration, UINT8 response_cnt)
{
BT_HDR *p;
}
/* Must have room to store BT_HDR + max VSC length + callback pointer */
-#if (HCI_CMD_POOL_BUF_SIZE < 268)
-#error "HCI_CMD_POOL_BUF_SIZE must be larger than 268"
+#if (HCI_CMD_BUF_SIZE < 268)
+#error "HCI_CMD_BUF_SIZE must be larger than 268"
#endif
#include "bt_defs.h"
#include "btm_api.h"
-#include "gki.h"
#include "bt_common_types.h"
#define CHANNEL_MAP_LEN 5
#define BTM_BLE_INT_H
#include "bt_target.h"
-#include "gki.h"
+#include "fixed_queue.h"
#include "hcidefs.h"
#include "btm_ble_api.h"
#include "btm_int.h"
UINT8 white_list_avail_size;
tBTM_BLE_WL_STATE wl_state;
- BUFFER_Q conn_pending_q;
+ fixed_queue_t *conn_pending_q;
tBTM_BLE_CONN_ST conn_state;
/* random address management control block */
UINT8 scan_filter_policy);
void btm_ble_stop_inquiry(void);
void btm_ble_init (void);
+void btm_ble_free (void);
void btm_ble_connected (UINT8 *bda, UINT16 handle, UINT8 enc_mode, UINT8 role, tBLE_ADDR_TYPE addr_type, BOOLEAN addr_matched);
void btm_ble_read_remote_features_complete(UINT8 *p);
void btm_ble_write_adv_enable_complete(UINT8 *p);
#include "bt_defs.h"
#include "bt_target.h"
-#include "gki.h"
#include "hcidefs.h"
#include "rfcdefs.h"
typedef struct {
tBTM_ESCO_INFO esco; /* Current settings */
#if BTM_SCO_HCI_INCLUDED == TRUE
- BUFFER_Q xmit_data_q; /* SCO data transmitting queue */
+ fixed_queue_t *xmit_data_q; /* SCO data transmitting queue */
#endif
tBTM_SCO_CB *p_conn_cb; /* Callback for when connected */
tBTM_SCO_CB *p_disc_cb; /* Callback for when disconnect */
UINT8 busy_level; /* the current busy level */
BOOLEAN is_paging; /* TRUE, if paging is in progess */
BOOLEAN is_inquiry; /* TRUE, if inquiry is in progess */
- BUFFER_Q page_queue;
+ fixed_queue_t *page_queue;
BOOLEAN paging;
BOOLEAN discing;
- BUFFER_Q sec_pending_q; /* pending sequrity requests in tBTM_SEC_QUEUE_ENTRY format */
+ fixed_queue_t *sec_pending_q; /* pending sequrity requests in tBTM_SEC_QUEUE_ENTRY format */
#if (!defined(BT_TRACE_VERBOSE) || (BT_TRACE_VERBOSE == FALSE))
char state_temp_buffer[BTM_STATE_BUFFER_SIZE];
#endif
********************************************
*/
void btm_init (void);
+void btm_free (void);
/* Internal functions provided by btm_inq.c
*******************************************
#define BTU_H
#include "bt_target.h"
-#include "gki.h"
+#include "bt_defs.h"
// HACK(zachoverflow): temporary dark magic
#define BTU_POST_TO_TASK_NO_GOOD_HORRIBLE_HACK 0x1700 // didn't look used in bt_types...here goes nothing
typedef struct {
UINT8 preferred_mode;
UINT8 allowed_modes;
- UINT8 user_rx_pool_id;
- UINT8 user_tx_pool_id;
- UINT8 fcr_rx_pool_id;
- UINT8 fcr_tx_pool_id;
+ UINT16 user_rx_buf_size;
+ UINT16 user_tx_buf_size;
+ UINT16 fcr_rx_buf_size;
+ UINT16 fcr_tx_buf_size;
} tL2CAP_ERTM_INFO;
#define L2CAP_BLE_EXTFEA_MASK 0
#endif
-/* Define a value that tells L2CAP to use the default HCI ACL buffer pool */
-#define L2CAP_DEFAULT_ERM_POOL_ID 0xFF
+/* Define a value that tells L2CAP to use the default HCI ACL buffer size */
+#define L2CAP_INVALID_ERM_BUF_SIZE 0
+
/* Define a value that tells L2CAP to use the default MPS */
#define L2CAP_DEFAULT_ERM_MPS 0x0000
/* To optimize this, it must be a multiplum of the L2CAP PDU length AND match the 3DH5 air
* including the l2cap headers in each packet - to match the latter - the -5 is added
*/
-#define L2CAP_MAX_SDU_LENGTH (GKI_BUF4_SIZE - (L2CAP_MIN_OFFSET + L2CAP_MAX_HEADER_FCS) -5)
+#define L2CAP_MAX_SDU_LENGTH (8080 + 26 - (L2CAP_MIN_OFFSET + 6))
+#define L2CAP_MAX_BUF_SIZE (10240 + 24)
/* Part of L2CAP_MIN_OFFSET that is not part of L2CAP
*/
#ifndef PORTEXT_H
#define PORTEXT_H
-#include "gki.h"
/* Port emulation entity Entry Points */
extern void rfcomm_process_timeout (TIMER_LIST_ENT *p_tle);
#include <stdbool.h>
#include "btm_api.h"
-#include "gki.h"
#include "l2c_api.h"
#include "l2cdefs.h"
#include "list.h"
+#include "fixed_queue.h"
#define L2CAP_MIN_MTU 48 /* Minimum acceptable MTU is 48 bytes */
UINT16 rx_sdu_len; /* Length of the SDU being received */
BT_HDR *p_rx_sdu; /* Buffer holding the SDU being received */
- BUFFER_Q waiting_for_ack_q; /* Buffers sent and waiting for peer to ack */
- BUFFER_Q srej_rcv_hold_q; /* Buffers rcvd but held pending SREJ rsp */
- BUFFER_Q retrans_q; /* Buffers being retransmitted */
+ fixed_queue_t *waiting_for_ack_q; /* Buffers sent and waiting for peer to ack */
+ fixed_queue_t *srej_rcv_hold_q; /* Buffers rcvd but held pending SREJ rsp */
+ fixed_queue_t *retrans_q; /* Buffers being retransmitted */
TIMER_LIST_ENT ack_timer; /* Timer delaying RR */
TIMER_LIST_ENT mon_retrans_timer; /* Timer Monitor or Retransmission */
} tL2C_RCB;
+#ifndef L2CAP_CBB_DEFAULT_DATA_RATE_BUFF_QUOTA
+#define L2CAP_CBB_DEFAULT_DATA_RATE_BUFF_QUOTA 100
+#endif
/* Define a channel control block (CCB). There may be many channel control blocks
** between the same two Bluetooth devices (i.e. on the same link).
** Each CCB has unique local and remote CIDs. All channel control blocks on
tL2CAP_CH_CFG_BITS peer_cfg_bits; /* Store what peer wants to configure */
tL2CAP_CFG_INFO peer_cfg; /* Peer's saved configuration options */
- BUFFER_Q xmit_hold_q; /* Transmit data hold queue */
+ fixed_queue_t *xmit_hold_q; /* Transmit data hold queue */
BOOLEAN cong_sent; /* Set when congested status sent */
UINT16 buff_quota; /* Buffer quota before sending congestion */
UINT8 peer_chnl_mask[L2CAP_FIXED_CHNL_ARRAY_SIZE];
#if (L2CAP_UCD_INCLUDED == TRUE)
UINT16 ucd_mtu; /* peer MTU on UCD */
- BUFFER_Q ucd_out_sec_pending_q; /* Security pending outgoing UCD packet */
- BUFFER_Q ucd_in_sec_pending_q; /* Security pending incoming UCD packet */
+ fixed_queue_t *ucd_out_sec_pending_q; /* Security pending outgoing UCD packet */
+ fixed_queue_t *ucd_in_sec_pending_q; /* Security pending incoming UCD packet */
#endif
BT_HDR *p_hcit_rcv_acl; /* Current HCIT ACL buf being rcvd */
extern void l2c_fcr_proc_tout (tL2C_CCB *p_ccb);
extern void l2c_fcr_proc_ack_tout (tL2C_CCB *p_ccb);
extern void l2c_fcr_send_S_frame (tL2C_CCB *p_ccb, UINT16 function_code, UINT16 pf_bit);
-extern BT_HDR *l2c_fcr_clone_buf (BT_HDR *p_buf, UINT16 new_offset, UINT16 no_of_bytes, UINT8 pool);
+extern BT_HDR *l2c_fcr_clone_buf (BT_HDR *p_buf, UINT16 new_offset, UINT16 no_of_bytes);
extern BOOLEAN l2c_fcr_is_flow_controlled (tL2C_CCB *p_ccb);
extern BT_HDR *l2c_fcr_get_next_xmit_sdu_seg (tL2C_CCB *p_ccb, UINT16 max_packet_length);
extern void l2c_fcr_start_timer (tL2C_CCB *p_ccb);
#include <string.h>
#include <stdio.h>
#include "bt_trace.h"
-#include "gki.h"
#include "bt_types.h"
#include "hcidefs.h"
#include "hcimsgs.h"
p_ccb->ertm_info = *p_ertm_info;
/* Replace default indicators with the actual default pool */
- if (p_ccb->ertm_info.fcr_rx_pool_id == L2CAP_DEFAULT_ERM_POOL_ID) {
- p_ccb->ertm_info.fcr_rx_pool_id = L2CAP_FCR_RX_POOL_ID;
- }
+ if (p_ccb->ertm_info.fcr_rx_buf_size == L2CAP_INVALID_ERM_BUF_SIZE)
+ p_ccb->ertm_info.fcr_rx_buf_size = L2CAP_FCR_RX_BUF_SIZE;
- if (p_ccb->ertm_info.fcr_tx_pool_id == L2CAP_DEFAULT_ERM_POOL_ID) {
- p_ccb->ertm_info.fcr_tx_pool_id = L2CAP_FCR_TX_POOL_ID;
- }
+ if (p_ccb->ertm_info.fcr_tx_buf_size == L2CAP_INVALID_ERM_BUF_SIZE)
+ p_ccb->ertm_info.fcr_tx_buf_size = L2CAP_FCR_TX_BUF_SIZE;
- if (p_ccb->ertm_info.user_rx_pool_id == L2CAP_DEFAULT_ERM_POOL_ID) {
- p_ccb->ertm_info.user_rx_pool_id = HCI_ACL_POOL_ID;
- }
+ if (p_ccb->ertm_info.user_rx_buf_size == L2CAP_INVALID_ERM_BUF_SIZE)
+ p_ccb->ertm_info.user_rx_buf_size = L2CAP_USER_RX_BUF_SIZE;
- if (p_ccb->ertm_info.user_tx_pool_id == L2CAP_DEFAULT_ERM_POOL_ID) {
- p_ccb->ertm_info.user_tx_pool_id = HCI_ACL_POOL_ID;
- }
+ if (p_ccb->ertm_info.user_tx_buf_size == L2CAP_INVALID_ERM_BUF_SIZE)
+ p_ccb->ertm_info.user_tx_buf_size = L2CAP_USER_TX_BUF_SIZE;
-    p_ccb->max_rx_mtu = GKI_get_pool_bufsize (p_ertm_info->user_rx_pool_id) -
-                        (L2CAP_MIN_OFFSET + L2CAP_SDU_LEN_OFFSET + L2CAP_FCS_LEN);
+    /* Use the normalized copy in p_ccb->ertm_info: the caller may have
+     * passed L2CAP_INVALID_ERM_BUF_SIZE (0), which would underflow here. */
+    p_ccb->max_rx_mtu = p_ccb->ertm_info.user_rx_buf_size -
+                        (L2CAP_MIN_OFFSET + L2CAP_SDU_LEN_OFFSET + L2CAP_FCS_LEN);
}
+
+
/* If link is up, start the L2CAP connection */
if (p_lcb->link_state == LST_CONNECTED) {
l2c_csm_execute (p_ccb, L2CEVT_L2CA_CONNECT_REQ, NULL);
p_ccb->ertm_info = *p_ertm_info;
/* Replace default indicators with the actual default pool */
- if (p_ccb->ertm_info.fcr_rx_pool_id == L2CAP_DEFAULT_ERM_POOL_ID) {
- p_ccb->ertm_info.fcr_rx_pool_id = L2CAP_FCR_RX_POOL_ID;
- }
+ if (p_ccb->ertm_info.fcr_rx_buf_size == L2CAP_INVALID_ERM_BUF_SIZE)
+ p_ccb->ertm_info.fcr_rx_buf_size = L2CAP_FCR_RX_BUF_SIZE;
- if (p_ccb->ertm_info.fcr_tx_pool_id == L2CAP_DEFAULT_ERM_POOL_ID) {
- p_ccb->ertm_info.fcr_tx_pool_id = L2CAP_FCR_TX_POOL_ID;
- }
+ if (p_ccb->ertm_info.fcr_tx_buf_size == L2CAP_INVALID_ERM_BUF_SIZE)
+ p_ccb->ertm_info.fcr_tx_buf_size = L2CAP_FCR_TX_BUF_SIZE;
- if (p_ccb->ertm_info.user_rx_pool_id == L2CAP_DEFAULT_ERM_POOL_ID) {
- p_ccb->ertm_info.user_rx_pool_id = HCI_ACL_POOL_ID;
- }
+ if (p_ccb->ertm_info.user_rx_buf_size == L2CAP_INVALID_ERM_BUF_SIZE)
+ p_ccb->ertm_info.user_rx_buf_size = L2CAP_USER_RX_BUF_SIZE;
- if (p_ccb->ertm_info.user_tx_pool_id == L2CAP_DEFAULT_ERM_POOL_ID) {
- p_ccb->ertm_info.user_tx_pool_id = HCI_ACL_POOL_ID;
- }
+ if (p_ccb->ertm_info.user_tx_buf_size == L2CAP_INVALID_ERM_BUF_SIZE)
+ p_ccb->ertm_info.user_tx_buf_size = L2CAP_USER_TX_BUF_SIZE;
-    p_ccb->max_rx_mtu = GKI_get_pool_bufsize (p_ertm_info->user_rx_pool_id) - (L2CAP_MIN_OFFSET + L2CAP_SDU_LEN_OFFSET + L2CAP_FCS_LEN);
+    /* Use the normalized copy in p_ccb->ertm_info: the caller may have
+     * passed L2CAP_INVALID_ERM_BUF_SIZE (0), which would underflow here. */
+    p_ccb->max_rx_mtu = p_ccb->ertm_info.user_rx_buf_size -
+                        (L2CAP_MIN_OFFSET + L2CAP_SDU_LEN_OFFSET + L2CAP_FCS_LEN);
}
if (result == L2CAP_CONN_OK) {
if ( (fixed_cid < L2CAP_FIRST_FIXED_CHNL) || (fixed_cid > L2CAP_LAST_FIXED_CHNL)
|| (l2cb.fixed_reg[fixed_cid - L2CAP_FIRST_FIXED_CHNL].pL2CA_FixedData_Cb == NULL) ) {
L2CAP_TRACE_ERROR ("L2CA_SendFixedChnlData() Invalid CID: 0x%04x", fixed_cid);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (L2CAP_DW_FAILED);
}
// Fail if BT is not yet up
if (!BTM_IsDeviceUp()) {
L2CAP_TRACE_WARNING ("L2CA_SendFixedChnlData(0x%04x) - BTU not ready", fixed_cid);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (L2CAP_DW_FAILED);
}
/* if link is disconnecting, also report data sending failure */
p_lcb->link_state == LST_DISCONNECTING) {
L2CAP_TRACE_WARNING ("L2CA_SendFixedChnlData(0x%04x) - no LCB", fixed_cid);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (L2CAP_DW_FAILED);
}
if ((peer_channel_mask & (1 << fixed_cid)) == 0) {
L2CAP_TRACE_WARNING ("L2CA_SendFixedChnlData() - peer does not support fixed chnl: 0x%04x", fixed_cid);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (L2CAP_DW_FAILED);
}
if (!p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]) {
if (!l2cu_initialize_fixed_ccb (p_lcb, fixed_cid, &l2cb.fixed_reg[fixed_cid - L2CAP_FIRST_FIXED_CHNL].fixed_chnl_opts)) {
L2CAP_TRACE_WARNING ("L2CA_SendFixedChnlData() - no CCB for chnl: 0x%4x", fixed_cid);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (L2CAP_DW_FAILED);
}
}
if (p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->cong_sent) {
L2CAP_TRACE_ERROR ("L2CAP - CID: 0x%04x cannot send, already congested \
xmit_hold_q.count: %u buff_quota: %u", fixed_cid,
- GKI_queue_length(&p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->xmit_hold_q),
- p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->buff_quota);
- GKI_freebuf (p_buf);
+ fixed_queue_length(p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->xmit_hold_q),
+ p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->buff_quota);
+ osi_free(p_buf);
return (L2CAP_DW_FAILED);
}
if (num_to_flush != L2CAP_FLUSH_CHANS_GET) {
L2CAP_TRACE_API ("L2CA_FlushChannel (FLUSH) CID: 0x%04x NumToFlush: %d QC: %u pFirst: %p",
- lcid, num_to_flush, GKI_queue_length(&p_ccb->xmit_hold_q), GKI_getfirst(&p_ccb->xmit_hold_q));
+ lcid, num_to_flush,
+ fixed_queue_length(p_ccb->xmit_hold_q),
+ fixed_queue_try_peek_first(p_ccb->xmit_hold_q));
} else {
L2CAP_TRACE_API ("L2CA_FlushChannel (QUERY) CID: 0x%04x", lcid);
}
num_flushed1++;
list_remove(p_lcb->link_xmit_data_q, p_buf);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
}
/* If needed, flush buffers in the CCB xmit hold queue */
- while ( (num_to_flush != 0) && (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))) {
- BT_HDR *p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
+ while ( (num_to_flush != 0) && (!fixed_queue_is_empty(p_ccb->xmit_hold_q))) {
+ BT_HDR *p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
if (p_buf) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
num_to_flush--;
num_flushed2++;
}
/* Add in the number in the CCB xmit queue */
- num_left += GKI_queue_length(&p_ccb->xmit_hold_q);
+ num_left += fixed_queue_length(p_ccb->xmit_hold_q);
/* Return the local number of buffers left for the CID */
L2CAP_TRACE_DEBUG ("L2CA_FlushChannel() flushed: %u + %u, num_left: %u", num_flushed1, num_flushed2, num_left);
#include <stdio.h>
#include "bt_target.h"
-#include "gki.h"
#include "hcidefs.h"
#include "hcimsgs.h"
#include "l2cdefs.h"
#include "btm_int.h"
#include "btu.h"
#include "hcimsgs.h"
+#include "allocator.h"
+
#if (CLASSIC_BT_INCLUDED == TRUE)
/********************************************************************************/
/* L O C A L F U N C T I O N P R O T O T Y P E S */
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
case L2CEVT_L2CA_DATA_WRITE: /* Upper layer data to send */
- GKI_freebuf (p_data);
+ osi_free (p_data);
break;
case L2CEVT_L2CA_DISCONNECT_REQ: /* Upper wants to disconnect */
case L2CEVT_L2CA_DATA_WRITE: /* Upper layer data to send */
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
- GKI_freebuf (p_data);
+ osi_free (p_data);
break;
case L2CEVT_L2CA_DISCONNECT_REQ: /* Upper wants to disconnect */
case L2CEVT_L2CA_DATA_WRITE: /* Upper layer data to send */
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
- GKI_freebuf (p_data);
+ osi_free (p_data);
break;
case L2CEVT_L2CA_DISCONNECT_REQ: /* Upper wants to disconnect */
case L2CEVT_L2CA_DATA_WRITE: /* Upper layer data to send */
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
- GKI_freebuf (p_data);
+ osi_free (p_data);
break;
case L2CEVT_L2CAP_INFO_RSP:
case L2CEVT_L2CA_DATA_WRITE: /* Upper layer data to send */
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
- GKI_freebuf (p_data);
+ osi_free (p_data);
break;
case L2CEVT_L2CA_DISCONNECT_REQ: /* Upper wants to disconnect */
}
#if (L2CAP_ERTM_STATS == TRUE)
- p_ccb->fcrb.connect_tick_count = GKI_get_os_tick_count();
+ p_ccb->fcrb.connect_tick_count = osi_time_get_os_boottime_ms();
#endif
/* See if we can forward anything on the hold queue */
- if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)) {
+ if (!fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
}
}
}
#if (L2CAP_ERTM_STATS == TRUE)
- p_ccb->fcrb.connect_tick_count = GKI_get_os_tick_count();
+ p_ccb->fcrb.connect_tick_count = osi_time_get_os_boottime_ms();
#endif
/* See if we can forward anything on the hold queue */
- if ( (p_ccb->chnl_state == CST_OPEN) && (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))) {
+ if ( (p_ccb->chnl_state == CST_OPEN) &&
+ (!fixed_queue_is_empty(p_ccb->xmit_hold_q))) {
l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
}
break;
(*l2cb.fixed_reg[p_ccb->local_cid - L2CAP_FIRST_FIXED_CHNL].pL2CA_FixedData_Cb)
(p_ccb->local_cid, p_ccb->p_lcb->remote_bd_addr, (BT_HDR *)p_data);
else {
- GKI_freebuf (p_data);
+ osi_free (p_data);
}
break;
}
if (p_ccb->config_done & OB_CFG_DONE) {
l2c_enqueue_peer_data (p_ccb, (BT_HDR *)p_data);
} else {
- GKI_freebuf (p_data);
+ osi_free (p_data);
}
break;
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
case L2CEVT_L2CA_DATA_WRITE: /* Upper layer data to send */
- GKI_freebuf (p_data);
+ osi_free (p_data);
break;
}
}
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
case L2CEVT_L2CA_DATA_WRITE: /* Upper layer data to send */
- GKI_freebuf (p_data);
+ osi_free (p_data);
break;
}
}
UINT16_TO_STREAM (p, p_ccb->remote_cid);
}
- GKI_enqueue (&p_ccb->xmit_hold_q, p_buf);
+ fixed_queue_enqueue(p_ccb->xmit_hold_q, p_buf);
l2cu_check_channel_congestion (p_ccb);
#include <string.h>
#include "bt_trace.h"
#include "bt_types.h"
-#include "gki.h"
#include "hcimsgs.h"
#include "l2c_api.h"
#include "l2c_int.h"
#include "btm_api.h"
#include "btm_int.h"
#include "btu.h"
+#include "allocator.h"
#if (CLASSIC_BT_INCLUDED == TRUE)
l2c_fcr_stop_timer (p_ccb);
if (p_fcrb->p_rx_sdu) {
- GKI_freebuf (p_fcrb->p_rx_sdu);
+ osi_free(p_fcrb->p_rx_sdu);
+ p_fcrb->p_rx_sdu = NULL;
}
- while (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q)) {
- GKI_freebuf (GKI_dequeue (&p_fcrb->waiting_for_ack_q));
- }
- while (!GKI_queue_is_empty(&p_fcrb->srej_rcv_hold_q)) {
- GKI_freebuf (GKI_dequeue (&p_fcrb->srej_rcv_hold_q));
- }
+ fixed_queue_free(p_fcrb->waiting_for_ack_q, osi_free_func);
+ p_fcrb->waiting_for_ack_q = NULL;
- while (!GKI_queue_is_empty(&p_fcrb->retrans_q)) {
- GKI_freebuf (GKI_dequeue (&p_fcrb->retrans_q));
- }
+ fixed_queue_free(p_fcrb->srej_rcv_hold_q, osi_free_func);
+ p_fcrb->srej_rcv_hold_q = NULL;
+ fixed_queue_free(p_fcrb->retrans_q, osi_free_func);
+ p_fcrb->retrans_q = NULL;
+
btu_stop_quick_timer (&p_fcrb->ack_timer);
btu_stop_quick_timer (&p_ccb->fcrb.mon_retrans_timer);
#if (L2CAP_ERTM_STATS == TRUE)
if ( (p_ccb->local_cid >= L2CAP_BASE_APPL_CID) && (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_ERTM_MODE) ) {
- UINT32 dur = GKI_get_os_tick_count() - p_ccb->fcrb.connect_tick_count;
- char *p_str = (char *)GKI_getbuf(120);
+ UINT32 dur = osi_time_get_os_boottime_ms() - p_ccb->fcrb.connect_tick_count;
+ char *p_str = (char *)osi_malloc(120);
UINT16 i;
UINT32 throughput_avg, ack_delay_avg, ack_q_count_avg;
"throughput_avg: %8u (kbytes/sec), ack_delay_avg: %8u ms, ack_q_count_avg: %8u",
throughput_avg, ack_delay_avg, ack_q_count_avg );
- GKI_freebuf(p_str);
+ osi_free(p_str);
}
BT_TRACE(TRACE_CTRL_GENERAL | TRACE_LAYER_GKI | TRACE_ORG_GKI , TRACE_TYPE_GENERIC,
** Returns pointer to new buffer
**
*******************************************************************************/
-BT_HDR *l2c_fcr_clone_buf (BT_HDR *p_buf, UINT16 new_offset, UINT16 no_of_bytes, UINT8 pool)
+BT_HDR *l2c_fcr_clone_buf (BT_HDR *p_buf, UINT16 new_offset, UINT16 no_of_bytes)
{
assert(p_buf != NULL);
- BT_HDR *p_buf2;
-
- /* If using the common pool, should be at least 10% free. */
- if ( (pool == HCI_ACL_POOL_ID) && (GKI_poolutilization (pool) > 90) ) {
- L2CAP_TRACE_ERROR ("L2CAP - failed to clone buffer on HCI_ACL_POOL_ID Utilization: %u", GKI_poolutilization(pool));
- return (NULL);
- }
-
- if ((p_buf2 = (BT_HDR *)GKI_getpoolbuf(pool)) != NULL) {
- UINT16 pool_buf_size = GKI_get_pool_bufsize (pool);
-
- /* Make sure buffer fits into buffer pool */
- if ((no_of_bytes + sizeof(BT_HDR) + new_offset) > pool_buf_size) {
- L2CAP_TRACE_ERROR("##### l2c_fcr_clone_buf (NumBytes %d) -> Exceeds poolsize %d [bytes %d + BT_HDR %d + offset %d]",
- (no_of_bytes + sizeof(BT_HDR) + new_offset),
- pool_buf_size, no_of_bytes, sizeof(BT_HDR),
- new_offset);
-
- GKI_freebuf(p_buf2);
- return (NULL);
- }
-
- p_buf2->offset = new_offset;
- p_buf2->len = no_of_bytes;
+ /*
+ * NOTE: We allocate extra L2CAP_FCS_LEN octets, in case we need to put
+ * the FCS (Frame Check Sequence) at the end of the buffer.
+ */
+ uint16_t buf_size = no_of_bytes + sizeof(BT_HDR) + new_offset + L2CAP_FCS_LEN;
+#if (L2CAP_ERTM_STATS == TRUE)
+ /*
+ * NOTE: If L2CAP_ERTM_STATS is enabled, we need 4 extra octets at the
+ * end for a timestamp at the end of an I-frame.
+ */
+ buf_size += sizeof(uint32_t);
+#endif
-    memcpy (((UINT8 *)(p_buf2 + 1)) + p_buf2->offset,
-            ((UINT8 *)(p_buf + 1)) + p_buf->offset,
-            no_of_bytes);
-    } else {
-        L2CAP_TRACE_ERROR ("L2CAP - failed to clone buffer, Pool: %u Count: %u", pool, GKI_poolfreecount(pool));
-    }
+    BT_HDR *p_buf2 = (BT_HDR *)osi_malloc(buf_size);
+    if (p_buf2 == NULL) {
+        /* Preserve the original graceful-failure contract: callers of
+         * l2c_fcr_clone_buf() already check for a NULL return. */
+        L2CAP_TRACE_ERROR("L2CAP - failed to clone buffer (size %u)", buf_size);
+        return (NULL);
+    }
+
+    p_buf2->offset = new_offset;
+    p_buf2->len = no_of_bytes;
+    memcpy(((UINT8 *)(p_buf2 + 1)) + p_buf2->offset,
+           ((UINT8 *)(p_buf + 1)) + p_buf->offset,
+           no_of_bytes);
     return (p_buf2);
}
if (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_ERTM_MODE) {
/* Check if remote side flowed us off or the transmit window is full */
if ( (p_ccb->fcrb.remote_busy == TRUE)
- || (GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q) >= p_ccb->peer_cfg.fcr.tx_win_sz) ) {
+ || (fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q) >= p_ccb->peer_cfg.fcr.tx_win_sz) ) {
#if (L2CAP_ERTM_STATS == TRUE)
- if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)) {
+ if (!fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
p_ccb->fcrb.xmit_window_closed++;
if ((p_ccb->p_lcb->sent_not_acked < 2) && (l2cb.controller_xmit_window > 0)) {
ctrl_word |= (p_ccb->fcrb.next_seq_expected << L2CAP_FCR_REQ_SEQ_BITS_SHIFT);
ctrl_word |= pf_bit;
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (L2CAP_CMD_POOL_ID)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(L2CAP_CMD_BUF_SIZE)) != NULL) {
p_buf->offset = HCI_DATA_PREAMBLE_SIZE;
p_buf->len = L2CAP_PKT_OVERHEAD + L2CAP_FCR_OVERHEAD;
if (p_buf->len < min_pdu_len) {
L2CAP_TRACE_WARNING ("Rx L2CAP PDU: CID: 0x%04x Len too short: %u", p_ccb->local_cid, p_buf->len);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
}
L2CAP_TRACE_EVENT (" eRTM Rx Nxt_tx_seq %u, Lst_rx_ack %u, Nxt_seq_exp %u, Lst_ack_snt %u, wt_q.cnt %u, tries %u",
- p_ccb->fcrb.next_tx_seq, p_ccb->fcrb.last_rx_ack, p_ccb->fcrb.next_seq_expected,
- p_ccb->fcrb.last_ack_sent, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q), p_ccb->fcrb.num_tries);
+ p_ccb->fcrb.next_tx_seq, p_ccb->fcrb.last_rx_ack,
+ p_ccb->fcrb.next_seq_expected,
+ p_ccb->fcrb.last_ack_sent,
+ fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q),
+ p_ccb->fcrb.num_tries);
#endif /* BT_TRACE_VERBOSE */
if (l2c_fcr_rx_get_fcs(p_buf) != fcs) {
L2CAP_TRACE_WARNING ("Rx L2CAP PDU: CID: 0x%04x BAD FCS", p_ccb->local_cid);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
return;
}
}
/* then it speeds up recovery significantly if we poll him back soon after his poll. */
btu_start_quick_timer (&p_ccb->fcrb.mon_retrans_timer, BTU_TTYPE_L2CAP_CHNL, QUICK_TIMER_TICKS_PER_SEC);
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
ctrl_word &= ~L2CAP_FCR_P_BIT;
}
- if (GKI_queue_is_empty(&p_ccb->fcrb.waiting_for_ack_q)) {
+ if (fixed_queue_is_empty(p_ccb->fcrb.waiting_for_ack_q)) {
p_ccb->fcrb.num_tries = 0;
}
/* Process receive sequence number */
if (!process_reqseq (p_ccb, ctrl_word)) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
}
/* If we have some buffers held while doing SREJ, and SREJ has cleared, process them now */
- if ( (!p_ccb->fcrb.local_busy) && (!p_ccb->fcrb.srej_sent) && (!GKI_queue_is_empty(&p_ccb->fcrb.srej_rcv_hold_q))) {
- BUFFER_Q temp_q = p_ccb->fcrb.srej_rcv_hold_q;
-
- GKI_init_q (&p_ccb->fcrb.srej_rcv_hold_q);
+ if ( (!p_ccb->fcrb.local_busy) && (!p_ccb->fcrb.srej_sent) &&
+ (!fixed_queue_is_empty(p_ccb->fcrb.srej_rcv_hold_q))) {
+ fixed_queue_t *temp_q = p_ccb->fcrb.srej_rcv_hold_q;
+ p_ccb->fcrb.srej_rcv_hold_q = fixed_queue_new(SIZE_MAX);
- while ((p_buf = (BT_HDR *)GKI_dequeue (&temp_q)) != NULL) {
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(temp_q)) != NULL) {
if (p_ccb->in_use && (p_ccb->chnl_state == CST_OPEN)) {
/* Get the control word */
p = ((UINT8 *)(p_buf + 1)) + p_buf->offset - L2CAP_FCR_OVERHEAD;
/* Process the SREJ held I-frame, but do not send an RR for each individual frame */
process_i_frame (p_ccb, p_buf, ctrl_word, TRUE);
} else {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
/* If more frames were lost during SREJ, send a REJ */
l2c_fcr_send_S_frame (p_ccb, L2CAP_FCR_SUP_REJ, 0);
}
}
+ fixed_queue_free(temp_q, NULL);
/* Now, if needed, send one RR for the whole held queue */
if ( (!p_ccb->fcrb.local_busy) && (!p_ccb->fcrb.rej_sent) && (!p_ccb->fcrb.srej_sent)
}
/* If a window has opened, check if we can send any more packets */
- if ( (!GKI_queue_is_empty(&p_ccb->fcrb.retrans_q) || !GKI_queue_is_empty(&p_ccb->xmit_hold_q))
+ if ( (!fixed_queue_is_empty(p_ccb->fcrb.retrans_q) ||
+ !fixed_queue_is_empty(p_ccb->xmit_hold_q))
&& (p_ccb->fcrb.wait_ack == FALSE)
&& (l2c_fcr_is_flow_controlled (p_ccb) == FALSE) ) {
l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
{
assert(p_ccb != NULL);
L2CAP_TRACE_DEBUG ("l2c_fcr_proc_tout: CID: 0x%04x num_tries: %u (max: %u) wait_ack: %u ack_q_count: %u",
- p_ccb->local_cid, p_ccb->fcrb.num_tries, p_ccb->peer_cfg.fcr.max_transmit,
- p_ccb->fcrb.wait_ack, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q));
+ p_ccb->local_cid, p_ccb->fcrb.num_tries,
+ p_ccb->peer_cfg.fcr.max_transmit,
+ p_ccb->fcrb.wait_ack,
+ fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q));
#if (L2CAP_ERTM_STATS == TRUE)
p_ccb->fcrb.retrans_touts++;
&& ((ctrl_word & L2CAP_FCR_SUP_BITS) == (L2CAP_FCR_SUP_SREJ << L2CAP_FCR_SUP_SHIFT))
&& ((ctrl_word & L2CAP_FCR_P_BIT) == 0) ) {
/* If anything still waiting for ack, restart the timer if it was stopped */
- if (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q)) {
- l2c_fcr_start_timer (p_ccb);
- }
+ if (!fixed_queue_is_empty(p_fcrb->waiting_for_ack_q)) {
+ l2c_fcr_start_timer(p_ccb);
+ }
return (TRUE);
}
num_bufs_acked = (req_seq - p_fcrb->last_rx_ack) & L2CAP_FCR_SEQ_MODULO;
/* Verify the request sequence is in range before proceeding */
- if (num_bufs_acked > GKI_queue_length(&p_fcrb->waiting_for_ack_q)) {
+ if (num_bufs_acked > fixed_queue_length(p_fcrb->waiting_for_ack_q)) {
/* The channel is closed if ReqSeq is not in range */
L2CAP_TRACE_WARNING ("L2CAP eRTM Frame BAD Req_Seq - ctrl_word: 0x%04x req_seq 0x%02x last_rx_ack: 0x%02x QCount: %u",
- ctrl_word, req_seq, p_fcrb->last_rx_ack, GKI_queue_length(&p_fcrb->waiting_for_ack_q));
+ ctrl_word, req_seq, p_fcrb->last_rx_ack,
+ fixed_queue_length(p_fcrb->waiting_for_ack_q));
l2cu_disconnect_chnl (p_ccb);
return (FALSE);
#endif
for (xx = 0; xx < num_bufs_acked; xx++) {
- ls = ((BT_HDR *)(GKI_getfirst(&p_fcrb->waiting_for_ack_q)))->layer_specific & L2CAP_FCR_SAR_BITS;
+ BT_HDR *p_tmp = (BT_HDR *)fixed_queue_try_dequeue(p_fcrb->waiting_for_ack_q);
+ ls = p_tmp->layer_specific & L2CAP_FCR_SAR_BITS;
if ( (ls == L2CAP_FCR_UNSEG_SDU) || (ls == L2CAP_FCR_END_SDU) ) {
full_sdus_xmitted++;
}
-
- GKI_freebuf (GKI_dequeue (&p_fcrb->waiting_for_ack_q));
+ osi_free(p_tmp);
}
/* If we are still in a wait_ack state, do not mess with the timer */
/* Check if we need to call the "packet_sent" callback */
if ( (p_ccb->p_rcb) && (p_ccb->p_rcb->api.pL2CA_TxComplete_Cb) && (full_sdus_xmitted) ) {
/* Special case for eRTM, if all packets sent, send 0xFFFF */
- if (GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q) && (GKI_queue_is_empty(&p_ccb->xmit_hold_q))) {
+ if (fixed_queue_is_empty(p_fcrb->waiting_for_ack_q) &&
+ fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
full_sdus_xmitted = 0xFFFF;
}
}
/* If anything still waiting for ack, restart the timer if it was stopped */
- if (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q)) {
+ if (!fixed_queue_is_empty(p_fcrb->waiting_for_ack_q)) {
l2c_fcr_start_timer (p_ccb);
}
L2CAP_TRACE_DEBUG ("process_s_frame hit_max_retries");
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
/* If we were doing checkpoint recovery, first retransmit all unacked I-frames */
if (ctrl_word & L2CAP_FCR_F_BIT) {
if (!retransmit_i_frames (p_ccb, L2C_FCR_RETX_ALL_PKTS)) {
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
return;
}
}
if ( (tx_seq != p_fcrb->next_seq_expected) && (p_fcrb->local_busy) ) {
L2CAP_TRACE_WARNING ("Dropping bad I-Frame since we flowed off, tx_seq:%u", tx_seq);
l2c_fcr_send_S_frame (p_ccb, L2CAP_FCR_SUP_RNR, 0);
- GKI_freebuf(p_buf);
- return;
- }
-
- /* If there are no free buffers in the user Rx queue, drop the */
- /* received buffer now before we update any sequence numbers */
- if (GKI_poolfreecount (p_ccb->ertm_info.user_rx_pool_id) == 0) {
- L2CAP_TRACE_WARNING ("L2CAP CID: 0x%04x Dropping I-Frame seq: %u User RX Pool: %u (Size: %u) has no free buffers!!",
- p_ccb->local_cid, tx_seq, p_ccb->ertm_info.user_rx_pool_id,
- GKI_poolcount (p_ccb->ertm_info.user_rx_pool_id));
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
return;
}
if (num_lost >= p_ccb->our_cfg.fcr.tx_win_sz) {
/* Duplicate - simply drop it */
L2CAP_TRACE_WARNING ("process_i_frame() Dropping Duplicate Frame tx_seq:%u ExpectedTxSeq %u", tx_seq, p_fcrb->next_seq_expected);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
} else {
L2CAP_TRACE_WARNING ("process_i_frame() CID: 0x%04x Lost: %u tx_seq:%u ExpTxSeq %u Rej: %u SRej: %u",
p_ccb->local_cid, num_lost, tx_seq, p_fcrb->next_seq_expected, p_fcrb->rej_sent, p_fcrb->srej_sent);
if (p_fcrb->srej_sent) {
/* If SREJ sent, save the frame for later processing as long as it is in sequence */
- next_srej = (((BT_HDR *)GKI_getlast(&p_fcrb->srej_rcv_hold_q))->layer_specific + 1) & L2CAP_FCR_SEQ_MODULO;
+ next_srej = (((BT_HDR *)fixed_queue_try_peek_last(p_fcrb->srej_rcv_hold_q))->layer_specific + 1) & L2CAP_FCR_SEQ_MODULO;
- if ( (tx_seq == next_srej) && (GKI_queue_length(&p_fcrb->srej_rcv_hold_q) < p_ccb->our_cfg.fcr.tx_win_sz) ) {
+ if ( (tx_seq == next_srej) && (fixed_queue_length(p_fcrb->srej_rcv_hold_q) < p_ccb->our_cfg.fcr.tx_win_sz) ) {
/* If user gave us a pool for held rx buffers, use that */
- if (p_ccb->ertm_info.fcr_rx_pool_id != HCI_ACL_POOL_ID) {
+ /* TODO: Could that happen? Get rid of this code. */
+ if (p_ccb->ertm_info.fcr_rx_buf_size != L2CAP_FCR_RX_BUF_SIZE) {
BT_HDR *p_buf2;
/* Adjust offset and len so that control word is copied */
p_buf->offset -= L2CAP_FCR_OVERHEAD;
p_buf->len += L2CAP_FCR_OVERHEAD;
- p_buf2 = l2c_fcr_clone_buf (p_buf, p_buf->offset, p_buf->len, p_ccb->ertm_info.fcr_rx_pool_id);
+ p_buf2 = l2c_fcr_clone_buf (p_buf, p_buf->offset, p_buf->len);
if (p_buf2) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
p_buf = p_buf2;
}
p_buf->offset += L2CAP_FCR_OVERHEAD;
num_lost, tx_seq, p_fcrb->next_seq_expected, p_fcrb->rej_sent);
p_buf->layer_specific = tx_seq;
- GKI_enqueue (&p_fcrb->srej_rcv_hold_q, p_buf);
+ fixed_queue_enqueue(p_fcrb->srej_rcv_hold_q, p_buf);
} else {
L2CAP_TRACE_WARNING ("process_i_frame() CID: 0x%04x frame dropped in Srej Sent next_srej:%u hold_q.count:%u win_sz:%u",
- p_ccb->local_cid, next_srej, GKI_queue_length(&p_fcrb->srej_rcv_hold_q), p_ccb->our_cfg.fcr.tx_win_sz);
+ p_ccb->local_cid, next_srej, fixed_queue_length(p_fcrb->srej_rcv_hold_q), p_ccb->our_cfg.fcr.tx_win_sz);
p_fcrb->rej_after_srej = TRUE;
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
} else if (p_fcrb->rej_sent) {
L2CAP_TRACE_WARNING ("process_i_frame() CID: 0x%04x Lost: %u tx_seq:%u ExpTxSeq %u Rej: 1 SRej: %u",
p_ccb->local_cid, num_lost, tx_seq, p_fcrb->next_seq_expected, p_fcrb->srej_sent);
/* If REJ sent, just drop the frame */
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
} else {
L2CAP_TRACE_DEBUG ("process_i_frame() CID: 0x%04x tx_seq:%u ExpTxSeq %u Rej: %u",
p_ccb->local_cid, tx_seq, p_fcrb->next_seq_expected, p_fcrb->rej_sent);
/* If only one lost, we will send SREJ, otherwise we will send REJ */
if (num_lost > 1) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
p_fcrb->rej_sent = TRUE;
l2c_fcr_send_S_frame (p_ccb, L2CAP_FCR_SUP_REJ, 0);
} else {
- if (!GKI_queue_is_empty(&p_fcrb->srej_rcv_hold_q)) {
+ if (!fixed_queue_is_empty(p_fcrb->srej_rcv_hold_q)) {
L2CAP_TRACE_ERROR ("process_i_frame() CID: 0x%04x sending SREJ tx_seq:%d hold_q.count:%u",
- p_ccb->local_cid, tx_seq, GKI_queue_length(&p_fcrb->srej_rcv_hold_q));
+ p_ccb->local_cid, tx_seq, fixed_queue_length(p_fcrb->srej_rcv_hold_q));
}
p_buf->layer_specific = tx_seq;
- GKI_enqueue (&p_fcrb->srej_rcv_hold_q, p_buf);
+ fixed_queue_enqueue(p_fcrb->srej_rcv_hold_q, p_buf);
p_fcrb->srej_sent = TRUE;
l2c_fcr_send_S_frame (p_ccb, L2CAP_FCR_SUP_SREJ, 0);
}
btu_start_quick_timer (&p_ccb->fcrb.ack_timer, BTU_TTYPE_L2CAP_FCR_ACK,
(L2CAP_FCR_ACK_TOUT * QUICK_TIMER_TICKS_PER_SEC) / 1000);
}
- } else if ( ((GKI_queue_is_empty(&p_ccb->xmit_hold_q)) || (l2c_fcr_is_flow_controlled (p_ccb)))
- && (GKI_queue_is_empty(&p_ccb->fcrb.srej_rcv_hold_q))) {
+ } else if ((fixed_queue_is_empty(p_ccb->xmit_hold_q) ||
+ l2c_fcr_is_flow_controlled(p_ccb))
+ && fixed_queue_is_empty(p_ccb->fcrb.srej_rcv_hold_q)) {
if (p_fcrb->local_busy) {
l2c_fcr_send_S_frame (p_ccb, L2CAP_FCR_SUP_RNR, 0);
} else {
if (l2c_fcr_rx_get_fcs(p_buf) != fcs) {
L2CAP_TRACE_WARNING ("Rx L2CAP PDU: CID: 0x%04x BAD FCS", p_ccb->local_cid);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
return;
}
}
/* Make sure it is an I-frame */
if (ctrl_word & L2CAP_FCR_S_FRAME_BIT) {
L2CAP_TRACE_WARNING ("Rx L2CAP PDU: CID: 0x%04x BAD S-frame in streaming mode ctrl_word: 0x%04x", p_ccb->local_cid, ctrl_word);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
/* Lost one or more packets, so flush the SAR queue */
if (p_ccb->fcrb.p_rx_sdu != NULL) {
- GKI_freebuf (p_ccb->fcrb.p_rx_sdu);
+ osi_free(p_ccb->fcrb.p_rx_sdu);
p_ccb->fcrb.p_rx_sdu = NULL;
}
}
if (!do_sar_reassembly (p_ccb, p_buf, ctrl_word)) {
/* Some sort of SAR error, so flush the SAR queue */
if (p_ccb->fcrb.p_rx_sdu != NULL) {
- GKI_freebuf (p_ccb->fcrb.p_rx_sdu);
+ osi_free(p_ccb->fcrb.p_rx_sdu);
p_ccb->fcrb.p_rx_sdu = NULL;
}
}
if (p_fcrb->rx_sdu_len > p_ccb->max_rx_mtu) {
L2CAP_TRACE_WARNING ("SAR - SDU len: %u larger than MTU: %u", p_fcrb->rx_sdu_len, p_fcrb->rx_sdu_len);
packet_ok = FALSE;
- } else if ((p_fcrb->p_rx_sdu = (BT_HDR *)GKI_getpoolbuf (p_ccb->ertm_info.user_rx_pool_id)) == NULL) {
- L2CAP_TRACE_ERROR ("SAR - no buffer for SDU start user_rx_pool_id:%d", p_ccb->ertm_info.user_rx_pool_id);
+ } else if ((p_fcrb->p_rx_sdu = (BT_HDR *)osi_malloc(L2CAP_MAX_BUF_SIZE)) == NULL) {
+ L2CAP_TRACE_ERROR ("SAR - no buffer for SDU start user_rx_buf_size:%d", p_ccb->ertm_info.user_rx_buf_size);
packet_ok = FALSE;
} else {
p_fcrb->p_rx_sdu->offset = 4; /* this is the minimal offset required by OBX to process incoming packets */
p_fcrb->p_rx_sdu->len += p_buf->len;
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
p_buf = NULL;
if (sar_type == L2CAP_FCR_END_SDU) {
}
if (packet_ok == FALSE) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
} else if (p_buf != NULL) {
#if (L2CAP_NUM_FIXED_CHNLS > 0)
if (p_ccb->local_cid < L2CAP_BASE_APPL_CID &&
{
assert(p_ccb != NULL);
- BT_HDR *p_buf, *p_buf2;
+ BT_HDR *p_buf = NULL;
UINT8 *p;
UINT8 buf_seq;
UINT16 ctrl_word;
- if ( (GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q))
+ if ( (!fixed_queue_is_empty(p_ccb->fcrb.waiting_for_ack_q))
&& (p_ccb->peer_cfg.fcr.max_transmit != 0)
&& (p_ccb->fcrb.num_tries >= p_ccb->peer_cfg.fcr.max_transmit) ) {
L2CAP_TRACE_EVENT ("Max Tries Exceeded: (last_acq: %d CID: 0x%04x num_tries: %u (max: %u) ack_q_count: %u",
p_ccb->fcrb.last_rx_ack, p_ccb->local_cid, p_ccb->fcrb.num_tries, p_ccb->peer_cfg.fcr.max_transmit,
- GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q));
+ fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q));
l2cu_disconnect_chnl (p_ccb);
return (FALSE);
}
/* tx_seq indicates whether to retransmit a specific sequence or all (if == L2C_FCR_RETX_ALL_PKTS) */
+ list_t *list_ack = NULL;
+ const list_node_t *node_ack = NULL;
+ if (! fixed_queue_is_empty(p_ccb->fcrb.waiting_for_ack_q)) {
+ list_ack = fixed_queue_get_list(p_ccb->fcrb.waiting_for_ack_q);
+ node_ack = list_begin(list_ack);
+ }
if (tx_seq != L2C_FCR_RETX_ALL_PKTS) {
/* If sending only one, the sequence number tells us which one. Look for it.
*/
- for (p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q); p_buf; p_buf = (BT_HDR *)GKI_getnext (p_buf)) {
- /* Get the old control word */
- p = ((UINT8 *) (p_buf + 1)) + p_buf->offset + L2CAP_PKT_OVERHEAD;
+ if (list_ack != NULL) {
+ for ( ; node_ack != list_end(list_ack); node_ack = list_next(node_ack)) {
+ p_buf = (BT_HDR *)list_node(node_ack);
+ /* Get the old control word */
+ p = ((UINT8 *) (p_buf+1)) + p_buf->offset + L2CAP_PKT_OVERHEAD;
- STREAM_TO_UINT16 (ctrl_word, p);
+ STREAM_TO_UINT16 (ctrl_word, p);
- buf_seq = (ctrl_word & L2CAP_FCR_TX_SEQ_BITS) >> L2CAP_FCR_TX_SEQ_BITS_SHIFT;
+ buf_seq = (ctrl_word & L2CAP_FCR_TX_SEQ_BITS) >> L2CAP_FCR_TX_SEQ_BITS_SHIFT;
- L2CAP_TRACE_DEBUG ("retransmit_i_frames() cur seq: %u looking for: %u", buf_seq, tx_seq);
+ L2CAP_TRACE_DEBUG ("retransmit_i_frames() cur seq: %u looking for: %u", buf_seq, tx_seq);
- if (tx_seq == buf_seq) {
- break;
+ if (tx_seq == buf_seq) {
+ break;
+ }
}
}
if (!p_buf) {
- L2CAP_TRACE_ERROR ("retransmit_i_frames() UNKNOWN seq: %u q_count: %u", tx_seq, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q));
+ L2CAP_TRACE_ERROR ("retransmit_i_frames() UNKNOWN seq: %u q_count: %u",
+ tx_seq,
+ fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q));
return (TRUE);
}
} else {
/* Do not flush other CIDs or partial segments */
if ((p_buf->layer_specific == 0) && (p_buf->event == p_ccb->local_cid)) {
list_remove(p_ccb->p_lcb->link_xmit_data_q, p_buf);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
}
/* Also flush our retransmission queue */
- while (!GKI_queue_is_empty(&p_ccb->fcrb.retrans_q)) {
- GKI_freebuf (GKI_dequeue (&p_ccb->fcrb.retrans_q));
- }
+ while (!fixed_queue_is_empty(p_ccb->fcrb.retrans_q)) {
+ osi_free(fixed_queue_try_dequeue(p_ccb->fcrb.retrans_q));
+ }
- p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q);
+ if (list_ack != NULL) {
+ node_ack = list_begin(list_ack);
+ }
}
- while (p_buf != NULL) {
- p_buf2 = l2c_fcr_clone_buf (p_buf, p_buf->offset, p_buf->len, p_ccb->ertm_info.fcr_tx_pool_id);
+ if (list_ack != NULL) {
+ while (node_ack != list_end(list_ack))
+ {
+ p_buf = (BT_HDR *)list_node(node_ack);
+ node_ack = list_next(node_ack);
- if (p_buf2) {
- p_buf2->layer_specific = p_buf->layer_specific;
+ BT_HDR *p_buf2 = l2c_fcr_clone_buf(p_buf, p_buf->offset, p_buf->len);
+ if (p_buf2)
+ {
+ p_buf2->layer_specific = p_buf->layer_specific;
- GKI_enqueue (&p_ccb->fcrb.retrans_q, p_buf2);
- }
+ fixed_queue_enqueue(p_ccb->fcrb.retrans_q, p_buf2);
+ }
- if ( (tx_seq != L2C_FCR_RETX_ALL_PKTS) || (p_buf2 == NULL) ) {
- break;
- } else {
- p_buf = (BT_HDR *)GKI_getnext (p_buf);
+ if ( (tx_seq != L2C_FCR_RETX_ALL_PKTS) || (p_buf2 == NULL) ) {
+ break;
+ }
}
}
l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
- if (GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q)) {
+ if (fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q))
+ {
p_ccb->fcrb.num_tries++;
l2c_fcr_start_timer (p_ccb);
}
/* If there is anything in the retransmit queue, that goes first
*/
- if (GKI_getfirst(&p_ccb->fcrb.retrans_q)) {
- p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->fcrb.retrans_q);
-
+ p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->fcrb.retrans_q);
+ if (p_buf != NULL) {
/* Update Rx Seq and FCS if we acked some packets while this one was queued */
prepare_I_frame (p_ccb, p_buf, TRUE);
max_pdu = max_packet_length - L2CAP_MAX_HEADER_FCS;
}
- p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->xmit_hold_q);
+ p_buf = (BT_HDR *)fixed_queue_try_peek_first(p_ccb->xmit_hold_q);
/* If there is more data than the MPS, it requires segmentation */
if (p_buf->len > max_pdu) {
/* Get a new buffer and copy the data that can be sent in a PDU */
p_xmit = l2c_fcr_clone_buf (p_buf, L2CAP_MIN_OFFSET + L2CAP_SDU_LEN_OFFSET,
- max_pdu, p_ccb->ertm_info.fcr_tx_pool_id);
+ max_pdu);
if (p_xmit != NULL) {
p_buf->event = p_ccb->local_cid;
/* copy PBF setting */
p_xmit->layer_specific = p_buf->layer_specific;
} else { /* Should never happen if the application has configured buffers correctly */
- L2CAP_TRACE_ERROR ("L2CAP - cannot get buffer, for segmentation, pool: %u", p_ccb->ertm_info.fcr_tx_pool_id);
+ L2CAP_TRACE_ERROR ("L2CAP - cannot get buffer for segmentation, max_pdu: %u", max_pdu);
return (NULL);
}
} else { /* Use the original buffer if no segmentation, or the last segment */
- p_xmit = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
+ p_xmit = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
if (p_xmit->event != 0) {
last_seg = TRUE;
prepare_I_frame (p_ccb, p_xmit, FALSE);
if (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_ERTM_MODE) {
- BT_HDR *p_wack = l2c_fcr_clone_buf (p_xmit, HCI_DATA_PREAMBLE_SIZE, p_xmit->len, p_ccb->ertm_info.fcr_tx_pool_id);
+ BT_HDR *p_wack = l2c_fcr_clone_buf (p_xmit, HCI_DATA_PREAMBLE_SIZE, p_xmit->len);
if (!p_wack) {
- L2CAP_TRACE_ERROR ("L2CAP - no buffer for xmit cloning, CID: 0x%04x Pool: %u Count: %u",
- p_ccb->local_cid, p_ccb->ertm_info.fcr_tx_pool_id, GKI_poolfreecount(p_ccb->ertm_info.fcr_tx_pool_id));
+ L2CAP_TRACE_ERROR("L2CAP - no buffer for xmit cloning, CID: 0x%04x Length: %u",
+ p_ccb->local_cid, p_xmit->len);
/* We will not save the FCS in case we reconfigure and change options */
if (p_ccb->bypass_fcs != L2CAP_BYPASS_FCS) {
}
/* Pretend we sent it and it got lost */
- GKI_enqueue (&p_ccb->fcrb.waiting_for_ack_q, p_xmit);
+ fixed_queue_enqueue(p_ccb->fcrb.waiting_for_ack_q, p_xmit);
return (NULL);
} else {
#if (L2CAP_ERTM_STATS == TRUE)
/* set timestamp at the end of tx I-frame to get acking delay */
p = ((UINT8 *) (p_wack + 1)) + p_wack->offset + p_wack->len;
- UINT32_TO_STREAM (p, GKI_get_os_tick_count());
+ UINT32_TO_STREAM (p, osi_time_get_os_boottime_ms());
#endif
/* We will not save the FCS in case we reconfigure and change options */
if (p_ccb->bypass_fcs != L2CAP_BYPASS_FCS) {
}
p_wack->layer_specific = p_xmit->layer_specific;
- GKI_enqueue (&p_ccb->fcrb.waiting_for_ack_q, p_wack);
+ fixed_queue_enqueue(p_ccb->fcrb.waiting_for_ack_q, p_wack);
}
#if (L2CAP_ERTM_STATS == TRUE)
}
}
- max_retrans_size = GKI_get_pool_bufsize (p_ccb->ertm_info.fcr_tx_pool_id) - sizeof(BT_HDR)
+ max_retrans_size = p_ccb->ertm_info.fcr_tx_buf_size - sizeof(BT_HDR)
- L2CAP_MIN_OFFSET - L2CAP_SDU_LEN_OFFSET - L2CAP_FCS_LEN;
/* Ensure the MPS is not bigger than the MTU */
index = p_ccb->fcrb.ack_delay_avg_index;
/* update sum, max and min of waiting for ack queue size */
- p_ccb->fcrb.ack_q_count_avg[index] += p_ccb->fcrb.waiting_for_ack_q.count;
+ p_ccb->fcrb.ack_q_count_avg[index] +=
+ fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q);
- if ( p_ccb->fcrb.waiting_for_ack_q.count > p_ccb->fcrb.ack_q_count_max[index] ) {
- p_ccb->fcrb.ack_q_count_max[index] = p_ccb->fcrb.waiting_for_ack_q.count;
- }
+ if (fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q) > p_ccb->fcrb.ack_q_count_max[index]) {
+ p_ccb->fcrb.ack_q_count_max[index] = fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q);
+ }
- if ( p_ccb->fcrb.waiting_for_ack_q.count < p_ccb->fcrb.ack_q_count_min[index] ) {
- p_ccb->fcrb.ack_q_count_min[index] = p_ccb->fcrb.waiting_for_ack_q.count;
- }
+ if (fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q) < p_ccb->fcrb.ack_q_count_min[index]) {
+ p_ccb->fcrb.ack_q_count_min[index] = fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q);
+ }
/* update sum, max and min of round trip delay of acking */
- p_buf = (BT_HDR *)(p_ccb->fcrb.waiting_for_ack_q.p_first);
- for (xx = 0; (xx < num_bufs_acked) && (p_buf); xx++) {
- /* adding up length of acked I-frames to get throughput */
- p_ccb->fcrb.throughput[index] += p_buf->len - 8;
-
- if ( xx == num_bufs_acked - 1 ) {
- /* get timestamp from tx I-frame that receiver is acking */
- p = ((UINT8 *) (p_buf + 1)) + p_buf->offset + p_buf->len;
- if (p_ccb->bypass_fcs != L2CAP_BYPASS_FCS) {
- p += L2CAP_FCS_LEN;
- }
+ list_t *list = NULL;
+ if (! fixed_queue_is_empty(p_ccb->fcrb.waiting_for_ack_q))
+ list = fixed_queue_get_list(p_ccb->fcrb.waiting_for_ack_q);
+ if (list != NULL) {
+ for (const list_node_t *node = list_begin(list), xx = 0;
+ (node != list_end(list)) && (xx < num_bufs_acked);
+ node = list_next(node), xx++) {
+ p_buf = list_node(node);
+ /* adding up length of acked I-frames to get throughput */
+ p_ccb->fcrb.throughput[index] += p_buf->len - 8;
+
+ if ( xx == num_bufs_acked - 1 ) {
+ /* get timestamp from tx I-frame that receiver is acking */
+ p = ((UINT8 *) (p_buf+1)) + p_buf->offset + p_buf->len;
+ if (p_ccb->bypass_fcs != L2CAP_BYPASS_FCS) {
+ p += L2CAP_FCS_LEN;
+ }
- STREAM_TO_UINT32 (timestamp, p);
- delay = GKI_get_os_tick_count() - timestamp;
+ STREAM_TO_UINT32(timestamp, p);
+ delay = osi_time_get_os_boottime_ms() - timestamp;
- p_ccb->fcrb.ack_delay_avg[index] += delay;
- if ( delay > p_ccb->fcrb.ack_delay_max[index] ) {
- p_ccb->fcrb.ack_delay_max[index] = delay;
- }
- if ( delay < p_ccb->fcrb.ack_delay_min[index] ) {
- p_ccb->fcrb.ack_delay_min[index] = delay;
- }
+ p_ccb->fcrb.ack_delay_avg[index] += delay;
+ if ( delay > p_ccb->fcrb.ack_delay_max[index] ) {
+ p_ccb->fcrb.ack_delay_max[index] = delay;
+ }
+ if ( delay < p_ccb->fcrb.ack_delay_min[index] ) {
+ p_ccb->fcrb.ack_delay_min[index] = delay;
+ }
+ }
}
-
- p_buf = GKI_getnext(p_buf);
}
p_ccb->fcrb.ack_delay_avg_count++;
p_ccb->fcrb.ack_delay_avg[index] /= L2CAP_ERTM_STATS_AVG_NUM_SAMPLES;
/* calculate throughput */
- timestamp = GKI_get_os_tick_count();
+ timestamp = osi_time_get_os_boottime_ms();
if (timestamp - p_ccb->fcrb.throughput_start > 0 ) {
p_ccb->fcrb.throughput[index] /= (timestamp - p_ccb->fcrb.throughput_start);
}
}
}
#endif
-#endif ///CLASSIC_BT_INCLUDED == TRUE
\ No newline at end of file
+#endif ///CLASSIC_BT_INCLUDED == TRUE
#include "controller.h"
//#include "btcore/include/counter.h"
-#include "gki.h"
#include "bt_types.h"
//#include "bt_utils.h"
#include "hcimsgs.h"
while (!list_is_empty(p_lcb->link_xmit_data_q)) {
p_buf = list_front(p_lcb->link_xmit_data_q);
list_remove(p_lcb->link_xmit_data_q, p_buf);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
} else
#endif
*******************************************************************************/
void l2c_link_adjust_chnl_allocation (void)
{
- tL2C_CCB *p_ccb;
UINT8 xx;
- UINT16 weighted_chnls[GKI_NUM_TOTAL_BUF_POOLS];
- UINT16 quota_per_weighted_chnls[GKI_NUM_TOTAL_BUF_POOLS];
- UINT16 reserved_buff[GKI_NUM_TOTAL_BUF_POOLS];
L2CAP_TRACE_DEBUG ("l2c_link_adjust_chnl_allocation");
- /* initialize variables */
- for (xx = 0; xx < GKI_NUM_TOTAL_BUF_POOLS; xx++ ) {
- weighted_chnls[xx] = 0;
- reserved_buff[xx] = 0;
- }
-
- /* add up all of tx and rx data rate requirement */
- /* channel required higher data rate will get more buffer quota */
- for (xx = 0; xx < MAX_L2CAP_CHANNELS; xx++) {
- p_ccb = l2cb.ccb_pool + xx;
-
- if (!p_ccb->in_use) {
- continue;
- }
-
- if (p_ccb->peer_cfg.fcr.mode != L2CAP_FCR_BASIC_MODE) {
- weighted_chnls[p_ccb->ertm_info.user_tx_pool_id] += p_ccb->tx_data_rate;
- weighted_chnls[p_ccb->ertm_info.user_rx_pool_id] += p_ccb->rx_data_rate;
-
- if (p_ccb->ertm_info.fcr_tx_pool_id == HCI_ACL_POOL_ID) {
- /* reserve buffers only for wait_for_ack_q to maximize throughput */
- /* retrans_q will work based on buffer status */
- reserved_buff[HCI_ACL_POOL_ID] += p_ccb->peer_cfg.fcr.tx_win_sz;
- }
-
- if (p_ccb->ertm_info.fcr_rx_pool_id == HCI_ACL_POOL_ID) {
- /* reserve buffers for srej_rcv_hold_q */
- reserved_buff[HCI_ACL_POOL_ID] += p_ccb->peer_cfg.fcr.tx_win_sz;
- }
- } else {
- /* low data rate is 1, medium is 2, high is 3 and no traffic is 0 */
- weighted_chnls[HCI_ACL_POOL_ID] += p_ccb->tx_data_rate + p_ccb->rx_data_rate;
- }
- }
-
-
- /* get unit quota per pool */
- for (xx = 0; xx < GKI_NUM_TOTAL_BUF_POOLS; xx++ ) {
- if ( weighted_chnls[xx] > 0 ) {
- if (GKI_poolcount(xx) > reserved_buff[xx]) {
- quota_per_weighted_chnls[xx] = ((GKI_poolcount(xx) - reserved_buff[xx]) / weighted_chnls[xx]) + 1;
- } else {
- quota_per_weighted_chnls[xx] = 1;
- }
-
- L2CAP_TRACE_DEBUG ("POOL ID:%d, GKI_poolcount = %d, reserved_buff = %d, weighted_chnls = %d, quota_per_weighted_chnls = %d",
- xx, GKI_poolcount(xx), reserved_buff[xx], weighted_chnls[xx], quota_per_weighted_chnls[xx] );
- } else {
- quota_per_weighted_chnls[xx] = 0;
- }
- }
-
-
/* assign buffer quota to each channel based on its data rate requirement */
- for (xx = 0; xx < MAX_L2CAP_CHANNELS; xx++) {
- p_ccb = l2cb.ccb_pool + xx;
+ for (xx = 0; xx < MAX_L2CAP_CHANNELS; xx++)
+ {
+ tL2C_CCB *p_ccb = l2cb.ccb_pool + xx;
if (!p_ccb->in_use) {
continue;
- }
-
- if (p_ccb->peer_cfg.fcr.mode != L2CAP_FCR_BASIC_MODE) {
- p_ccb->buff_quota = quota_per_weighted_chnls[p_ccb->ertm_info.user_tx_pool_id] * p_ccb->tx_data_rate;
+ }
- L2CAP_TRACE_EVENT ("CID:0x%04x FCR Mode:%u UserTxPool:%u Priority:%u TxDataRate:%u Quota:%u",
- p_ccb->local_cid, p_ccb->peer_cfg.fcr.mode, p_ccb->ertm_info.user_tx_pool_id,
- p_ccb->ccb_priority, p_ccb->tx_data_rate, p_ccb->buff_quota);
-
- } else {
- p_ccb->buff_quota = quota_per_weighted_chnls[HCI_ACL_POOL_ID] * p_ccb->tx_data_rate;
-
- L2CAP_TRACE_EVENT ("CID:0x%04x Priority:%u TxDataRate:%u Quota:%u",
- p_ccb->local_cid,
- p_ccb->ccb_priority, p_ccb->tx_data_rate, p_ccb->buff_quota);
- }
+ tL2CAP_CHNL_DATA_RATE data_rate = p_ccb->tx_data_rate + p_ccb->rx_data_rate;
+ p_ccb->buff_quota = L2CAP_CBB_DEFAULT_DATA_RATE_BUFF_QUOTA * data_rate;
+ L2CAP_TRACE_EVENT("CID:0x%04x FCR Mode:%u Priority:%u TxDataRate:%u RxDataRate:%u Quota:%u",
+ p_ccb->local_cid, p_ccb->peer_cfg.fcr.mode,
+ p_ccb->ccb_priority, p_ccb->tx_data_rate,
+ p_ccb->rx_data_rate, p_ccb->buff_quota);
/* quota may be change so check congestion */
l2cu_check_channel_congestion (p_ccb);
*/
if (list_is_empty(p_lcb->link_xmit_data_q)) {
for (p_ccb = p_lcb->ccb_queue.p_first_ccb; p_ccb; p_ccb = p_ccb->p_next_ccb) {
- if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)) {
+ if (!fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
need_to_active = TRUE;
break;
}
/* Find the LCB based on the handle */
if ((p_lcb = l2cu_find_lcb_by_handle (handle)) == NULL) {
L2CAP_TRACE_WARNING ("L2CAP - rcvd segment complete, unknown handle: %d\n", handle);
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
return;
}
l2c_link_check_send_pkts (p_lcb, NULL, NULL);
} else {
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
}
}
#include "bt_target.h"
#include "btm_int.h"
#include "btu.h"
-#include "gki.h"
#include "hcimsgs.h"
#include "l2c_api.h"
#include "l2c_int.h"
/* Ensure we have enough space in the buffer for the L2CAP and HCI headers */
if (p_buf->offset < L2CAP_BCST_MIN_OFFSET) {
L2CAP_TRACE_ERROR ("L2CAP - cannot send buffer, offset: %d", p_buf->offset);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
" opcode:%d cur count:%d", handle, p_msg->layer_specific, rcv_cid,
cmd_code, list_length(l2cb.rcv_pending_q));
}
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
return;
}
} else {
L2CAP_TRACE_WARNING ("L2CAP - expected pkt start or complete, got: %d", pkt_type);
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
return;
}
if (rcv_cid >= L2CAP_BASE_APPL_CID) {
if ((p_ccb = l2cu_find_ccb_by_cid (p_lcb, rcv_cid)) == NULL) {
L2CAP_TRACE_WARNING ("L2CAP - unknown CID: 0x%04x", rcv_cid);
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
return;
}
}
p_msg->offset += L2CAP_PKT_OVERHEAD;
} else {
L2CAP_TRACE_WARNING ("L2CAP - got incorrect hci header" );
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
return;
}
L2CAP_TRACE_WARNING ("L2CAP - bad length in pkt. Exp: %d Act: %d",
l2cap_len, p_msg->len);
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
return;
}
#if (CLASSIC_BT_INCLUDED == TRUE)
process_l2cap_cmd (p_lcb, p, l2cap_len);
#endif ///CLASSIC_BT_INCLUDED == TRUE
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
} else if (rcv_cid == L2CAP_CONNECTIONLESS_CID) {
//counter_add("l2cap.ch2.rx.bytes", l2cap_len);
//counter_add("l2cap.ch2.rx.pkts", 1);
/* nothing to do */
} else
#endif
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
}
#if (BLE_INCLUDED == TRUE)
else if (rcv_cid == L2CAP_BLE_SIGNALLING_CID) {
//counter_add("l2cap.ble.rx.bytes", l2cap_len);
//counter_add("l2cap.ble.rx.pkts", 1);
l2cble_process_sig_cmd (p_lcb, p, l2cap_len);
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
}
#endif
#if (L2CAP_NUM_FIXED_CHNLS > 0)
(*l2cb.fixed_reg[rcv_cid - L2CAP_FIRST_FIXED_CHNL].pL2CA_FixedData_Cb)
(rcv_cid, p_lcb->remote_bd_addr, p_msg);
} else {
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
}
}
#endif
//counter_add("l2cap.dyn.rx.bytes", l2cap_len);
//counter_add("l2cap.dyn.rx.pkts", 1);
if (p_ccb == NULL) {
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
} else {
/* Basic mode packets go straight to the state machine */
if (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_BASIC_MODE) {
l2c_fcr_proc_pdu (p_ccb, p_msg);
#endif ///CLASSIC_BT_INCLUDED == TRUE
} else {
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
}
}
}
void l2c_free(void)
{
list_free(l2cb.rcv_pending_q);
+ l2cb.rcv_pending_q = NULL;
}
/*******************************************************************************
/* Find the channel control block. We don't know the link it is on. */
if ((p_ccb = l2cu_find_ccb_by_cid (NULL, cid)) == NULL) {
L2CAP_TRACE_WARNING ("L2CAP - no CCB for L2CA_DataWrite, CID: %d", cid);
- GKI_freebuf (p_data);
+ osi_free (p_data);
return (L2CAP_DW_FAILED);
}
bigger than mtu size of peer is a violation of protocol */
if (p_data->len > p_ccb->peer_cfg.mtu) {
L2CAP_TRACE_WARNING ("L2CAP - CID: 0x%04x cannot send message bigger than peer's mtu size", cid);
- GKI_freebuf (p_data);
+ osi_free (p_data);
return (L2CAP_DW_FAILED);
}
#endif
/* If already congested, do not accept any more packets */
if (p_ccb->cong_sent) {
L2CAP_TRACE_ERROR ("L2CAP - CID: 0x%04x cannot send, already congested xmit_hold_q.count: %u buff_quota: %u",
- p_ccb->local_cid, GKI_queue_length(&p_ccb->xmit_hold_q), p_ccb->buff_quota);
+ p_ccb->local_cid,
+ fixed_queue_length(p_ccb->xmit_hold_q),
+ p_ccb->buff_quota);
- GKI_freebuf (p_data);
+ osi_free (p_data);
return (L2CAP_DW_FAILED);
}
#include <string.h>
//#include <stdio.h>
-#include "gki.h"
#include "bt_types.h"
#include "hcidefs.h"
#include "hcimsgs.h"
if ((p_rcb = l2cu_find_rcb_by_psm (psm)) == NULL) {
L2CAP_TRACE_ERROR ("L2CAP - no RCB for l2c_ucd_data_ind_cback, PSM: 0x%04x", psm);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
} else {
p_rcb->ucd.cb_info.pL2CA_UCD_Data_Cb(rem_bda, p_buf);
}
if (((p_rcb = l2cu_find_rcb_by_psm (psm)) == NULL)
|| ( p_rcb->ucd.state == L2C_UCD_STATE_UNUSED )) {
L2CAP_TRACE_WARNING ("L2CAP - no RCB for L2CA_UcdDataWrite, PSM: 0x%04x", psm);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (L2CAP_DW_FAILED);
}
if (((p_lcb = l2cu_find_lcb_by_bd_addr (rem_bda, BT_TRANSPORT_BR_EDR)) == NULL)
|| ((p_ccb = l2cu_find_ccb_by_cid (p_lcb, L2CAP_CONNECTIONLESS_CID)) == NULL)) {
if ( l2c_ucd_connect (rem_bda) == FALSE ) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (L2CAP_DW_FAILED);
}
/* If we still don't have lcb and ccb after connect attempt, then can't proceed */
if (((p_lcb = l2cu_find_lcb_by_bd_addr (rem_bda, BT_TRANSPORT_BR_EDR)) == NULL)
|| ((p_ccb = l2cu_find_ccb_by_cid (p_lcb, L2CAP_CONNECTIONLESS_CID)) == NULL)) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (L2CAP_DW_FAILED);
}
}
/* UCD MTU check */
if ((p_lcb->ucd_mtu) && (p_buf->len > p_lcb->ucd_mtu)) {
L2CAP_TRACE_WARNING ("L2CAP - Handle: 0x%04x UCD bigger than peer's UCD mtu size cannot be sent", p_lcb->handle);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (L2CAP_DW_FAILED);
}
if (p_ccb->cong_sent) {
L2CAP_TRACE_ERROR ("L2CAP - Handle: 0x%04x UCD cannot be sent, already congested count: %u buff_quota: %u",
p_lcb->handle,
- (p_ccb->xmit_hold_q.count + p_lcb->ucd_out_sec_pending_q.count),
+ (fixed_queue_length(p_ccb->xmit_hold_q) +
+ fixed_queue_length(p_lcb->ucd_out_sec_pending_q)),
p_ccb->buff_quota);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (L2CAP_DW_FAILED);
}
{
/* clean up any security pending UCD */
- while (p_lcb->ucd_out_sec_pending_q.p_first) {
+ while (! fixed_queue_is_empty(p_lcb->ucd_out_sec_pending_q)) {
- GKI_freebuf (GKI_dequeue (&p_lcb->ucd_out_sec_pending_q));
- }
-
- while (p_lcb->ucd_in_sec_pending_q.p_first) {
- GKI_freebuf (GKI_dequeue (&p_lcb->ucd_in_sec_pending_q));
- }
+ osi_free(fixed_queue_try_dequeue(p_lcb->ucd_out_sec_pending_q));
+ }
+ fixed_queue_free(p_lcb->ucd_out_sec_pending_q, NULL);
+ p_lcb->ucd_out_sec_pending_q = NULL;
+
+ while (! fixed_queue_is_empty(p_lcb->ucd_in_sec_pending_q)) {
+ osi_free(fixed_queue_try_dequeue(p_lcb->ucd_in_sec_pending_q));
+ }
+ fixed_queue_free(p_lcb->ucd_in_sec_pending_q, NULL);
+ p_lcb->ucd_in_sec_pending_q = NULL;
}
/*******************************************************************************
*******************************************************************************/
void l2c_ucd_enqueue_pending_out_sec_q(tL2C_CCB *p_ccb, void *p_data)
{
- GKI_enqueue (&p_ccb->p_lcb->ucd_out_sec_pending_q, p_data);
+ fixed_queue_enqueue(p_ccb->p_lcb->ucd_out_sec_pending_q, p_data);
l2cu_check_channel_congestion (p_ccb);
}
*******************************************************************************/
BOOLEAN l2c_ucd_check_pending_out_sec_q(tL2C_CCB *p_ccb)
{
- UINT8 *p;
- UINT16 psm;
- BT_HDR *p_buf;
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_peek_first(p_ccb->p_lcb->ucd_out_sec_pending_q);
+
+ if (p_buf != NULL) {
+ UINT16 psm;
+ UINT8 *p = (UINT8 *)(p_buf + 1) + p_buf->offset;
- if ( p_ccb->p_lcb->ucd_out_sec_pending_q.count ) {
- p_buf = (BT_HDR *)(p_ccb->p_lcb->ucd_out_sec_pending_q.p_first);
- p = (UINT8 *)(p_buf + 1) + p_buf->offset;
STREAM_TO_UINT16(psm, p)
p_ccb->chnl_state = CST_ORIG_W4_SEC_COMP;
*******************************************************************************/
void l2c_ucd_send_pending_out_sec_q(tL2C_CCB *p_ccb)
{
- BT_HDR *p_buf;
-
- if ( p_ccb->p_lcb->ucd_out_sec_pending_q.count ) {
- p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->p_lcb->ucd_out_sec_pending_q);
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_out_sec_pending_q);
+ if (p_buf != NULL) {
l2c_enqueue_peer_data (p_ccb, (BT_HDR *)p_buf);
l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
}
*******************************************************************************/
void l2c_ucd_discard_pending_out_sec_q(tL2C_CCB *p_ccb)
{
- BT_HDR *p_buf;
-
- p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->p_lcb->ucd_out_sec_pending_q);
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_out_sec_pending_q);
/* we may need to report to application */
if (p_buf) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
}
*******************************************************************************/
BOOLEAN l2c_ucd_check_pending_in_sec_q(tL2C_CCB *p_ccb)
{
- UINT8 *p;
- UINT16 psm;
- BT_HDR *p_buf;
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q);
- if ( p_ccb->p_lcb->ucd_in_sec_pending_q.count ) {
- p_buf = (BT_HDR *)(p_ccb->p_lcb->ucd_in_sec_pending_q.p_first);
- p = (UINT8 *)(p_buf + 1) + p_buf->offset;
+ if (p_buf != NULL) {
+ UINT16 psm;
+ UINT8 *p = (UINT8 *)(p_buf + 1) + p_buf->offset;
STREAM_TO_UINT16(psm, p)
p_ccb->chnl_state = CST_TERM_W4_SEC_COMP;
*******************************************************************************/
void l2c_ucd_send_pending_in_sec_q(tL2C_CCB *p_ccb)
{
- BT_HDR *p_buf;
-
- if ( p_ccb->p_lcb->ucd_in_sec_pending_q.count ) {
- p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->p_lcb->ucd_in_sec_pending_q);
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q);
+ if (p_buf != NULL) {
p_ccb->p_rcb->ucd.cb_info.pL2CA_UCD_Data_Cb(p_ccb->p_lcb->remote_bd_addr, (BT_HDR *)p_buf);
}
}
*******************************************************************************/
void l2c_ucd_discard_pending_in_sec_q(tL2C_CCB *p_ccb)
{
- BT_HDR *p_buf;
-
- p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->p_lcb->ucd_in_sec_pending_q);
+ BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q);
if (p_buf) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
}
/* Allocate a channel control block */
if ((p_ccb = l2cu_allocate_ccb (p_lcb, 0)) == NULL) {
L2CAP_TRACE_WARNING ("L2CAP - no CCB for UCD reception");
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
return TRUE;
} else {
/* Set CID for the connection */
break;
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
- GKI_enqueue (&p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
+ fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
break;
case L2CEVT_L2CA_DATA_WRITE: /* Upper layer data to send */
p_ccb->chnl_state = CST_OPEN;
l2c_ucd_send_pending_out_sec_q(p_ccb);
- if ( p_ccb->p_lcb->ucd_out_sec_pending_q.count ) {
+ if (! fixed_queue_is_empty(p_ccb->p_lcb->ucd_out_sec_pending_q))
+ {
/* start a timer to send next UCD packet in OPEN state */
/* it will prevent stack overflow */
btu_start_timer (&p_ccb->timer_entry, BTU_TTYPE_L2CAP_CHNL, 0);
break;
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
- GKI_enqueue (&p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
+ fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
break;
case L2CEVT_L2CAP_INFO_RSP:
p_ccb->chnl_state = CST_OPEN;
l2c_ucd_send_pending_in_sec_q (p_ccb);
- if ( p_ccb->p_lcb->ucd_in_sec_pending_q.count ) {
+ if (! fixed_queue_is_empty(p_ccb->p_lcb->ucd_in_sec_pending_q)) {
/* start a timer to check next UCD packet in OPEN state */
/* it will prevent stack overflow */
btu_start_timer (&p_ccb->timer_entry, BTU_TTYPE_L2CAP_CHNL, 0);
break;
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
- GKI_enqueue (&p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
+ fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
break;
case L2CEVT_SEC_RE_SEND_CMD: /* BTM has enough info to proceed */
/* stop idle timer of UCD */
btu_stop_timer (&p_ccb->timer_entry);
- GKI_enqueue (&p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
+ fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
l2c_ucd_check_pending_in_sec_q (p_ccb);
break;
#include <stdlib.h>
#include <string.h>
-//#include <stdio.h>
+#include "allocator.h"
#include "controller.h"
-#include "gki.h"
#include "bt_types.h"
#include "hcimsgs.h"
#include "l2cdefs.h"
/* Release any unfinished L2CAP packet on this link */
if (p_lcb->p_hcit_rcv_acl) {
- GKI_freebuf(p_lcb->p_hcit_rcv_acl);
+ osi_free(p_lcb->p_hcit_rcv_acl);
p_lcb->p_hcit_rcv_acl = NULL;
}
while (!list_is_empty(p_lcb->link_xmit_data_q)) {
BT_HDR *p_buf = list_front(p_lcb->link_xmit_data_q);
list_remove(p_lcb->link_xmit_data_q, p_buf);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
}
list_free(p_lcb->link_xmit_data_q);
p_lcb->link_xmit_data_q = NULL;
*******************************************************************************/
BT_HDR *l2cu_build_header (tL2C_LCB *p_lcb, UINT16 len, UINT8 cmd, UINT8 id)
{
- BT_HDR *p_buf = (BT_HDR *)GKI_getpoolbuf (L2CAP_CMD_POOL_ID);
+ BT_HDR *p_buf = (BT_HDR *)osi_malloc(L2CAP_CMD_BUF_SIZE);
UINT8 *p;
if (!p_buf) {
return;
}
- p_buf = (BT_HDR *)GKI_getbuf (len + rej_len);
+ p_buf = (BT_HDR *)osi_malloc (len + rej_len);
if (!p_buf) {
L2CAP_TRACE_ERROR ("L2CAP - no buffer for cfg_rej");
layer checks that all buffers are sent before disconnecting.
*/
if (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_BASIC_MODE) {
- while (GKI_getfirst(&p_ccb->xmit_hold_q)) {
- p_buf2 = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
+ while ((p_buf2 = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q)) != NULL) {
l2cu_set_acl_hci_header (p_buf2, p_ccb);
l2c_link_check_send_pkts (p_ccb->p_lcb, p_ccb, p_buf2);
}
} else {
p_lcb->cur_echo_id = id;
}
- /* Don't respond if we more than 10% of our buffers are used */
- if (GKI_poolutilization (L2CAP_CMD_POOL_ID) > 10) {
- L2CAP_TRACE_WARNING ("L2CAP gki pool used up to more than 10%%, ignore echo response");
- return;
- }
uint16_t acl_data_size = controller_get_interface()->get_acl_data_size_classic();
uint16_t acl_packet_size = controller_get_interface()->get_acl_packet_size_classic();
/* Don't return data if it does not fit in ACL and L2CAP MTU */
- maxlen = (GKI_get_pool_bufsize(L2CAP_CMD_POOL_ID) > acl_packet_size) ?
- acl_data_size : (UINT16)GKI_get_pool_bufsize(L2CAP_CMD_POOL_ID);
+ maxlen = (L2CAP_CMD_BUF_SIZE > acl_packet_size) ?
+ acl_data_size : (UINT16)L2CAP_CMD_BUF_SIZE;
maxlen -= (UINT16)(BT_HDR_SIZE + HCI_DATA_PREAMBLE_SIZE + L2CAP_PKT_OVERHEAD +
L2CAP_CMD_OVERHEAD + L2CAP_ECHO_RSP_LEN);
#endif ///CLASSIC_BT_INCLUDED == TRUE
p_ccb->ertm_info.preferred_mode = L2CAP_FCR_BASIC_MODE; /* Default mode for channel is basic mode */
p_ccb->ertm_info.allowed_modes = L2CAP_FCR_CHAN_OPT_BASIC; /* Default mode for channel is basic mode */
- p_ccb->ertm_info.fcr_rx_pool_id = L2CAP_FCR_RX_POOL_ID;
- p_ccb->ertm_info.fcr_tx_pool_id = L2CAP_FCR_TX_POOL_ID;
- p_ccb->ertm_info.user_rx_pool_id = HCI_ACL_POOL_ID;
- p_ccb->ertm_info.user_tx_pool_id = HCI_ACL_POOL_ID;
+ p_ccb->ertm_info.fcr_rx_buf_size = L2CAP_FCR_RX_BUF_SIZE;
+ p_ccb->ertm_info.fcr_tx_buf_size = L2CAP_FCR_TX_BUF_SIZE;
+ p_ccb->ertm_info.user_rx_buf_size = L2CAP_USER_RX_BUF_SIZE;
+ p_ccb->ertm_info.user_tx_buf_size = L2CAP_USER_TX_BUF_SIZE;
p_ccb->max_rx_mtu = L2CAP_MTU_SIZE;
- p_ccb->tx_mps = GKI_get_pool_bufsize(HCI_ACL_POOL_ID) - 32;
+ p_ccb->tx_mps = L2CAP_FCR_TX_BUF_SIZE - 32;
- GKI_init_q (&p_ccb->xmit_hold_q);
+ p_ccb->xmit_hold_q = fixed_queue_new(SIZE_MAX);
+ p_ccb->fcrb.srej_rcv_hold_q = fixed_queue_new(SIZE_MAX);
+ p_ccb->fcrb.retrans_q = fixed_queue_new(SIZE_MAX);
+ p_ccb->fcrb.waiting_for_ack_q = fixed_queue_new(SIZE_MAX);
p_ccb->cong_sent = FALSE;
p_ccb->buff_quota = 2; /* This gets set after config */
/* Stop the timer */
btu_stop_timer (&p_ccb->timer_entry);
- while (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)) {
- GKI_freebuf (GKI_dequeue (&p_ccb->xmit_hold_q));
- }
+
+ fixed_queue_free(p_ccb->xmit_hold_q, osi_free_func);
+ p_ccb->xmit_hold_q = NULL;
+
#if (CLASSIC_BT_INCLUDED == TRUE)
l2c_fcr_cleanup (p_ccb);
#endif ///CLASSIC_BT_INCLUDED == TRUE
p_ccb->local_cid = fixed_cid;
p_ccb->remote_cid = fixed_cid;
- GKI_init_q (&p_ccb->xmit_hold_q);
-
p_ccb->is_flushable = FALSE;
p_ccb->timer_entry.param = (TIMER_PARAM_TYPE)p_ccb;
/* Set the FCR parameters. For now, we will use default pools */
p_ccb->our_cfg.fcr = p_ccb->peer_cfg.fcr = *p_fcr;
- p_ccb->ertm_info.fcr_rx_pool_id = HCI_ACL_POOL_ID;
- p_ccb->ertm_info.fcr_tx_pool_id = HCI_ACL_POOL_ID;
- p_ccb->ertm_info.user_rx_pool_id = HCI_ACL_POOL_ID;
- p_ccb->ertm_info.user_tx_pool_id = HCI_ACL_POOL_ID;
+ p_ccb->ertm_info.fcr_rx_buf_size = L2CAP_FCR_RX_BUF_SIZE;
+ p_ccb->ertm_info.fcr_tx_buf_size = L2CAP_FCR_TX_BUF_SIZE;
+ p_ccb->ertm_info.user_rx_buf_size = L2CAP_USER_RX_BUF_SIZE;
+ p_ccb->ertm_info.user_tx_buf_size = L2CAP_USER_TX_BUF_SIZE;
p_ccb->fcrb.max_held_acks = p_fcr->tx_win_sz / 3;
}
}
L2CAP_TRACE_DEBUG("RR scan pri=%d, lcid=0x%04x, q_cout=%d",
- p_ccb->ccb_priority, p_ccb->local_cid, GKI_queue_length(&p_ccb->xmit_hold_q));
+ p_ccb->ccb_priority, p_ccb->local_cid,
+ fixed_queue_length(p_ccb->xmit_hold_q));
/* store the next serving channel */
/* this channel is the last channel of its priority group */
continue;
}
- if ( GKI_queue_is_empty(&p_ccb->fcrb.retrans_q)) {
- if ( GKI_queue_is_empty(&p_ccb->xmit_hold_q)) {
+ if (fixed_queue_is_empty(p_ccb->fcrb.retrans_q)) {
+ if (fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
continue;
}
- /* If using the common pool, should be at least 10% free. */
- if ( (p_ccb->ertm_info.fcr_tx_pool_id == HCI_ACL_POOL_ID) && (GKI_poolutilization (HCI_ACL_POOL_ID) > 90) ) {
- continue;
- }
+
#if (CLASSIC_BT_INCLUDED == TRUE)
/* If in eRTM mode, check for window closure */
if ( (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_ERTM_MODE) && (l2c_fcr_is_flow_controlled (p_ccb)) ) {
#endif ///CLASSIC_BT_INCLUDED == TRUE
}
} else {
- if (GKI_queue_is_empty(&p_ccb->xmit_hold_q)) {
+ if (fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
continue;
}
}
continue;
}
- if (p_ccb->fcrb.retrans_q.count != 0) {
+ if (!fixed_queue_is_empty(p_ccb->fcrb.retrans_q)) {
return p_ccb;
}
- if (p_ccb->xmit_hold_q.count == 0) {
- continue;
- }
-
- /* If using the common pool, should be at least 10% free. */
- if ( (p_ccb->ertm_info.fcr_tx_pool_id == HCI_ACL_POOL_ID) && (GKI_poolutilization (HCI_ACL_POOL_ID) > 90) ) {
+ if (fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
continue;
}
}
/* No more checks needed if sending from the reatransmit queue */
- if (GKI_queue_is_empty(&p_ccb->fcrb.retrans_q)) {
- if (GKI_queue_is_empty(&p_ccb->xmit_hold_q)) {
- continue;
- }
-
- /* If using the common pool, should be at least 10% free. */
- if ( (p_ccb->ertm_info.fcr_tx_pool_id == HCI_ACL_POOL_ID) && (GKI_poolutilization (HCI_ACL_POOL_ID) > 90) ) {
+ if (fixed_queue_is_empty(p_ccb->fcrb.retrans_q))
+ {
+ if (fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
continue;
}
/* If in eRTM mode, check for window closure */
#endif ///CLASSIC_BT_INCLUDED == TRUE
} else {
- if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)) {
- p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
+ if (!fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
+ p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
if (NULL == p_buf) {
L2CAP_TRACE_ERROR("l2cu_get_buffer_to_send: No data to be sent");
return (NULL);
}
} else {
- p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
+ p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
if (NULL == p_buf) {
L2CAP_TRACE_ERROR("l2cu_get_buffer_to_send() #2: No data to be sent");
return (NULL);
*******************************************************************************/
void l2cu_check_channel_congestion (tL2C_CCB *p_ccb)
{
- UINT16 q_count = GKI_queue_length(&p_ccb->xmit_hold_q);
+ size_t q_count = fixed_queue_length(p_ccb->xmit_hold_q);
#if (L2CAP_UCD_INCLUDED == TRUE)
if ( p_ccb->local_cid == L2CAP_CONNECTIONLESS_CID ) {
- q_count += p_ccb->p_lcb->ucd_out_sec_pending_q.count;
+ q_count += fixed_queue_length(p_ccb->p_lcb->ucd_out_sec_pending_q);
}
#endif
/* If the CCB queue limit is subject to a quota, check for congestion */
else if ( p_ccb->p_rcb && p_ccb->local_cid == L2CAP_CONNECTIONLESS_CID ) {
if ( p_ccb->p_rcb->ucd.cb_info.pL2CA_UCD_Congestion_Status_Cb ) {
L2CAP_TRACE_DEBUG ("L2CAP - Calling UCD CongestionStatus_Cb (FALSE), SecPendingQ:%u,XmitQ:%u,Quota:%u",
- p_ccb->p_lcb->ucd_out_sec_pending_q.count,
- p_ccb->xmit_hold_q.count, p_ccb->buff_quota);
+ fixed_queue_length(p_ccb->p_lcb->ucd_out_sec_pending_q),
+ fixed_queue_length(p_ccb->xmit_hold_q),
+ p_ccb->buff_quota);
p_ccb->p_rcb->ucd.cb_info.pL2CA_UCD_Congestion_Status_Cb( p_ccb->p_lcb->remote_bd_addr, FALSE );
}
}
else if ( p_ccb->p_rcb && p_ccb->local_cid == L2CAP_CONNECTIONLESS_CID ) {
if ( p_ccb->p_rcb->ucd.cb_info.pL2CA_UCD_Congestion_Status_Cb ) {
L2CAP_TRACE_DEBUG ("L2CAP - Calling UCD CongestionStatus_Cb (TRUE), SecPendingQ:%u,XmitQ:%u,Quota:%u",
- p_ccb->p_lcb->ucd_out_sec_pending_q.count,
- p_ccb->xmit_hold_q.count, p_ccb->buff_quota);
+ fixed_queue_length(p_ccb->p_lcb->ucd_out_sec_pending_q),
+ fixed_queue_length(p_ccb->xmit_hold_q),
+ p_ccb->buff_quota);
p_ccb->p_rcb->ucd.cb_info.pL2CA_UCD_Congestion_Status_Cb( p_ccb->p_lcb->remote_bd_addr, TRUE );
}
}
#include "bt_trace.h"
#include "bt_defs.h"
#include "bdaddr.h"
-#include "gki.h"
#include "allocator.h"
#include "buffer.h"
#include "list.h"
client->is_congested = false;
for (const list_node_t *node = list_begin(client->outbound_fragments); node != list_end(client->outbound_fragments); node = list_next(node)) {
- GKI_freebuf(list_node(node));
+ osi_free(list_node(node));
}
list_clear(client->outbound_fragments);
// TODO(sharvil): eliminate copy from BT_HDR.
buffer_t *buffer = buffer_new(packet->len);
memcpy(buffer_ptr(buffer), packet->data + packet->offset, packet->len);
- GKI_freebuf(packet);
+ osi_free(packet);
client->callbacks.read_ready(client, buffer, client->context);
buffer_free(buffer);
assert(packet != NULL);
// TODO(sharvil): eliminate copy into BT_HDR.
- BT_HDR *bt_packet = GKI_getbuf(buffer_length(packet) + L2CAP_MIN_OFFSET);
+ BT_HDR *bt_packet = osi_malloc(buffer_length(packet) + L2CAP_MIN_OFFSET);
bt_packet->offset = L2CAP_MIN_OFFSET;
bt_packet->len = buffer_length(packet);
memcpy(bt_packet->data + bt_packet->offset, buffer_ptr(packet), buffer_length(packet));
if (bt_packet->len > 0) {
list_append(client->outbound_fragments, bt_packet);
} else {
- GKI_freebuf(bt_packet);
+ osi_free(bt_packet);
}
break;
}
- BT_HDR *fragment = GKI_getbuf(client->remote_mtu + L2CAP_MIN_OFFSET);
+ BT_HDR *fragment = osi_malloc(client->remote_mtu + L2CAP_MIN_OFFSET);
fragment->offset = L2CAP_MIN_OFFSET;
fragment->len = client->remote_mtu;
memcpy(fragment->data + fragment->offset, bt_packet->data + bt_packet->offset, client->remote_mtu);
#define PORT_INT_H
#include "bt_target.h"
-#include "gki.h"
#include "rfcdefs.h"
#include "port_api.h"
** Define Port Data Transfere control block
*/
typedef struct {
- BUFFER_Q queue; /* Queue of buffers waiting to be sent */
+ fixed_queue_t *queue; /* Queue of buffers waiting to be sent */
BOOLEAN peer_fc; /* TRUE if flow control is set based on peer's request */
BOOLEAN user_fc; /* TRUE if flow control is set based on user's request */
UINT32 queue_size; /* Number of data bytes in the queue */
*/
typedef struct {
TIMER_LIST_ENT tle; /* Timer list entry */
- BUFFER_Q cmd_q; /* Queue for command messages on this mux */
+ fixed_queue_t *cmd_q; /* Queue for command messages on this mux */
UINT8 port_inx[RFCOMM_MAX_DLCI + 1]; /* Array for quick access to */
/* tPORT based on dlci */
BD_ADDR bd_addr; /* BD ADDR of the peer if initiator */
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "rfcdefs.h"
#include "port_api.h"
#include "port_int.h"
}
if (purge_flags & PORT_PURGE_RXCLEAR) {
- PORT_SCHEDULE_LOCK; /* to prevent missing credit */
+ osi_mutex_global_lock(); /* to prevent missing credit */
- count = GKI_queue_length(&p_port->rx.queue);
+ count = fixed_queue_length(p_port->rx.queue);
- while ((p_buf = (BT_HDR *)GKI_dequeue (&p_port->rx.queue)) != NULL) {
- GKI_freebuf (p_buf);
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue)) != NULL) {
+ osi_free (p_buf);
}
p_port->rx.queue_size = 0;
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
/* If we flowed controlled peer based on rx_queue size enable data again */
if (count) {
}
if (purge_flags & PORT_PURGE_TXCLEAR) {
- PORT_SCHEDULE_LOCK; /* to prevent tx.queue_size from being negative */
+ osi_mutex_global_lock(); /* to prevent tx.queue_size from being negative */
- while ((p_buf = (BT_HDR *)GKI_dequeue (&p_port->tx.queue)) != NULL) {
- GKI_freebuf (p_buf);
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->tx.queue)) != NULL) {
+ osi_free (p_buf);
}
p_port->tx.queue_size = 0;
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
events = PORT_EV_TXEMPTY;
return (PORT_LINE_ERR);
}
- p_buf = (BT_HDR *)GKI_getfirst (&p_port->rx.queue);
- if (!p_buf) {
- return (PORT_SUCCESS);
- }
+ if (fixed_queue_is_empty(p_port->rx.queue)) {
+ return (PORT_SUCCESS);
+ }
count = 0;
- while (max_len && p_buf) {
+ while (max_len)
+ {
+ p_buf = (BT_HDR *)fixed_queue_try_peek_first(p_port->rx.queue);
+ if (p_buf == NULL)
+ break;
+
if (p_buf->len > max_len) {
memcpy (p_data, (UINT8 *)(p_buf + 1) + p_buf->offset, max_len);
p_buf->offset += max_len;
*p_len += max_len;
- PORT_SCHEDULE_LOCK;
+ osi_mutex_global_lock();
p_port->rx.queue_size -= max_len;
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
break;
} else {
*p_len += p_buf->len;
max_len -= p_buf->len;
- PORT_SCHEDULE_LOCK;
+ osi_mutex_global_lock();
p_port->rx.queue_size -= p_buf->len;
if (max_len) {
p_data += p_buf->len;
- p_buf = (BT_HDR *)GKI_getnext (p_buf);
}
- GKI_freebuf (GKI_dequeue (&p_port->rx.queue));
+ osi_free(fixed_queue_try_dequeue(p_port->rx.queue));
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
count++;
}
return (PORT_LINE_ERR);
}
- PORT_SCHEDULE_LOCK;
+ osi_mutex_global_lock();
- p_buf = (BT_HDR *)GKI_dequeue (&p_port->rx.queue);
+ p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue);
if (p_buf) {
p_port->rx.queue_size -= p_buf->len;
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
/* If rfcomm suspended traffic from the peer based on the rx_queue_size */
/* check if it can be resumed now */
port_flow_control_peer (p_port, TRUE, 1);
} else {
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
}
*pp_buf = p_buf;
{
/* We should not allow to write data in to server port when connection is not opened */
if (p_port->is_server && (p_port->rfc.state != RFC_STATE_OPENED)) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (PORT_CLOSED);
}
|| ((p_port->port_ctrl & (PORT_CTRL_REQ_SENT | PORT_CTRL_IND_RECEIVED)) !=
(PORT_CTRL_REQ_SENT | PORT_CTRL_IND_RECEIVED))) {
if ((p_port->tx.queue_size > PORT_TX_CRITICAL_WM)
- || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_CRITICAL_WM)) {
+ || (fixed_queue_length(p_port->tx.queue) > PORT_TX_BUF_CRITICAL_WM)) {
RFCOMM_TRACE_WARNING ("PORT_Write: Queue size: %d",
p_port->tx.queue_size);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
if ((p_port->p_callback != NULL) && (p_port->ev_mask & PORT_EV_ERR)) {
p_port->p_callback (PORT_EV_ERR, p_port->inx);
p_port->rfc.state,
p_port->port_ctrl);
- GKI_enqueue (&p_port->tx.queue, p_buf);
+ fixed_queue_enqueue(p_port->tx.queue, p_buf);
p_port->tx.queue_size += p_buf->len;
return (PORT_CMD_PENDING);
/* Check if handle is valid to avoid crashing */
if ((handle == 0) || (handle > MAX_RFC_PORTS)) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (PORT_BAD_HANDLE);
}
p_port = &rfc_cb.port.port[handle - 1];
if (!p_port->in_use || (p_port->state == PORT_STATE_CLOSED)) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (PORT_NOT_OPENED);
}
if (p_port->line_status) {
RFCOMM_TRACE_WARNING ("PORT_Write: Data dropped line_status:0x%x",
p_port->line_status);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return (PORT_LINE_ERR);
}
return PORT_SUCCESS;
}
/* Length for each buffer is the smaller of GKI buffer, peer MTU, or max_len */
- length = RFCOMM_DATA_POOL_BUF_SIZE -
+ length = RFCOMM_DATA_BUF_SIZE -
(UINT16)(sizeof(BT_HDR) + L2CAP_MIN_OFFSET + RFCOMM_DATA_OVERHEAD);
/* If there are buffers scheduled for transmission check if requested */
/* data fits into the end of the queue */
- PORT_SCHEDULE_LOCK;
+ osi_mutex_global_lock();
- if (((p_buf = (BT_HDR *)GKI_getlast(&p_port->tx.queue)) != NULL)
+ if (((p_buf = (BT_HDR *)fixed_queue_try_peek_last(p_port->tx.queue)) != NULL)
&& (((int)p_buf->len + available) <= (int)p_port->peer_mtu)
&& (((int)p_buf->len + available) <= (int)length)) {
//if(recv(fd, (UINT8 *)(p_buf + 1) + p_buf->offset + p_buf->len, available, 0) != available)
{
RFCOMM_TRACE_ERROR("p_data_co_callback DATA_CO_CALLBACK_TYPE_OUTGOING failed, available:%d", available);
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
return (PORT_UNKNOWN_ERROR);
}
//memcpy ((UINT8 *)(p_buf + 1) + p_buf->offset + p_buf->len, p_data, max_len);
*p_len = available;
p_buf->len += (UINT16)available;
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
return (PORT_SUCCESS);
}
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
//int max_read = length < p_port->peer_mtu ? length : p_port->peer_mtu;
while (available) {
/* if we're over buffer high water mark, we're done */
if ((p_port->tx.queue_size > PORT_TX_HIGH_WM)
- || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM)) {
+ || (fixed_queue_length(p_port->tx.queue) > PORT_TX_BUF_HIGH_WM)) {
port_flow_control_user(p_port);
event |= PORT_EV_FC;
RFCOMM_TRACE_EVENT ("tx queue is full,tx.queue_size:%d,tx.queue.count:%d,available:%d",
- p_port->tx.queue_size, GKI_queue_length(&p_port->tx.queue), available);
+ p_port->tx.queue_size, fixed_queue_length(p_port->tx.queue), available);
break;
}
/* continue with rfcomm data write */
- p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_DATA_POOL_ID);
+ p_buf = (BT_HDR *)osi_malloc(RFCOMM_DATA_BUF_SIZE);
if (!p_buf) {
break;
}
/* If there are buffers scheduled for transmission check if requested */
/* data fits into the end of the queue */
- PORT_SCHEDULE_LOCK;
+ osi_mutex_global_lock();
- if (((p_buf = (BT_HDR *)GKI_getlast(&p_port->tx.queue)) != NULL)
+ if (((p_buf = (BT_HDR *)fixed_queue_try_peek_last(p_port->tx.queue)) != NULL)
&& ((p_buf->len + max_len) <= p_port->peer_mtu)
&& ((p_buf->len + max_len) <= length)) {
memcpy ((UINT8 *)(p_buf + 1) + p_buf->offset + p_buf->len, p_data, max_len);
*p_len = max_len;
p_buf->len += max_len;
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
return (PORT_SUCCESS);
}
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
while (max_len) {
/* if we're over buffer high water mark, we're done */
if ((p_port->tx.queue_size > PORT_TX_HIGH_WM)
- || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM)) {
+ || (fixed_queue_length(p_port->tx.queue) > PORT_TX_BUF_HIGH_WM)) {
break;
}
/* continue with rfcomm data write */
- p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_DATA_POOL_ID);
+ p_buf = (BT_HDR *)osi_malloc(RFCOMM_DATA_BUF_SIZE);
if (!p_buf) {
break;
}
return (PORT_UNKNOWN_ERROR);
}
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) != NULL) {
p_buf->offset = L2CAP_MIN_OFFSET + RFCOMM_MIN_OFFSET + 2;
p_buf->len = len;
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "rfcdefs.h"
#include "port_api.h"
#include "btm_int.h"
RFCOMM_TRACE_EVENT("PORT_DataInd with data length %d, p_mcb:%p,p_port:%p,dlci:%d",
p_buf->len, p_mcb, p_port, dlci);
if (!p_port) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
/* If client registered callout callback with flow control we can just deliver receive data */
} else {
port_flow_control_peer(p_port, FALSE, 0);
}
- //GKI_freebuf (p_buf);
+ //osi_free (p_buf);
return;
} else {
RFCOMM_TRACE_ERROR("PORT_DataInd, p_port:%p, p_data_co_callback is null", p_port);
port_flow_control_peer(p_port, TRUE, 1);
p_port->p_data_callback (p_port->inx, (UINT8 *)(p_buf + 1) + p_buf->offset, p_buf->len);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
/* Check if rx queue exceeds the limit */
if ((p_port->rx.queue_size + p_buf->len > PORT_RX_CRITICAL_WM)
- || (GKI_queue_length(&p_port->rx.queue) + 1 > p_port->rx_buf_critical)) {
+ || (fixed_queue_length(p_port->rx.queue) + 1 > p_port->rx_buf_critical)) {
RFCOMM_TRACE_EVENT ("PORT_DataInd. Buffer over run. Dropping the buffer");
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
RFCOMM_LineStatusReq (p_mcb, dlci, LINE_STATUS_OVERRUN);
return;
}
}
- PORT_SCHEDULE_LOCK;
+ osi_mutex_global_lock();
- GKI_enqueue (&p_port->rx.queue, p_buf);
+ fixed_queue_enqueue(p_port->rx.queue, p_buf);
p_port->rx.queue_size += p_buf->len;
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
/* perform flow control procedures if necessary */
port_flow_control_peer(p_port, FALSE, 0);
/* while the rfcomm peer is not flow controlling us, and peer is ready */
while (!p_port->tx.peer_fc && p_port->rfc.p_mcb && p_port->rfc.p_mcb->peer_ready) {
/* get data from tx queue and send it */
- PORT_SCHEDULE_LOCK;
+ osi_mutex_global_lock();
- if ((p_buf = (BT_HDR *)GKI_dequeue (&p_port->tx.queue)) != NULL) {
+ if ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->tx.queue)) != NULL) {
p_port->tx.queue_size -= p_buf->len;
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
RFCOMM_TRACE_DEBUG ("Sending RFCOMM_DataReq tx.queue_size=%d", p_port->tx.queue_size);
}
/* queue is empty-- all data sent */
else {
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
events |= PORT_EV_TXEMPTY;
break;
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "rfcdefs.h"
#include "port_api.h"
#include "port_int.h"
memset (&p_port->peer_ctrl, 0, sizeof (p_port->peer_ctrl));
memset (&p_port->rx, 0, sizeof (p_port->rx));
memset (&p_port->tx, 0, sizeof (p_port->tx));
+
+ p_port->tx.queue = fixed_queue_new(SIZE_MAX);
+ p_port->rx.queue = fixed_queue_new(SIZE_MAX);
}
/*******************************************************************************
tPORT_CALLBACK *p_port_cb;
tPORT_STATE user_port_pars;
- PORT_SCHEDULE_LOCK;
+ osi_mutex_global_lock();
RFCOMM_TRACE_DEBUG("port_release_port, p_port:%p", p_port);
- while ((p_buf = (BT_HDR *)GKI_dequeue (&p_port->rx.queue)) != NULL) {
- GKI_freebuf (p_buf);
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue)) != NULL) {
+ osi_free (p_buf);
}
p_port->rx.queue_size = 0;
- while ((p_buf = (BT_HDR *)GKI_dequeue (&p_port->tx.queue)) != NULL) {
- GKI_freebuf (p_buf);
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->tx.queue)) != NULL) {
+ osi_free (p_buf);
}
p_port->tx.queue_size = 0;
- PORT_SCHEDULE_UNLOCK;
+ osi_mutex_global_unlock();
p_port->state = PORT_STATE_CLOSED;
rfc_check_mcb_active (p_port->rfc.p_mcb);
}
rfc_port_timer_stop (p_port);
+ fixed_queue_free(p_port->tx.queue, NULL);
+ p_port->tx.queue = NULL;
+ fixed_queue_free(p_port->rx.queue, NULL);
+ p_port->rx.queue = NULL;
+
RFCOMM_TRACE_DEBUG ("port_release_port:p_port->keep_port_handle:%d", p_port->keep_port_handle);
if ( p_port->keep_port_handle ) {
RFCOMM_TRACE_DEBUG ("port_release_port:Initialize handle:%d", p_port->inx);
|| !p_port->rfc.p_mcb
|| !p_port->rfc.p_mcb->peer_ready
|| (p_port->tx.queue_size > PORT_TX_HIGH_WM)
- || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM);
+ || (fixed_queue_length(p_port->tx.queue) > PORT_TX_BUF_HIGH_WM);
if (p_port->tx.user_fc == fc) {
return (0);
p_port->rx.peer_fc = TRUE;
}
/* if queue count reached credit rx max, set peer fc */
- else if (GKI_queue_length(&p_port->rx.queue) >= p_port->credit_rx_max) {
+ else if (fixed_queue_length(p_port->rx.queue) >= p_port->credit_rx_max) {
p_port->rx.peer_fc = TRUE;
}
}
/* check if it can be resumed now */
if (p_port->rx.peer_fc
&& (p_port->rx.queue_size < PORT_RX_LOW_WM)
- && (GKI_queue_length(&p_port->rx.queue) < PORT_RX_BUF_LOW_WM)) {
+ && (fixed_queue_length(p_port->rx.queue) < PORT_RX_BUF_LOW_WM)) {
p_port->rx.peer_fc = FALSE;
/* If user did not force flow control allow traffic now */
/* Check the size of the rx queue. If it exceeds certain */
/* level and flow control has not been sent to the peer do it now */
else if ( ((p_port->rx.queue_size > PORT_RX_HIGH_WM)
- || (GKI_queue_length(&p_port->rx.queue) > PORT_RX_BUF_HIGH_WM))
+ || (fixed_queue_length(p_port->rx.queue) > PORT_RX_BUF_HIGH_WM))
&& !p_port->rx.peer_fc) {
RFCOMM_TRACE_EVENT ("PORT_DataInd Data reached HW. Sending FC set.");
#include <stddef.h>
#include "bt_target.h"
-#include "gki.h"
#include "rfcdefs.h"
#include "port_api.h"
RFCOMM_TRACE_DEBUG ("RFCOMM_ConnectInd start timer for collision, initiator's LCID(0x%x), acceptor's LCID(0x%x)",
p_mcb->lcid, p_mcb->pending_lcid);
- rfc_timer_start(p_mcb, (UINT16)(GKI_get_os_tick_count() % 10 + 2));
+ rfc_timer_start(p_mcb, (UINT16)(osi_time_get_os_boottime_ms() % 10 + 2));
return;
} else {
/* we cannot accept connection request from peer at this state */
if (!p_mcb) {
RFCOMM_TRACE_WARNING ("RFCOMM_BufDataInd LCID:0x%x", lcid);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
/* If the frame did not pass validation just ignore it */
if (event == RFC_EVENT_BAD_FRAME) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
/* Other multiplexer events go to state machine */
rfc_mx_sm_execute (p_mcb, event, NULL);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
|| (!p_mcb->is_initiator && rfc_cb.rfc.rx_frame.cr)) {
rfc_send_dm (p_mcb, rfc_cb.rfc.rx_frame.dlci, rfc_cb.rfc.rx_frame.pf);
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
if ((p_port = port_find_dlci_port (rfc_cb.rfc.rx_frame.dlci)) == NULL) {
rfc_send_dm (p_mcb, rfc_cb.rfc.rx_frame.dlci, TRUE);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
p_mcb->port_inx[rfc_cb.rfc.rx_frame.dlci] = p_port->inx;
if (p_buf->len > 0) {
rfc_port_sm_execute (p_port, event, p_buf);
} else {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
if (rfc_cb.rfc.rx_frame.credit != 0) {
return;
}
rfc_port_sm_execute (p_port, event, NULL);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
/*******************************************************************************
*
******************************************************************************/
#include <string.h>
-#include "gki.h"
#include "bt_types.h"
#include "rfcdefs.h"
#include "l2cdefs.h"
rfc_save_lcid_mcb (p_mcb, p_mcb->lcid);
/* clean up before reuse it */
- while ((p_buf = (BT_HDR *)GKI_dequeue(&p_mcb->cmd_q)) != NULL) {
- GKI_freebuf(p_buf);
+ while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_mcb->cmd_q)) != NULL) {
+ osi_free(p_buf);
}
rfc_timer_start (p_mcb, RFC_MCB_INIT_INACT_TIMER);
return;
case RFC_EVENT_UIH:
- GKI_freebuf (p_data);
+ osi_free (p_data);
rfc_send_dm (p_mcb, RFCOMM_MX_DLCI, FALSE);
return;
******************************************************************************/
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "rfcdefs.h"
#include "btm_api.h"
#include "btm_int.h"
return;
case RFC_EVENT_DATA:
- GKI_freebuf (p_data);
+ osi_free (p_data);
break;
case RFC_EVENT_SABME:
return;
case RFC_EVENT_UIH:
- GKI_freebuf (p_data);
+ osi_free (p_data);
rfc_send_dm (p_port->rfc.p_mcb, p_port->dlci, FALSE);
return;
return;
case RFC_EVENT_DATA:
- GKI_freebuf (p_data);
+ osi_free (p_data);
break;
case RFC_EVENT_UA:
return;
case RFC_EVENT_UIH:
- GKI_freebuf (p_data);
+ osi_free (p_data);
return;
case RFC_EVENT_TIMEOUT:
case RFC_EVENT_DATA:
RFCOMM_TRACE_ERROR ("Port error state Term Wait Sec event Data");
- GKI_freebuf (p_data);
+ osi_free (p_data);
return;
case RFC_EVENT_SABME:
return;
case RFC_EVENT_UIH:
- GKI_freebuf (p_data);
+ osi_free (p_data);
return;
case RFC_EVENT_ESTABLISH_RSP:
case RFC_EVENT_DATA:
RFCOMM_TRACE_ERROR ("Port error state Orig Wait Sec event Data");
- GKI_freebuf (p_data);
+ osi_free (p_data);
return;
case RFC_EVENT_UIH:
- GKI_freebuf (p_data);
+ osi_free (p_data);
return;
}
RFCOMM_TRACE_WARNING ("Port state orig_wait_sec_check Event ignored %d", event);
case RFC_EVENT_DISC:
p_port->rfc.state = RFC_STATE_CLOSED;
rfc_send_ua (p_port->rfc.p_mcb, p_port->dlci);
- if (!GKI_queue_is_empty(&p_port->rx.queue)) {
+ if (! fixed_queue_is_empty(p_port->rx.queue)) {
/* give a chance to upper stack to close port properly */
RFCOMM_TRACE_DEBUG("port queue is not empty");
rfc_port_timer_start (p_port, RFC_DISC_TIMEOUT);
return;
case RFC_EVENT_DATA:
- GKI_freebuf (p_data);
+ osi_free (p_data);
return;
case RFC_EVENT_UA:
return;
case RFC_EVENT_UIH:
- GKI_freebuf (p_data);
+ osi_free (p_data);
rfc_send_dm (p_port->rfc.p_mcb, p_port->dlci, FALSE);
return;
{
UNUSED(p_mcb);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
#include <string.h>
#include "bt_target.h"
-#include "gki.h"
#include "rfcdefs.h"
#include "port_api.h"
#include "l2c_api.h"
#include <stddef.h>
#include "bt_target.h"
-#include "gki.h"
#include "rfcdefs.h"
#include "port_api.h"
#include "l2c_api.h"
UINT8 *p_data;
UINT8 cr = RFCOMM_CR(p_mcb->is_initiator, TRUE);
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
UINT8 *p_data;
UINT8 cr = RFCOMM_CR(p_mcb->is_initiator, FALSE);
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
UINT8 *p_data;
UINT8 cr = RFCOMM_CR(p_mcb->is_initiator, FALSE);
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
UINT8 *p_data;
UINT8 cr = RFCOMM_CR(p_mcb->is_initiator, TRUE);
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
BT_HDR *p_buf;
UINT8 *p_data;
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
BT_HDR *p_buf;
UINT8 *p_data;
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
BT_HDR *p_buf;
UINT8 *p_data;
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
signals = p_pars->modem_signal;
break_duration = p_pars->break_signal;
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
BT_HDR *p_buf;
UINT8 *p_data;
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
BT_HDR *p_buf;
UINT8 *p_data;
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
BT_HDR *p_buf;
UINT8 *p_data;
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
UINT8 *p_data;
UINT8 cr = RFCOMM_CR(p_mcb->is_initiator, TRUE);
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) == NULL) {
return;
}
if (!p_rx_frame->ea || !length) {
RFCOMM_TRACE_ERROR ("Illegal MX Frame ea:%d len:%d", p_rx_frame->ea, length);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
if (mx_len != length) {
RFCOMM_TRACE_ERROR ("Bad MX frame");
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
break;
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
rfc_process_pn (p_mcb, is_command, p_rx_frame);
return;
break;
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
rfc_process_fcon (p_mcb, is_command);
return;
break;
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
rfc_process_fcoff (p_mcb, is_command);
return;
p_rx_frame->u.msc.break_present = FALSE;
p_rx_frame->u.msc.break_duration = 0;
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
rfc_process_msc (p_mcb, is_command, p_rx_frame);
return;
p_rx_frame->u.nsc.cr = (*p_data & RFCOMM_CR_MASK) >> RFCOMM_SHIFT_CR;
p_rx_frame->u.nsc.type = *p_data++ >> RFCOMM_SHIFT_DLCI;
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
rfc_process_nsc (p_mcb, p_rx_frame);
return;
p_rx_frame->u.rpn.xoff_char = *p_data++;
p_rx_frame->u.rpn.param_mask = (*p_data + (*(p_data + 1) << 8)) & RFCOMM_RPN_PM_MASK;
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
rfc_process_rpn (p_mcb, is_command, p_rx_frame->u.rpn.is_request, p_rx_frame);
return;
break;
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
rfc_process_rls (p_mcb, is_command, p_rx_frame);
return;
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
if (is_command) {
rfc_send_nsc (p_mcb);
*****************************************************************************/
#include "bt_target.h"
-#include "gki.h"
#include "btm_api.h"
#include "btm_int.h"
p_mcb = &rfc_cb.port.rfc_mcb[j];
if (rfc_cb.port.rfc_mcb[j].state == RFC_MX_STATE_IDLE) {
/* New multiplexer control block */
+ fixed_queue_free(p_mcb->cmd_q, NULL);
memset (p_mcb, 0, sizeof (tRFC_MCB));
memcpy (p_mcb->bd_addr, bd_addr, BD_ADDR_LEN);
RFCOMM_TRACE_DEBUG("rfc_alloc_multiplexer_channel:is_initiator:%d, create new p_mcb:%p, index:%d",
is_initiator, &rfc_cb.port.rfc_mcb[j], j);
- GKI_init_q(&p_mcb->cmd_q);
+ p_mcb->cmd_q = fixed_queue_new(SIZE_MAX);
p_mcb->is_initiator = is_initiator;
*******************************************************************************/
void rfc_release_multiplexer_channel (tRFC_MCB *p_mcb)
{
- void *p_buf;
rfc_timer_stop (p_mcb);
- while ((p_buf = GKI_dequeue(&p_mcb->cmd_q)) != NULL) {
- GKI_freebuf(p_buf);
- }
+
+ fixed_queue_free(p_mcb->cmd_q, osi_free);
memset (p_mcb, 0, sizeof (tRFC_MCB));
p_mcb->state = RFC_MX_STATE_IDLE;
/* if passed a buffer queue it */
if (p_buf != NULL) {
- GKI_enqueue(&p_mcb->cmd_q, p_buf);
+ /* cmd_q can be NULL if the MCB has been released/reset while a command
+  * was still in flight. fixed_queue_enqueue() does not accept a NULL
+  * queue, so log the anomaly, drop the buffer (avoid leaking it) and
+  * bail out instead of crashing inside the queue implementation. */
+ if (p_mcb->cmd_q == NULL) {
+ RFCOMM_TRACE_ERROR("%s: NULL cmd_q: p_mcb = %p p_mcb->lcid = %u cached p_mcb = %p",
+ __func__, p_mcb, p_mcb->lcid,
+ rfc_find_lcid_mcb(p_mcb->lcid));
+ osi_free(p_buf);
+ return;
+ }
+ fixed_queue_enqueue(p_mcb->cmd_q, p_buf);
}
/* handle queue if L2CAP not congested */
while (p_mcb->l2cap_congested == FALSE) {
- if ((p = (BT_HDR *) GKI_dequeue(&p_mcb->cmd_q)) == NULL) {
+ if ((p = (BT_HDR *)fixed_queue_try_dequeue(p_mcb->cmd_q)) == NULL) {
break;
}
#define SDP_INT_H
#include "bt_target.h"
+#include "bt_defs.h"
#include "sdp_api.h"
#include "l2c_api.h"
#include "bt_target.h"
//#include "bt_utils.h"
-#include "gki.h"
#include "l2cdefs.h"
#include "hcidefs.h"
#include "hcimsgs.h"
#include <stdio.h>
#include "bt_target.h"
+#include "allocator.h"
-#include "gki.h"
#include "l2cdefs.h"
#include "hcidefs.h"
UINT8 *p_head;
BOOLEAN result;
- if ((p_buff = (UINT8 *) GKI_getbuf(sizeof(UINT8) * SDP_MAX_ATTR_LEN * 2)) == NULL) {
+ if ((p_buff = (UINT8 *) osi_malloc(sizeof(UINT8) * SDP_MAX_ATTR_LEN * 2)) == NULL) {
SDP_TRACE_ERROR("SDP_AddSequence cannot get a buffer!\n");
return (FALSE);
}
if (p_head == p_buff) {
/* the first element exceed the max length */
SDP_TRACE_ERROR ("SDP_AddSequence - too long(attribute is not added)!!\n");
- GKI_freebuf(p_buff);
+ osi_free(p_buff);
return FALSE;
} else {
SDP_TRACE_ERROR ("SDP_AddSequence - too long, add %d elements of %d\n", xx, num_elem);
}
}
result = SDP_AddAttribute (handle, attr_id, DATA_ELE_SEQ_DESC_TYPE, (UINT32) (p - p_buff), p_buff);
- GKI_freebuf(p_buff);
+ osi_free(p_buff);
return result;
#else /* SDP_SERVER_ENABLED == FALSE */
return (FALSE);
INT32 max_len = SDP_MAX_ATTR_LEN - 3;
BOOLEAN result;
- if ((p_buff = (UINT8 *) GKI_getbuf(sizeof(UINT8) * SDP_MAX_ATTR_LEN * 2)) == NULL) {
+ if ((p_buff = (UINT8 *) osi_malloc(sizeof(UINT8) * SDP_MAX_ATTR_LEN * 2)) == NULL) {
SDP_TRACE_ERROR("SDP_AddUuidSequence cannot get a buffer!\n");
return (FALSE);
}
}
result = SDP_AddAttribute (handle, attr_id, DATA_ELE_SEQ_DESC_TYPE, (UINT32) (p - p_buff), p_buff);
- GKI_freebuf(p_buff);
+ osi_free(p_buff);
return result;
#else /* SDP_SERVER_ENABLED == FALSE */
return (FALSE);
int offset;
BOOLEAN result;
- if ((p_buff = (UINT8 *) GKI_getbuf(sizeof(UINT8) * SDP_MAX_ATTR_LEN * 2)) == NULL) {
+ if ((p_buff = (UINT8 *) osi_malloc(sizeof(UINT8) * SDP_MAX_ATTR_LEN * 2)) == NULL) {
SDP_TRACE_ERROR("SDP_AddProtocolList cannot get a buffer!\n");
return (FALSE);
}
offset = sdp_compose_proto_list(p_buff, num_elem, p_elem_list);
result = SDP_AddAttribute (handle, ATTR_ID_PROTOCOL_DESC_LIST, DATA_ELE_SEQ_DESC_TYPE, (UINT32) offset, p_buff);
- GKI_freebuf(p_buff);
+ osi_free(p_buff);
return result;
#else /* SDP_SERVER_ENABLED == FALSE */
return (FALSE);
int offset;
BOOLEAN result;
- if ((p_buff = (UINT8 *) GKI_getbuf(sizeof(UINT8) * SDP_MAX_ATTR_LEN * 2)) == NULL) {
+ if ((p_buff = (UINT8 *) osi_malloc(sizeof(UINT8) * SDP_MAX_ATTR_LEN * 2)) == NULL) {
SDP_TRACE_ERROR("SDP_AddAdditionProtoLists cannot get a buffer!\n");
return (FALSE);
}
}
result = SDP_AddAttribute (handle, ATTR_ID_ADDITION_PROTO_DESC_LISTS, DATA_ELE_SEQ_DESC_TYPE,
(UINT32) (p - p_buff), p_buff);
- GKI_freebuf(p_buff);
+ osi_free(p_buff);
return result;
#else /* SDP_SERVER_ENABLED == FALSE */
UINT8 *p;
BOOLEAN result;
- if ((p_buff = (UINT8 *) GKI_getbuf(sizeof(UINT8) * SDP_MAX_ATTR_LEN)) == NULL) {
+ if ((p_buff = (UINT8 *) osi_malloc(sizeof(UINT8) * SDP_MAX_ATTR_LEN)) == NULL) {
SDP_TRACE_ERROR("SDP_AddProfileDescriptorList cannot get a buffer!\n");
return (FALSE);
}
*(p_buff + 1) = (UINT8) (p - (p_buff + 2));
result = SDP_AddAttribute (handle, ATTR_ID_BT_PROFILE_DESC_LIST, DATA_ELE_SEQ_DESC_TYPE, (UINT32) (p - p_buff), p_buff);
- GKI_freebuf(p_buff);
+ osi_free(p_buff);
return result;
#else /* SDP_SERVER_ENABLED == FALSE */
UINT8 *p;
BOOLEAN result;
- if ((p_buff = (UINT8 *) GKI_getbuf(sizeof(UINT8) * SDP_MAX_ATTR_LEN)) == NULL) {
+ if ((p_buff = (UINT8 *) osi_malloc(sizeof(UINT8) * SDP_MAX_ATTR_LEN)) == NULL) {
SDP_TRACE_ERROR("SDP_AddLanguageBaseAttrIDList cannot get a buffer!\n");
return (FALSE);
}
result = SDP_AddAttribute (handle, ATTR_ID_LANGUAGE_BASE_ATTR_ID_LIST, DATA_ELE_SEQ_DESC_TYPE,
(UINT32) (p - p_buff), p_buff);
- GKI_freebuf(p_buff);
+ osi_free(p_buff);
return result;
#else /* SDP_SERVER_ENABLED == FALSE */
return (FALSE);
UINT8 *p;
BOOLEAN result;
- if ((p_buff = (UINT8 *) GKI_getbuf(sizeof(UINT8) * SDP_MAX_ATTR_LEN * 2)) == NULL) {
+ if ((p_buff = (UINT8 *) osi_malloc(sizeof(UINT8) * SDP_MAX_ATTR_LEN * 2)) == NULL) {
SDP_TRACE_ERROR("SDP_AddServiceClassIdList cannot get a buffer!\n");
return (FALSE);
}
result = SDP_AddAttribute (handle, ATTR_ID_SERVICE_CLASS_ID_LIST, DATA_ELE_SEQ_DESC_TYPE,
(UINT32) (p - p_buff), p_buff);
- GKI_freebuf(p_buff);
+ osi_free(p_buff);
return result;
#else /* SDP_SERVER_ENABLED == FALSE */
return (FALSE);
}
#endif
-#endif ///SDP_INCLUDED == TRUE
\ No newline at end of file
+#endif ///SDP_INCLUDED == TRUE
#include <stdio.h>
#include "bt_target.h"
-#include "gki.h"
+#include "allocator.h"
#include "l2cdefs.h"
#include "hcidefs.h"
#include "hcimsgs.h"
UINT16 param_len;
/* Get a buffer to send the packet to L2CAP */
- if ((p_cmd = (BT_HDR *) GKI_getpoolbuf (SDP_POOL_ID)) == NULL) {
+ if ((p_cmd = (BT_HDR *) osi_malloc(SDP_DATA_BUF_SIZE)) == NULL) {
sdp_disconnect (p_ccb, SDP_NO_RESOURCES);
return;
}
p_ccb->list_len, list_byte_count);
#endif
if (p_ccb->rsp_list == NULL) {
- p_ccb->rsp_list = (UINT8 *)GKI_getbuf (SDP_MAX_LIST_BYTE_COUNT);
+ p_ccb->rsp_list = (UINT8 *)osi_malloc (SDP_MAX_LIST_BYTE_COUNT);
if (p_ccb->rsp_list == NULL) {
SDP_TRACE_ERROR ("SDP - no gki buf to save rsp\n");
sdp_disconnect (p_ccb, SDP_NO_RESOURCES);
/* Now, ask for the next handle. Re-use the buffer we just got. */
if (p_ccb->cur_handle < p_ccb->num_handles) {
- BT_HDR *p_msg = (BT_HDR *) GKI_getpoolbuf (SDP_POOL_ID);
+ BT_HDR *p_msg = (BT_HDR *) osi_malloc(SDP_DATA_BUF_SIZE);
UINT8 *p;
if (!p_msg) {
p_ccb->list_len, lists_byte_count);
#endif
if (p_ccb->rsp_list == NULL) {
- p_ccb->rsp_list = (UINT8 *)GKI_getbuf (SDP_MAX_LIST_BYTE_COUNT);
+ p_ccb->rsp_list = (UINT8 *)osi_malloc (SDP_MAX_LIST_BYTE_COUNT);
if (p_ccb->rsp_list == NULL) {
SDP_TRACE_ERROR ("SDP - no gki buf to save rsp\n");
sdp_disconnect (p_ccb, SDP_NO_RESOURCES);
#endif
/* If continuation request (or first time request) */
if ((cont_request_needed) || (!p_reply)) {
- BT_HDR *p_msg = (BT_HDR *) GKI_getpoolbuf (SDP_POOL_ID);
+ BT_HDR *p_msg = (BT_HDR *) osi_malloc(SDP_DATA_BUF_SIZE);
UINT8 *p;
if (!p_msg) {
//#include <stdio.h>
#include "bt_target.h"
-//#include "bt_utils.h"
-#include "gki.h"
+#include "allocator.h"
#include "l2cdefs.h"
#include "hcidefs.h"
#include "hcimsgs.h"
SDP_TRACE_WARNING ("SDP - Rcvd L2CAP data, unknown CID: 0x%x\n", l2cap_cid);
}
- GKI_freebuf (p_msg);
+ osi_free (p_msg);
}
#include <string.h>
//#include <stdio.h>
-#include "gki.h"
#include "bt_types.h"
-//#include "bt_utils.h"
+#include "allocator.h"
#include "btu.h"
#include "bt_defs.h"
#include "l2cdefs.h"
}
/* Get a buffer to use to build the response */
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (SDP_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(SDP_DATA_BUF_SIZE)) == NULL) {
SDP_TRACE_ERROR ("SDP - no buf for search rsp\n");
return;
}
if (*p_req) {
/* Free and reallocate buffer */
if (p_ccb->rsp_list) {
- GKI_freebuf(p_ccb->rsp_list);
+ osi_free(p_ccb->rsp_list);
}
- p_ccb->rsp_list = (UINT8 *)GKI_getbuf(max_list_len);
+ p_ccb->rsp_list = (UINT8 *)osi_malloc(max_list_len);
if (p_ccb->rsp_list == NULL) {
SDP_TRACE_ERROR("%s No scratch buf for attr rsp\n", __func__);
return;
p_rsp = &p_ccb->rsp_list[0];
attr_seq.attr_entry[p_ccb->cont_info.next_attr_index].start = p_ccb->cont_info.next_attr_start_id;
} else {
- /* Get a scratch buffer to store response */
- if (!p_ccb->rsp_list || (GKI_get_buf_size(p_ccb->rsp_list) < max_list_len)) {
- /* Free and reallocate if the earlier allocated buffer is small */
- if (p_ccb->rsp_list) {
- GKI_freebuf (p_ccb->rsp_list);
- }
+ if (p_ccb->rsp_list) {
+ osi_free (p_ccb->rsp_list);
+ }
- p_ccb->rsp_list = (UINT8 *)GKI_getbuf (max_list_len);
- if (p_ccb->rsp_list == NULL) {
- SDP_TRACE_ERROR ("SDP - no scratch buf for search rsp\n");
- return;
- }
+ p_ccb->rsp_list = (UINT8 *)osi_malloc (max_list_len);
+ if (p_ccb->rsp_list == NULL) {
+ SDP_TRACE_ERROR ("SDP - no scratch buf for search rsp\n");
+ return;
}
p_ccb->cont_offset = 0;
}
/* Get a buffer to use to build the response */
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (SDP_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(SDP_DATA_BUF_SIZE)) == NULL) {
SDP_TRACE_ERROR ("SDP - no buf for search rsp\n");
return;
}
if (*p_req) {
/* Free and reallocate buffer */
if (p_ccb->rsp_list) {
- GKI_freebuf (p_ccb->rsp_list);
+ osi_free (p_ccb->rsp_list);
}
- p_ccb->rsp_list = (UINT8 *)GKI_getbuf (max_list_len);
+ p_ccb->rsp_list = (UINT8 *)osi_malloc (max_list_len);
if (p_ccb->rsp_list == NULL) {
SDP_TRACE_ERROR ("SDP - no scratch buf for search rsp\n");
return;
attr_seq.attr_entry[p_ccb->cont_info.next_attr_index].start = p_ccb->cont_info.next_attr_start_id;
} else {
/* Get a scratch buffer to store response */
- if (!p_ccb->rsp_list || (GKI_get_buf_size(p_ccb->rsp_list) < max_list_len)) {
- /* Free and reallocate if the earlier allocated buffer is small */
- if (p_ccb->rsp_list) {
- GKI_freebuf (p_ccb->rsp_list);
- }
+ /* Unconditionally free any earlier scratch buffer and allocate a fresh one */
+ if (p_ccb->rsp_list) {
+ osi_free (p_ccb->rsp_list);
+ }
- p_ccb->rsp_list = (UINT8 *)GKI_getbuf (max_list_len);
- if (p_ccb->rsp_list == NULL) {
- SDP_TRACE_ERROR ("SDP - no scratch buf for search rsp\n");
- return;
- }
+ p_ccb->rsp_list = (UINT8 *)osi_malloc (max_list_len);
+ if (p_ccb->rsp_list == NULL) {
+ SDP_TRACE_ERROR ("SDP - no scratch buf for search rsp\n");
+ return;
}
p_ccb->cont_offset = 0;
}
/* Get a buffer to use to build the response */
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (SDP_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(SDP_DATA_BUF_SIZE)) == NULL) {
SDP_TRACE_ERROR ("SDP - no buf for search rsp\n");
return;
}
#include <stdlib.h>
#include <string.h>
-//#include <netinet/in.h>
-//#include <stdio.h>
+
+#include "allocator.h"
#include "bt_defs.h"
-#include "gki.h"
#include "bt_types.h"
#include "l2cdefs.h"
if (p_ccb->rsp_list) {
SDP_TRACE_DEBUG("releasing SDP rsp_list\n");
- GKI_freebuf(p_ccb->rsp_list);
+ osi_free(p_ccb->rsp_list);
p_ccb->rsp_list = NULL;
}
}
error_code, p_ccb->connection_id);
/* Get a buffer to use to build and send the packet to L2CAP */
- if ((p_buf = (BT_HDR *)GKI_getpoolbuf (SDP_POOL_ID)) == NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(SDP_DATA_BUF_SIZE)) == NULL) {
SDP_TRACE_ERROR ("SDP - no buf for err msg\n");
return;
}
size_t len_to_copy;
UINT16 attr_len;
- if ((p_attr_buff = (UINT8 *) GKI_getbuf(sizeof(UINT8) * SDP_MAX_ATTR_LEN )) == NULL) {
+ if ((p_attr_buff = (UINT8 *) osi_malloc(sizeof(UINT8) * SDP_MAX_ATTR_LEN )) == NULL) {
SDP_TRACE_ERROR("sdpu_build_partial_attrib_entry cannot get a buffer!\n");
return NULL;
}
p_out = &p_out[len_to_copy];
*offset += len_to_copy;
- GKI_freebuf(p_attr_buff);
+ osi_free(p_attr_buff);
return p_out;
}
memcpy(p_uuid128 + 2, &uuid16_bo, sizeof(uint16_t));
}
-#endif ///SDP_INCLUDED == TRUE
\ No newline at end of file
+#endif ///SDP_INCLUDED == TRUE
******************************************************************************/
#include "bt_target.h"
+#include "allocator.h"
#if SMP_INCLUDED == TRUE
// #include <stdio.h>
static void cmac_aes_cleanup(void)
{
if (cmac_cb.text != NULL) {
- GKI_freebuf(cmac_cb.text);
+ osi_free(cmac_cb.text);
}
memset(&cmac_cb, 0, sizeof(tCMAC_CB));
}
SMP_TRACE_WARNING("AES128_CMAC started, allocate buffer size = %d", len);
/* allocate a memory space of multiple of 16 bytes to hold text */
- if ((cmac_cb.text = (UINT8 *)GKI_getbuf(len)) != NULL) {
+ if ((cmac_cb.text = (UINT8 *)osi_malloc(len)) != NULL) {
cmac_cb.round = n;
memset(cmac_cb.text, 0, len);
return FALSE;
}
- if ((p_start = (UINT8 *)GKI_getbuf((SMP_ENCRYT_DATA_SIZE * 4))) == NULL) {
+ if ((p_start = (UINT8 *)osi_malloc((SMP_ENCRYT_DATA_SIZE * 4))) == NULL) {
SMP_TRACE_ERROR ("%s failed unable to allocate buffer\n", __func__);
return FALSE;
}
p_out->status = HCI_SUCCESS;
p_out->opcode = HCI_BLE_ENCRYPT;
- GKI_freebuf(p_start);
+ osi_free(p_start);
return TRUE;
}
******************************************************************************/
#include "bt_target.h"
+#include "allocator.h"
#if SMP_INCLUDED == TRUE
/* sanity check */
if ((SMP_OPCODE_MAX < cmd) || (SMP_OPCODE_MIN > cmd)) {
SMP_TRACE_WARNING( "Ignore received command with RESERVED code 0x%02x\n", cmd);
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
return;
}
p_cb->role = L2CA_GetBleConnRole(bd_addr);
memcpy(&p_cb->pairing_bda[0], bd_addr, BD_ADDR_LEN);
} else if (memcmp(&bd_addr[0], p_cb->pairing_bda, BD_ADDR_LEN)) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
smp_reject_unexpected_pairing_command(bd_addr);
return;
}
smp_sm_event(p_cb, cmd, p);
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
/*******************************************************************************
/* sanity check */
if ((SMP_OPCODE_MAX < cmd) || (SMP_OPCODE_MIN > cmd)) {
SMP_TRACE_WARNING( "Ignore received command with RESERVED code 0x%02x", cmd);
- GKI_freebuf(p_buf);
+ osi_free(p_buf);
return;
}
p_cb->smp_over_br = TRUE;
memcpy(&p_cb->pairing_bda[0], bd_addr, BD_ADDR_LEN);
} else if (memcmp(&bd_addr[0], p_cb->pairing_bda, BD_ADDR_LEN)) {
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
smp_reject_unexpected_pairing_command(bd_addr);
return;
}
smp_br_state_machine_event(p_cb, cmd, p);
}
- GKI_freebuf (p_buf);
+ osi_free (p_buf);
}
#endif /* CLASSIC_BT_INCLUDED == TRUE */
UINT8 *p;
SMP_TRACE_EVENT("smp_build_pairing_cmd");
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + SMP_PAIRING_REQ_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + SMP_PAIRING_REQ_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, cmd_code);
UNUSED(cmd_code);
SMP_TRACE_EVENT("smp_build_confirm_cmd\n");
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + SMP_CONFIRM_CMD_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + SMP_CONFIRM_CMD_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, SMP_OPCODE_CONFIRM);
UNUSED(cmd_code);
SMP_TRACE_EVENT("%s\n", __func__);
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + SMP_RAND_CMD_SIZE + L2CAP_MIN_OFFSET))
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + SMP_RAND_CMD_SIZE + L2CAP_MIN_OFFSET))
!= NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UNUSED(cmd_code);
SMP_TRACE_EVENT("smp_build_encrypt_info_cmd\n");
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + SMP_ENC_INFO_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + SMP_ENC_INFO_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, SMP_OPCODE_ENCRYPT_INFO);
SMP_TRACE_EVENT("%s\n", __func__);
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + SMP_MASTER_ID_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + SMP_MASTER_ID_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, SMP_OPCODE_MASTER_ID);
UNUSED(p_cb);
SMP_TRACE_EVENT("smp_build_identity_info_cmd\n");
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + SMP_ID_INFO_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + SMP_ID_INFO_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
BTM_GetDeviceIDRoot(irk);
UNUSED(cmd_code);
UNUSED(p_cb);
SMP_TRACE_EVENT("smp_build_id_addr_cmd\n");
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + SMP_ID_ADDR_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + SMP_ID_ADDR_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, SMP_OPCODE_ID_ADDR);
UNUSED(cmd_code);
SMP_TRACE_EVENT("smp_build_signing_info_cmd\n");
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + SMP_SIGN_INFO_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + SMP_SIGN_INFO_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, SMP_OPCODE_SIGN_INFO);
UNUSED(cmd_code);
SMP_TRACE_EVENT("%s\n", __func__);
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + SMP_PAIR_FAIL_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + SMP_PAIR_FAIL_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, SMP_OPCODE_PAIRING_FAILED);
UNUSED(cmd_code);
SMP_TRACE_EVENT("%s\n", __func__);
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + 2 + L2CAP_MIN_OFFSET)) != NULL) {
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + 2 + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UINT8_TO_STREAM (p, SMP_OPCODE_SEC_REQ);
memcpy(p_publ_key, p_cb->loc_publ_key.x, BT_OCTET32_LEN);
memcpy(p_publ_key + BT_OCTET32_LEN, p_cb->loc_publ_key.y, BT_OCTET32_LEN);
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) +
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) +
SMP_PAIR_PUBL_KEY_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UNUSED(cmd_code);
SMP_TRACE_EVENT("%s\n", __func__);
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + SMP_PAIR_COMMITM_SIZE + L2CAP_MIN_OFFSET))
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + SMP_PAIR_COMMITM_SIZE + L2CAP_MIN_OFFSET))
!= NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UNUSED(cmd_code);
SMP_TRACE_EVENT("%s\n", __FUNCTION__);
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) +
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) +
SMP_PAIR_DHKEY_CHECK_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
UNUSED(cmd_code);
SMP_TRACE_EVENT("%s\n", __FUNCTION__);
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR)\
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR)\
+ SMP_PAIR_KEYPR_NOTIF_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;
SMP_TRACE_DEBUG ("%s\n", __FUNCTION__);
- if ((p_buf = (BT_HDR *)GKI_getbuf(sizeof(BT_HDR) + \
+ if ((p_buf = (BT_HDR *)osi_malloc(sizeof(BT_HDR) + \
SMP_PAIR_FAIL_SIZE + L2CAP_MIN_OFFSET)) != NULL) {
p = (UINT8 *)(p_buf + 1) + L2CAP_MIN_OFFSET;