#define SDIO_SLAVE_CHECK(res, str, ret_val) do { if(!(res)){\
- SDIO_SLAVE_LOGE( "%s", str);\
+ SDIO_SLAVE_LOGE("%s", str);\
return ret_val;\
} }while (0)
void* arg; /* to hold some parameters */
} buf_desc_t;
-typedef STAILQ_HEAD( bufdesc_stailq_head_s, buf_desc_s ) buf_stailq_t;
-typedef TAILQ_HEAD( bufdesc_tailq_head_s, buf_desc_s ) buf_tailq_t;
+typedef STAILQ_HEAD(bufdesc_stailq_head_s, buf_desc_s) buf_stailq_t;
+typedef TAILQ_HEAD(bufdesc_tailq_head_s, buf_desc_s) buf_tailq_t;
typedef struct {
uint8_t* data;
SemaphoreHandle_t remain_cnt;
} sdio_ringbuf_t;
-#define offset_of(type, field) ( (unsigned int)&(((type *)(0))->field) )
+#define offset_of(type, field) ((unsigned int)&(((type *)(0))->field))
typedef enum {
ringbuf_write_ptr = offset_of(sdio_ringbuf_t, write_ptr),
ringbuf_read_ptr = offset_of(sdio_ringbuf_t, read_ptr),
static void sdio_ringbuf_deinit(sdio_ringbuf_t* buf)
{
- if ( buf->remain_cnt != NULL ) vSemaphoreDelete( buf->remain_cnt );
- if ( buf->data != NULL ) free(buf->data);
+ if (buf->remain_cnt != NULL) vSemaphoreDelete(buf->remain_cnt);
+ if (buf->data != NULL) free(buf->data);
*buf = SDIO_RINGBUF_INITIALIZER();
}
static esp_err_t sdio_ringbuf_init(sdio_ringbuf_t* buf, int item_size, int item_cnt)
{
- if (buf->data != NULL ) {
- SDIO_SLAVE_LOGE( "sdio_ringbuf_init: already initialized");
+ if (buf->data != NULL) {
+ SDIO_SLAVE_LOGE("sdio_ringbuf_init: already initialized");
return ESP_ERR_INVALID_STATE;
}
buf->item_size = item_size;
buf->size = item_size * (item_cnt+1);
//allocate resources
buf->data = (uint8_t*)malloc(buf->size);
- if ( buf->data == NULL ) goto no_mem;
- buf->remain_cnt = xSemaphoreCreateCounting( item_cnt, item_cnt );
- if ( buf->remain_cnt == NULL ) goto no_mem;
+ if (buf->data == NULL) goto no_mem;
+ buf->remain_cnt = xSemaphoreCreateCounting(item_cnt, item_cnt);
+ if (buf->remain_cnt == NULL) goto no_mem;
//initialize pointers
buf->write_ptr = buf->data;
buf->read_ptr = buf->data;
}
//calculate a pointer with an offset from one of the pointers stored in the specific ringbuffer
-static inline uint8_t* sdio_ringbuf_offset_ptr( sdio_ringbuf_t *buf, sdio_ringbuf_pointer_t ptr, uint32_t offset )
+static inline uint8_t* sdio_ringbuf_offset_ptr(sdio_ringbuf_t *buf, sdio_ringbuf_pointer_t ptr, uint32_t offset)
{
uint8_t *buf_ptr = (uint8_t*)*(uint32_t*)(((uint8_t*)buf)+ptr); //get the specific pointer of the buffer
uint8_t *offset_ptr=buf_ptr+offset;
return offset_ptr;
}
-static esp_err_t sdio_ringbuf_send( sdio_ringbuf_t* buf, esp_err_t (*copy_callback)(uint8_t*, void*), void* arg, TickType_t wait )
+static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t* buf, esp_err_t (*copy_callback)(uint8_t*, void*), void* arg, TickType_t wait)
{
portBASE_TYPE ret = xSemaphoreTake(buf->remain_cnt, wait);
- if ( ret != pdTRUE ) return NULL;
+ if (ret != pdTRUE) return ESP_ERR_TIMEOUT;
- portENTER_CRITICAL( &buf->write_spinlock );
- uint8_t* get_ptr = sdio_ringbuf_offset_ptr( buf, ringbuf_write_ptr, buf->item_size );
+ portENTER_CRITICAL(&buf->write_spinlock);
+ uint8_t* get_ptr = sdio_ringbuf_offset_ptr(buf, ringbuf_write_ptr, buf->item_size);
esp_err_t err = ESP_OK;
if (copy_callback) err = (*copy_callback)(get_ptr, arg);
- if ( err != ESP_OK ) {
- portEXIT_CRITICAL( &buf->write_spinlock );
+ if (err != ESP_OK) {
+ portEXIT_CRITICAL(&buf->write_spinlock);
return err;
}
buf->write_ptr = get_ptr;
- portEXIT_CRITICAL( &buf->write_spinlock );
+ portEXIT_CRITICAL(&buf->write_spinlock);
return ESP_OK;
}
// since this is designed to be called in the ISR, there is no protection against concurrent access
static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t* buf, uint8_t **start, uint8_t **end, ringbuf_get_all_t get_all, TickType_t wait)
{
- assert( buf->free_ptr == buf->read_ptr ); //must return before recv again
+ assert(buf->free_ptr == buf->read_ptr); //must return before recv again
assert(wait == 0); //only implement wait = 0 case now
- if ( start == NULL && end == NULL ) return ESP_ERR_INVALID_ARG; // must have a output
- if ( buf->read_ptr == buf->write_ptr ) return ESP_ERR_NOT_FOUND; // no data
+ if (start == NULL && end == NULL) return ESP_ERR_INVALID_ARG; // must have at least one output
+ if (buf->read_ptr == buf->write_ptr) return ESP_ERR_NOT_FOUND; // no data
uint8_t *get_start = sdio_ringbuf_offset_ptr(buf, ringbuf_read_ptr, buf->item_size);
- if ( get_all != RINGBUF_GET_ONE ) {
+ if (get_all != RINGBUF_GET_ONE) {
buf->read_ptr = buf->write_ptr;
} else {
buf->read_ptr = get_start;
}
- if ( start != NULL ) *start = get_start;
- if ( end != NULL ) *end = buf->read_ptr;
+ if (start != NULL) *start = get_start;
+ if (end != NULL) *end = buf->read_ptr;
return ESP_OK;
}
static inline void sdio_ringbuf_return_from_isr(sdio_ringbuf_t* buf, uint8_t *ptr, portBASE_TYPE *yield)
{
- assert( sdio_ringbuf_offset_ptr(buf, ringbuf_free_ptr, buf->item_size) == ptr );
+ assert(sdio_ringbuf_offset_ptr(buf, ringbuf_free_ptr, buf->item_size) == ptr);
int size = (buf->read_ptr + buf->size - buf->free_ptr)%buf->size;
int count = size/buf->item_size;
- assert( count*buf->item_size==size);
+ assert(count*buf->item_size==size);
buf->free_ptr = buf->read_ptr;
- for( int i = 0; i < count; i ++ ) {
- portBASE_TYPE ret = xSemaphoreGiveFromISR( buf->remain_cnt, yield );
- assert( ret == pdTRUE );
+ for(int i = 0; i < count; i++) {
+ portBASE_TYPE ret = xSemaphoreGiveFromISR(buf->remain_cnt, yield);
+ assert(ret == pdTRUE);
}
}
static inline void sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr)
{
- assert( sdio_ringbuf_offset_ptr(buf, ringbuf_free_ptr, buf->item_size) == ptr );
+ assert(sdio_ringbuf_offset_ptr(buf, ringbuf_free_ptr, buf->item_size) == ptr);
int size = (buf->read_ptr + buf->size - buf->free_ptr)%buf->size;
int count = size/buf->item_size;
- assert( count*buf->item_size==size);
+ assert(count*buf->item_size==size);
buf->free_ptr = buf->read_ptr;
- for( int i = 0; i < count; i ++ ) {
- portBASE_TYPE ret = xSemaphoreGive( buf->remain_cnt );
- assert( ret == pdTRUE );
+ for(int i = 0; i < count; i++) {
+ portBASE_TYPE ret = xSemaphoreGive(buf->remain_cnt);
+ assert(ret == pdTRUE);
}
}
static inline uint8_t* sdio_ringbuf_peek_front(sdio_ringbuf_t* buf)
{
- if ( buf->read_ptr != buf->write_ptr ) {
+ if (buf->read_ptr != buf->write_ptr) {
return sdio_ringbuf_offset_ptr(buf, ringbuf_read_ptr, buf->item_size);
} else {
return NULL;
}
}
-static inline uint8_t* sdio_ringbuf_peek_rear( sdio_ringbuf_t *buf )
+static inline uint8_t* sdio_ringbuf_peek_rear(sdio_ringbuf_t *buf)
{
return buf->write_ptr;
}
-static inline bool sdio_ringbuf_empty( sdio_ringbuf_t* buf )
+static inline bool sdio_ringbuf_empty(sdio_ringbuf_t* buf)
{
return (buf->read_ptr == buf->write_ptr? true : false);
}
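/* Descriptor lifecycle through the helpers above (illustrative summary): a task
 * reserves a slot with sdio_ringbuf_send(), which blocks on remain_cnt and fills the
 * slot via copy_callback; the ISR then takes items with sdio_ringbuf_recv() and,
 * once the hardware is finished with them, hands them back with
 * sdio_ringbuf_return_from_isr(), which releases remain_cnt again.  A rough sketch
 * (error handling omitted; the callback and its argument names are placeholders):
 *
 *     sdio_ringbuf_send(&rb, fill_cb, cb_arg, portMAX_DELAY);      // task context
 *     sdio_ringbuf_recv(&rb, &start, &end, RINGBUF_GET_ALL, 0);    // ISR context
 *     sdio_ringbuf_return_from_isr(&rb, start, &yield);            // ISR, after DMA is done
 */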
static inline void show_ll(buf_desc_t *item)
{
- ESP_EARLY_LOGD( TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner );
- ESP_EARLY_LOGD( TAG, " buf: %p, stqe_next: %p, tqe-prev: %p", item->buf, item->qe.stqe_next, item->te.tqe_prev );
+ ESP_EARLY_LOGD(TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner);
+ ESP_EARLY_LOGD(TAG, " buf: %p, stqe_next: %p, tqe_prev: %p", item->buf, item->qe.stqe_next, item->te.tqe_prev);
}
static void __attribute((unused)) dump_ll(buf_stailq_t *queue)
{
buf_desc_t *item = NULL;
- ESP_EARLY_LOGD( TAG, ">>>>> first: %p, last: %p <<<<<", queue->stqh_first, queue->stqh_last );
- STAILQ_FOREACH( item, queue, qe ) {
+ ESP_EARLY_LOGD(TAG, ">>>>> first: %p, last: %p <<<<<", queue->stqh_first, queue->stqh_last);
+ STAILQ_FOREACH(item, queue, qe) {
show_ll(item);
}
}
static inline void deinit_context()
{
context.config = (sdio_slave_config_t){};
- for( int i = 0; i < 9; i ++ ) {
- if ( context.events[i] != NULL ) {
+ for(int i = 0; i < 9; i++) {
+ if (context.events[i] != NULL) {
vSemaphoreDelete(context.events[i]);
context.events[i] = NULL;
}
}
- if ( context.ret_queue != NULL ) {
+ if (context.ret_queue != NULL) {
vQueueDelete(context.ret_queue);
context.ret_queue = NULL;
}
- sdio_ringbuf_deinit( &context.sendbuf );
+ sdio_ringbuf_deinit(&context.sendbuf);
}
esp_err_t link_desc_to_last(uint8_t* desc, void* arg)
static esp_err_t init_ringbuf()
{
- esp_err_t ret = sdio_ringbuf_init( &context.sendbuf, sizeof(buf_desc_t), context.config.send_queue_size );
- if ( ret != ESP_OK ) return ret;
+ esp_err_t ret = sdio_ringbuf_init(&context.sendbuf, sizeof(buf_desc_t), context.config.send_queue_size);
+ if (ret != ESP_OK) return ret;
esp_err_t rcv_res;
buf_desc_t *first=NULL, *last=NULL;
//no copy for the first descriptor
- ret = sdio_ringbuf_send( &context.sendbuf, NULL, NULL, portMAX_DELAY);
- if ( ret != ESP_OK ) return ret;
+ ret = sdio_ringbuf_send(&context.sendbuf, NULL, NULL, portMAX_DELAY);
+ if (ret != ESP_OK) return ret;
//loop through the ringbuf to link all the descriptors one after another into a ring
- for ( int i = 0; i < context.config.send_queue_size+1; i++ ) {
- rcv_res = sdio_ringbuf_recv( &context.sendbuf, (uint8_t**)&last, NULL, RINGBUF_GET_ONE, 0 );
- assert ( rcv_res == ESP_OK );
- ret = sdio_ringbuf_send( &context.sendbuf, link_desc_to_last, last, portMAX_DELAY);
- if ( ret != ESP_OK ) return ret;
+ for (int i = 0; i < context.config.send_queue_size+1; i++) {
+ rcv_res = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&last, NULL, RINGBUF_GET_ONE, 0);
+ assert(rcv_res == ESP_OK);
+ ret = sdio_ringbuf_send(&context.sendbuf, link_desc_to_last, last, portMAX_DELAY);
+ if (ret != ESP_OK) return ret;
sdio_ringbuf_return(&context.sendbuf, (uint8_t*)last);
}
first = NULL;
last = NULL;
//clear the queue
- rcv_res = sdio_ringbuf_recv( &context.sendbuf, (uint8_t**)&first, (uint8_t**)&last, RINGBUF_GET_ALL, 0 );
- assert ( rcv_res == ESP_OK );
- assert( first == last ); //there should be only one desc remain
- sdio_ringbuf_return(&context.sendbuf, (uint8_t*)first );
+ rcv_res = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&first, (uint8_t**)&last, RINGBUF_GET_ALL, 0);
+ assert(rcv_res == ESP_OK);
+ assert(first == last); //there should be only one desc remaining
+ sdio_ringbuf_return(&context.sendbuf, (uint8_t*)first);
return ESP_OK;
}
static esp_err_t init_context(sdio_slave_config_t *config)
{
- SDIO_SLAVE_CHECK( *(uint32_t*)&context.config == 0, "sdio slave already initialized", ESP_ERR_INVALID_STATE );
+ SDIO_SLAVE_CHECK(*(uint32_t*)&context.config == 0, "sdio slave already initialized", ESP_ERR_INVALID_STATE);
context.config = *config;
// in theory we can queue infinite buffers in the linked list, but for multi-core reasons we have to use a queue to
// count the finished buffers.
- context.recv_event = xSemaphoreCreateCounting(UINT32_MAX, 0 );
- for( int i = 0; i < 9; i ++ ) {
- if ( i < 8 ) {
+ context.recv_event = xSemaphoreCreateCounting(UINT32_MAX, 0);
+ for(int i = 0; i < 9; i++) {
+ if (i < 8) {
context.events[i] = xSemaphoreCreateBinary();
} //for 8, already created.
- if ( context.events[i] == NULL ) {
- SDIO_SLAVE_LOGE( "event initialize failed");
+ if (context.events[i] == NULL) {
+ SDIO_SLAVE_LOGE("event initialize failed");
goto no_mem;
}
}
esp_err_t ret = init_ringbuf();
- if ( ret != ESP_OK ) goto no_mem;
+ if (ret != ESP_OK) goto no_mem;
- context.ret_queue = xQueueCreate( config->send_queue_size, sizeof(void*) );
- if ( context.ret_queue == NULL ) goto no_mem;
+ context.ret_queue = xQueueCreate(config->send_queue_size, sizeof(void*));
+ if (context.ret_queue == NULL) goto no_mem;
context.recv_link_list = (buf_stailq_t)STAILQ_HEAD_INITIALIZER(context.recv_link_list);
context.recv_reg_list = (buf_tailq_t)TAILQ_HEAD_INITIALIZER(context.recv_reg_list);
SLC.rx_dscr_conf.slc0_token_no_replace = 1;
HINF.cfg_data1.highspeed_enable = 1;
- switch( config->timing ) {
+ switch(config->timing) {
case SDIO_SLAVE_TIMING_PSEND_PSAMPLE:
HOST.conf.frc_sdio20 = 0xf;
HOST.conf.frc_sdio11 = 0;
intr_handle_t intr_handle = NULL;
const int flags = 0;
r = esp_intr_alloc(ETS_SLC0_INTR_SOURCE, flags, sdio_intr, NULL, &intr_handle);
- if (r != ESP_OK ) return r;
+ if (r != ESP_OK) return r;
r = sdio_slave_hw_init(config);
- if ( r != ESP_OK ) return r;
+ if (r != ESP_OK) return r;
r = init_context(config);
- if ( r != ESP_OK ) return r;
+ if (r != ESP_OK) return r;
context.intr_handle = intr_handle;
sdio_slave_reset();
esp_err_t ret;
HOST.slc0_int_clr.val = UINT32_MAX;//clear all interrupts
ret = send_start();
- if ( ret != ESP_OK ) return ret;
+ if (ret != ESP_OK) return ret;
ret = recv_start();
- if ( ret != ESP_OK ) return ret;
+ if (ret != ESP_OK) return ret;
HINF.cfg_data1.sdio_ioready1 = 1; //set IO ready to 1 to allow host to use
return ESP_OK;
}
{
uint32_t int_val = SLC.slc0_int_st.val;
uint32_t int_raw = SLC.slc0_int_raw.val;
- ESP_EARLY_LOGV( TAG, "sdio_intr: %08X(%08X)", int_val, int_raw );
+ ESP_EARLY_LOGV(TAG, "sdio_intr: %08X(%08X)", int_val, int_raw);
- if ( int_val & SDIO_SLAVE_SLC_INT_RX_MASK ) sdio_intr_send(arg);
- if ( int_val & SDIO_SLAVE_SLC_INT_TX_MASK ) sdio_intr_recv(arg);
- if ( int_val & SDIO_SLAVE_SLC_INT_HOST_MASK ) sdio_intr_host(arg);
+ if (int_val & SDIO_SLAVE_SLC_INT_RX_MASK) sdio_intr_send(arg);
+ if (int_val & SDIO_SLAVE_SLC_INT_TX_MASK) sdio_intr_recv(arg);
+ if (int_val & SDIO_SLAVE_SLC_INT_HOST_MASK) sdio_intr_host(arg);
}
/*---------------------------------------------------------------------------
portBASE_TYPE yield = pdFALSE;
SLC.slc0_int_clr.val = int_val;
- for( int i = 0; i < 8; i ++ ) {
- if ( BIT(i) & int_val ) {
- if ( context.config.event_cb != NULL ) (*context.config.event_cb)(i);
- xSemaphoreGiveFromISR( context.events[i], &yield );
+ for(int i = 0; i < 8; i++) {
+ if (BIT(i) & int_val) {
+ if (context.config.event_cb != NULL) (*context.config.event_cb)(i);
+ xSemaphoreGiveFromISR(context.events[i], &yield);
}
}
- if ( yield ) portYIELD_FROM_ISR();
+ if (yield) portYIELD_FROM_ISR();
}
esp_err_t sdio_slave_wait_int(int pos, TickType_t wait)
{
- SDIO_SLAVE_CHECK( pos >= 0 && pos < 8, "interrupt num invalid", ESP_ERR_INVALID_ARG);
- return xSemaphoreTake( context.events[pos], wait );
+ SDIO_SLAVE_CHECK(pos >= 0 && pos < 8, "interrupt num invalid", ESP_ERR_INVALID_ARG);
+ return xSemaphoreTake(context.events[pos], wait);
}
uint8_t sdio_slave_read_reg(int pos)
{
- if ( pos >= 28 && pos <= 31 ) SDIO_SLAVE_LOGW( "%s: interrupt reg, for reference", __FUNCTION__ );
- if ( pos < 0 || pos >= 64 ) SDIO_SLAVE_LOGE( "read register address wrong");
+ if (pos >= 28 && pos <= 31) SDIO_SLAVE_LOGW("%s: interrupt reg, for reference", __FUNCTION__);
+ if (pos < 0 || pos >= 64) SDIO_SLAVE_LOGE("read register address wrong");
return *(uint8_t*)(HOST_SLCHOST_CONF_W_REG(pos));
}
esp_err_t sdio_slave_write_reg(int pos, uint8_t reg)
{
- if ( pos >= 28 && pos <= 31 ) {
- SDIO_SLAVE_LOGE( "interrupt reg, please use sdio_slave_clear_int" );
+ if (pos >= 28 && pos <= 31) {
+ SDIO_SLAVE_LOGE("interrupt reg, please use sdio_slave_clear_int");
return ESP_ERR_INVALID_ARG;
}
- if ( pos < 0 || pos >= 64 ) {
- SDIO_SLAVE_LOGE( "write register address wrong");
+ if (pos < 0 || pos >= 64) {
+ SDIO_SLAVE_LOGE("write register address wrong");
return ESP_ERR_INVALID_ARG;
}
uint32_t addr = HOST_SLCHOST_CONF_W_REG(pos) & (~3);
uint32_t shift = (pos % 4)*8;
- portENTER_CRITICAL( &context.reg_spinlock );
+ portENTER_CRITICAL(&context.reg_spinlock);
int val = *(uint32_t*)addr;
*(uint32_t*)addr = (val & ~(0xff << shift)) | (reg<<shift);
- portEXIT_CRITICAL( &context.reg_spinlock );
+ portEXIT_CRITICAL(&context.reg_spinlock);
return ESP_OK;
}
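/* Illustrative use of the shared registers (a sketch; `status` and `cmd` are
 * placeholder application values): the slave can expose a byte to the host and read
 * back one written by the host, e.g.
 *
 *     sdio_slave_write_reg(0, status);        // value exposed to the host
 *     uint8_t cmd = sdio_slave_read_reg(1);   // value the host wrote
 *
 * Positions 28-31 belong to the interrupt registers and are rejected by
 * sdio_slave_write_reg() above. */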
SLC.intvec_tohost.slc0_intvec = mask;
}
-esp_err_t sdio_slave_send_host_int( uint8_t pos )
+esp_err_t sdio_slave_send_host_int(uint8_t pos)
{
- SDIO_SLAVE_CHECK( pos < 8, "interrupt num invalid", ESP_ERR_INVALID_ARG );
+ SDIO_SLAVE_CHECK(pos < 8, "interrupt num invalid", ESP_ERR_INVALID_ARG);
SLC.intvec_tohost.slc0_intvec = BIT(pos);
return ESP_OK;
}
*/
static inline void send_length_write(uint32_t len)
{
- SLC.slc0_len_conf.val = FIELD_TO_VALUE2( SLC_SLC0_LEN_WDATA, len ) | FIELD_TO_VALUE2( SLC_SLC0_LEN_WR, 1 );
- ESP_EARLY_LOGV(TAG, "send_length_write: %d, last_len: %08X", len, HOST.pkt_len.reg_slc0_len );
+ SLC.slc0_len_conf.val = FIELD_TO_VALUE2(SLC_SLC0_LEN_WDATA, len) | FIELD_TO_VALUE2(SLC_SLC0_LEN_WR, 1);
+ ESP_EARLY_LOGV(TAG, "send_length_write: %d, last_len: %08X", len, HOST.pkt_len.reg_slc0_len);
}
static inline void send_start_transmission(const void* desc)
static inline void send_isr_invoker_enable()
{
//force-trigger the rx_done interrupt. The interrupt is abused to invoke the ISR from the app via the enable bit and is never cleared.
- send_start_transmission( &start_desc );
+ send_start_transmission(&start_desc);
//wait for rx_done
- while( !SLC.slc0_int_raw.rx_done );
+ while(!SLC.slc0_int_raw.rx_done);
HOST.slc0_int_clr.rx_new_packet = 1;
send_stop_ll_operation();
}
//start hw operation with existing data (if any)
static esp_err_t send_start()
{
- SDIO_SLAVE_CHECK( send_get_state() == STATE_IDLE,
- "already started", ESP_ERR_INVALID_STATE );
+ SDIO_SLAVE_CHECK(send_get_state() == STATE_IDLE,
+ "already started", ESP_ERR_INVALID_STATE);
SLC.slc0_int_clr.rx_eof = 1;
- send_set_state( STATE_WAIT_FOR_START );
+ send_set_state(STATE_WAIT_FOR_START);
send_intr_enable();
return ESP_OK;
}
SLC.slc0_rx_link.stop = 1;
send_intr_disable();
- send_set_state( STATE_IDLE );
+ send_set_state(STATE_IDLE);
}
static inline esp_err_t send_isr_eof(portBASE_TYPE *yield)
// inform app to recycle descs
portBASE_TYPE ret = pdTRUE;
buf_desc_t *desc = context.in_flight;
- assert( desc != NULL );
+ assert(desc != NULL);
do {
ESP_EARLY_LOGV(TAG, "end: %x", desc->arg);
- ret = xQueueSendFromISR( context.ret_queue, &desc->arg, yield );
+ ret = xQueueSendFromISR(context.ret_queue, &desc->arg, yield);
assert(ret == pdTRUE);
buf_desc_t* next = STAILQ_NEXT(desc, qe);
desc = next;
} while(desc!=NULL);
- STAILQ_NEXT( context.in_flight_end, qe ) = context.in_flight_next;
+ STAILQ_NEXT(context.in_flight_end, qe) = context.in_flight_next;
sdio_ringbuf_return_from_isr(&context.sendbuf, (uint8_t*)context.in_flight, yield);
context.in_flight = NULL;
context.in_flight_end = NULL;
// Go to wait for packet state
- send_set_state( STATE_WAIT_FOR_START );
+ send_set_state(STATE_WAIT_FOR_START);
return ESP_OK;
}
esp_err_t ret;
buf_desc_t *start = NULL;
buf_desc_t *end = NULL;
- if ( context.config.sending_mode == SDIO_SLAVE_SEND_PACKET ) {
- ret = sdio_ringbuf_recv( &context.sendbuf, (uint8_t**)&start, (uint8_t**)&end, RINGBUF_GET_ONE, 0);
+ if (context.config.sending_mode == SDIO_SLAVE_SEND_PACKET) {
+ ret = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&start, (uint8_t**)&end, RINGBUF_GET_ONE, 0);
} else { //stream mode
- ret = sdio_ringbuf_recv( &context.sendbuf, (uint8_t**)&start, (uint8_t**)&end, RINGBUF_GET_ALL, 0);
+ ret = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&start, (uint8_t**)&end, RINGBUF_GET_ALL, 0);
}
- if ( ret == ESP_OK ) {
+ if (ret == ESP_OK) {
context.in_flight = start;
context.in_flight_end = end;
end->eof = 1;
assert(start_desc != NULL && end_desc != NULL);
send_stop_ll_operation();
- send_start_transmission( start_desc );
+ send_start_transmission(start_desc);
// update pkt_len register to allow host reading.
- send_length_write( end_desc->pkt_len );
+ send_length_write(end_desc->pkt_len);
- send_set_state( STATE_SENDING );
+ send_set_state(STATE_SENDING);
ESP_EARLY_LOGD(TAG, "restart new send: %p->%p, pkt_len: %d", start_desc, end_desc, end_desc->pkt_len);
return ESP_OK;
portBASE_TYPE yield = pdFALSE;
// this interrupt is abused to let the app invoke the ISR
- if ( SLC.slc0_int_st.rx_done ) SLC.slc0_int_ena.rx_done = 0;
+ if (SLC.slc0_int_st.rx_done) SLC.slc0_int_ena.rx_done = 0;
// Go to idle state (cur_start=NULL) if transmission is done,
// also update sequence and recycle descs.
- if ( SLC.slc0_int_st.rx_eof ) {
+ if (SLC.slc0_int_st.rx_eof) {
SLC.slc0_int_clr.rx_eof = 1;
//check current state
- assert( send_get_state() == STATE_SENDING );// context.send_start != NOT_YET && context.send_end != NOT_YET );
+ assert(send_get_state() == STATE_SENDING); // context.send_start != NOT_YET && context.send_end != NOT_YET
send_isr_eof(&yield);
}
// Go to wait sending state (cur_start!=NULL && cur_end==NULL) if not sending and new packet ready.
// Note we may also enter this state by stopping sending in the app.
- if ( send_get_state() == STATE_WAIT_FOR_START ) {
- if ( context.in_flight == NULL ) send_isr_check_new_pkt(&yield);
+ if (send_get_state() == STATE_WAIT_FOR_START) {
+ if (context.in_flight == NULL) send_isr_check_new_pkt(&yield);
// Go to sending state (cur_start and cur_end != NULL) if there is a packet to send.
- if ( context.in_flight ) send_isr_new_packet();
+ if (context.in_flight) send_isr_new_packet();
}
- if ( yield ) portYIELD_FROM_ISR();
+ if (yield) portYIELD_FROM_ISR();
}
esp_err_t send_write_desc(uint8_t* desc, void* arg)
//copy and keep the link
STAILQ_NEXT(new_desc, qe) = STAILQ_NEXT((buf_desc_t*)desc, qe);
- memcpy( desc, new_desc, sizeof(buf_desc_t) );
+ memcpy(desc, new_desc, sizeof(buf_desc_t));
return ESP_OK;
}
esp_err_t sdio_slave_send_queue(uint8_t* addr, size_t len, void* arg, TickType_t wait)
{
- SDIO_SLAVE_CHECK( len > 0, "len <= 0", ESP_ERR_INVALID_ARG );
- SDIO_SLAVE_CHECK( esp_ptr_dma_capable(addr) && (uint32_t)addr%4==0, "buffer to send should be DMA capable and 32-bit aligned",
+ SDIO_SLAVE_CHECK(len > 0, "len <= 0", ESP_ERR_INVALID_ARG);
+ SDIO_SLAVE_CHECK(esp_ptr_dma_capable(addr) && (uint32_t)addr%4==0, "buffer to send should be DMA capable and 32-bit aligned",
ESP_ERR_INVALID_ARG);
buf_desc_t new_desc = {
};
esp_err_t ret = sdio_ringbuf_send(&context.sendbuf, send_write_desc, &new_desc, wait);
- if ( ret != ESP_OK ) return ret;
+ if (ret != ESP_OK) return ret;
send_isr_invoke();
return ESP_OK;
esp_err_t sdio_slave_send_get_finished(void** arg, TickType_t wait)
{
- portBASE_TYPE err = xQueueReceive( context.ret_queue, arg, wait );
- if ( err != pdTRUE ) return ESP_ERR_TIMEOUT;
+ portBASE_TYPE err = xQueueReceive(context.ret_queue, arg, wait);
+ if (err != pdTRUE) return ESP_ERR_TIMEOUT;
return ESP_OK;
}
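/* Typical asynchronous send flow built from the two calls above (illustrative
 * sketch; `data`, `len` and `my_tag` are placeholders, and the buffer must be
 * DMA-capable and 4-byte aligned as checked in sdio_slave_send_queue()):
 *
 *     void *done_arg = NULL;
 *     sdio_slave_send_queue(data, len, my_tag, portMAX_DELAY);   // queue a descriptor
 *     sdio_slave_send_get_finished(&done_arg, portMAX_DELAY);    // done_arg == my_tag once sent
 *
 * The blocking helper below does exactly this, using a CPU timestamp as the tag. */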
uint32_t timestamp = XTHAL_GET_CCOUNT();
uint32_t ret_stamp;
- esp_err_t err = sdio_slave_send_queue( addr, len, (void*)timestamp, portMAX_DELAY );
- if ( err != ESP_OK ) return err;
- err = sdio_slave_send_get_finished( (void**)&ret_stamp, portMAX_DELAY );
- if ( err != ESP_OK ) return err;
- SDIO_SLAVE_CHECK( ret_stamp == timestamp, "already sent without return before", ESP_ERR_INVALID_STATE);
+ esp_err_t err = sdio_slave_send_queue(addr, len, (void*)timestamp, portMAX_DELAY);
+ if (err != ESP_OK) return err;
+ err = sdio_slave_send_get_finished((void**)&ret_stamp, portMAX_DELAY);
+ if (err != ESP_OK) return err;
+ SDIO_SLAVE_CHECK(ret_stamp == timestamp, "already sent without return before", ESP_ERR_INVALID_STATE);
return ESP_OK;
}
static esp_err_t send_flush_data()
{
//only works in idle state / wait to send state
- SDIO_SLAVE_CHECK( send_get_state() == STATE_IDLE,
- "flush data when transmission started", ESP_ERR_INVALID_STATE );
+ SDIO_SLAVE_CHECK(send_get_state() == STATE_IDLE,
+ "flush data when transmission started", ESP_ERR_INVALID_STATE);
HOST.slc0_int_clr.rx_new_packet = 1;
buf_desc_t *last = NULL;
- if ( context.in_flight ) {
+ if (context.in_flight) {
buf_desc_t *desc = context.in_flight;
- while( desc != NULL ) {
- xQueueSend( context.ret_queue, desc->arg, portMAX_DELAY );
+ while(desc != NULL) {
+ xQueueSend(context.ret_queue, &desc->arg, portMAX_DELAY);
last = desc;
desc = STAILQ_NEXT(desc, qe);
}
- STAILQ_NEXT( context.in_flight_end, qe ) = context.in_flight_next;
- sdio_ringbuf_return( &context.sendbuf, (uint8_t*)context.in_flight );
+ STAILQ_NEXT(context.in_flight_end, qe) = context.in_flight_next;
+ sdio_ringbuf_return(&context.sendbuf, (uint8_t*)context.in_flight);
context.in_flight = NULL;
context.in_flight_end = NULL;
}
buf_desc_t *head;
esp_err_t ret = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&head, NULL, RINGBUF_GET_ALL, 0);
- if ( ret == ESP_OK ) {
+ if (ret == ESP_OK) {
buf_desc_t *desc = head;
- while( desc != NULL ) {
- xQueueSend( context.ret_queue, desc->arg, portMAX_DELAY );
+ while(desc != NULL) {
+ xQueueSend(context.ret_queue, &desc->arg, portMAX_DELAY);
last = desc;
desc = STAILQ_NEXT(desc, qe);
}
- sdio_ringbuf_return( &context.sendbuf, (uint8_t*)head );
+ sdio_ringbuf_return(&context.sendbuf, (uint8_t*)head);
}
// if in wait to send state, set the sequence number of the tail to the value last sent, just as if the packets waiting to
// send were never queued.
// Go to idle state (cur_end!=NULL and cur_start=NULL)
- send_set_state( STATE_IDLE );
+ send_set_state(STATE_IDLE);
- if ( last == NULL ) last = (buf_desc_t*)sdio_ringbuf_peek_rear(&context.sendbuf);
+ if (last == NULL) last = (buf_desc_t*)sdio_ringbuf_peek_rear(&context.sendbuf);
last->pkt_len = send_length_read();
return ESP_OK;
}
//clear counter but keep data
static esp_err_t send_reset_counter()
{
- SDIO_SLAVE_CHECK( send_get_state() == STATE_IDLE,
- "reset counter when transmission started", ESP_ERR_INVALID_STATE );
+ SDIO_SLAVE_CHECK(send_get_state() == STATE_IDLE,
+ "reset counter when transmission started", ESP_ERR_INVALID_STATE);
- send_length_write( 0 );
+ send_length_write(0);
uint32_t last_cnt=0;
buf_desc_t *desc = context.in_flight;
buf_desc_t *last = NULL;
- while( desc != NULL ) {
+ while(desc != NULL) {
last_cnt += desc->length;
desc->pkt_len = last_cnt;
last = desc;
// in theory the desc should be the one right next to the last of in_flight,
// but the link of last is NULL, so get the desc from the ringbuf directly.
desc = (buf_desc_t*)sdio_ringbuf_peek_front(&context.sendbuf);
- while( desc != NULL ) {
+ while(desc != NULL) {
last_cnt += desc->length;
desc->pkt_len = last_cnt;
last = desc;
desc = STAILQ_NEXT(desc, qe);
}
- if ( last == NULL ) {
+ if (last == NULL) {
last = (buf_desc_t*)sdio_ringbuf_peek_rear(&context.sendbuf);
last->pkt_len = 0;
}
*--------------------------------------------------------------------------*/
//strange but the registers for host->slave transfers are really called "tx*".
-#define CHECK_HANDLE_IDLE(desc) do { if ( desc == NULL || !desc->not_receiving ) {\
+#define CHECK_HANDLE_IDLE(desc) do { if (desc == NULL || !desc->not_receiving) {\
return ESP_ERR_INVALID_ARG; } } while(0)
static inline void critical_enter_recv()
{
- portENTER_CRITICAL( &context.recv_spinlock );
+ portENTER_CRITICAL(&context.recv_spinlock);
}
static inline void critical_exit_recv()
{
- portEXIT_CRITICAL( &context.recv_spinlock );
+ portEXIT_CRITICAL(&context.recv_spinlock);
}
static inline void recv_size_inc()
{
// fields wdata and inc_more should be written by the same instruction.
- SLC.slc0_token1.val = FIELD_TO_VALUE2( SLC_SLC0_TOKEN1_WDATA, 1) | FIELD_TO_VALUE2( SLC_SLC0_TOKEN1_INC_MORE, 1 );
+ SLC.slc0_token1.val = FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WDATA, 1) | FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_INC_MORE, 1);
}
static inline void recv_size_reset()
{
- SLC.slc0_token1.val = FIELD_TO_VALUE2( SLC_SLC0_TOKEN1_WDATA, 0) | FIELD_TO_VALUE2( SLC_SLC0_TOKEN1_WR, 1 );
+ SLC.slc0_token1.val = FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WDATA, 0) | FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WR, 1);
}
static inline buf_desc_t* recv_get_first_empty_buf()
{
buf_stailq_t *const queue = &context.recv_link_list;
buf_desc_t *desc = STAILQ_FIRST(queue);
- while( desc && desc->owner == 0 ) {
- desc = STAILQ_NEXT( desc, qe );
+ while(desc && desc->owner == 0) {
+ desc = STAILQ_NEXT(desc, qe);
}
return desc;
}
critical_enter_recv();
buf_desc_t *desc = recv_get_first_empty_buf();
- if ( !desc ) {
+ if (!desc) {
ESP_LOGD(TAG, "recv: restart without desc");
critical_exit_recv();
return ESP_OK; // if no buffer loaded, return directly.
critical_enter_recv();
buf_desc_t *desc = recv_get_first_empty_buf();
- while ( desc != NULL ) {
- assert( desc->owner == 1 );
+ while (desc != NULL) {
+ assert(desc->owner == 1);
recv_size_inc();
- desc = STAILQ_NEXT( desc, qe );
+ desc = STAILQ_NEXT(desc, qe);
}
critical_exit_recv();
}
critical_enter_recv();
while(1) {
- portBASE_TYPE ret = xSemaphoreTake( context.recv_event, 0 );
- if ( ret == pdFALSE ) break;
+ portBASE_TYPE ret = xSemaphoreTake(context.recv_event, 0);
+ if (ret == pdFALSE) break;
buf_desc_t *desc = STAILQ_FIRST(queue);
- assert ( desc != NULL && desc->owner == 0 );
+ assert(desc != NULL && desc->owner == 0);
STAILQ_REMOVE_HEAD(queue, qe);
desc->owner = 1;
- STAILQ_INSERT_TAIL( queue, desc, qe );
+ STAILQ_INSERT_TAIL(queue, desc, qe);
recv_size_inc();
//we only add it to the tail here, without starting the DMA or increasing the buffer num.
}
static void sdio_intr_recv(void* arg)
{
portBASE_TYPE yield = 0;
- if ( SLC.slc0_int_raw.tx_done ) {
+ if (SLC.slc0_int_raw.tx_done) {
SLC.slc0_int_clr.tx_done = 1;
- while ( context.recv_cur_ret && context.recv_cur_ret->owner == 0 ) {
+ while (context.recv_cur_ret && context.recv_cur_ret->owner == 0) {
// This may cause the ``cur_ret`` pointer to become NULL, indicating the list is empty;
// in this case ``tx_done`` will no longer happen until a new desc is appended.
// The app is responsible for putting the pointer back in the right place when appending a new desc.
- context.recv_cur_ret = STAILQ_NEXT( context.recv_cur_ret, qe );
- ESP_EARLY_LOGV( TAG, "intr_recv: Give");
- xSemaphoreGiveFromISR( context.recv_event, &yield );
+ context.recv_cur_ret = STAILQ_NEXT(context.recv_cur_ret, qe);
+ ESP_EARLY_LOGV(TAG, "intr_recv: Give");
+ xSemaphoreGiveFromISR(context.recv_event, &yield);
};
}
- if ( yield ) portYIELD_FROM_ISR();
+ if (yield) portYIELD_FROM_ISR();
}
esp_err_t sdio_slave_recv_load_buf(sdio_slave_buf_handle_t handle)
{
buf_desc_t *desc = (buf_desc_t*)handle;
- CHECK_HANDLE_IDLE( desc );
+ CHECK_HANDLE_IDLE(desc);
buf_stailq_t *const queue = &context.recv_link_list;
critical_enter_recv();
- TAILQ_REMOVE( &context.recv_reg_list, desc, te );
+ TAILQ_REMOVE(&context.recv_reg_list, desc, te);
desc->owner = 1;
desc->not_receiving = 0; //manually remove the prev link (by setting not_receiving=0) to indicate this is in the queue
buf_desc_t *const tail = STAILQ_LAST(queue, buf_desc_s, qe);
- STAILQ_INSERT_TAIL( queue, desc, qe );
+ STAILQ_INSERT_TAIL(queue, desc, qe);
if (tail == NULL || (tail->owner == 0)) {
//in this case we have to set the ret pointer
if (tail != NULL) {
sdio_slave_buf_handle_t sdio_slave_recv_register_buf(uint8_t *start)
{
- SDIO_SLAVE_CHECK( esp_ptr_dma_capable(start) && (uint32_t)start%4==0,
+ SDIO_SLAVE_CHECK(esp_ptr_dma_capable(start) && (uint32_t)start%4==0,
"buffer to register should be DMA capable and 32-bit aligned", NULL);
buf_desc_t *desc = (buf_desc_t*)malloc(sizeof(buf_desc_t));
- if ( desc == NULL ) {
- SDIO_SLAVE_LOGE( "cannot allocate lldesc for new buffer" );
+ if (desc == NULL) {
+ SDIO_SLAVE_LOGE("cannot allocate lldesc for new buffer");
return NULL;
}
//no length required, eof always=0
};
critical_enter_recv();
- TAILQ_INSERT_TAIL( &context.recv_reg_list, desc, te );
+ TAILQ_INSERT_TAIL(&context.recv_reg_list, desc, te);
critical_exit_recv();
return desc;
}
-esp_err_t sdio_slave_recv(sdio_slave_buf_handle_t* handle_ret, uint8_t **start_o, size_t *len_o, TickType_t wait)
+esp_err_t sdio_slave_recv(sdio_slave_buf_handle_t* handle_ret, uint8_t **out_addr, size_t *out_len, TickType_t wait)
{
- SDIO_SLAVE_CHECK( handle_ret != NULL, "handle address cannot be 0", ESP_ERR_INVALID_ARG);
- portBASE_TYPE ret = xSemaphoreTake( context.recv_event, wait );
- if ( ret == pdFALSE ) return ESP_ERR_TIMEOUT;
+ SDIO_SLAVE_CHECK(handle_ret != NULL, "handle address cannot be 0", ESP_ERR_INVALID_ARG);
+ portBASE_TYPE ret = xSemaphoreTake(context.recv_event, wait);
+ if (ret == pdFALSE) return ESP_ERR_TIMEOUT;
buf_stailq_t *const queue = &context.recv_link_list;
//remove from queue, add back to reg list.
buf_desc_t *desc = STAILQ_FIRST(queue);
STAILQ_REMOVE_HEAD(queue, qe);
- TAILQ_INSERT_TAIL( &context.recv_reg_list, desc, te );
+ TAILQ_INSERT_TAIL(&context.recv_reg_list, desc, te);
critical_exit_recv();
- assert( desc != NULL && desc->owner == 0 );
+ assert(desc != NULL && desc->owner == 0);
*handle_ret = (sdio_slave_buf_handle_t)desc;
- if ( start_o ) *start_o = desc->buf;
- if ( len_o ) *len_o = desc->length;
+ if (out_addr) *out_addr = desc->buf;
+ if (out_len) *out_len = desc->length;
return ESP_OK;
}
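/* Typical receive flow (illustrative sketch; `rx_buf` is a placeholder for an
 * application-provided, DMA-capable, 4-byte aligned buffer):
 *
 *     sdio_slave_buf_handle_t handle = sdio_slave_recv_register_buf(rx_buf);
 *     sdio_slave_recv_load_buf(handle);                        // make it available to the host
 *     uint8_t *addr; size_t len;
 *     sdio_slave_recv(&handle, &addr, &len, portMAX_DELAY);    // returns once the host has sent data
 *     // ... consume addr/len, then reload the same buffer or unregister it:
 *     sdio_slave_recv_load_buf(handle);
 */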
esp_err_t sdio_slave_recv_unregister_buf(sdio_slave_buf_handle_t handle)
{
buf_desc_t *desc = (buf_desc_t*)handle;
- CHECK_HANDLE_IDLE( desc ); //in the queue, fail.
+ CHECK_HANDLE_IDLE(desc); //in the queue, fail.
critical_enter_recv();
- TAILQ_REMOVE( &context.recv_reg_list, desc, te );
+ TAILQ_REMOVE(&context.recv_reg_list, desc, te);
critical_exit_recv();
free(desc);
return ESP_OK;
}
-uint8_t* sdio_slave_recv_get_buf( sdio_slave_buf_handle_t handle, size_t *len_o )
+uint8_t* sdio_slave_recv_get_buf(sdio_slave_buf_handle_t handle, size_t *len_o)
{
buf_desc_t *desc = (buf_desc_t*)handle;
- if ( handle == NULL ) return NULL;
+ if (handle == NULL) return NULL;
- if ( len_o!= NULL ) *len_o= desc->length;
+ if (len_o != NULL) *len_o = desc->length;
return desc->buf;
}