//If we fail to push data to ring buffer, we will have to stash the data, and send next time.
//Mainly for applications that uses flow control or small ring buffer.
if(pdFALSE == xRingbufferSendFromISR(p_uart->rx_ring_buf, p_uart->rx_data_buf, p_uart->rx_stash_len, &HPTaskAwoken)) {
+ p_uart->rx_buffer_full_flg = true;
uart_disable_intr_mask(uart_num, UART_RXFIFO_TOUT_INT_ENA_M | UART_RXFIFO_FULL_INT_ENA_M);
if (uart_event.type == UART_PATTERN_DET) {
if (rx_fifo_len < pat_num) {
}
}
uart_event.type = UART_BUFFER_FULL;
- p_uart->rx_buffer_full_flg = true;
} else {
UART_ENTER_CRITICAL_ISR(&uart_spinlock[uart_num]);
if (uart_intr_status & UART_AT_CMD_CHAR_DET_INT_ST_M) {
return uart_tx_all(uart_num, src, size, 1, brk_len);
}
+/**
+ * @brief Task-context retry for RX data stashed by the ISR.
+ *
+ * When the ISR fails to push received data into the RX ring buffer
+ * (ring buffer full), it stashes the data in rx_data_buf/rx_stash_len,
+ * sets rx_buffer_full_flg and disables the RX interrupts. This helper,
+ * called from task context, retries pushing that stashed data into the
+ * ring buffer once the reader has drained some space.
+ *
+ * @param uart_num UART port number; caller must ensure p_uart_obj[uart_num]
+ *                 is a valid, initialized driver object.
+ *
+ * @return true  if the stashed data was moved into the ring buffer (flag
+ *               cleared, rx_buffered_len updated, RX interrupts re-enabled);
+ *         false if the flag was not set or the ring buffer is still full.
+ */
+static bool uart_check_buf_full(uart_port_t uart_num)
+{
+    if(p_uart_obj[uart_num]->rx_buffer_full_flg) {
+        // Retry the push with a 1-tick timeout; keeps the reader path cheap
+        // when the ring buffer is still full.
+        BaseType_t res = xRingbufferSend(p_uart_obj[uart_num]->rx_ring_buf, p_uart_obj[uart_num]->rx_data_buf, p_uart_obj[uart_num]->rx_stash_len, 1);
+        if(res == pdTRUE) {
+            // Spinlock guards rx_buffered_len / rx_buffer_full_flg against
+            // concurrent access from the RX ISR (possibly on the other core).
+            UART_ENTER_CRITICAL(&uart_spinlock[uart_num]);
+            p_uart_obj[uart_num]->rx_buffered_len += p_uart_obj[uart_num]->rx_stash_len;
+            p_uart_obj[uart_num]->rx_buffer_full_flg = false;
+            UART_EXIT_CRITICAL(&uart_spinlock[uart_num]);
+            // Stashed data delivered: let the ISR receive again (the ISR
+            // disabled RXFIFO_TOUT/RXFIFO_FULL interrupts when it stashed).
+            uart_enable_rx_intr(p_uart_obj[uart_num]->uart_num);
+            return true;
+        }
+    }
+    return false;
+}
+
int uart_read_bytes(uart_port_t uart_num, uint8_t* buf, uint32_t length, TickType_t ticks_to_wait)
{
UART_CHECK((uart_num < UART_NUM_MAX), "uart_num error", (-1));
p_uart_obj[uart_num]->rx_ptr = data;
p_uart_obj[uart_num]->rx_cur_remain = size;
} else {
- xSemaphoreGive(p_uart_obj[uart_num]->rx_mux);
- return copy_len;
+ //When using dual cores, `rx_buffer_full_flg` may read and write on different cores at same time,
+ //which may lose synchronization. So we also need to call `uart_check_buf_full` once when ringbuffer is empty
+ //to solve the possible asynchronous issues.
+ if(uart_check_buf_full(uart_num)) {
+ //This condition will never be true if `uart_read_bytes`
+ //and `uart_rx_intr_handler_default` are scheduled on the same core.
+ continue;
+ } else {
+ xSemaphoreGive(p_uart_obj[uart_num]->rx_mux);
+ return copy_len;
+ }
}
}
if(p_uart_obj[uart_num]->rx_cur_remain > length) {
vRingbufferReturnItem(p_uart_obj[uart_num]->rx_ring_buf, p_uart_obj[uart_num]->rx_head_ptr);
p_uart_obj[uart_num]->rx_head_ptr = NULL;
p_uart_obj[uart_num]->rx_ptr = NULL;
- if(p_uart_obj[uart_num]->rx_buffer_full_flg) {
- BaseType_t res = xRingbufferSend(p_uart_obj[uart_num]->rx_ring_buf, p_uart_obj[uart_num]->rx_data_buf, p_uart_obj[uart_num]->rx_stash_len, 1);
- if(res == pdTRUE) {
- UART_ENTER_CRITICAL(&uart_spinlock[uart_num]);
- p_uart_obj[uart_num]->rx_buffered_len += p_uart_obj[uart_num]->rx_stash_len;
- p_uart_obj[uart_num]->rx_buffer_full_flg = false;
- UART_EXIT_CRITICAL(&uart_spinlock[uart_num]);
- uart_enable_rx_intr(p_uart_obj[uart_num]->uart_num);
- }
- }
+ uart_check_buf_full(uart_num);
}
}