1 // Copyright 2015-2018 Espressif Systems (Shanghai) PTE LTD
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
7 // http://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
19 The whole SDIO slave peripheral consists of three parts: the registers (including the control registers of
20 interrupts and shared registers), the sending FIFO and the receiving FIFO. A document ``esp_slave_protocol.rst``
21 describes the functionality of the peripheral in detail.
22 The host can access only one of those parts at once, and the hardware functions of these parts are totally
23 independent. Hence this driver is designed into these three independent parts. The shared registers are quite
24 simple. As well as the interrupts: when a slave interrupt is written by the host, the slave gets an interrupt;
25 when one of the host interrupt bits is active, slave hardware output interrupt signals on the DAT1 line.
27 For the FIFOs, the peripheral provides counters as registers so that the host can always know whether the slave
28 is ready to send/receive data. The driver resets the counters during initialization, and the host should somehow
29 inform the slave to reset the counters again if it should reboot (or lose the counter value for some reasons).
30 Then the host can read/write the FIFOs by CMD53 commands according to the counters.
32 Since we don't want to copy all the data from the buffer each time we use the sending/receiving buffer,
33 the buffers are directly loaded onto the sending/receiving linked-list and taken off only after use.
34 Hence the driver takes ownership of the buffer when the buffer is fed to the driver.
36 The driver returns the ownership of buffers when a "finish" function is called. When the hardware finishes
37 the sending/receiving of a buffer, the ISR is invoked and it goes through the linked-list to see how many buffers
38 are freed after last interrupt, and send corresponding signals to the app.
40 The driver of FIFOs works as below:
42 1. The receive driver requires application to "register" a buffer before it's used. The driver
43 dynamically allocate a linked-list descriptor for the buffer, and return the descriptor as a handle
46 Each time the app asks to receive by a buffer, the descriptor of the buffer is loaded onto the linked-list,
47 and the counter of receiving buffers is increased so that the host will know this by the receiving interrupt.
48 The hardware will automatically go through the linked list and write data into the buffers loaded on the
51 The receiving driver sends a counting semaphore to the app for each buffer finished receiving. A task can only
52 check the linked list and fetch one finished buffer for a received semaphore.
54 2. The sending driver is slightly different due to different hardware working styles.
55 (TODO: re-write this part if the stitch mode is released)
56 The hardware has a cache, so that once a descriptor is loaded onto the linked-list, it cannot be modified
57 until returned (used) by the hardware. This forbids us from loading descriptors onto the linked list during
58 the transfer (or the time waiting for host to start a transfer). However, we use a "ringbuffer" (different from
59 the one in ``freertos/`` folder) holding descriptors to solve this:
61 1. The driver allocates continuous memory for several buffer descriptors (the maximum buffer number) during
62 initialization. Then the driver points the STAILQ_NEXT pointer of all the descriptors except the last one
63 to the next descriptor of each of them. Then the pointer of the last descriptor points back to the first one:
64 now the descriptor is in a ring.
66 2. The "ringbuffer" has a write pointer points to where app can write new descriptor. The app writes the new descriptor
67 indicated by the write pointer without touching the STAILQ_NEXT pointer so that the descriptors are always in a
68 ring-like linked-list. The app never touches the part of linked-list being used by the hardware.
70 3. When the hardware needs some data to send, it automatically picks a part of the connected descriptors. According to the mode:
71 - Buffer mode: only pick the next one of the last sent one;
72 - Stream mode: pick the one above to the latest one.
74 The driver removes the STAILQ_NEXT pointer of the last descriptor and put the head of the part to the DMA controller so
75 that it looks like just a linear linked-list rather than a ring to the hardware.
77 4. The counter of sending FIFO can increase when app load new buffers (in STREAM_MODE) or when new transfer should
78 start (in PACKET_MODE).
80 5. When the sending transfer is finished, the driver goes through the descriptors just sent in the ISR and pushes all
81 the ``arg`` member of descriptors to the queue back to the app, so that the app can handle finished buffers. The
82 driver also fixes the STAILQ_NEXT pointer of the last descriptor so that the descriptors are now in a ring again.
88 #include "driver/sdio_slave.h"
89 #include "soc/sdio_slave_periph.h"
90 #include "rom/lldesc.h"
92 #include "esp_intr_alloc.h"
93 #include "freertos/FreeRTOS.h"
94 #include "soc/dport_access.h"
95 #include "soc/dport_reg.h"
96 #include "soc/io_mux_reg.h"
97 #include "freertos/semphr.h"
98 #include "xtensa/core-macros.h"
99 #include "driver/periph_ctrl.h"
102 #define SDIO_SLAVE_CHECK(res, str, ret_val) do { if(!(res)){\
103 SDIO_SLAVE_LOGE("%s", str);\
107 #define SDIO_SLAVE_LOGE(s, ...) ESP_LOGE(TAG, "%s:%d (%s):"s, __FILE__,__LINE__,__FUNCTION__,##__VA_ARGS__)
108 #define SDIO_SLAVE_LOGW(s, ...) ESP_LOGW(TAG, "%s: "s, __FUNCTION__,##__VA_ARGS__)
111 static const char TAG[] = "sdio_slave";
115 STATE_WAIT_FOR_START = 2,
119 // first 3 WORDs of this struct is defined by and compatible to the DMA link list format.
120 // sdio_slave_buf_handle_t is of type buf_desc_t*;
121 typedef struct buf_desc_s{
122 volatile uint32_t size :12,
124 offset: 5, /* h/w reserved 5bit, s/w use it as offset in buffer */
125 sosf : 1, /* start of sub-frame */
126 eof : 1, /* end of frame */
127 owner : 1; /* hw or sw */
130 TAILQ_ENTRY(buf_desc_s) te; // tailq used by receving
132 STAILQ_ENTRY(buf_desc_s) qe; // stailq used by sending and receiving
135 // shared with the tqe_prev in tailq, happen to be non-zero in the stailq. only
136 // write to 0 when removed from tailq, set to other will bring invalid pointer.
137 uint32_t not_receiving;
141 void* arg; /* to hold some parameters */
144 typedef STAILQ_HEAD(bufdesc_stailq_head_s, buf_desc_s) buf_stailq_t;
145 typedef TAILQ_HEAD(bufdesc_tailq_head_s, buf_desc_s) buf_tailq_t;
154 portMUX_TYPE write_spinlock;
155 SemaphoreHandle_t remain_cnt;
158 #define offset_of(type, field) ((unsigned int)&(((type *)(0))->field))
160 ringbuf_write_ptr = offset_of(sdio_ringbuf_t, write_ptr),
161 ringbuf_read_ptr = offset_of(sdio_ringbuf_t, read_ptr),
162 ringbuf_free_ptr = offset_of(sdio_ringbuf_t, free_ptr),
163 } sdio_ringbuf_pointer_t;
165 #define SDIO_RINGBUF_INITIALIZER() (sdio_ringbuf_t){.write_spinlock = portMUX_INITIALIZER_UNLOCKED,}
168 sdio_slave_config_t config;
169 intr_handle_t intr_handle; //allocated interrupt handle
170 /*------- events ---------------*/
172 SemaphoreHandle_t events[9]; // 0-7 for gp intr
174 SemaphoreHandle_t _events[8];
175 SemaphoreHandle_t recv_event; // 8 for recv
178 portMUX_TYPE reg_spinlock;
179 /*------- sending ---------------*/
180 //desc in the send_link_list are temporary, taken information and space from the ringbuf, return to ringbuf after use.
181 send_state_t send_state;
182 sdio_ringbuf_t sendbuf;
183 QueueHandle_t ret_queue;
184 buf_desc_t* in_flight;
185 buf_desc_t* in_flight_end;
186 buf_desc_t* in_flight_next;
187 /*------- receiving ---------------*/
188 buf_stailq_t recv_link_list; // now ready to/already hold data
189 buf_tailq_t recv_reg_list; // removed from the link list, registered but not used now
190 volatile buf_desc_t* recv_cur_ret; // next desc to return, NULL if all loaded descriptors are returned
191 portMUX_TYPE recv_spinlock;
194 static sdio_context_t context = {
196 /*------- events ---------------*/
198 .reg_spinlock = portMUX_INITIALIZER_UNLOCKED,
199 /*------- sending ---------------*/
200 .send_state = STATE_IDLE,
201 .sendbuf = SDIO_RINGBUF_INITIALIZER(),
204 .in_flight_end = NULL,
205 .in_flight_next = NULL,
206 /*------- receiving ---------------*/
207 .recv_link_list = STAILQ_HEAD_INITIALIZER(context.recv_link_list),
208 .recv_reg_list = TAILQ_HEAD_INITIALIZER(context.recv_reg_list),
209 .recv_cur_ret = NULL,
210 .recv_spinlock = portMUX_INITIALIZER_UNLOCKED,
213 static void sdio_intr(void*);
214 static void sdio_intr_host(void*);
215 static void sdio_intr_send(void*);
216 static void sdio_intr_recv(void*);
218 static esp_err_t send_flush_data();
219 static esp_err_t send_reset_counter();
220 static void recv_flush_data();
221 static void recv_reset_counter();
223 static esp_err_t send_start();
224 static void send_stop();
225 static esp_err_t recv_start();
226 static void recv_stop();
228 static void deinit_context();
231 /**************** Ring buffer for SDIO use *****************/
237 static void sdio_ringbuf_deinit(sdio_ringbuf_t* buf)
239 if (buf->remain_cnt != NULL) vSemaphoreDelete(buf->remain_cnt);
240 if (buf->data != NULL) free(buf->data);
241 *buf = SDIO_RINGBUF_INITIALIZER();
244 static esp_err_t sdio_ringbuf_init(sdio_ringbuf_t* buf, int item_size, int item_cnt)
246 if (buf->data != NULL) {
247 SDIO_SLAVE_LOGE("sdio_ringbuf_init: already initialized");
248 return ESP_ERR_INVALID_STATE;
250 buf->item_size = item_size;
251 //one item is not used.
252 buf->size = item_size * (item_cnt+1);
253 //apply for resources
254 buf->data = (uint8_t*)malloc(buf->size);
255 if (buf->data == NULL) goto no_mem;
256 buf->remain_cnt = xSemaphoreCreateCounting(item_cnt, item_cnt);
257 if (buf->remain_cnt == NULL) goto no_mem;
258 //initialize pointers
259 buf->write_ptr = buf->data;
260 buf->read_ptr = buf->data;
261 buf->free_ptr = buf->data;
264 sdio_ringbuf_deinit(buf);
265 return ESP_ERR_NO_MEM;
268 //calculate a pointer with offset to a original pointer of the specific ringbuffer
269 static inline uint8_t* sdio_ringbuf_offset_ptr(sdio_ringbuf_t *buf, sdio_ringbuf_pointer_t ptr, uint32_t offset)
271 uint8_t *buf_ptr = (uint8_t*)*(uint32_t*)(((uint8_t*)buf)+ptr); //get the specific pointer of the buffer
272 uint8_t *offset_ptr=buf_ptr+offset;
273 if (offset_ptr >= buf->data + buf->size) offset_ptr -= buf->size;
277 static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t* buf, esp_err_t (*copy_callback)(uint8_t*, void*), void* arg, TickType_t wait)
279 portBASE_TYPE ret = xSemaphoreTake(buf->remain_cnt, wait);
280 if (ret != pdTRUE) return ESP_ERR_TIMEOUT;
282 portENTER_CRITICAL(&buf->write_spinlock);
283 uint8_t* get_ptr = sdio_ringbuf_offset_ptr(buf, ringbuf_write_ptr, buf->item_size);
284 esp_err_t err = ESP_OK;
285 if (copy_callback) (*copy_callback)(get_ptr, arg);
287 portEXIT_CRITICAL(&buf->write_spinlock);
290 buf->write_ptr = get_ptr;
291 portEXIT_CRITICAL(&buf->write_spinlock);
295 // this ringbuf is a return-before-recv-again strategy
296 // since this is designed to be called in the ISR, no parallel logic
297 static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t* buf, uint8_t **start, uint8_t **end, ringbuf_get_all_t get_all, TickType_t wait)
299 assert(buf->free_ptr == buf->read_ptr); //must return before recv again
300 assert(wait == 0); //only implement wait = 0 case now
301 if (start == NULL && end == NULL) return ESP_ERR_INVALID_ARG; // must have a output
302 if (buf->read_ptr == buf->write_ptr) return ESP_ERR_NOT_FOUND; // no data
304 uint8_t *get_start = sdio_ringbuf_offset_ptr(buf, ringbuf_read_ptr, buf->item_size);
306 if (get_all != RINGBUF_GET_ONE) {
307 buf->read_ptr = buf->write_ptr;
309 buf->read_ptr = get_start;
312 if (start != NULL) *start = get_start;
313 if (end != NULL) *end = buf->read_ptr;
317 static inline void sdio_ringbuf_return_from_isr(sdio_ringbuf_t* buf, uint8_t *ptr, portBASE_TYPE *yield)
319 assert(sdio_ringbuf_offset_ptr(buf, ringbuf_free_ptr, buf->item_size) == ptr);
320 int size = (buf->read_ptr + buf->size - buf->free_ptr)%buf->size;
321 int count = size/buf->item_size;
322 assert(count*buf->item_size==size);
323 buf->free_ptr = buf->read_ptr;
324 for(int i = 0; i < count; i++) {
325 portBASE_TYPE ret = xSemaphoreGiveFromISR(buf->remain_cnt, yield);
326 assert(ret == pdTRUE);
330 static inline void sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr)
332 assert(sdio_ringbuf_offset_ptr(buf, ringbuf_free_ptr, buf->item_size) == ptr);
333 int size = (buf->read_ptr + buf->size - buf->free_ptr)%buf->size;
334 int count = size/buf->item_size;
335 assert(count*buf->item_size==size);
336 buf->free_ptr = buf->read_ptr;
337 for(int i = 0; i < count; i++) {
338 portBASE_TYPE ret = xSemaphoreGive(buf->remain_cnt);
339 assert(ret == pdTRUE);
343 static inline uint8_t* sdio_ringbuf_peek_front(sdio_ringbuf_t* buf)
345 if (buf->read_ptr != buf->write_ptr) {
346 return sdio_ringbuf_offset_ptr(buf, ringbuf_read_ptr, buf->item_size);
352 static inline uint8_t* sdio_ringbuf_peek_rear(sdio_ringbuf_t *buf)
354 return buf->write_ptr;
357 static inline bool sdio_ringbuf_empty(sdio_ringbuf_t* buf)
359 return (buf->read_ptr == buf->write_ptr? true : false);
361 /**************** End of Ring buffer for SDIO *****************/
363 static inline void show_ll(buf_desc_t *item)
365 ESP_EARLY_LOGD(TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner);
366 ESP_EARLY_LOGD(TAG, " buf: %p, stqe_next: %p, tqe-prev: %p", item->buf, item->qe.stqe_next, item->te.tqe_prev);
369 static void __attribute((unused)) dump_ll(buf_stailq_t *queue)
371 buf_desc_t *item = NULL;
372 ESP_EARLY_LOGD(TAG, ">>>>> first: %p, last: %p <<<<<", queue->stqh_first, queue->stqh_last);
373 STAILQ_FOREACH(item, queue, qe) {
378 static inline void deinit_context()
380 context.config = (sdio_slave_config_t){};
381 for(int i = 0; i < 9; i++) {
382 if (context.events[i] != NULL) {
383 vSemaphoreDelete(context.events[i]);
384 context.events[i] = NULL;
387 if (context.ret_queue != NULL) {
388 vQueueDelete(context.ret_queue);
389 context.ret_queue = NULL;
391 sdio_ringbuf_deinit(&context.sendbuf);
394 esp_err_t link_desc_to_last(uint8_t* desc, void* arg)
396 STAILQ_NEXT((buf_desc_t*)arg, qe) = (buf_desc_t*)desc;
400 static esp_err_t init_ringbuf()
402 esp_err_t ret = sdio_ringbuf_init(&context.sendbuf, sizeof(buf_desc_t), context.config.send_queue_size);
403 if (ret != ESP_OK) return ret;
406 buf_desc_t *first=NULL, *last=NULL;
408 //no copy for the first descriptor
409 ret = sdio_ringbuf_send(&context.sendbuf, NULL, NULL, portMAX_DELAY);
410 if (ret != ESP_OK) return ret;
412 //loop in the ringbuf to link all the desc one after another as a ring
413 for (int i = 0; i < context.config.send_queue_size+1; i++) {
414 rcv_res = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&last, NULL, RINGBUF_GET_ONE, 0);
415 assert (rcv_res == ESP_OK);
416 ret = sdio_ringbuf_send(&context.sendbuf, link_desc_to_last, last, portMAX_DELAY);
417 if (ret != ESP_OK) return ret;
418 sdio_ringbuf_return(&context.sendbuf, (uint8_t*)last);
423 rcv_res = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&first, (uint8_t**)&last, RINGBUF_GET_ALL, 0);
424 assert (rcv_res == ESP_OK);
425 assert(first == last); //there should be only one desc remain
426 sdio_ringbuf_return(&context.sendbuf, (uint8_t*)first);
430 static esp_err_t init_context(sdio_slave_config_t *config)
432 SDIO_SLAVE_CHECK(*(uint32_t*)&context.config == 0, "sdio slave already initialized", ESP_ERR_INVALID_STATE);
434 context.config = *config;
436 // in theory we can queue infinite buffers in the linked list, but for multi-core reason we have to use a queue to
437 // count the finished buffers.
438 context.recv_event = xSemaphoreCreateCounting(UINT32_MAX, 0);
439 for(int i = 0; i < 9; i++) {
441 context.events[i] = xSemaphoreCreateBinary();
442 } //for 8, already created.
443 if (context.events[i] == NULL) {
444 SDIO_SLAVE_LOGE("event initialize failed");
449 esp_err_t ret = init_ringbuf();
450 if (ret != ESP_OK) goto no_mem;
452 context.ret_queue = xQueueCreate(config->send_queue_size, sizeof(void*));
453 if (context.ret_queue == NULL) goto no_mem;
455 context.recv_link_list = (buf_stailq_t)STAILQ_HEAD_INITIALIZER(context.recv_link_list);
456 context.recv_reg_list = (buf_tailq_t)TAILQ_HEAD_INITIALIZER(context.recv_reg_list);
461 return ESP_ERR_NO_MEM;
464 static void configure_pin(int pin, uint32_t func, bool pullup)
466 const int sdmmc_func = func;
467 const int drive_strength = 3;
469 uint32_t reg = GPIO_PIN_MUX_REG[pin];
470 assert(reg!=UINT32_MAX);
472 PIN_INPUT_ENABLE(reg);
473 PIN_FUNC_SELECT(reg, sdmmc_func);
474 PIN_SET_DRV(reg, drive_strength);
475 gpio_pulldown_dis(pin);
481 static inline esp_err_t sdio_slave_hw_init(sdio_slave_config_t *config)
484 SLC.slc0_int_ena.val = 0;
487 const sdio_slave_slot_info_t *slot = &sdio_slave_slot_info[1];
489 bool pullup = config->flags & SDIO_SLAVE_FLAG_INTERNAL_PULLUP;
490 configure_pin(slot->clk_gpio, slot->func, false); //clk doesn't need a pullup
491 configure_pin(slot->cmd_gpio, slot->func, pullup);
492 configure_pin(slot->d0_gpio, slot->func, pullup);
493 if ((config->flags & SDIO_SLAVE_FLAG_HOST_INTR_DISABLED)==0) {
494 configure_pin(slot->d1_gpio, slot->func, pullup);
496 if ((config->flags & SDIO_SLAVE_FLAG_DAT2_DISABLED)==0) {
497 configure_pin(slot->d2_gpio, slot->func, pullup);
499 configure_pin(slot->d3_gpio, slot->func, pullup);
501 //enable module and config
502 periph_module_reset(PERIPH_SDIO_SLAVE_MODULE);
503 periph_module_enable(PERIPH_SDIO_SLAVE_MODULE);
505 SLC.conf0.slc0_rx_auto_wrback = 1;
506 SLC.conf0.slc0_token_auto_clr = 0;
507 SLC.conf0.slc0_rx_loop_test = 0;
508 SLC.conf0.slc0_tx_loop_test = 0;
510 SLC.conf1.slc0_rx_stitch_en = 0;
511 SLC.conf1.slc0_tx_stitch_en = 0;
512 SLC.conf1.slc0_len_auto_clr = 0;
514 SLC.rx_dscr_conf.slc0_token_no_replace = 1;
515 HINF.cfg_data1.highspeed_enable = 1;
517 switch(config->timing) {
518 case SDIO_SLAVE_TIMING_PSEND_PSAMPLE:
519 HOST.conf.frc_sdio20 = 0x1f;
520 HOST.conf.frc_sdio11 = 0;
521 HOST.conf.frc_pos_samp = 0x1f;
522 HOST.conf.frc_neg_samp = 0;
524 case SDIO_SLAVE_TIMING_PSEND_NSAMPLE:
525 HOST.conf.frc_sdio20 = 0x1f;
526 HOST.conf.frc_sdio11 = 0;
527 HOST.conf.frc_pos_samp = 0;
528 HOST.conf.frc_neg_samp = 0x1f;
530 case SDIO_SLAVE_TIMING_NSEND_PSAMPLE:
531 HOST.conf.frc_sdio20 = 0;
532 HOST.conf.frc_sdio11 = 0x1f;
533 HOST.conf.frc_pos_samp = 0x1f;
534 HOST.conf.frc_neg_samp = 0;
536 case SDIO_SLAVE_TIMING_NSEND_NSAMPLE:
537 HOST.conf.frc_sdio20 = 0;
538 HOST.conf.frc_sdio11 = 0x1f;
539 HOST.conf.frc_pos_samp = 0;
540 HOST.conf.frc_neg_samp = 0x1f;
544 SLC.slc0_int_ena.frhost_bit0 = 1;
545 SLC.slc0_int_ena.frhost_bit1 = 1;
546 SLC.slc0_int_ena.frhost_bit2 = 1;
547 SLC.slc0_int_ena.frhost_bit3 = 1;
548 SLC.slc0_int_ena.frhost_bit4 = 1;
549 SLC.slc0_int_ena.frhost_bit5 = 1;
550 SLC.slc0_int_ena.frhost_bit6 = 1;
551 SLC.slc0_int_ena.frhost_bit7 = 1;
556 esp_err_t sdio_slave_initialize(sdio_slave_config_t *config)
559 intr_handle_t intr_handle = NULL;
561 r = esp_intr_alloc(ETS_SLC0_INTR_SOURCE, flags, sdio_intr, NULL, &intr_handle);
562 if (r != ESP_OK) return r;
564 r = sdio_slave_hw_init(config);
565 if (r != ESP_OK) return r;
566 r = init_context(config);
567 if (r != ESP_OK) return r;
568 context.intr_handle = intr_handle;
574 void sdio_slave_deinit()
576 esp_err_t ret = esp_intr_free(context.intr_handle);
578 context.intr_handle = NULL;
582 esp_err_t sdio_slave_start()
585 HOST.slc0_int_clr.val = UINT32_MAX;//clear all interrupts
587 if (ret != ESP_OK) return ret;
589 if (ret != ESP_OK) return ret;
590 HINF.cfg_data1.sdio_ioready1 = 1; //set IO ready to 1 to allow host to use
594 esp_err_t sdio_slave_reset()
597 send_reset_counter();
599 recv_reset_counter();
603 void sdio_slave_stop()
605 HINF.cfg_data1.sdio_ioready1 = 0; //set IO ready to 1 to stop host from using
610 #define SDIO_SLAVE_SLC_INT_TX_MASK (SLC_SLC0_TX_ERR_EOF_INT_ST_M | SLC_SLC0_TX_DSCR_EMPTY_INT_ST_M | SLC_SLC0_TX_DSCR_ERR_INT_ST_M | SLC_SLC0_TX_SUC_EOF_INT_ST_M | SLC_SLC0_TX_DONE_INT_ST_M | SLC_SLC0_TX_OVF_INT_ST_M | SLC_SLC0_TX_START_INT_ST_M)
611 #define SDIO_SLAVE_SLC_INT_RX_MASK (SLC_SLC0_RX_DSCR_ERR_INT_ST_M | SLC_SLC0_RX_EOF_INT_ST_M | SLC_SLC0_RX_DONE_INT_ST_M | SLC_SLC0_RX_UDF_INT_ST_M | SLC_SLC0_RX_START_INT_ST_M)
612 #define SDIO_SLAVE_SLC_INT_HOST_MASK (SLC_FRHOST_BIT7_INT_ST_M | SLC_FRHOST_BIT6_INT_ST_M | SLC_FRHOST_BIT5_INT_ST_M | SLC_FRHOST_BIT4_INT_ST_M | SLC_FRHOST_BIT3_INT_ST_M | SLC_FRHOST_BIT2_INT_ST_M | SLC_FRHOST_BIT1_INT_ST_M | SLC_FRHOST_BIT0_INT_ST_M)
614 //strange but `tx_*` regs for host->slave transfers while `rx_*` regs for slave->host transfers
615 static void sdio_intr(void* arg)
617 uint32_t int_val = SLC.slc0_int_st.val;
618 uint32_t int_raw = SLC.slc0_int_raw.val;
619 ESP_EARLY_LOGV(TAG, "sdio_intr: %08X(%08X)", int_val, int_raw);
621 if (int_val & SDIO_SLAVE_SLC_INT_RX_MASK) sdio_intr_send(arg);
622 if (int_val & SDIO_SLAVE_SLC_INT_TX_MASK) sdio_intr_recv(arg);
623 if (int_val & SDIO_SLAVE_SLC_INT_HOST_MASK) sdio_intr_host(arg);
626 /*---------------------------------------------------------------------------
628 *--------------------------------------------------------------------------*/
629 static void sdio_intr_host(void* arg)
631 uint8_t int_val = SLC.slc0_int_st.val & 0xff;
633 portBASE_TYPE yield = pdFALSE;
634 SLC.slc0_int_clr.val = int_val;
635 for(int i = 0; i < 8; i++) {
636 if (BIT(i) & int_val) {
637 if (context.config.event_cb != NULL) (*context.config.event_cb)(i);
638 xSemaphoreGiveFromISR(context.events[i], &yield);
641 if (yield) portYIELD_FROM_ISR();
644 esp_err_t sdio_slave_wait_int(int pos, TickType_t wait)
646 SDIO_SLAVE_CHECK(pos >= 0 && pos < 8, "interrupt num invalid", ESP_ERR_INVALID_ARG);
647 return xSemaphoreTake(context.events[pos], wait);
651 uint8_t sdio_slave_read_reg(int pos)
653 if (pos >= 28 && pos <= 31) SDIO_SLAVE_LOGW("%s: interrupt reg, for reference", __FUNCTION__);
654 if (pos < 0 || pos >= 64) SDIO_SLAVE_LOGE("read register address wrong");
656 return *(uint8_t*)(HOST_SLCHOST_CONF_W_REG(pos));
659 esp_err_t sdio_slave_write_reg(int pos, uint8_t reg)
661 if (pos >= 28 && pos <= 31) {
662 SDIO_SLAVE_LOGE("interrupt reg, please use sdio_slave_clear_int");
663 return ESP_ERR_INVALID_ARG;
665 if (pos < 0 || pos >= 64) {
666 SDIO_SLAVE_LOGE("write register address wrong");
667 return ESP_ERR_INVALID_ARG;
669 uint32_t addr = HOST_SLCHOST_CONF_W_REG(pos) & (~3);
670 uint32_t shift = (pos % 4)*8;
672 portENTER_CRITICAL(&context.reg_spinlock);
673 int val = *(uint32_t*)addr;
674 *(uint32_t*)addr = (val & ~(0xff << shift)) | (reg<<shift);
675 portEXIT_CRITICAL(&context.reg_spinlock);
679 sdio_slave_hostint_t sdio_slave_get_host_intena()
681 return HOST.slc0_func1_int_ena.val;
684 void sdio_slave_set_host_intena(sdio_slave_hostint_t ena)
686 HOST.slc0_func1_int_ena.val = ena;
689 void sdio_slave_clear_host_int(uint8_t mask)
691 SLC.intvec_tohost.slc0_intvec = mask;
694 esp_err_t sdio_slave_send_host_int(uint8_t pos)
696 SDIO_SLAVE_CHECK(pos < 8, "interrupt num invalid", ESP_ERR_INVALID_ARG);
697 SLC.intvec_tohost.slc0_intvec = BIT(pos);
702 /*---------------------------------------------------------------------------
704 *--------------------------------------------------------------------------*/
705 //it's strange but the register is really called 'rx' for slave->host transfers.
706 /* The link list is handled in the app, while counter and pointer processed in ISR.
707 * Driver abuse rx_done bit to invoke ISR.
708 * If driver is stopped, the link list is stopped as well as the ISR invoker.
710 static inline void send_length_write(uint32_t len)
712 SLC.slc0_len_conf.val = FIELD_TO_VALUE2(SLC_SLC0_LEN_WDATA, len) | FIELD_TO_VALUE2(SLC_SLC0_LEN_WR, 1);
713 ESP_EARLY_LOGV(TAG, "send_length_write: %d, last_len: %08X", len, HOST.pkt_len.reg_slc0_len);
716 static inline void send_start_transmission(const void* desc)
718 //reset to flush previous packets
719 SLC.conf0.slc0_rx_rst = 1;
720 SLC.conf0.slc0_rx_rst = 0;
721 SLC.slc0_rx_link.addr = (uint32_t)desc;
722 SLC.slc0_rx_link.start = 1;
725 static inline void send_stop_ll_operation()
727 SLC.slc0_rx_link.stop = 1;
730 static inline uint32_t send_length_read()
732 return HOST.pkt_len.reg_slc0_len;
735 DMA_ATTR static const buf_desc_t start_desc = {
737 .buf = (void*)0x3ffbbbbb, //assign a dma-capable pointer other than NULL, which will not be used
743 static inline void send_isr_invoker_enable()
745 //force trigger rx_done interrupt. the interrupt is abused to invoke ISR from the app by the enable bit and never cleared.
746 send_start_transmission(&start_desc);
748 while(!SLC.slc0_int_raw.rx_done);
749 HOST.slc0_int_clr.rx_new_packet = 1;
750 send_stop_ll_operation();
753 static inline void send_isr_invoker_disable()
755 SLC.slc0_int_clr.rx_done = 1;
758 static inline void send_intr_enable()
760 SLC.slc0_int_ena.rx_eof = 1;
761 send_isr_invoker_enable();
764 static inline void send_intr_disable()
766 send_isr_invoker_disable();
767 SLC.slc0_int_ena.rx_eof = 0;
770 static inline void send_isr_invoke()
772 SLC.slc0_int_ena.rx_done = 1;
775 static inline send_state_t send_get_state()
777 return context.send_state;
780 static inline void send_set_state(send_state_t state)
782 context.send_state = state;
785 //start hw operation with existing data (if exist)
786 static esp_err_t send_start()
788 SDIO_SLAVE_CHECK(send_get_state() == STATE_IDLE,
789 "already started", ESP_ERR_INVALID_STATE);
790 SLC.slc0_int_clr.rx_eof = 1;
791 send_set_state(STATE_WAIT_FOR_START);
796 //only stop hw operations, no touch to data as well as counter
797 static void send_stop()
799 SLC.slc0_rx_link.stop = 1;
802 send_set_state(STATE_IDLE);
805 static inline esp_err_t send_isr_eof(portBASE_TYPE *yield)
807 // inform app to recycle descs
808 portBASE_TYPE ret = pdTRUE;
809 buf_desc_t *desc = context.in_flight;
810 assert(desc != NULL);
813 ESP_EARLY_LOGV(TAG, "end: %x", desc->arg);
814 ret = xQueueSendFromISR(context.ret_queue, &desc->arg, yield);
815 assert(ret == pdTRUE);
816 buf_desc_t* next = STAILQ_NEXT(desc, qe);
819 STAILQ_NEXT(context.in_flight_end, qe) = context.in_flight_next;
820 sdio_ringbuf_return_from_isr(&context.sendbuf, (uint8_t*)context.in_flight, yield);
821 context.in_flight = NULL;
822 context.in_flight_end = NULL;
823 // Go to wait for packet state
824 send_set_state(STATE_WAIT_FOR_START);
828 static inline esp_err_t send_isr_check_new_pkt(portBASE_TYPE *yield)
831 buf_desc_t *start = NULL;
832 buf_desc_t *end = NULL;
833 if (context.config.sending_mode == SDIO_SLAVE_SEND_PACKET) {
834 ret = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&start, (uint8_t**)&end, RINGBUF_GET_ONE, 0);
835 } else { //stream mode
836 ret = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&start, (uint8_t**)&end, RINGBUF_GET_ALL, 0);
839 context.in_flight = start;
840 context.in_flight_end = end;
842 //temporarily break the link ring here, the ring will be re-connected in ``send_isr_eof()``.
843 context.in_flight_next = STAILQ_NEXT(end, qe);
844 STAILQ_NEXT(end, qe) = NULL;
849 static inline esp_err_t send_isr_new_packet()
851 // since eof is changed, we have to stop and reset the link list,
852 // and restart new link list operation
853 buf_desc_t *const start_desc = context.in_flight;
854 buf_desc_t *const end_desc = context.in_flight_end;
855 assert(start_desc != NULL && end_desc != NULL);
857 send_stop_ll_operation();
858 send_start_transmission(start_desc);
860 // update pkt_len register to allow host reading.
861 send_length_write(end_desc->pkt_len);
863 send_set_state(STATE_SENDING);
865 ESP_EARLY_LOGD(TAG, "restart new send: %p->%p, pkt_len: %d", start_desc, end_desc, end_desc->pkt_len);
869 static void sdio_intr_send(void* arg)
871 ESP_EARLY_LOGV(TAG, "intr_send");
872 portBASE_TYPE yield = pdFALSE;
874 // this interrupt is abused to get ISR invoked by app
875 if (SLC.slc0_int_st.rx_done) SLC.slc0_int_ena.rx_done = 0;
877 // Goto idle state (cur_start=NULL) if transmission done,
878 // also update sequence and recycle descs.
879 if (SLC.slc0_int_st.rx_eof) {
880 SLC.slc0_int_clr.rx_eof = 1;
881 //check current state
882 assert(send_get_state() == STATE_SENDING);// context.send_start != NOT_YET && context.send_end != NOT_YET);
883 send_isr_eof(&yield);
886 // Go to wait sending state (cur_start!=NULL && cur_end==NULL) if not sending and new packet ready.
887 // Note we may also enter this state by stopping sending in the app.
888 if (send_get_state() == STATE_WAIT_FOR_START) {
889 if (context.in_flight == NULL) send_isr_check_new_pkt(&yield);
890 // Go to sending state (cur_start and cur_end != NULL) if has packet to send.
891 if (context.in_flight) send_isr_new_packet();
894 if (yield) portYIELD_FROM_ISR();
897 esp_err_t send_write_desc(uint8_t* desc, void* arg)
899 buf_desc_t *new_desc = (buf_desc_t*)arg;
900 buf_desc_t *tail = (buf_desc_t*)sdio_ringbuf_peek_rear(&context.sendbuf);
901 new_desc->pkt_len = tail->pkt_len + new_desc->size;
902 //copy and keep the link
903 STAILQ_NEXT(new_desc, qe) = STAILQ_NEXT((buf_desc_t*)desc, qe);
905 memcpy(desc, new_desc, sizeof(buf_desc_t));
909 esp_err_t sdio_slave_send_queue(uint8_t* addr, size_t len, void* arg, TickType_t wait)
911 SDIO_SLAVE_CHECK(len > 0, "len <= 0", ESP_ERR_INVALID_ARG);
912 SDIO_SLAVE_CHECK(esp_ptr_dma_capable(addr) && (uint32_t)addr%4==0, "buffer to send should be DMA capable and 32-bit aligned",
913 ESP_ERR_INVALID_ARG);
915 buf_desc_t new_desc = {
920 // in stream mode, the eof is only appended (in ISR) when new packet is ready to be sent
921 .eof = (context.config.sending_mode == SDIO_SLAVE_SEND_PACKET?1:0),
925 esp_err_t ret = sdio_ringbuf_send(&context.sendbuf, send_write_desc, &new_desc, wait);
926 if (ret != ESP_OK) return ret;
932 esp_err_t sdio_slave_send_get_finished(void** out_arg, TickType_t wait)
935 portBASE_TYPE err = xQueueReceive(context.ret_queue, &arg, wait);
936 if (out_arg) *out_arg = arg;
937 if (err != pdTRUE) return ESP_ERR_TIMEOUT;
941 esp_err_t sdio_slave_transmit(uint8_t* addr, size_t len)
943 uint32_t timestamp = XTHAL_GET_CCOUNT();
946 esp_err_t err = sdio_slave_send_queue(addr, len, (void*)timestamp, portMAX_DELAY);
947 if (err != ESP_OK) return err;
948 err = sdio_slave_send_get_finished((void**)&ret_stamp, portMAX_DELAY);
949 if (err != ESP_OK) return err;
950 SDIO_SLAVE_CHECK(ret_stamp == timestamp, "already sent without return before", ESP_ERR_INVALID_STATE);
955 //clear data but keep counter
// Discard everything queued for sending while leaving the host-visible send
// counter untouched: every queued descriptor's `arg` is pushed to ret_queue
// so the application can reclaim its buffers, then the descriptors are given
// back to the sending ring buffer.  Only legal while no transfer is active.
956 static esp_err_t send_flush_data()
958 //only works in idle state / wait to send state
959 SDIO_SLAVE_CHECK(send_get_state() == STATE_IDLE,
960 "flush data when transmission started", ESP_ERR_INVALID_STATE);
// clear the pending "new packet" request raised by the host, if any
962 HOST.slc0_int_clr.rx_new_packet = 1;
964 buf_desc_t *last = NULL;
// pass 1: descriptors already handed to the DMA (the in_flight chain)
965 if (context.in_flight) {
966 buf_desc_t *desc = context.in_flight;
967 while(desc != NULL) {
// hand the buffer's arg back to the application as if it had been sent
968 xQueueSend(context.ret_queue, desc->arg, portMAX_DELAY);
970 desc = STAILQ_NEXT(desc, qe);
// re-link the in-flight chain to the waiting chain so the ring buffer gets
// one contiguous region back
972 STAILQ_NEXT(context.in_flight_end, qe) = context.in_flight_next;
973 sdio_ringbuf_return(&context.sendbuf, (uint8_t*)context.in_flight);
974 context.in_flight = NULL;
975 context.in_flight_end = NULL;
// pass 2: descriptors still waiting in the ring buffer (never sent)
979 esp_err_t ret = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&head, NULL, RINGBUF_GET_ALL, 0);
981 buf_desc_t *desc = head;
982 while(desc != NULL) {
983 xQueueSend(context.ret_queue, desc->arg, portMAX_DELAY);
985 desc = STAILQ_NEXT(desc, qe);
987 sdio_ringbuf_return(&context.sendbuf, (uint8_t*)head);
990 // if in wait to send state, set the sequence number of tail to the value last sent, just as if the packet wait to
991 // send never queued.
992 // Go to idle state (cur_end!=NULL and cur_start=NULL)
993 send_set_state(STATE_IDLE);
// keep the tail descriptor's pkt_len consistent with what was actually sent,
// so the counter seen by the host stays valid
995 if (last == NULL) last = (buf_desc_t*)sdio_ringbuf_peek_rear(&context.sendbuf);
996 last->pkt_len = send_length_read();
1000 //clear counter but keep data
// Rebuild the send length counter from scratch while keeping all queued data:
// zero the hardware counter, then re-accumulate pkt_len (a running total, not
// a per-buffer length) over every in-flight descriptor and every descriptor
// still waiting in the ring buffer.  Only legal while no transfer is active.
1001 static esp_err_t send_reset_counter()
1003 SDIO_SLAVE_CHECK(send_get_state() == STATE_IDLE,
1004 "reset counter when transmission started", ESP_ERR_INVALID_STATE);
1006 send_length_write(0);
1008 uint32_t last_cnt=0;
1009 buf_desc_t *desc = context.in_flight;
1010 buf_desc_t *last = NULL;
// pass 1: descriptors already handed to the DMA
1011 while(desc != NULL) {
1012 last_cnt += desc->length;
// pkt_len carries the cumulative byte count up to and including this buffer
1013 desc->pkt_len = last_cnt;
1015 desc = STAILQ_NEXT(desc, qe);
1017 // in theory the desc should be the one right next to the last of in_flight,
1018 // but the link of last is NULL, so get the desc from the ringbuf directly.
1019 desc = (buf_desc_t*)sdio_ringbuf_peek_front(&context.sendbuf);
// pass 2: descriptors still waiting in the ring buffer
1020 while(desc != NULL) {
1021 last_cnt += desc->length;
1022 desc->pkt_len = last_cnt;
1024 desc = STAILQ_NEXT(desc, qe);
1027 last = (buf_desc_t*)sdio_ringbuf_peek_rear(&context.sendbuf);
1035 /*---------------------------------------------------------------------------
1037 *--------------------------------------------------------------------------*/
1038 // Note: confusingly, the hardware registers for host->slave (receive) transfers are named "tx*".
// Validate a receive-buffer handle for operations that require the buffer to
// be idle (registered, but not currently loaded in the receiving queue).
// Expands to a `return ESP_ERR_INVALID_ARG;` from the *enclosing function*
// when `desc` is NULL or the buffer is still in the receiving queue.
// Fix: parenthesize the macro argument so the expansion stays correct when
// the caller passes a non-trivial expression (cast, ternary, arithmetic).
#define CHECK_HANDLE_IDLE(desc) do { if ((desc) == NULL || !(desc)->not_receiving) {\
    return ESP_ERR_INVALID_ARG; } } while(0)
// Take the spinlock protecting the receive lists and DMA registers.
1043 static inline void critical_enter_recv()
1045 portENTER_CRITICAL(&context.recv_spinlock);
// Release the spinlock taken by critical_enter_recv().
1048 static inline void critical_exit_recv()
1050 portEXIT_CRITICAL(&context.recv_spinlock);
// Increase the host-visible receive buffer count by 1 (token1 register).
1053 static inline void recv_size_inc()
1055 // fields wdata and inc_more should be written by the same instruction.
1056 SLC.slc0_token1.val = FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WDATA, 1) | FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_INC_MORE, 1);
// Overwrite (not increment) the host-visible receive buffer count with 0.
1059 static inline void recv_size_reset()
1061 SLC.slc0_token1.val = FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WDATA, 0) | FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WR, 1);
// Scan the receive linked-list for the first descriptor still owned by the
// hardware (owner == 1), i.e. the first buffer the DMA has not filled yet.
// Called with the recv critical section held (see recv_start / recv_reset_counter).
1064 static inline buf_desc_t* recv_get_first_empty_buf()
1066 buf_stailq_t *const queue = &context.recv_link_list;
1067 buf_desc_t *desc = STAILQ_FIRST(queue);
// skip descriptors already handed back to software (owner == 0)
1068 while(desc && desc->owner == 0) {
1069 desc = STAILQ_NEXT(desc, qe);
// (Re)start receiving: reset the slc0 "tx" (host->slave) DMA state machine,
// point it at the first buffer the hardware has not used yet, and enable the
// tx_done interrupt.  Succeeds trivially when no buffer is loaded.
1074 static esp_err_t recv_start()
// toggle-reset the DMA before re-pointing its linked list
1076 SLC.conf0.slc0_tx_rst = 1;
1077 SLC.conf0.slc0_tx_rst = 0;
1079 critical_enter_recv();
1080 buf_desc_t *desc = recv_get_first_empty_buf();
// no un-filled buffer loaded: nothing to start, not an error
1082 ESP_LOGD(TAG, "recv: restart without desc");
1083 critical_exit_recv();
1084 return ESP_OK; // if no buffer loaded, return directly.
1086 //the counter is handled when add/flush/reset
1087 SLC.slc0_tx_link.addr = (uint32_t)desc;
1088 SLC.slc0_tx_link.start = 1;
1089 critical_exit_recv();
1091 SLC.slc0_int_ena.tx_done = 1;
// Stop the receive DMA linked-list operation and mask its tx_done interrupt.
1095 static void recv_stop()
1097 SLC.slc0_tx_link.stop = 1;
1098 SLC.slc0_int_ena.tx_done = 0;
1101 // reset the counter, but keep the data
// Re-synchronize the host-visible receive buffer counter with the buffers
// still loaded: under the recv lock, walk every descriptor still owned by
// the DMA (owner == 1) starting from the first un-filled one.
1102 static void recv_reset_counter()
1106 critical_enter_recv();
1107 buf_desc_t *desc = recv_get_first_empty_buf();
1108 while (desc != NULL) {
// everything after the first empty buffer must still belong to the hardware
1109 assert(desc->owner == 1);
1111 desc = STAILQ_NEXT(desc, qe);
1113 critical_exit_recv();
1116 // remove data, still increase the counter
// Discard all received-but-unread data: for each pending receive event, take
// the (already used, owner == 0) head descriptor off the linked-list and put
// it back at the tail so its buffer can be refilled.  The DMA is not
// restarted and the buffer count is not raised here.
1117 static void recv_flush_data()
1119 buf_stailq_t *const queue = &context.recv_link_list;
1121 critical_enter_recv();
// consume one pending receive event per iteration; stop when none are left
1123 portBASE_TYPE ret = xSemaphoreTake(context.recv_event, 0);
1124 if (ret == pdFALSE) break;
// a pending event guarantees a finished (owner == 0) descriptor at the head
1126 buf_desc_t *desc = STAILQ_FIRST(queue);
1127 assert (desc != NULL && desc->owner == 0);
1128 STAILQ_REMOVE_HEAD(queue, qe);
1130 STAILQ_INSERT_TAIL(queue, desc, qe);
1132 //we only add it to the tail here, without start the DMA nor increase buffer num.
1134 critical_exit_recv();
// ISR for the slc0 tx_done interrupt (a host->slave transfer finished).
// Walks the descriptors the hardware has handed back (owner cleared to 0)
// and gives the recv_event semaphore once per finished buffer; the counts
// are consumed by sdio_slave_recv() / recv_flush_data().
1137 static void sdio_intr_recv(void* arg)
1139 portBASE_TYPE yield = 0;
1140 if (SLC.slc0_int_raw.tx_done) {
1141 SLC.slc0_int_clr.tx_done = 1;
1142 while (context.recv_cur_ret && context.recv_cur_ret->owner == 0) {
1143 // This may cause the ``cur_ret`` pointer to be NULL, indicating the list is empty,
1144 // in this case the ``tx_done`` should happen no longer until new desc is appended.
1145 // The app is responsible to place the pointer to the right place again when appending new desc.
1146 context.recv_cur_ret = STAILQ_NEXT(context.recv_cur_ret, qe);
1147 ESP_EARLY_LOGV(TAG, "intr_recv: Give");
// one give per finished buffer
1148 xSemaphoreGiveFromISR(context.recv_event, &yield);
// request a context switch if the give woke a higher-priority task
1151 if (yield) portYIELD_FROM_ISR();
// Load a registered, currently idle buffer into the receiving linked-list and
// hand it to the DMA, starting a new hardware list operation or restarting
// the existing one as appropriate.
// Returns ESP_ERR_INVALID_ARG when the handle is NULL or already loaded.
1154 esp_err_t sdio_slave_recv_load_buf(sdio_slave_buf_handle_t handle)
1156 buf_desc_t *desc = (buf_desc_t*)handle;
1157 CHECK_HANDLE_IDLE(desc);
1159 buf_stailq_t *const queue = &context.recv_link_list;
1161 critical_enter_recv();
// leave the "registered but idle" list before joining the receiving queue
1162 TAILQ_REMOVE(&context.recv_reg_list, desc, te);
1164 desc->not_receiving = 0; //manually remove the prev link (by set not_receiving=0), to indicate this is in the queue
1166 buf_desc_t *const tail = STAILQ_LAST(queue, buf_desc_s, qe);
1168 STAILQ_INSERT_TAIL(queue, desc, qe);
// tail absent or already finished: the ISR's cur_ret walk has (or will have)
// run off the list, so it must be re-pointed at the new descriptor
1169 if (tail == NULL || (tail->owner == 0)) {
1170 //in this case we have to set the ret pointer
1172 /* if the owner of the tail is returned to the software, the ISR is
1173 * expect to write this pointer to NULL in a short time, wait until
1174 * that and set new value for this pointer
1176 while (context.recv_cur_ret != NULL) {}
1178 assert(context.recv_cur_ret == NULL);
1179 context.recv_cur_ret = desc;
1181 assert(context.recv_cur_ret != NULL);
// start a fresh DMA linked-list operation, or resume the existing one
1184 //no one in the ll, start new ll operation.
1185 SLC.slc0_tx_link.addr = (uint32_t)desc;
1186 SLC.slc0_tx_link.start = 1;
1187 ESP_LOGV(TAG, "recv_load_buf: start new");
1189 //restart former ll operation
1190 SLC.slc0_tx_link.restart = 1;
1191 ESP_LOGV(TAG, "recv_load_buf: restart");
1193 critical_exit_recv();
// Allocate and register a descriptor for the application buffer `start`, so
// it can later be loaded into the receiving queue.  The buffer must be DMA
// capable and 32-bit aligned; its usable size is fixed to
// config.recv_buffer_size.  Returns the new handle, or NULL on invalid
// argument / allocation failure.
1199 sdio_slave_buf_handle_t sdio_slave_recv_register_buf(uint8_t *start)
1201 SDIO_SLAVE_CHECK(esp_ptr_dma_capable(start) && (uint32_t)start%4==0,
1202 "buffer to register should be DMA capable and 32-bit aligned", NULL);
1203 buf_desc_t *desc = (buf_desc_t*)malloc(sizeof(buf_desc_t));
// allocation-failure path
1205 SDIO_SLAVE_LOGE("cannot allocate lldesc for new buffer");
1209 //initially in the reg list
1210 *desc = (buf_desc_t) {
1211 .size = context.config.recv_buffer_size,
1213 //no length required, eof always=0
1215 critical_enter_recv();
1216 TAILQ_INSERT_TAIL(&context.recv_reg_list, desc, te);
1217 critical_exit_recv();
// Wait (up to `wait` ticks) until the hardware has finished filling a buffer,
// then hand it to the application: the descriptor moves from the receiving
// linked-list back to the registered-idle list, and its handle, data address
// and received length are returned.  ESP_ERR_TIMEOUT when nothing arrives.
1221 esp_err_t sdio_slave_recv(sdio_slave_buf_handle_t* handle_ret, uint8_t **out_addr, size_t *out_len, TickType_t wait)
1223 SDIO_SLAVE_CHECK(handle_ret != NULL, "handle address cannot be 0", ESP_ERR_INVALID_ARG);
// one semaphore count per finished buffer, given by sdio_intr_recv()
1224 portBASE_TYPE ret = xSemaphoreTake(context.recv_event, wait);
1225 if (ret == pdFALSE) return ESP_ERR_TIMEOUT;
1227 buf_stailq_t *const queue = &context.recv_link_list;
1229 critical_enter_recv();
1230 //remove from queue, add back to reg list.
1231 buf_desc_t *desc = STAILQ_FIRST(queue);
1232 STAILQ_REMOVE_HEAD(queue, qe);
1233 TAILQ_INSERT_TAIL(&context.recv_reg_list, desc, te);
1234 critical_exit_recv();
// owner == 0 confirms the DMA has handed this descriptor back to software
1236 assert(desc != NULL && desc->owner == 0);
1237 *handle_ret = (sdio_slave_buf_handle_t)desc;
1238 if (out_addr) *out_addr = desc->buf;
// note: length (bytes in this transfer), distinct from size (buffer capacity)
1239 if (out_len) *out_len = desc->length;
// Unregister a buffer that is currently idle: remove its descriptor from the
// registered list.  Fails with ESP_ERR_INVALID_ARG if the buffer is still
// loaded in the receiving queue.
1243 esp_err_t sdio_slave_recv_unregister_buf(sdio_slave_buf_handle_t handle)
1245 buf_desc_t *desc = (buf_desc_t*)handle;
1246 CHECK_HANDLE_IDLE(desc); //in the queue, fail.
1248 critical_enter_recv();
1249 TAILQ_REMOVE(&context.recv_reg_list, desc, te);
1250 critical_exit_recv();
1255 uint8_t* sdio_slave_recv_get_buf(sdio_slave_buf_handle_t handle, size_t *len_o)
1257 buf_desc_t *desc = (buf_desc_t*)handle;
1258 if (handle == NULL) return NULL;
1260 if (len_o!= NULL) *len_o= desc->length;