// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>

#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <freertos/semphr.h>
#include <rom/spi_flash.h>
#include <rom/cache.h>
#include <soc/soc.h>
#include <soc/dport_reg.h>
#include "soc/soc_memory_layout.h"
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_spi_flash.h"
#include "esp_flash_encrypt.h"
#include "esp_log.h"
#include "cache_utils.h"
#include "esp_spiram.h"

#ifndef NDEBUG
// Enable built-in checks in queue.h in debug builds
#define INVARIANTS
#endif
#include "rom/queue.h"
#define REGIONS_COUNT 4
#define PAGES_PER_REGION 64
#define INVALID_ENTRY_VAL 0x100
#define VADDR0_START_ADDR 0x3F400000
#define VADDR1_START_ADDR 0x40000000
#define VADDR1_FIRST_USABLE_ADDR 0x400D0000
#define PRO_IRAM0_FIRST_USABLE_PAGE ((VADDR1_FIRST_USABLE_ADDR - VADDR1_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE + 64)
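
/* With the ESP32's 64KB (0x10000) MMU page size this works out to
   (0x400D0000 - 0x40000000) / 0x10000 + 64 = 13 + 64 = 77, i.e. the
   first page of the instruction region that the application may remap. */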

/* Ensure pages in a region haven't been marked as written via
   spi_flash_mark_modified_region(). If any page in the region has
   been written, flush the entire flash cache before returning.

   This ensures stale cache entries are never read after fresh calls
   to spi_flash_mmap(), while keeping the number of cache flushes to a
   minimum.

   Returns true if the cache was flushed.
*/
static bool spi_flash_ensure_unmodified_region(size_t start_addr, size_t length);
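
/* Sketch of the intended interplay (illustrative caller, not code from
   this driver): the write path records dirty pages, and the next mmap of
   an overlapping range triggers at most one full cache flush.

     spi_flash_mark_modified_region(0x110000, 0x20000); // after a write
     ...
     const void *p;
     spi_flash_mmap_handle_t h;
     spi_flash_mmap(0x100000, 0x30000, SPI_FLASH_MMAP_DATA, &p, &h);
     // overlaps the modified range, so the cache is flushed once here
*/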

typedef struct mmap_entry_{
    uint32_t handle;
    int page;
    int count;
    LIST_ENTRY(mmap_entry_) entries;
} mmap_entry_t;


static LIST_HEAD(mmap_entries_head, mmap_entry_) s_mmap_entries_head =
        LIST_HEAD_INITIALIZER(s_mmap_entries_head);
static uint8_t s_mmap_page_refcnt[REGIONS_COUNT * PAGES_PER_REGION] = {0};
static uint32_t s_mmap_last_handle = 0;


static void IRAM_ATTR spi_flash_mmap_init()
{
    if (s_mmap_page_refcnt[0] != 0) {
        return; /* mmap data already initialised */
    }
    DPORT_INTERRUPT_DISABLE();
    for (int i = 0; i < REGIONS_COUNT * PAGES_PER_REGION; ++i) {
        uint32_t entry_pro = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]);
        uint32_t entry_app = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_APP_FLASH_MMU_TABLE[i]);

        if (entry_pro != entry_app) {
            // clean up entries used by the boot loader
            entry_pro = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
            DPORT_PRO_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
        }
        if ((entry_pro & INVALID_ENTRY_VAL) == 0 && (i == 0 || i == PRO_IRAM0_FIRST_USABLE_PAGE || entry_pro != 0)) {
            // page is mapped for the application image; pin it with refcount 1
            s_mmap_page_refcnt[i] = 1;
        } else {
            DPORT_PRO_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
            DPORT_APP_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
        }
    }
    DPORT_INTERRUPT_RESTORE();
}

static void IRAM_ATTR get_mmu_region(spi_flash_mmap_memory_t memory, int* out_begin, int* out_size, uint32_t* region_addr)
{
    if (memory == SPI_FLASH_MMAP_DATA) {
        // Vaddr0
        *out_begin = 0;
        *out_size = 64;
        *region_addr = VADDR0_START_ADDR;
    } else {
        // only part of VAddr1 is usable, so adjust for that
        *out_begin = PRO_IRAM0_FIRST_USABLE_PAGE;
        *out_size = 3 * 64 - *out_begin;
        *region_addr = VADDR1_FIRST_USABLE_ADDR;
    }
}

esp_err_t IRAM_ATTR spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t memory,
                         const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
    esp_err_t ret;
    if (src_addr & 0xffff) {
        // source address must be aligned to a 64KB MMU page
        return ESP_ERR_INVALID_ARG;
    }
    if (src_addr + size > g_rom_flashchip.chip_size) {
        return ESP_ERR_INVALID_ARG;
    }
    // region which should be mapped
    int phys_page = src_addr / SPI_FLASH_MMU_PAGE_SIZE;
    int page_count = (size + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
    // prepare a linear pages array to feed into spi_flash_mmap_pages
    int *pages = heap_caps_malloc(sizeof(int)*page_count, MALLOC_CAP_INTERNAL);
    if (pages == NULL) {
        return ESP_ERR_NO_MEM;
    }
    for (int i = 0; i < page_count; i++) {
        pages[i] = phys_page+i;
    }
    ret = spi_flash_mmap_pages(pages, page_count, memory, out_ptr, out_handle);
    free(pages);
    return ret;
}
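
/* Usage sketch (hypothetical caller, offsets chosen for illustration):
   map 128KB of flash starting at offset 0x100000 into data address
   space, read through the returned pointer, then release the mapping.

     const void *map_ptr;
     spi_flash_mmap_handle_t map_handle;
     esp_err_t err = spi_flash_mmap(0x100000, 0x20000, SPI_FLASH_MMAP_DATA,
                                    &map_ptr, &map_handle);
     if (err == ESP_OK) {
         const uint32_t *words = (const uint32_t *) map_ptr;
         uint32_t first_word = words[0]; // reads are served via the cache
         spi_flash_munmap(map_handle);   // release pages when done
     }
*/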

esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, spi_flash_mmap_memory_t memory,
                         const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
    esp_err_t ret;
    bool did_flush, need_flush = false;
    if (!page_count) {
        return ESP_ERR_INVALID_ARG;
    }
    if (!esp_ptr_internal(pages)) {
        return ESP_ERR_INVALID_ARG;
    }
    for (int i = 0; i < page_count; i++) {
        if (pages[i] < 0 || pages[i]*SPI_FLASH_MMU_PAGE_SIZE >= g_rom_flashchip.chip_size) {
            return ESP_ERR_INVALID_ARG;
        }
    }
    mmap_entry_t* new_entry = (mmap_entry_t*) heap_caps_malloc(sizeof(mmap_entry_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
    if (new_entry == 0) {
        return ESP_ERR_NO_MEM;
    }

    spi_flash_disable_interrupts_caches_and_other_cpu();

    did_flush = false;
    for (int i = 0; i < page_count; i++) {
        if (spi_flash_ensure_unmodified_region(pages[i]*SPI_FLASH_MMU_PAGE_SIZE, SPI_FLASH_MMU_PAGE_SIZE)) {
            did_flush = true;
        }
    }
    spi_flash_mmap_init();
    // figure out the memory region where we should look for pages
    int region_begin;      // first page to check
    int region_size;       // number of pages to check
    uint32_t region_addr;  // base address of memory region
    get_mmu_region(memory, &region_begin, &region_size, &region_addr);
    if (region_size < page_count) {
        // not enough pages in this region: undo the lock and allocation
        spi_flash_enable_interrupts_caches_and_other_cpu();
        free(new_entry);
        return ESP_ERR_NO_MEM;
    }
    // The following part searches for a range of MMU entries which can be used.
    // The algorithm is essentially a naïve strstr, except that unused MMU
    // entries are treated as wildcards.
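    // For example (illustrative values): with pages == {10, 11}, a window
    // of entries already mapping [10][11] with nonzero refcount matches
    // (identical mappings are shared), while a window of refcount-zero
    // entries matches anywhere, whatever stale values the table holds.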
185 // the " + 1" is a fix when loop the MMU table pages, because the last MMU page
186 // is valid as well if it have not been used
187 int end = region_begin + region_size - page_count + 1;
188 for (start = region_begin; start < end; ++start) {
191 DPORT_INTERRUPT_DISABLE();
192 for (pos = start; pos < start + page_count; ++pos, ++pageno) {
193 int table_val = (int) DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[pos]);
194 uint8_t refcnt = s_mmap_page_refcnt[pos];
195 if (refcnt != 0 && table_val != pages[pageno]) {
199 DPORT_INTERRUPT_RESTORE();
200 // whole mapping range matched, bail out
201 if (pos - start == page_count) {
205 // checked all the region(s) and haven't found anything?
209 ret = ESP_ERR_NO_MEM;
    } else {
        // set up mapping using pages
        uint32_t pageno = 0;
        DPORT_INTERRUPT_DISABLE();
        for (int i = start; i != start + page_count; ++i, ++pageno) {
            // sanity check: we won't reconfigure entries with non-zero reference count
            uint32_t entry_pro = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]);
            uint32_t entry_app = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_APP_FLASH_MMU_TABLE[i]);
            assert(s_mmap_page_refcnt[i] == 0 ||
                    (entry_pro == pages[pageno] &&
                     entry_app == pages[pageno]));
            if (s_mmap_page_refcnt[i] == 0) {
                if (entry_pro != pages[pageno] || entry_app != pages[pageno]) {
                    DPORT_PRO_FLASH_MMU_TABLE[i] = pages[pageno];
                    DPORT_APP_FLASH_MMU_TABLE[i] = pages[pageno];
                    need_flush = true;
                }
            }
            ++s_mmap_page_refcnt[i];
        }
        DPORT_INTERRUPT_RESTORE();
        LIST_INSERT_HEAD(&s_mmap_entries_head, new_entry, entries);
        new_entry->page = start;
        new_entry->count = page_count;
        new_entry->handle = ++s_mmap_last_handle;
        *out_handle = new_entry->handle;
        *out_ptr = (void*) (region_addr + (start - region_begin) * SPI_FLASH_MMU_PAGE_SIZE);
        ret = ESP_OK;
    }

    /* This is a temporary fix for an issue where some
       cached reads may see stale data.

       Working on a long term fix that doesn't require invalidating
       the entire cache.
    */
    if (!did_flush && need_flush) {
#if CONFIG_SPIRAM_SUPPORT
        esp_spiram_writeback_cache();
#endif
        Cache_Flush(0);
        Cache_Flush(1);
    }

    spi_flash_enable_interrupts_caches_and_other_cpu();
    if (*out_ptr == NULL) {
        free(new_entry);
    }
    return ret;
}
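
/* Usage sketch (hypothetical): unlike spi_flash_mmap(), the pages array
   need not be contiguous in flash, which allows stitching scattered 64KB
   pages into one linear window. The page numbers below are made up.

     int pages[] = { 16, 40, 17 };  // flash offsets 0x100000, 0x280000, 0x110000
     const void *win;
     spi_flash_mmap_handle_t h;
     esp_err_t err = spi_flash_mmap_pages(pages, 3, SPI_FLASH_MMAP_DATA, &win, &h);
     // on success, win spans 3 * 64KB of virtually-contiguous data
*/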

void IRAM_ATTR spi_flash_munmap(spi_flash_mmap_handle_t handle)
{
    spi_flash_disable_interrupts_caches_and_other_cpu();
    mmap_entry_t* it;
    // look for handle in linked list
    for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) {
        if (it->handle == handle) {
            // for each page, decrement the reference counter;
            // if the reference count drops to zero, disable the MMU table entry
            // to facilitate debugging of use-after-free conditions
            for (int i = it->page; i < it->page + it->count; ++i) {
                assert(s_mmap_page_refcnt[i] > 0);
                if (--s_mmap_page_refcnt[i] == 0) {
                    DPORT_PRO_FLASH_MMU_TABLE[i] = INVALID_ENTRY_VAL;
                    DPORT_APP_FLASH_MMU_TABLE[i] = INVALID_ENTRY_VAL;
                }
            }
            LIST_REMOVE(it, entries);
            break;
        }
    }
    spi_flash_enable_interrupts_caches_and_other_cpu();
    if (it == NULL) {
        assert(0 && "invalid handle, or handle already unmapped");
    }
    free(it);
}

void spi_flash_mmap_dump()
{
    spi_flash_mmap_init();
    mmap_entry_t* it;
    for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) {
        printf("handle=%d page=%d count=%d\n", it->handle, it->page, it->count);
    }
    for (int i = 0; i < REGIONS_COUNT * PAGES_PER_REGION; ++i) {
        if (s_mmap_page_refcnt[i] != 0) {
            printf("page %d: refcnt=%d paddr=%d\n",
                   i, (int) s_mmap_page_refcnt[i], DPORT_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]));
        }
    }
}

uint32_t spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
{
    spi_flash_mmap_init();
    int count = 0;
    int region_begin;      // first page to check
    int region_size;       // number of pages to check
    uint32_t region_addr;  // base address of memory region
    get_mmu_region(memory, &region_begin, &region_size, &region_addr);
    DPORT_INTERRUPT_DISABLE();
    for (int i = region_begin; i < region_begin + region_size; ++i) {
        if (s_mmap_page_refcnt[i] == 0 && DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]) == INVALID_ENTRY_VAL) {
            count++;
        }
    }
    DPORT_INTERRUPT_RESTORE();
    return count;
}

/* 256-bit (up to 16MB of 64KB pages) bitset of all flash pages
   that have been written to since the last cache flush.

   Before mmapping a page, we need to flush caches if that page has been
   written to.

   Note: It's possible to do some additional performance tweaks to
   this algorithm, as we actually only need to flush caches if a page
   was first mmapped, then written to, then is about to be mmapped a
   second time. This is a fair bit more complex though, so unless
   there's an access pattern that this would significantly boost it's
   probably not worth it.
*/
static uint32_t written_pages[256/32];
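
/* Bit indexing example (values follow from update_written_pages() below):
   flash address 0x150000 lies in 64KB page 0x15 == 21, so it is tracked in
   written_pages[21 / 32] == written_pages[0] at bit (1 << 21); the last
   page of a 16MB part, page 255, maps to written_pages[7], bit (1 << 31). */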

static bool update_written_pages(size_t start_addr, size_t length, bool mark);

void IRAM_ATTR spi_flash_mark_modified_region(size_t start_addr, size_t length)
{
    update_written_pages(start_addr, length, true);
}

static IRAM_ATTR bool spi_flash_ensure_unmodified_region(size_t start_addr, size_t length)
{
    return update_written_pages(start_addr, length, false);
}

/* generic implementation for the previous two functions */
static inline IRAM_ATTR bool update_written_pages(size_t start_addr, size_t length, bool mark)
{
    /* align start_addr & length to full MMU pages */
    uint32_t page_start_addr = start_addr & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
    length += (start_addr - page_start_addr);
    length = (length + SPI_FLASH_MMU_PAGE_SIZE - 1) & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
    for (uint32_t addr = page_start_addr; addr < page_start_addr + length; addr += SPI_FLASH_MMU_PAGE_SIZE) {
        int page = addr / SPI_FLASH_MMU_PAGE_SIZE;
        if (page >= 256) {
            return false; /* invalid address */
        }

        int idx = page / 32;
        uint32_t bit = 1 << (page % 32);
        if (mark) {
            written_pages[idx] |= bit;
        } else if (written_pages[idx] & bit) {
            /* it is tempting to write a version of this that only
               flushes each CPU's cache as needed. However this is
               tricky because mmapped memory can be used on un-pinned
               cores, or the pointer passed between CPUs.
            */
#if CONFIG_SPIRAM_SUPPORT
            esp_spiram_writeback_cache();
#endif
            Cache_Flush(0);
#ifndef CONFIG_FREERTOS_UNICORE
            Cache_Flush(1);
#endif
            bzero(written_pages, sizeof(written_pages));
            return true;
        }
    }
    return false;
}


uint32_t spi_flash_cache2phys(const void *cached)
{
    intptr_t c = (intptr_t)cached;
    size_t cache_page;
    if (c >= VADDR1_START_ADDR && c < VADDR1_FIRST_USABLE_ADDR) {
        /* IRAM address, doesn't map to flash */
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }
    else if (c < VADDR1_FIRST_USABLE_ADDR) {
        /* expect the cached address is in DROM */
        cache_page = (c - VADDR0_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE;
    } else {
        /* expect the cached address is in IROM */
        cache_page = (c - VADDR1_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE + 64;
    }

    if (cache_page >= 256) {
        /* cached address was not in IROM or DROM */
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }
    uint32_t phys_page = DPORT_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[cache_page]);
    if (phys_page == INVALID_ENTRY_VAL) {
        /* page is not mapped */
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }
    uint32_t phys_offs = phys_page * SPI_FLASH_MMU_PAGE_SIZE;
    return phys_offs | (c & (SPI_FLASH_MMU_PAGE_SIZE-1));
}


const void *spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t memory)
{
    uint32_t phys_page = phys_offs / SPI_FLASH_MMU_PAGE_SIZE;
    int start, end, page_delta;
    intptr_t base;

    if (memory == SPI_FLASH_MMAP_DATA) {
        start = 0;
        end = 64;
        base = VADDR0_START_ADDR;
        page_delta = 0;
    } else {
        start = PRO_IRAM0_FIRST_USABLE_PAGE;
        end = 256;
        base = VADDR1_START_ADDR;
        page_delta = 64;
    }
    DPORT_INTERRUPT_DISABLE();
    for (int i = start; i < end; i++) {
        if (DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]) == phys_page) {
            i -= page_delta;
            intptr_t cache_page = base + (SPI_FLASH_MMU_PAGE_SIZE * i);
            DPORT_INTERRUPT_RESTORE();
            return (const void *) (cache_page | (phys_offs & (SPI_FLASH_MMU_PAGE_SIZE-1)));
        }
    }
    DPORT_INTERRUPT_RESTORE();
    return NULL;
}
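
/* Round-trip sketch (hypothetical pointer, shown for illustration): for a
   pointer p into a mapped DROM region,

     uint32_t phys = spi_flash_cache2phys(p);
     const void *q = spi_flash_phys2cache(phys, SPI_FLASH_MMAP_DATA);

   phys is the physical flash offset behind p, and q points at the first
   cached mapping of that physical page, so q == p when the page is mapped
   exactly once. Unmapped inputs yield SPI_FLASH_CACHE2PHYS_FAIL and NULL
   respectively. */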