// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>

#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <freertos/semphr.h>
#include <rom/spi_flash.h>
#include <rom/cache.h>
#include <soc/dport_reg.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_spi_flash.h"
#include "esp_flash_encrypt.h"
#include "cache_utils.h"

#ifndef NDEBUG
// Enable built-in checks in queue.h in debug builds
#define INVARIANTS
#endif
#include "rom/queue.h"
#define REGIONS_COUNT 4
#define PAGES_PER_REGION 64
#define INVALID_ENTRY_VAL 0x100
#define VADDR0_START_ADDR 0x3F400000
#define VADDR1_START_ADDR 0x40000000
#define VADDR1_FIRST_USABLE_ADDR 0x400D0000
#define PRO_IRAM0_FIRST_USABLE_PAGE ((VADDR1_FIRST_USABLE_ADDR - VADDR1_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE + 64)
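
/* With the ESP32's 64KB (0x10000) MMU page size, the macro above works out
   to (0x400D0000 - 0x40000000) / 0x10000 + 64 = 13 + 64 = 77, i.e. the
   first MMU entry in the instruction region that mmap may hand out. */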

/* Ensure pages in a region haven't been marked as written via
   spi_flash_mark_modified_region(). If the page has
   been written, flush the entire flash cache before returning.

   This ensures stale cache entries are never read after fresh calls
   to spi_flash_mmap(), while keeping the number of cache flushes to a
   minimum.

   Returns true if cache was flushed.
*/
static bool spi_flash_ensure_unmodified_region(size_t start_addr, size_t length);

typedef struct mmap_entry_{
    uint32_t handle;
    int page;
    int count;
    LIST_ENTRY(mmap_entry_) entries;
} mmap_entry_t;

static LIST_HEAD(mmap_entries_head, mmap_entry_) s_mmap_entries_head =
        LIST_HEAD_INITIALIZER(s_mmap_entries_head);
static uint8_t s_mmap_page_refcnt[REGIONS_COUNT * PAGES_PER_REGION] = {0};
static uint32_t s_mmap_last_handle = 0;

static void IRAM_ATTR spi_flash_mmap_init()
{
    if (s_mmap_page_refcnt[0] != 0) {
        return; /* mmap data already initialised */
    }

    for (int i = 0; i < REGIONS_COUNT * PAGES_PER_REGION; ++i) {
        uint32_t entry_pro = DPORT_PRO_FLASH_MMU_TABLE[i];
        uint32_t entry_app = DPORT_APP_FLASH_MMU_TABLE[i];
        if (entry_pro != entry_app) {
            // clean up entries used by boot loader
            entry_pro = INVALID_ENTRY_VAL;
            DPORT_PRO_FLASH_MMU_TABLE[i] = INVALID_ENTRY_VAL;
        }
        if ((entry_pro & INVALID_ENTRY_VAL) == 0 && (i == 0 || i == PRO_IRAM0_FIRST_USABLE_PAGE || entry_pro != 0)) {
            s_mmap_page_refcnt[i] = 1;
        } else {
            DPORT_PRO_FLASH_MMU_TABLE[i] = INVALID_ENTRY_VAL;
            DPORT_APP_FLASH_MMU_TABLE[i] = INVALID_ENTRY_VAL;
        }
    }
}
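
/* Usage sketch for the public API below (illustrative values only):

       const void *ptr;
       spi_flash_mmap_handle_t handle;
       esp_err_t err = spi_flash_mmap(0x100000, 0x20000, SPI_FLASH_MMAP_DATA,
                                      &ptr, &handle);
       if (err == ESP_OK) {
           // ... read the two mapped 64KB pages through ptr ...
           spi_flash_munmap(handle);
       }

   src_addr and size are in bytes; src_addr must be 64KB aligned. */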

esp_err_t IRAM_ATTR spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t memory,
                                   const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
    esp_err_t ret;
    if (src_addr & 0xffff) {
        return ESP_ERR_INVALID_ARG;
    }
    if (src_addr + size > g_rom_flashchip.chip_size) {
        return ESP_ERR_INVALID_ARG;
    }
    // region which should be mapped
    int phys_page = src_addr / SPI_FLASH_MMU_PAGE_SIZE;
    int page_count = (size + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
    // prepare a linear pages array to feed into spi_flash_mmap_pages
    int *pages = malloc(sizeof(int) * page_count);
    if (pages == NULL) {
        return ESP_ERR_NO_MEM;
    }
    for (int i = 0; i < page_count; i++) {
        pages[i] = phys_page + i;
    }
    ret = spi_flash_mmap_pages(pages, page_count, memory, out_ptr, out_handle);
    free(pages);
    return ret;
}

esp_err_t IRAM_ATTR spi_flash_mmap_pages(int *pages, size_t page_count, spi_flash_mmap_memory_t memory,
                                         const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
    esp_err_t ret;
    bool did_flush = false, need_flush = false;
    if (page_count == 0) {
        return ESP_ERR_INVALID_ARG;
    }
    for (int i = 0; i < page_count; i++) {
        if (pages[i] < 0 || pages[i]*SPI_FLASH_MMU_PAGE_SIZE >= g_rom_flashchip.chip_size) {
            return ESP_ERR_INVALID_ARG;
        }
    }
    mmap_entry_t* new_entry = (mmap_entry_t*) malloc(sizeof(mmap_entry_t));
    if (new_entry == NULL) {
        return ESP_ERR_NO_MEM;
    }

    spi_flash_disable_interrupts_caches_and_other_cpu();

    for (int i = 0; i < page_count; i++) {
        if (spi_flash_ensure_unmodified_region(pages[i]*SPI_FLASH_MMU_PAGE_SIZE, SPI_FLASH_MMU_PAGE_SIZE)) {
            did_flush = true;
        }
    }
    spi_flash_mmap_init();
    // figure out the memory region where we should look for pages
    int region_begin;      // first page to check
    int region_size;       // number of pages to check
    uint32_t region_addr;  // base address of memory region
    if (memory == SPI_FLASH_MMAP_DATA) {
        // Vaddr0 region is used for data mappings
        region_begin = 0;
        region_size = 64;
        region_addr = VADDR0_START_ADDR;
    } else {
        // only part of VAddr1 is usable, so adjust for that
        region_begin = PRO_IRAM0_FIRST_USABLE_PAGE;
        region_size = 3 * 64 - region_begin;
        region_addr = VADDR1_FIRST_USABLE_ADDR;
    }
    if (region_size < page_count) {
        // not enough MMU entries in this region; undo and bail out
        spi_flash_enable_interrupts_caches_and_other_cpu();
        free(new_entry);
        return ESP_ERR_NO_MEM;
    }
    // The following part searches for a range of MMU entries which can be used.
    // Algorithm is essentially naïve strstr algorithm, except that unused MMU
    // entries are treated as wildcards.
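    // For example (illustrative): when mapping pages {5, 6}, a window whose
    // first entry already maps page 5 (refcnt > 0) and whose second entry is
    // unused (refcnt == 0) matches: the live entry is shared as-is and the
    // unused entry is treated as a wildcard and claimed.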
    int start;
    // "+ 1" so that the last valid start position is also tried
    int end = region_begin + region_size - page_count + 1;
    for (start = region_begin; start < end; ++start) {
        int pageno = 0;
        int pos;
        for (pos = start; pos < start + page_count; ++pos, ++pageno) {
            int table_val = (int) DPORT_PRO_FLASH_MMU_TABLE[pos];
            uint8_t refcnt = s_mmap_page_refcnt[pos];
            if (refcnt != 0 && table_val != pages[pageno]) {
                break;
            }
        }
        // whole mapping range matched, bail out
        if (pos - start == page_count) {
            break;
        }
    }
    // checked all the region(s) and haven't found anything?
    if (start == end) {
        *out_handle = 0;
        *out_ptr = NULL;
        ret = ESP_ERR_NO_MEM;
    } else {
        // set up mapping using pages
        uint32_t pageno = 0;
        for (int i = start; i != start + page_count; ++i, ++pageno) {
            // sanity check: we won't reconfigure entries with non-zero reference count
            assert(s_mmap_page_refcnt[i] == 0 ||
                   (DPORT_PRO_FLASH_MMU_TABLE[i] == pages[pageno] &&
                    DPORT_APP_FLASH_MMU_TABLE[i] == pages[pageno]));
            if (s_mmap_page_refcnt[i] == 0) {
                if (DPORT_PRO_FLASH_MMU_TABLE[i] != pages[pageno] || DPORT_APP_FLASH_MMU_TABLE[i] != pages[pageno]) {
                    DPORT_PRO_FLASH_MMU_TABLE[i] = pages[pageno];
                    DPORT_APP_FLASH_MMU_TABLE[i] = pages[pageno];
                    need_flush = true;
                }
            }
            ++s_mmap_page_refcnt[i];
        }

        LIST_INSERT_HEAD(&s_mmap_entries_head, new_entry, entries);
        new_entry->page = start;
        new_entry->count = page_count;
        new_entry->handle = ++s_mmap_last_handle;
        *out_handle = new_entry->handle;
        *out_ptr = (void*) (region_addr + (start - region_begin) * SPI_FLASH_MMU_PAGE_SIZE);
        ret = ESP_OK;
    }

    /* This is a temporary fix for an issue where some
       cache reads may see stale data.

       Working on a long term fix that doesn't require invalidating
       the whole cache.
    */
    if (!did_flush && need_flush) {
        Cache_Flush(0);
#ifndef CONFIG_FREERTOS_UNICORE
        Cache_Flush(1);
#endif
    }

    spi_flash_enable_interrupts_caches_and_other_cpu();
    if (*out_ptr == NULL) {
        free(new_entry);
    }
    return ret;
}

void IRAM_ATTR spi_flash_munmap(spi_flash_mmap_handle_t handle)
{
    spi_flash_disable_interrupts_caches_and_other_cpu();
    mmap_entry_t* it;
    // look for handle in linked list
    for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) {
        if (it->handle == handle) {
            // for each page, decrement reference counter
            // if reference count is zero, disable MMU table entry to
            // facilitate debugging of use-after-free conditions
            for (int i = it->page; i < it->page + it->count; ++i) {
                assert(s_mmap_page_refcnt[i] > 0);
                if (--s_mmap_page_refcnt[i] == 0) {
                    DPORT_PRO_FLASH_MMU_TABLE[i] = INVALID_ENTRY_VAL;
                    DPORT_APP_FLASH_MMU_TABLE[i] = INVALID_ENTRY_VAL;
                }
            }
            LIST_REMOVE(it, entries);
            break;
        }
    }
    spi_flash_enable_interrupts_caches_and_other_cpu();
    if (it == NULL) {
        assert(0 && "invalid handle, or handle already unmapped");
    }
    free(it);
}

void spi_flash_mmap_dump()
{
    spi_flash_mmap_init();
    mmap_entry_t* it;
    for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) {
        printf("handle=%d page=%d count=%d\n", (int) it->handle, it->page, it->count);
    }
    for (int i = 0; i < REGIONS_COUNT * PAGES_PER_REGION; ++i) {
        if (s_mmap_page_refcnt[i] != 0) {
            printf("page %d: refcnt=%d paddr=%d\n",
                   i, (int) s_mmap_page_refcnt[i], (int) DPORT_PRO_FLASH_MMU_TABLE[i]);
        }
    }
}

/* 256-bit (up to 16MB of 64KB pages) bitset of all flash pages
   that have been written to since last cache flush.

   Before mmaping a page, need to flush caches if that page has been
   written to.

   Note: It's possible to do some additional performance tweaks to
   this algorithm, as we actually only need to flush caches if a page
   was first mmapped, then written to, then is about to be mmaped a
   second time. This is a fair bit more complex though, so unless
   there's an access pattern that this would significantly boost then
   it's probably not worth it.
*/
static uint32_t written_pages[256/32];
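
/* Indexing example (illustrative): a write at flash offset 0x640000 touches
   page 0x640000 / 0x10000 = 100, tracked as bit 100 % 32 = 4 of word
   100 / 32 = 3 in written_pages[]. */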

static bool update_written_pages(size_t start_addr, size_t length, bool mark);

void IRAM_ATTR spi_flash_mark_modified_region(size_t start_addr, size_t length)
{
    update_written_pages(start_addr, length, true);
}

static IRAM_ATTR bool spi_flash_ensure_unmodified_region(size_t start_addr, size_t length)
{
    return update_written_pages(start_addr, length, false);
}

/* generic implementation for the previous two functions */
static inline IRAM_ATTR bool update_written_pages(size_t start_addr, size_t length, bool mark)
{
    /* align start_addr & length to full MMU pages */
    uint32_t page_start_addr = start_addr & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
    length += (start_addr - page_start_addr);
    length = (length + SPI_FLASH_MMU_PAGE_SIZE - 1) & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
    for (uint32_t addr = page_start_addr; addr < page_start_addr + length; addr += SPI_FLASH_MMU_PAGE_SIZE) {
        int page = addr / SPI_FLASH_MMU_PAGE_SIZE;
        if (page >= 256) {
            return false; /* invalid address */
        }
        int idx = page / 32;
        uint32_t bit = 1 << (page % 32);
        if (mark) {
            written_pages[idx] |= bit;
        } else if (written_pages[idx] & bit) {
            /* it is tempting to write a version of this that only
               flushes each CPU's cache as needed. However this is
               tricky because mmaped memory can be used on un-pinned
               cores, or the pointer passed between CPUs.
            */
            Cache_Flush(0);
#ifndef CONFIG_FREERTOS_UNICORE
            Cache_Flush(1);
#endif
            bzero(written_pages, sizeof(written_pages));
            return true;
        }
    }
    return false;
}
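
/* Worked example for spi_flash_cache2phys() below (illustrative values):
   for cached == (const void *)0x400D1234, cache_page works out to
   (0x400D1234 - 0x40000000) / 0x10000 + 64 = 77; if MMU entry 77 maps
   physical page 0x20, the result is 0x20 * 0x10000 | 0x1234 = 0x201234. */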

uint32_t spi_flash_cache2phys(const void *cached)
{
    intptr_t c = (intptr_t)cached;
    size_t cache_page;
    if (c >= VADDR1_START_ADDR && c < VADDR1_FIRST_USABLE_ADDR) {
        /* IRAM address, doesn't map to flash */
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }
    else if (c < VADDR1_FIRST_USABLE_ADDR) {
        /* expect a cached address in DROM */
        cache_page = (c - VADDR0_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE;
    } else {
        /* expect a cached address in IROM */
        cache_page = (c - VADDR1_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE + 64;
    }

    if (cache_page >= 256) {
        /* cached address was not in IROM or DROM */
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }
    uint32_t phys_page = DPORT_PRO_FLASH_MMU_TABLE[cache_page];
    if (phys_page == INVALID_ENTRY_VAL) {
        /* page is not mapped */
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }
    uint32_t phys_offs = phys_page * SPI_FLASH_MMU_PAGE_SIZE;
    return phys_offs | (c & (SPI_FLASH_MMU_PAGE_SIZE-1));
}

const void *spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t memory)
{
    uint32_t phys_page = phys_offs / SPI_FLASH_MMU_PAGE_SIZE;
    int start, end, page_delta;
    intptr_t base;

    if (memory == SPI_FLASH_MMAP_DATA) {
        start = 0;
        end = 64;
        base = VADDR0_START_ADDR;
        page_delta = 0;
    } else {
        start = PRO_IRAM0_FIRST_USABLE_PAGE;
        end = 256;
        base = VADDR1_START_ADDR;
        page_delta = 64;
    }

    for (int i = start; i < end; i++) {
        if (DPORT_PRO_FLASH_MMU_TABLE[i] == phys_page) {
            i -= page_delta;
            intptr_t cache_page = base + (SPI_FLASH_MMU_PAGE_SIZE * i);
            return (const void *) (cache_page | (phys_offs & (SPI_FLASH_MMU_PAGE_SIZE-1)));
        }
    }
    return NULL;
}
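
/* Round-trip example (illustrative values): spi_flash_phys2cache(0x201234,
   SPI_FLASH_MMAP_INST) scans MMU entries 77..255 for physical page 0x20;
   a hit at entry 77 yields 0x40000000 + (77 - 64) * 0x10000 | 0x1234 =
   (const void *)0x400D1234, inverting the cache2phys example above. */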