#include "esp_ipc.h"
#include "esp_attr.h"
#include "esp_spi_flash.h"
+#include "esp_flash_encrypt.h"
#include "esp_log.h"
#include "cache_utils.h"
+#include "esp_spiram.h"
#ifndef NDEBUG
// Enable built-in checks in queue.h in debug builds
This ensures stale cache entries are never read after fresh calls
to spi_flash_mmap(), while keeping the number of cache flushes to a
minimum.
+
+ Returns true if cache was flushed.
*/
-static void spi_flash_ensure_unmodified_region(size_t start_addr, size_t length);
+static bool spi_flash_ensure_unmodified_region(size_t start_addr, size_t length);
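+/* Usage sketch of the tracking scheme above: writers call
+ spi_flash_mark_modified_region() after changing flash contents;
+ spi_flash_mmap_pages() calls spi_flash_ensure_unmodified_region()
+ for each page it is about to map, which flushes the caches only
+ when one of those pages was marked modified. */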
typedef struct mmap_entry_{
uint32_t handle;
static void IRAM_ATTR spi_flash_mmap_init()
{
+ if (s_mmap_page_refcnt[0] != 0) {
+ return; /* mmap data already initialised */
+ }
+
for (int i = 0; i < REGIONS_COUNT * PAGES_PER_REGION; ++i) {
uint32_t entry_pro = DPORT_PRO_FLASH_MMU_TABLE[i];
uint32_t entry_app = DPORT_APP_FLASH_MMU_TABLE[i];
if (entry_pro != entry_app) {
// clean up entries used by boot loader
- entry_pro = 0;
- DPORT_PRO_FLASH_MMU_TABLE[i] = 0;
+ entry_pro = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
+ DPORT_PRO_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
}
- if ((entry_pro & 0x100) == 0 && (i == 0 || i == PRO_IRAM0_FIRST_USABLE_PAGE || entry_pro != 0)) {
+ if ((entry_pro & INVALID_ENTRY_VAL) == 0 && (i == 0 || i == PRO_IRAM0_FIRST_USABLE_PAGE || entry_pro != 0)) {
s_mmap_page_refcnt[i] = 1;
+ } else {
+ DPORT_PRO_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
+ DPORT_APP_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
}
}
}
const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
esp_err_t ret;
- mmap_entry_t* new_entry = (mmap_entry_t*) malloc(sizeof(mmap_entry_t));
- if (new_entry == 0) {
- return ESP_ERR_NO_MEM;
- }
if (src_addr & 0xffff) {
return ESP_ERR_INVALID_ARG;
}
if (src_addr + size > g_rom_flashchip.chip_size) {
return ESP_ERR_INVALID_ARG;
}
+ // region which should be mapped
+ int phys_page = src_addr / SPI_FLASH_MMU_PAGE_SIZE;
+ int page_count = (size + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
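+ // e.g. src_addr 0x10000 with size 0x18000 gives phys_page 1 and page_count 2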
+ // prepare a linear array of pages to feed into spi_flash_mmap_pages
+ int *pages = malloc(sizeof(int) * page_count);
+ if (pages == NULL) {
+ return ESP_ERR_NO_MEM;
+ }
+ for (int i = 0; i < page_count; i++) {
+ pages[i] = phys_page+i;
+ }
+ ret = spi_flash_mmap_pages(pages, page_count, memory, out_ptr, out_handle);
+ free(pages);
+ return ret;
+}
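+/* Usage sketch (hypothetical caller, not part of this patch): map
+ 128 kB of flash at offset 0x200000 (64 kB aligned, as required)
+ into the data address space and read it through the cache:
+
+ const void *map;
+ spi_flash_mmap_handle_t handle;
+ esp_err_t err = spi_flash_mmap(0x200000, 128 * 1024,
+ SPI_FLASH_MMAP_DATA, &map, &handle);
+ if (err == ESP_OK) {
+ // read flash contents through 'map', then release the mapping
+ spi_flash_munmap(handle);
+ }
+*/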
- spi_flash_disable_interrupts_caches_and_other_cpu();
+esp_err_t IRAM_ATTR spi_flash_mmap_pages(int *pages, size_t page_count, spi_flash_mmap_memory_t memory,
+ const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
+{
+ esp_err_t ret;
+ bool did_flush, need_flush = false;
+ if (!page_count) {
+ return ESP_ERR_INVALID_ARG;
+ }
+ for (int i = 0; i < page_count; i++) {
+ if (pages[i] < 0 || pages[i]*SPI_FLASH_MMU_PAGE_SIZE >= g_rom_flashchip.chip_size) {
+ return ESP_ERR_INVALID_ARG;
+ }
+ }
+ mmap_entry_t* new_entry = (mmap_entry_t*) heap_caps_malloc(sizeof(mmap_entry_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
+ if (new_entry == NULL) {
+ return ESP_ERR_NO_MEM;
+ }
- spi_flash_ensure_unmodified_region(src_addr, size);
+ spi_flash_disable_interrupts_caches_and_other_cpu();
- if (s_mmap_page_refcnt[0] == 0) {
- spi_flash_mmap_init();
+ did_flush = false;
+ for (int i = 0; i < page_count; i++) {
+ if (spi_flash_ensure_unmodified_region(pages[i]*SPI_FLASH_MMU_PAGE_SIZE, SPI_FLASH_MMU_PAGE_SIZE)) {
+ did_flush = true;
+ }
}
+ spi_flash_mmap_init();
// figure out the memory region where we should look for pages
int region_begin; // first page to check
int region_size; // number of pages to check
region_addr = VADDR0_START_ADDR;
} else {
// only part of VAddr1 is usable, so adjust for that
- region_begin = VADDR1_FIRST_USABLE_ADDR;
+ region_begin = PRO_IRAM0_FIRST_USABLE_PAGE;
region_size = 3 * 64 - region_begin;
region_addr = VADDR1_FIRST_USABLE_ADDR;
}
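+ /* At this point region_begin/region_size/region_addr describe the
+ window to search: the DROM pages at VADDR0_START_ADDR for data
+ mappings, or the usable IROM pages from PRO_IRAM0_FIRST_USABLE_PAGE
+ for instruction mappings. */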
- // region which should be mapped
- int phys_page = src_addr / SPI_FLASH_MMU_PAGE_SIZE;
- int page_count = (size + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
+ if (region_size < page_count) {
+ spi_flash_enable_interrupts_caches_and_other_cpu();
+ free(new_entry);
+ return ESP_ERR_NO_MEM;
+ }
// The following part searches for a range of MMU entries which can be used.
// Algorithm is essentially naïve strstr algorithm, except that unused MMU
// entries are treated as wildcards.
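+ // Example: looking for pages {5, 6} with page_count == 2, the window
+ // [in-use:5][in-use:6] matches (those entries already point at the
+ // wanted pages), as does [free][free]; [in-use:7][free] does not.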
int start;
- int end = region_begin + region_size - page_count;
+ // "+ 1" so that the last possible start position is also checked
+ int end = region_begin + region_size - page_count + 1;
for (start = region_begin; start < end; ++start) {
- int page = phys_page;
+ int pageno = 0;
int pos;
- for (pos = start; pos < start + page_count; ++pos, ++page) {
+ for (pos = start; pos < start + page_count; ++pos, ++pageno) {
int table_val = (int) DPORT_PRO_FLASH_MMU_TABLE[pos];
- uint8_t refcnt = s_mmap_page_refcnt[pos];
- if (refcnt != 0 && table_val != page) {
+ uint8_t refcnt = s_mmap_page_refcnt[pos];
+ if (refcnt != 0 && table_val != pages[pageno]) {
break;
}
}
*out_ptr = NULL;
ret = ESP_ERR_NO_MEM;
} else {
- // set up mapping using pages [start, start + page_count)
- uint32_t entry_val = (uint32_t) phys_page;
- for (int i = start; i != start + page_count; ++i, ++entry_val) {
+ // set up mapping using pages
+ uint32_t pageno = 0;
+ for (int i = start; i != start + page_count; ++i, ++pageno) {
// sanity check: we won't reconfigure entries with non-zero reference count
assert(s_mmap_page_refcnt[i] == 0 ||
- (DPORT_PRO_FLASH_MMU_TABLE[i] == entry_val &&
- DPORT_APP_FLASH_MMU_TABLE[i] == entry_val));
+ (DPORT_PRO_FLASH_MMU_TABLE[i] == pages[pageno] &&
+ DPORT_APP_FLASH_MMU_TABLE[i] == pages[pageno]));
if (s_mmap_page_refcnt[i] == 0) {
- DPORT_PRO_FLASH_MMU_TABLE[i] = entry_val;
- DPORT_APP_FLASH_MMU_TABLE[i] = entry_val;
+ if (DPORT_PRO_FLASH_MMU_TABLE[i] != pages[pageno] || DPORT_APP_FLASH_MMU_TABLE[i] != pages[pageno]) {
+ DPORT_PRO_FLASH_MMU_TABLE[i] = pages[pageno];
+ DPORT_APP_FLASH_MMU_TABLE[i] = pages[pageno];
+ need_flush = true;
+ }
}
++s_mmap_page_refcnt[i];
}
new_entry->count = page_count;
new_entry->handle = ++s_mmap_last_handle;
*out_handle = new_entry->handle;
- *out_ptr = (void*) (region_addr + start * SPI_FLASH_MMU_PAGE_SIZE);
+ *out_ptr = (void*) (region_addr + (start - region_begin) * SPI_FLASH_MMU_PAGE_SIZE);
ret = ESP_OK;
}
+
+ /* This is a temporary fix for an issue where some
+ cache reads may see stale data.
+
+ A long-term fix that does not require invalidating the
+ entire cache is in progress.
+ */
+ if (!did_flush && need_flush) {
+#if CONFIG_SPIRAM_SUPPORT
+ esp_spiram_writeback_cache();
+#endif
+ Cache_Flush(0);
+#ifndef CONFIG_FREERTOS_UNICORE
+ Cache_Flush(1);
+#endif
+ }
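+ /* The caches are disabled here and are not refilled until they are
+ re-enabled below, so one flush while they are disabled is enough;
+ if spi_flash_ensure_unmodified_region() already flushed (did_flush),
+ the flush above is skipped. */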
+
spi_flash_enable_interrupts_caches_and_other_cpu();
if (*out_ptr == NULL) {
free(new_entry);
void spi_flash_mmap_dump()
{
- if (s_mmap_page_refcnt[0] == 0) {
- spi_flash_mmap_init();
- }
+ spi_flash_mmap_init();
mmap_entry_t* it;
for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) {
printf("handle=%d page=%d count=%d\n", it->handle, it->page, it->count);
*/
static uint32_t written_pages[256/32];
-static void update_written_pages(size_t start_addr, size_t length, bool mark);
+static bool update_written_pages(size_t start_addr, size_t length, bool mark);
void IRAM_ATTR spi_flash_mark_modified_region(size_t start_addr, size_t length)
{
update_written_pages(start_addr, length, true);
}
-static void IRAM_ATTR spi_flash_ensure_unmodified_region(size_t start_addr, size_t length)
+static bool IRAM_ATTR spi_flash_ensure_unmodified_region(size_t start_addr, size_t length)
{
- update_written_pages(start_addr, length, false);
+ return update_written_pages(start_addr, length, false);
}
/* generic implementation for the previous two functions */
-static inline IRAM_ATTR void update_written_pages(size_t start_addr, size_t length, bool mark)
+static inline IRAM_ATTR bool update_written_pages(size_t start_addr, size_t length, bool mark)
{
- for (uint32_t addr = start_addr; addr < start_addr + length; addr += SPI_FLASH_MMU_PAGE_SIZE) {
+ /* align start_addr & length to full MMU pages */
+ uint32_t page_start_addr = start_addr & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
+ length += (start_addr - page_start_addr);
+ length = (length + SPI_FLASH_MMU_PAGE_SIZE - 1) & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
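+ /* e.g. start_addr 0x10010 with length 0x20 becomes page_start_addr
+ 0x10000 and length 0x10000, i.e. exactly one 64 kB MMU page */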
+ for (uint32_t addr = page_start_addr; addr < page_start_addr + length; addr += SPI_FLASH_MMU_PAGE_SIZE) {
int page = addr / SPI_FLASH_MMU_PAGE_SIZE;
if (page >= 256) {
- return; /* invalid address */
+ return false; /* invalid address */
}
int idx = page / 32;
tricky because mmaped memory can be used on un-pinned
cores, or the pointer passed between CPUs.
*/
+#if CONFIG_SPIRAM_SUPPORT
+ esp_spiram_writeback_cache();
+#endif
Cache_Flush(0);
#ifndef CONFIG_FREERTOS_UNICORE
Cache_Flush(1);
#endif
bzero(written_pages, sizeof(written_pages));
+ return true;
+ }
+ }
+ return false;
+}
+
+
+uint32_t spi_flash_cache2phys(const void *cached)
+{
+ intptr_t c = (intptr_t)cached;
+ size_t cache_page;
+ if (c >= VADDR1_START_ADDR && c < VADDR1_FIRST_USABLE_ADDR) {
+ /* IRAM address, doesn't map to flash */
+ return SPI_FLASH_CACHE2PHYS_FAIL;
+ } else if (c < VADDR1_FIRST_USABLE_ADDR) {
+ /* expect cache is in DROM */
+ cache_page = (c - VADDR0_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE;
+ } else {
+ /* expect cache is in IROM */
+ cache_page = (c - VADDR1_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE + 64;
+ }
+
+ if (cache_page >= 256) {
+ /* cached address was not in IROM or DROM */
+ return SPI_FLASH_CACHE2PHYS_FAIL;
+ }
+ uint32_t phys_page = DPORT_PRO_FLASH_MMU_TABLE[cache_page];
+ if (phys_page == INVALID_ENTRY_VAL) {
+ /* page is not mapped */
+ return SPI_FLASH_CACHE2PHYS_FAIL;
+ }
+ uint32_t phys_offs = phys_page * SPI_FLASH_MMU_PAGE_SIZE;
+ return phys_offs | (c & (SPI_FLASH_MMU_PAGE_SIZE-1));
+}
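+/* Usage sketch (hypothetical caller, not part of this patch): find the
+ flash offset backing a constant the linker placed in DROM.
+
+ static const char greeting[] = "hello";
+ uint32_t offs = spi_flash_cache2phys(greeting);
+ if (offs != SPI_FLASH_CACHE2PHYS_FAIL) {
+ // 'offs' is the byte offset of 'greeting' in flash
+ }
+*/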
+
+
+const void *spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t memory)
+{
+ uint32_t phys_page = phys_offs / SPI_FLASH_MMU_PAGE_SIZE;
+ int start, end, page_delta;
+ intptr_t base;
+
+ if (memory == SPI_FLASH_MMAP_DATA) {
+ start = 0;
+ end = 64;
+ base = VADDR0_START_ADDR;
+ page_delta = 0;
+ } else {
+ start = PRO_IRAM0_FIRST_USABLE_PAGE;
+ end = 256;
+ base = VADDR1_START_ADDR;
+ page_delta = 64;
+ }
+
+ for (int i = start; i < end; i++) {
+ if (DPORT_PRO_FLASH_MMU_TABLE[i] == phys_page) {
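+ /* convert the MMU table index back into a page offset within the
+ selected virtual region before computing the cached address */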
+ i -= page_delta;
+ intptr_t cache_page = base + (SPI_FLASH_MMU_PAGE_SIZE * i);
+ return (const void *) (cache_page | (phys_offs & (SPI_FLASH_MMU_PAGE_SIZE-1)));
}
}
+ return NULL;
}
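+
+/* Round-trip sketch (hypothetical caller, not part of this patch): an
+ offset from spi_flash_cache2phys() maps back to a cached pointer as
+ long as the page is still mapped in the requested region.
+
+ uint32_t offs = spi_flash_cache2phys(ptr);
+ const void *again = spi_flash_phys2cache(offs, SPI_FLASH_MMAP_DATA);
+ // 'again' == 'ptr' whenever 'ptr' pointed into mapped DROM
+*/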