Returns true if cache was flushed.
*/
+
static bool spi_flash_ensure_unmodified_region(size_t start_addr, size_t length);
typedef struct mmap_entry_{
if (s_mmap_page_refcnt[0] != 0) {
return; /* mmap data already initialised */
}
-
+
+ DPORT_STALL_OTHER_CPU_START();
for (int i = 0; i < REGIONS_COUNT * PAGES_PER_REGION; ++i) {
uint32_t entry_pro = DPORT_PRO_FLASH_MMU_TABLE[i];
uint32_t entry_app = DPORT_APP_FLASH_MMU_TABLE[i];
+
if (entry_pro != entry_app) {
    // clean up entries left behind by the boot loader: invalidate the page in both
    // CPUs' MMU tables so the two stay consistent
    entry_pro = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
    DPORT_PRO_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
    DPORT_APP_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
}
}
+ DPORT_STALL_OTHER_CPU_END();
}
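/* A minimal sketch (not part of this patch) of the cross-CPU protection pattern used above:
 * every access to DPORT_PRO_FLASH_MMU_TABLE / DPORT_APP_FLASH_MMU_TABLE is bracketed by
 * DPORT_STALL_OTHER_CPU_START()/END(), so the other core is parked and cannot execute from
 * flash while its MMU table is being changed. The helper name below is hypothetical. */
static void IRAM_ATTR mmu_set_entry_on_both_cpus(int index, uint32_t value)
{
    DPORT_STALL_OTHER_CPU_START();            // park the other core
    DPORT_PRO_FLASH_MMU_TABLE[index] = value; // keep both cores' view of the mapping...
    DPORT_APP_FLASH_MMU_TABLE[index] = value; // ...identical
    DPORT_STALL_OTHER_CPU_END();              // let the other core resume
}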
esp_err_t IRAM_ATTR spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t memory,
                                   const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
for (start = region_begin; start < end; ++start) {
int pageno = 0;
int pos;
+ DPORT_STALL_OTHER_CPU_START();
for (pos = start; pos < start + page_count; ++pos, ++pageno) {
int table_val = (int) DPORT_PRO_FLASH_MMU_TABLE[pos];
uint8_t refcnt = s_mmap_page_refcnt[pos];
// this page is already in use for a different flash page: the run starting at 'start' won't fit
if (refcnt != 0 && table_val != src_addr / SPI_FLASH_MMU_PAGE_SIZE + pageno) {
    break;
}
}
+ DPORT_STALL_OTHER_CPU_END();
// whole mapping range matched, bail out
if (pos - start == page_count) {
break;
} else {
// set up mapping using pages
uint32_t pageno = 0;
+ DPORT_STALL_OTHER_CPU_START();
for (int i = start; i != start + page_count; ++i, ++pageno) {
// sanity check: we won't reconfigure entries with non-zero reference count
assert(s_mmap_page_refcnt[i] == 0 ||
       (DPORT_PRO_FLASH_MMU_TABLE[i] == src_addr / SPI_FLASH_MMU_PAGE_SIZE + pageno &&
        DPORT_APP_FLASH_MMU_TABLE[i] == src_addr / SPI_FLASH_MMU_PAGE_SIZE + pageno));
if (s_mmap_page_refcnt[i] == 0) {
    // program the translation into both CPUs' MMU tables
    DPORT_PRO_FLASH_MMU_TABLE[i] = src_addr / SPI_FLASH_MMU_PAGE_SIZE + pageno;
    DPORT_APP_FLASH_MMU_TABLE[i] = src_addr / SPI_FLASH_MMU_PAGE_SIZE + pageno;
}
++s_mmap_page_refcnt[i];
}
-
+ DPORT_STALL_OTHER_CPU_END();
LIST_INSERT_HEAD(&s_mmap_entries_head, new_entry, entries);
new_entry->page = start;
new_entry->count = page_count;
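/* Hedged usage sketch (not part of the patch): driving the allocation path above through the
 * public API declared in esp_spi_flash.h. The flash offset 0x10000 is an arbitrary example;
 * spi_flash_mmap() rounds the request to whole 64 KB MMU pages and returns a cache-mapped
 * pointer plus a handle for later unmapping. */
#include "esp_spi_flash.h"

static void example_map_flash_range(void)
{
    const void *ptr = NULL;
    spi_flash_mmap_handle_t handle;
    esp_err_t err = spi_flash_mmap(0x10000, 2 * SPI_FLASH_MMU_PAGE_SIZE,
                                   SPI_FLASH_MMAP_DATA, &ptr, &handle);
    if (err == ESP_OK) {
        // ptr now points at a cache-mapped view of flash offset 0x10000
        spi_flash_munmap(handle);
    }
}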
// for each page, decrement reference counter
// if reference count is zero, disable MMU table entry to
// facilitate debugging of use-after-free conditions
+ DPORT_STALL_OTHER_CPU_START();
for (int i = it->page; i < it->page + it->count; ++i) {
assert(s_mmap_page_refcnt[i] > 0);
if (--s_mmap_page_refcnt[i] == 0) {
DPORT_PRO_FLASH_MMU_TABLE[i] = INVALID_ENTRY_VAL;
DPORT_APP_FLASH_MMU_TABLE[i] = INVALID_ENTRY_VAL;
}
}
+ DPORT_STALL_OTHER_CPU_END();
LIST_REMOVE(it, entries);
break;
}
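/* Hedged illustration (not part of the patch) of the reference counting the loop above
 * maintains: every page of a mapping keeps a non-zero s_mmap_page_refcnt entry while a handle
 * refers to it, and the MMU entry is invalidated only once the count returns to zero, so a
 * pointer used after its handle is unmapped hits an invalid entry and traps instead of
 * silently reading stale data. Names below are illustrative; only the public API is used. */
static void example_use_after_unmap(void)
{
    const void *p = NULL;
    spi_flash_mmap_handle_t h;
    if (spi_flash_mmap(0x10000, SPI_FLASH_MMU_PAGE_SIZE, SPI_FLASH_MMAP_DATA, &p, &h) == ESP_OK) {
        uint32_t first_word = *(const uint32_t *)p;   // valid while the handle is held
        (void) first_word;
        spi_flash_munmap(h);
        // *(const uint32_t *)p;  // after unmapping, this would access an invalidated MMU entry
    }
}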
/* cached address was not in IROM or DROM */
return SPI_FLASH_CACHE2PHYS_FAIL;
}
+ DPORT_STALL_OTHER_CPU_START();
uint32_t phys_page = DPORT_PRO_FLASH_MMU_TABLE[cache_page];
+ DPORT_STALL_OTHER_CPU_END();
if (phys_page == INVALID_ENTRY_VAL) {
/* page is not mapped */
return SPI_FLASH_CACHE2PHYS_FAIL;
}
base = VADDR1_START_ADDR;
page_delta = 64;
}
-
+
+ DPORT_STALL_OTHER_CPU_START();
for (int i = start; i < end; i++) {
if (DPORT_PRO_FLASH_MMU_TABLE[i] == phys_page) {
i -= page_delta;
intptr_t cache_page = base + (SPI_FLASH_MMU_PAGE_SIZE * i);
+ DPORT_STALL_OTHER_CPU_END();
return (const void *) (cache_page | (phys_offs & (SPI_FLASH_MMU_PAGE_SIZE-1)));
}
}
+ DPORT_STALL_OTHER_CPU_END();
return NULL;
}
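/* Hedged usage sketch (not part of the patch): round-tripping between a cache-mapped pointer
 * and its physical flash offset with the two lookups above. SPI_FLASH_MMAP_DATA is assumed
 * here; SPI_FLASH_MMAP_INST would be used for addresses mapped into the instruction region. */
static void example_translate(const void *cached_ptr)
{
    size_t phys = spi_flash_cache2phys(cached_ptr);
    if (phys != SPI_FLASH_CACHE2PHYS_FAIL) {
        // The returned pointer may differ from cached_ptr if the same physical page
        // is mapped at more than one virtual address.
        const void *back = spi_flash_phys2cache(phys, SPI_FLASH_MMAP_DATA);
        (void) back;
    }
}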