if (s_mmap_page_refcnt[0] != 0) {
return; /* mmap data already initialised */
}
-
- DPORT_STALL_OTHER_CPU_START();
+ DPORT_INTERRUPT_DISABLE();
for (int i = 0; i < REGIONS_COUNT * PAGES_PER_REGION; ++i) {
- uint32_t entry_pro = DPORT_PRO_FLASH_MMU_TABLE[i];
- uint32_t entry_app = DPORT_APP_FLASH_MMU_TABLE[i];
+ uint32_t entry_pro = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]);
+ uint32_t entry_app = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_APP_FLASH_MMU_TABLE[i]);
if (entry_pro != entry_app) {
// clean up entries used by boot loader
DPORT_APP_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
}
}
- DPORT_STALL_OTHER_CPU_END();
+ DPORT_INTERRUPT_RESTORE();
}
static void IRAM_ATTR get_mmu_region(spi_flash_mmap_memory_t memory, int* out_begin, int* out_size,uint32_t* region_addr)
// region which should be mapped
int phys_page = src_addr / SPI_FLASH_MMU_PAGE_SIZE;
int page_count = (size + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
- //prepare a linear pages array to feed into spi_flash_mmap_pages
- int *pages=malloc(sizeof(int)*page_count);
- if (pages==NULL) {
+ // prepare a linear pages array to feed into spi_flash_mmap_pages
+ int *pages = heap_caps_malloc(sizeof(int)*page_count, MALLOC_CAP_INTERNAL);
+ if (pages == NULL) {
return ESP_ERR_NO_MEM;
}
for (int i = 0; i < page_count; i++) {
pages[i] = phys_page+i;
}
- ret=spi_flash_mmap_pages(pages, page_count, memory, out_ptr, out_handle);
+ ret = spi_flash_mmap_pages(pages, page_count, memory, out_ptr, out_handle);
free(pages);
return ret;
}
-esp_err_t IRAM_ATTR spi_flash_mmap_pages(int *pages, size_t page_count, spi_flash_mmap_memory_t memory,
+esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, spi_flash_mmap_memory_t memory,
const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
esp_err_t ret;
if (!page_count) {
return ESP_ERR_INVALID_ARG;
}
+ if (!esp_ptr_internal(pages)) {
+ return ESP_ERR_INVALID_ARG;
+ }
for (int i = 0; i < page_count; i++) {
if (pages[i] < 0 || pages[i]*SPI_FLASH_MMU_PAGE_SIZE >= g_rom_flashchip.chip_size) {
return ESP_ERR_INVALID_ARG;
for (start = region_begin; start < end; ++start) {
int pageno = 0;
int pos;
- DPORT_STALL_OTHER_CPU_START();
+ DPORT_INTERRUPT_DISABLE();
for (pos = start; pos < start + page_count; ++pos, ++pageno) {
- int table_val = (int) DPORT_PRO_FLASH_MMU_TABLE[pos];
+ int table_val = (int) DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[pos]);
uint8_t refcnt = s_mmap_page_refcnt[pos];
if (refcnt != 0 && table_val != pages[pageno]) {
break;
}
}
- DPORT_STALL_OTHER_CPU_END();
+ DPORT_INTERRUPT_RESTORE();
// whole mapping range matched, bail out
if (pos - start == page_count) {
break;
} else {
// set up mapping using pages
uint32_t pageno = 0;
- DPORT_STALL_OTHER_CPU_START();
+ DPORT_INTERRUPT_DISABLE();
for (int i = start; i != start + page_count; ++i, ++pageno) {
// sanity check: we won't reconfigure entries with non-zero reference count
+ uint32_t entry_pro = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]);
+ uint32_t entry_app = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_APP_FLASH_MMU_TABLE[i]);
assert(s_mmap_page_refcnt[i] == 0 ||
- (DPORT_PRO_FLASH_MMU_TABLE[i] == pages[pageno] &&
- DPORT_APP_FLASH_MMU_TABLE[i] == pages[pageno]));
+ (entry_pro == pages[pageno] &&
+ entry_app == pages[pageno]));
if (s_mmap_page_refcnt[i] == 0) {
- if (DPORT_PRO_FLASH_MMU_TABLE[i] != pages[pageno] || DPORT_APP_FLASH_MMU_TABLE[i] != pages[pageno]) {
+ if (entry_pro != pages[pageno] || entry_app != pages[pageno]) {
DPORT_PRO_FLASH_MMU_TABLE[i] = pages[pageno];
DPORT_APP_FLASH_MMU_TABLE[i] = pages[pageno];
need_flush = true;
}
++s_mmap_page_refcnt[i];
}
- DPORT_STALL_OTHER_CPU_END();
+ DPORT_INTERRUPT_RESTORE();
LIST_INSERT_HEAD(&s_mmap_entries_head, new_entry, entries);
new_entry->page = start;
new_entry->count = page_count;
// for each page, decrement reference counter
// if reference count is zero, disable MMU table entry to
// facilitate debugging of use-after-free conditions
- DPORT_STALL_OTHER_CPU_START();
for (int i = it->page; i < it->page + it->count; ++i) {
assert(s_mmap_page_refcnt[i] > 0);
if (--s_mmap_page_refcnt[i] == 0) {
DPORT_APP_FLASH_MMU_TABLE[i] = INVALID_ENTRY_VAL;
}
}
- DPORT_STALL_OTHER_CPU_END();
LIST_REMOVE(it, entries);
break;
}
free(it);
}
/**
 * Run spi_flash_mmap_init() with flash cache disabled and the other
 * CPU suspended, so the MMU table accesses inside init cannot race
 * concurrent cache use.
 *
 * NOINLINE_ATTR keeps this out of callers that may not be in IRAM;
 * IRAM_ATTR is required because flash cache is off while it runs.
 */
static void IRAM_ATTR NOINLINE_ATTR spi_flash_protected_mmap_init()
{
    spi_flash_disable_interrupts_caches_and_other_cpu();
    spi_flash_mmap_init();
    spi_flash_enable_interrupts_caches_and_other_cpu();
}
/**
 * Read one PRO CPU flash MMU table entry with flash cache disabled and
 * the other CPU suspended, so the DPORT read cannot race cache activity.
 *
 * @param index  Index into DPORT_PRO_FLASH_MMU_TABLE.
 * @return Raw MMU table entry value at that index.
 */
static uint32_t IRAM_ATTR NOINLINE_ATTR spi_flash_protected_read_mmu_entry(int index)
{
    uint32_t value;
    spi_flash_disable_interrupts_caches_and_other_cpu();
    /* Plain DPORT_REG_READ is sufficient here: the other CPU is stalled,
       so the DPORT-access errata sequence is not needed. */
    value = DPORT_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[index]);
    spi_flash_enable_interrupts_caches_and_other_cpu();
    return value;
}
+
+void spi_flash_mmap_dump()
+{
+ spi_flash_protected_mmap_init();
+
mmap_entry_t* it;
for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) {
printf("handle=%d page=%d count=%d\n", it->handle, it->page, it->count);
}
for (int i = 0; i < REGIONS_COUNT * PAGES_PER_REGION; ++i) {
if (s_mmap_page_refcnt[i] != 0) {
- printf("page %d: refcnt=%d paddr=%d\n",
- i, (int) s_mmap_page_refcnt[i], DPORT_PRO_FLASH_MMU_TABLE[i]);
+ uint32_t paddr = spi_flash_protected_read_mmu_entry(i);
+ printf("page %d: refcnt=%d paddr=%d\n", i, (int) s_mmap_page_refcnt[i], paddr);
}
}
}
-uint32_t spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
+uint32_t IRAM_ATTR spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
{
+ spi_flash_disable_interrupts_caches_and_other_cpu();
spi_flash_mmap_init();
int count = 0;
int region_begin; // first page to check
int region_size; // number of pages to check
uint32_t region_addr; // base address of memory region
get_mmu_region(memory,®ion_begin,®ion_size,®ion_addr);
- DPORT_STALL_OTHER_CPU_START();
+ DPORT_INTERRUPT_DISABLE();
for (int i = region_begin; i < region_begin + region_size; ++i) {
- if (s_mmap_page_refcnt[i] == 0 && DPORT_PRO_FLASH_MMU_TABLE[i] == INVALID_ENTRY_VAL) {
+ if (s_mmap_page_refcnt[i] == 0 && DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]) == INVALID_ENTRY_VAL) {
count++;
}
}
- DPORT_STALL_OTHER_CPU_END();
+ DPORT_INTERRUPT_RESTORE();
+ spi_flash_enable_interrupts_caches_and_other_cpu();
return count;
}
return false;
}
-
uint32_t spi_flash_cache2phys(const void *cached)
{
intptr_t c = (intptr_t)cached;
/* cached address was not in IROM or DROM */
return SPI_FLASH_CACHE2PHYS_FAIL;
}
- DPORT_STALL_OTHER_CPU_START();
- uint32_t phys_page = DPORT_PRO_FLASH_MMU_TABLE[cache_page];
- DPORT_STALL_OTHER_CPU_END();
+ uint32_t phys_page = spi_flash_protected_read_mmu_entry(cache_page);
if (phys_page == INVALID_ENTRY_VAL) {
/* page is not mapped */
return SPI_FLASH_CACHE2PHYS_FAIL;
return phys_offs | (c & (SPI_FLASH_MMU_PAGE_SIZE-1));
}
-
-const void *spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t memory)
+const void *IRAM_ATTR spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t memory)
{
uint32_t phys_page = phys_offs / SPI_FLASH_MMU_PAGE_SIZE;
int start, end, page_delta;
base = VADDR1_START_ADDR;
page_delta = 64;
}
-
- DPORT_STALL_OTHER_CPU_START();
+ spi_flash_disable_interrupts_caches_and_other_cpu();
+ DPORT_INTERRUPT_DISABLE();
for (int i = start; i < end; i++) {
- if (DPORT_PRO_FLASH_MMU_TABLE[i] == phys_page) {
+ if (DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]) == phys_page) {
i -= page_delta;
intptr_t cache_page = base + (SPI_FLASH_MMU_PAGE_SIZE * i);
- DPORT_STALL_OTHER_CPU_END();
+ DPORT_INTERRUPT_RESTORE();
+ spi_flash_enable_interrupts_caches_and_other_cpu();
return (const void *) (cache_page | (phys_offs & (SPI_FLASH_MMU_PAGE_SIZE-1)));
}
}
- DPORT_STALL_OTHER_CPU_END();
+ DPORT_INTERRUPT_RESTORE();
+ spi_flash_enable_interrupts_caches_and_other_cpu();
return NULL;
}