// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "heap_private.h"
#include <assert.h>
#include <string.h>
#include <sys/lock.h>

#include "esp_log.h"
#include "multi_heap.h"
#include "esp_heap_caps_init.h"
#include "soc/soc_memory_layout.h"

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
static const char *TAG = "heap_init";

/* Linked-list of registered heaps */
struct registered_heap_ll registered_heaps;
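/* Note: as a zero-initialised global this starts life as an empty list;
   heap_caps_init() asserts as much before populating it. */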
static void register_heap(heap_t *region)
{
    region->heap = multi_heap_register((void *)region->start, region->end - region->start);
    if (region->heap != NULL) {
        ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap);
    }
}
void heap_caps_enable_nonos_stack_heaps()
{
    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        // Assume any not-yet-registered heap is a nonos-stack heap
        if (heap->heap == NULL) {
            register_heap(heap);
            if (heap->heap != NULL) {
                multi_heap_set_lock(heap->heap, &heap->heap_mux);
            }
        }
    }
}
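/* Note: this is expected to run from startup code once the FreeRTOS
   scheduler is running, at which point the memory that backed the startup
   stacks can safely be handed to the allocator. */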
//Modify regions array to disable the given range of memory.
static void disable_mem_region(soc_memory_region_t *regions, intptr_t from, intptr_t to)
{
    //Align from and to on word boundaries
    from = from & ~3;
    to = (to + 3) & ~3;

    for (int i = 0; i < soc_memory_region_count; i++) {
        soc_memory_region_t *region = &regions[i];

        intptr_t regStart = region->start;
        intptr_t regEnd = region->start + region->size;
        if (regStart >= from && regEnd <= to) {
            //Entire region falls in the range. Disable entirely.
            region->type = -1;
        } else if (regStart >= from && regEnd > to && regStart < to) {
            //Start of the region falls in the range. Modify address/len.
            intptr_t overlap = to - regStart;
            region->start += overlap;
            region->size -= overlap;
            if (region->iram_address) {
                region->iram_address += overlap;
            }
        } else if (regStart < from && regEnd > from && regEnd <= to) {
            //End of the region falls in the range. Modify length.
            region->size -= regEnd - from;
        } else if (regStart < from && regEnd > to) {
            //Range punches a hole in the region! We do not support this.
            ESP_EARLY_LOGE(TAG, "region %d: hole punching is not supported!", i);
            region->type = -1; //Just disable the memory region. That'll teach them!
        }
    }
}
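/* Worked example (hypothetical addresses): given a 32 KiB region spanning
   0x3FFB0000..0x3FFB8000, disabling 0x3FFB0000..0x3FFB2000 takes the second
   branch above: start moves up by the 0x2000 overlap and size shrinks to
   0x6000, leaving 0x3FFB2000..0x3FFB8000 available to the allocator. */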
/*
Warning: These variables are assumed to hold the start and end of the data and iram
areas used statically by the program, respectively. These variables are defined in the ld
file.
*/
extern int _data_start, _heap_start, _init_start, _iram_text_end;
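/* Only the addresses of these symbols are meaningful (hence the
   (intptr_t)& casts below); their values are never read. */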
/*
Initialize the heap allocator. We pass it a set of region descriptors, but we need to modify those first to account for
the data as loaded by the bootloader.
ToDo: The regions are different when features such as trace memory, BT, etc. are used. Modify the regions struct on the fly for this.
Same with loading of apps. Same with using SPI RAM.
*/
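/* Rough sequence (derived from the code below): copy the region table to the
   stack, carve out statically-used ranges, coalesce adjacent regions,
   register the surviving regions as heaps, then move the heap bookkeeping
   into the first suitable heap itself. */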
void heap_caps_init()
{
    /* Copy the soc_memory_regions data to the stack, so we can
       modify it. */
    soc_memory_region_t regions[soc_memory_region_count];
    memcpy(regions, soc_memory_regions, sizeof(soc_memory_region_t)*soc_memory_region_count);
    //Disable the bits of memory where this code is loaded.
    disable_mem_region(regions, (intptr_t)&_data_start, (intptr_t)&_heap_start);    //DRAM used by bss/data static variables
    disable_mem_region(regions, (intptr_t)&_init_start, (intptr_t)&_iram_text_end); //IRAM used by code
    // Disable all regions reserved on this SoC
    for (int i = 0; i < soc_reserved_region_count; i++) {
        disable_mem_region(regions, soc_reserved_regions[i].start,
                           soc_reserved_regions[i].end);
    }
    //The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory,
    //it's useful to coalesce adjacent regions that have the same type.
    for (int i = 1; i < soc_memory_region_count; i++) {
        soc_memory_region_t *a = &regions[i - 1];
        soc_memory_region_t *b = &regions[i];
        if (b->start == a->start + a->size && b->type == a->type) {
            //Merge a into b, then disable a.
            b->start = a->start;
            b->size += a->size;
            a->type = -1;
        }
    }
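    /* For example (hypothetical layout): two adjacent 32 KiB DRAM banks at
       0x3FFE0000 and 0x3FFE8000 merge into a single 64 KiB heap here, halving
       the per-heap bookkeeping and allowing single allocations larger than
       either bank alone could satisfy. */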
    /* Count the heaps left after merging */
    size_t num_heaps = 0;
    for (int i = 0; i < soc_memory_region_count; i++) {
        if (regions[i].type != -1) {
            num_heaps++;
        }
    }
    /* Start by allocating the registered heap data on the stack.

       Once we have a heap to copy it to, we will copy it to a heap buffer.
    */
    heap_t temp_heaps[num_heaps];
    size_t heap_idx = 0;
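    /* (num_heaps is bounded by soc_memory_region_count, a small SoC-specific
       constant, so this variable-length array stays comfortably small.) */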
    ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
    for (int i = 0; i < soc_memory_region_count; i++) {
        soc_memory_region_t *region = &regions[i];
        if (region->type == -1) {
            //Region was disabled or merged away above.
            continue;
        }
        const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
        heap_t *heap = &temp_heaps[heap_idx];
        heap_idx++;
        assert(heap_idx <= num_heaps);

        memcpy(heap->caps, type->caps, sizeof(heap->caps));
        heap->start = region->start;
        heap->end = region->start + region->size;
        vPortCPUInitializeMutex(&heap->heap_mux);
        if (type->startup_stack) {
            /* Will be registered when OS scheduler starts */
            heap->heap = NULL;
        } else {
            register_heap(heap);
        }
        SLIST_NEXT(heap, next) = NULL;

        ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s",
                       region->start, region->size, region->size / 1024, type->name);
    }
    assert(heap_idx == num_heaps);
    /* Allocate the permanent heap data that we'll use as a linked list at runtime.

       Allocate this part of data contiguously, even though it's a linked list... */
    assert(SLIST_EMPTY(&registered_heaps));
    heap_t *heaps_array = NULL;
    for (int i = 0; i < num_heaps; i++) {
        if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
            /* use the first DRAM heap which can fit the data */
            heaps_array = multi_heap_malloc(temp_heaps[i].heap, sizeof(heap_t) * num_heaps);
            if (heaps_array != NULL) {
                break;
            }
        }
    }
    assert(heaps_array != NULL); /* if NULL, there's not enough free startup heap space */
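    /* Note the bootstrap trick here: the array describing the heaps lives
       inside one of the heaps it describes, so no static storage is needed. */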
    memcpy(heaps_array, temp_heaps, sizeof(heap_t)*num_heaps);
    /* Iterate the heaps and set their locks, also add them to the linked list. */
    for (int i = 0; i < num_heaps; i++) {
        if (heaps_array[i].heap != NULL) {
            multi_heap_set_lock(heaps_array[i].heap, &heaps_array[i].heap_mux);
        }
        if (i == 0) {
            SLIST_INSERT_HEAD(&registered_heaps, &heaps_array[0], next);
        } else {
            SLIST_INSERT_AFTER(&heaps_array[i-1], &heaps_array[i], next);
        }
    }
}
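/* After heap_caps_init() returns, registered_heaps is in the same order as
   the (merged) soc memory region table, since each element above is inserted
   after the previous one. */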
esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
{
    if (start == 0) {
        return ESP_ERR_INVALID_ARG;
    }

    for (int i = 0; i < soc_memory_region_count; i++) {
        const soc_memory_region_t *region = &soc_memory_regions[i];
        // Test requested start only as 'end' may be in a different region entry, assume 'end' has same caps
        if (region->start <= start && (region->start + region->size) > start) {
            const uint32_t *caps = soc_memory_types[region->type].caps;
            return heap_caps_add_region_with_caps(caps, start, end);
        }
    }

    return ESP_ERR_NOT_FOUND;
}
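/* Typical use (hypothetical address range): after mapping external RAM, e.g.
   heap_caps_add_region(0x3F800000, 0x3FC00000); the capabilities are
   inherited from whichever soc memory region entry contains 'start'. */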
esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end)
{
    esp_err_t err = ESP_FAIL;
    if (caps == NULL || start == 0 || end == 0 || end < start) {
        return ESP_ERR_INVALID_ARG;
    }

    heap_t *p_new = malloc(sizeof(heap_t));
    if (p_new == NULL) {
        err = ESP_ERR_NO_MEM;
        goto done;
    }
    memcpy(p_new->caps, caps, sizeof(p_new->caps));
    p_new->start = start;
    p_new->end = end;
    vPortCPUInitializeMutex(&p_new->heap_mux);
    p_new->heap = multi_heap_register((void *)start, end - start);
    SLIST_NEXT(p_new, next) = NULL;
    if (p_new->heap == NULL) {
        goto done;
    }
    multi_heap_set_lock(p_new->heap, &p_new->heap_mux);

    /* This insertion is atomic with respect to readers of registered_heaps,
       so we don't need to worry about thread safety for readers,
       only for writers. */
    static _lock_t registered_heaps_write_lock;
    _lock_acquire(&registered_heaps_write_lock);
    SLIST_INSERT_HEAD(&registered_heaps, p_new, next);
    _lock_release(&registered_heaps_write_lock);

    err = ESP_OK;

done:
    if (err != ESP_OK) {
        free(p_new);
    }
    return err;
}
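/* Illustrative sketch only -- the address range is hypothetical, and the caps
   array must follow the priority layout used by soc_memory_types for the
   target SoC (unlisted priority slots are left zero here):

       uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS] = { MALLOC_CAP_8BIT | MALLOC_CAP_32BIT };
       esp_err_t err = heap_caps_add_region_with_caps(caps, 0x3F800000, 0x3FA00000);
       if (err != ESP_OK) {
           // Region was not added; handle the error.
       }
*/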