# esp-idf alternative to "size" to print ELF file sizes, also analyzes
# the linker map file to dump higher resolution details.
#
# Includes information which is not shown in "xtensa-esp32-elf-size",
# or easy to parse from "xtensa-esp32-elf-objdump" or raw map files.
#
# Copyright 2017-2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

import argparse
import collections
import json
import os.path
import re
import sys
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"

# Fixed memory-region capacities per chip, used when the map file alone
# cannot provide them.  NOTE(review): the dict name and nesting were lost in
# the mangled source; reconstructed as a per-chip table — confirm against the
# original file.
CHIP_SIZES = {
    "esp32": {
        "total_iram": 0x20000,
        "total_irom": 0x330000,
        "total_drom": 0x800000,
        # total dram is determined from objdump output
    }
}
46 """ Pretty-print JSON object to stdout """
47 json.dump(obj, sys.stdout, indent=4)
def scan_to_header(f, header_line):
    """ Scan forward in a file until you reach 'header_line', then return.

    Leaves the file positioned on the line after the header.
    Raises RuntimeError if the header is never found.
    """
    for line in f:
        if line.strip() == header_line:
            return
    raise RuntimeError("Didn't find line '%s' in file" % header_line)
def load_map_data(map_file):
    """ Parse an open linker map file.

    Returns (memory_config, sections): the Memory Configuration table and the
    per-output-section details, in map-file order (the two helpers consume the
    file sequentially, so memory config must be read first).
    """
    memory_config = load_memory_config(map_file)
    sections = load_sections(map_file)
    return memory_config, sections
def load_memory_config(map_file):
    """ Memory Configuration section is the total size of each output section.

    Returns a dict keyed by memory region name, each value a dict with
    "name", "origin" and "length" keys.  The "*default*" region is skipped.
    Raises RuntimeError if the map file ends mid-table.
    """
    result = {}
    scan_to_header(map_file, "Memory Configuration")
    RE_MEMORY_SECTION = r"(?P<name>[^ ]+) +0x(?P<origin>[\da-f]+) +0x(?P<length>[\da-f]+)"

    for line in map_file:
        m = re.match(RE_MEMORY_SECTION, line)
        if m is None:
            if len(result) == 0:
                continue  # whitespace or a header, before the content we want
            else:
                return result  # we're at the end of the Memory Configuration
        section = {
            "name": m.group("name"),
            "origin": int(m.group("origin"), 16),
            "length": int(m.group("length"), 16),
        }
        if section["name"] != "*default*":
            result[section["name"]] = section
    raise RuntimeError("End of file while scanning memory configuration?")
def load_sections(map_file):
    """ Load section size information from the MAP file.

    Returns a dict of 'sections', where each key is a section name and the value
    is a dict with details about this section, including a "sources" key which holds a list of source file line
    information for each symbol linked into the section.
    """
    scan_to_header(map_file, "Linker script and memory map")
    sections = {}
    section = None
    sym_backup = None
    for line in map_file:
        # output section header, ie '.iram0.text 0x0000000040080400 0x129a5'
        RE_SECTION_HEADER = r"(?P<name>[^ ]+) +0x(?P<address>[\da-f]+) +0x(?P<size>[\da-f]+)$"
        m = re.match(RE_SECTION_HEADER, line)
        if m is not None:  # start of a new section
            section = {
                "name": m.group("name"),
                "address": int(m.group("address"), 16),
                "size": int(m.group("size"), 16),
                "sources": [],
            }
            sections[section["name"]] = section
            continue

        # source file line, ie
        # 0x0000000040080400 0xa4 /home/gus/esp/32/idf/examples/get-started/hello_world/build/esp32/libesp32.a(cpu_start.o)
        RE_SOURCE_LINE = r"\s*(?P<sym_name>\S*).* +0x(?P<address>[\da-f]+) +0x(?P<size>[\da-f]+) (?P<archive>.+\.a)\((?P<object_file>.+\.ob?j?)\)"
        m = re.match(RE_SOURCE_LINE, line, re.M)
        if not m:
            # cmake build system links some object files directly, not part of any archive
            RE_SOURCE_LINE = r"\s*(?P<sym_name>\S*).* +0x(?P<address>[\da-f]+) +0x(?P<size>[\da-f]+) (?P<object_file>.+\.ob?j?)"
            m = re.match(RE_SOURCE_LINE, line)
        if section is not None and m is not None:  # input source file details
            sym_name = m.group("sym_name") if len(m.group("sym_name")) > 0 else sym_backup
            try:
                archive = m.group("archive")
            except IndexError:
                # fallback regex above has no 'archive' group (directly linked object)
                archive = "(exe)"
            source = {
                "size": int(m.group("size"), 16),
                "address": int(m.group("address"), 16),
                "archive": os.path.basename(archive),
                "object_file": os.path.basename(m.group("object_file")),
                "sym_name": sym_name,
            }
            source["file"] = "%s:%s" % (source["archive"], source["object_file"])
            section["sources"] += [source]

        # In some cases the section name appears on the previous line, back it up in here
        RE_SYMBOL_ONLY_LINE = r"^ (?P<sym_name>\S*)$"
        m = re.match(RE_SYMBOL_ONLY_LINE, line)
        if section is not None and m is not None:
            sym_backup = m.group("sym_name")
    return sections
def sizes_by_key(sections, key):
    """ Takes a dict of sections (from load_sections) and returns
    a dict keyed by 'key' with aggregate output size information.

    Key can be either "archive" (for per-archive data) or "file" (for per-file data) in the result.
    """
    result = {}
    for section in sections.values():
        for s in section["sources"]:
            if s[key] not in result:
                result[s[key]] = {}
            archive = result[s[key]]
            if section["name"] not in archive:
                archive[section["name"]] = 0
            archive[section["name"]] += s["size"]
    return result
def main():
    """ Command-line entry point: parse arguments, load the map file and
    print the requested size reports. """
    parser = argparse.ArgumentParser("idf_size - a tool to print IDF elf file sizes")

    parser.add_argument(
        '--toolchain-prefix',
        help="Triplet prefix to add before objdump executable",
        default=DEFAULT_TOOLCHAIN_PREFIX)

    parser.add_argument(
        '--json',
        help="Output results as JSON",
        action="store_true")

    parser.add_argument(
        'map_file', help='MAP file produced by linker',
        type=argparse.FileType('r'))

    parser.add_argument(
        '--archives', help='Print per-archive sizes', action='store_true')

    parser.add_argument(
        '--archive_details', help='Print detailed symbols per archive')

    parser.add_argument(
        '--files', help='Print per-file sizes', action='store_true')

    args = parser.parse_args()

    memory_config, sections = load_map_data(args.map_file)
    # summary is skipped in JSON mode when a more detailed report was requested,
    # so the output stays a single valid JSON document
    if not args.json or not (args.archives or args.files or args.archive_details):
        print_summary(memory_config, sections, args.json)
    if args.archives:
        print_detailed_sizes(sections, "archive", "Archive File", args.json)
    if args.files:
        print_detailed_sizes(sections, "file", "Object File", args.json)
    if args.archive_details:
        print_archive_symbols(sections, args.archive_details, args.json)
def print_summary(memory_config, sections, as_json=False):
    """ Print the aggregate DRAM/IRAM/flash usage totals (or dump them as JSON).

    memory_config/sections are the two dicts returned by load_map_data().
    """
    def get_size(section):
        # absent sections contribute zero bytes rather than raising
        try:
            return sections[section]["size"]
        except KeyError:
            return 0

    # if linker script changes, these need to change
    total_iram = memory_config["iram0_0_seg"]["length"]
    total_dram = memory_config["dram0_0_seg"]["length"]
    used_data = get_size(".dram0.data")
    used_bss = get_size(".dram0.bss")
    used_dram = used_data + used_bss
    try:
        used_dram_ratio = used_dram / total_dram
    except ZeroDivisionError:
        used_dram_ratio = float('nan')
    used_iram = sum(get_size(s) for s in sections if s.startswith(".iram0"))
    try:
        used_iram_ratio = used_iram / total_iram
    except ZeroDivisionError:
        used_iram_ratio = float('nan')
    flash_code = get_size(".flash.text")
    flash_rodata = get_size(".flash.rodata")
    total_size = used_data + used_iram + flash_code + flash_rodata

    if as_json:
        _json_dump(collections.OrderedDict([
            ("dram_data", used_data),
            ("dram_bss", used_bss),
            ("used_dram", used_dram),
            ("available_dram", total_dram - used_dram),
            ("used_dram_ratio", used_dram_ratio),
            ("used_iram", used_iram),
            ("available_iram", total_iram - used_iram),
            ("used_iram_ratio", used_iram_ratio),
            ("flash_code", flash_code),
            ("flash_rodata", flash_rodata),
            ("total_size", total_size)
        ]))
    else:
        print("Total sizes:")
        print(" DRAM .data size: %7d bytes" % used_data)
        print(" DRAM .bss size: %7d bytes" % used_bss)
        print("Used static DRAM: %7d bytes (%7d available, %.1f%% used)" %
              (used_dram, total_dram - used_dram, 100.0 * used_dram_ratio))
        print("Used static IRAM: %7d bytes (%7d available, %.1f%% used)" %
              (used_iram, total_iram - used_iram, 100.0 * used_iram_ratio))
        print(" Flash code: %7d bytes" % flash_code)
        print(" Flash rodata: %7d bytes" % flash_rodata)
        print("Total image size:~%7d bytes (.bin may be padded larger)" % (total_size))
def print_detailed_sizes(sections, key, header, as_json=False):
    """ Print a per-archive or per-file size breakdown table (or JSON).

    key is "archive" or "file" (see sizes_by_key); header is the first
    column title, e.g. "Archive File".
    """
    sizes = sizes_by_key(sections, key)
    result = {}
    for k, v in sizes.items():
        result[k] = collections.OrderedDict()
        result[k]["data"] = v.get(".dram0.data", 0)
        result[k]["bss"] = v.get(".dram0.bss", 0)
        result[k]["iram"] = sum(t for (s, t) in v.items() if s.startswith(".iram0"))
        result[k]["flash_text"] = v.get(".flash.text", 0)
        result[k]["flash_rodata"] = v.get(".flash.rodata", 0)
        result[k]["total"] = sum(result[k].values())

    def return_total_size(elem):
        return elem[1]["total"]

    def return_header(elem):
        return elem[0]

    s = sorted(list(result.items()), key=return_header)
    # do a secondary sort in order to have consistent order (for diff-ing the output)
    s = sorted(s, key=return_total_size, reverse=True)

    if as_json:
        _json_dump(collections.OrderedDict(s))
    else:
        print("Per-%s contributions to ELF file:" % key)
        headings = (header,
                    "DRAM .data",
                    "& .bss",
                    "IRAM",
                    "Flash code",
                    "& rodata",
                    "Total")
        header_format = "%24s %10d %6d %6d %10d %8d %7d"
        print(header_format.replace("d", "s") % headings)

        for k, v in s:
            if ":" in k:  # print subheadings for key of format archive:file
                sh, k = k.split(":")
            print(header_format % (k[:24],
                                   v["data"],
                                   v["bss"],
                                   v["iram"],
                                   v["flash_text"],
                                   v["flash_rodata"],
                                   v["total"]))
def print_archive_symbols(sections, archive, as_json=False):
    """ Print (or JSON-dump) per-symbol sizes for one archive, grouped by
    the output sections we care about.

    NOTE: mutates s["sym_name"] in the sections dict in place (strips the
    section-prefix from each symbol name), matching the original behavior.
    """
    interested_sections = [".dram0.data", ".dram0.bss", ".iram0.text", ".iram0.vectors", ".flash.text", ".flash.rodata"]
    result = {}
    for t in interested_sections:
        result[t] = {}
    for section in sections.values():
        section_name = section["name"]
        if section_name not in interested_sections:
            continue
        for s in section["sources"]:
            if archive != s["archive"]:
                continue
            # strip the section prefix from the symbol name; the pattern's
            # dots are unescaped, so this is a slightly loose match
            s["sym_name"] = re.sub("(.text.|.literal.|.data.|.bss.|.rodata.)", "", s["sym_name"])
            result[section_name][s["sym_name"]] = result[section_name].get(s["sym_name"], 0) + s["size"]

    # build a new ordered dict of each section, where each entry is an ordereddict of symbols to sizes
    section_symbols = collections.OrderedDict()
    for t in interested_sections:
        s = sorted(list(result[t].items()), key=lambda k_v: k_v[0])
        # do a secondary sort in order to have consistent order (for diff-ing the output)
        s = sorted(s, key=lambda k_v: k_v[1], reverse=True)
        section_symbols[t] = collections.OrderedDict(s)

    if as_json:
        _json_dump(section_symbols)
    else:
        print("Symbols within the archive: %s (Not all symbols may be reported)" % (archive))
        for t, s in section_symbols.items():
            section_total = 0
            print("\nSymbols from section:", t)
            for key, val in s.items():
                print(("%s(%d)" % (key.replace(t + ".", ""), val)), end=' ')
                section_total += val
            print("\nSection total:", section_total)
if __name__ == "__main__":
    # script entry point; body was truncated in the mangled source,
    # restored to call main() defined above
    main()