2 * Copyright (c) 2013 Luca Clementi <luca.clementi@gmail.com>
3 * Copyright (c) 2013-2018 The strace developers.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "largefile_wrappers.h"
32 #include "mmap_cache.h"
/*
 * Bumped on every event that may change a traced process's memory layout;
 * a tcb whose cached generation differs from this is stale.
 */
static unsigned int mmap_cache_generation;
/* Whether the mmap cache subsystem has been turned on (e.g. for -k). */
static bool use_mmap_cache;

/* Enable caching of /proc/$pid/maps contents. */
extern void mmap_cache_enable(void)
{
	use_mmap_cache = true;
}

/* Report whether mmap caching has been enabled. */
extern bool mmap_cache_is_enabled(void)
{
	return use_mmap_cache;
}
/*
 * Caching of /proc/$pid/maps for each process to speed up stack tracing.
 *
 * The cache must be refreshed after syscalls that affect memory mappings,
 * e.g. mmap, mprotect, munmap, execve.
 */
55 build_mmap_cache(struct tcb *tcp)
58 struct mmap_cache_t *cache_head = NULL;
59 /* start with a small dynamically-allocated array and then expand it */
60 size_t cur_array_size = 0;
61 char filename[sizeof("/proc/4294967296/maps")];
62 char buffer[PATH_MAX + 80];
64 xsprintf(filename, "/proc/%u/maps", tcp->pid);
65 fp = fopen_stream(filename, "r");
67 perror_msg("fopen: %s", filename);
71 while (fgets(buffer, sizeof(buffer), fp) != NULL) {
72 struct mmap_cache_t *entry;
73 unsigned long start_addr, end_addr, mmap_offset;
78 unsigned long major, minor;
79 char binary_path[sizeof(buffer)];
81 if (sscanf(buffer, "%lx-%lx %c%c%c%c %lx %lx:%lx %*d %[^\n]",
82 &start_addr, &end_addr,
83 &read_bit, &write_bit, &exec_bit, &shared_bit,
89 /* skip mappings that have unknown protection */
90 if (!(read_bit == '-' || read_bit == 'r'))
92 if (!(write_bit == '-' || write_bit == 'w'))
94 if (!(exec_bit == '-' || exec_bit == 'x'))
96 if (!(shared_bit == 'p' || shared_bit == 's'))
99 if (end_addr < start_addr) {
100 error_msg("%s: unrecognized file format", filename);
105 * sanity check to make sure that we're storing
106 * non-overlapping regions in ascending order
108 if (tcp->mmap_cache_size > 0) {
109 entry = &cache_head[tcp->mmap_cache_size - 1];
110 if (entry->start_addr == start_addr &&
111 entry->end_addr == end_addr) {
112 /* duplicate entry, e.g. [vsyscall] */
115 if (start_addr <= entry->start_addr ||
116 start_addr < entry->end_addr) {
117 debug_msg("%s: overlapping memory region: "
118 "\"%s\" [%08lx-%08lx] overlaps with "
119 "\"%s\" [%08lx-%08lx]",
120 filename, binary_path, start_addr,
121 end_addr, entry->binary_filename,
122 entry->start_addr, entry->end_addr);
127 if (tcp->mmap_cache_size >= cur_array_size)
128 cache_head = xgrowarray(cache_head, &cur_array_size,
129 sizeof(*cache_head));
131 entry = &cache_head[tcp->mmap_cache_size];
132 entry->start_addr = start_addr;
133 entry->end_addr = end_addr;
134 entry->mmap_offset = mmap_offset;
135 entry->protections = (
137 | ((read_bit == 'r')? MMAP_CACHE_PROT_READABLE : 0)
138 | ((write_bit == 'w')? MMAP_CACHE_PROT_WRITABLE : 0)
139 | ((exec_bit == 'x')? MMAP_CACHE_PROT_EXECUTABLE: 0)
140 | ((shared_bit == 's')? MMAP_CACHE_PROT_SHARED : 0)
142 entry->major = major;
143 entry->minor = minor;
144 entry->binary_filename = xstrdup(binary_path);
145 tcp->mmap_cache_size++;
148 tcp->mmap_cache = cache_head;
149 tcp->mmap_cache_generation = mmap_cache_generation;
151 debug_func_msg("tgen=%u, ggen=%u, tcp=%p, cache=%p",
152 tcp->mmap_cache_generation,
153 mmap_cache_generation,
154 tcp, tcp->mmap_cache);
157 /* deleting the cache */
159 mmap_cache_delete(struct tcb *tcp, const char *caller)
163 debug_func_msg("tgen=%u, ggen=%u, tcp=%p, cache=%p, caller=%s",
164 tcp->mmap_cache_generation,
165 mmap_cache_generation,
166 tcp, tcp->mmap_cache, caller);
168 for (i = 0; i < tcp->mmap_cache_size; i++) {
169 free(tcp->mmap_cache[i].binary_filename);
170 tcp->mmap_cache[i].binary_filename = NULL;
172 free(tcp->mmap_cache);
173 tcp->mmap_cache = NULL;
174 tcp->mmap_cache_size = 0;
177 extern enum mmap_cache_rebuild_result
178 mmap_cache_rebuild_if_invalid(struct tcb *tcp, const char *caller)
180 enum mmap_cache_rebuild_result r = MMAP_CACHE_REBUILD_READY;
181 if ((tcp->mmap_cache_generation != mmap_cache_generation)
183 mmap_cache_delete(tcp, caller);
185 if (!tcp->mmap_cache) {
186 r = MMAP_CACHE_REBUILD_RENEWED;
187 build_mmap_cache(tcp);
190 if (!(tcp->mmap_cache && tcp->mmap_cache_size))
191 r = MMAP_CACHE_REBUILD_NOCACHE;
197 mmap_cache_invalidate(struct tcb *tcp)
199 #if SUPPORTED_PERSONALITIES > 1
200 if (tcp->currpers != DEFAULT_PERSONALITY) {
201 /* disable stack trace */
205 mmap_cache_generation++;
206 debug_func_msg("tgen=%u, ggen=%u, tcp=%p, cache=%p",
207 tcp->mmap_cache_generation,
208 mmap_cache_generation,
209 tcp, tcp->mmap_cache);
212 struct mmap_cache_t *
213 mmap_cache_search(struct tcb *tcp, unsigned long ip)
216 int upper = (int) tcp->mmap_cache_size - 1;
218 while (lower <= upper) {
219 struct mmap_cache_t *cur_mmap_cache;
220 int mid = (upper + lower) / 2;
222 cur_mmap_cache = &tcp->mmap_cache[mid];
224 if (ip >= cur_mmap_cache->start_addr &&
225 ip < cur_mmap_cache->end_addr)
226 return cur_mmap_cache;
227 else if (ip < cur_mmap_cache->start_addr)