2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 #include "private/gc_priv.h"
19 #if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
20 && !defined(MSWINCE) && !defined(SN_TARGET_ORBIS) \
21 && !defined(SN_TARGET_PSP2) && !defined(__CC_ARM)
22 # include <sys/types.h>
23 # if !defined(MSWIN32) && !defined(MSWIN_XBOX1)
29 #if defined(MSWINCE) || defined(SN_TARGET_PS3)
30 # define SIGSEGV 0 /* value is irrelevant */
35 #if defined(UNIX_LIKE) || defined(CYGWIN32) || defined(NACL) \
40 #if defined(LINUX) || defined(LINUX_STACKBOTTOM)
44 /* Blatantly OS dependent routines, except for those that are related */
45 /* to dynamic loading. */
49 # include "extra/AmigaOS.c"
53 #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
54 # ifndef WIN32_LEAN_AND_MEAN
55 # define WIN32_LEAN_AND_MEAN 1
59 /* It's not clear this is completely kosher under Cygwin. But it */
60 /* allows us to get a working GC_get_stack_base. */
64 # include <Processes.h>
69 # include <malloc.h> /* for locking */
72 #if defined(MMAP_SUPPORTED) || defined(ADD_HEAP_GUARD_PAGES)
73 # if defined(USE_MUNMAP) && !defined(USE_MMAP) && !defined(CPPCHECK)
74 # error Invalid config: USE_MUNMAP requires USE_MMAP
76 # include <sys/types.h>
77 # include <sys/mman.h>
78 # include <sys/stat.h>
83 /* for get_etext and friends */
84 # include <mach-o/getsect.h>
88 /* Apparently necessary for djgpp 2.01. May cause problems with */
90 typedef long unsigned int caddr_t;
94 # include "il/PCR_IL.h"
95 # include "th/PCR_ThCtl.h"
96 # include "mm/PCR_MM.h"
99 #if defined(GC_DARWIN_THREADS) && defined(MPROTECT_VDB)
100 /* Declare GC_mprotect_stop and GC_mprotect_resume as extern "C". */
101 # include "private/darwin_stop_world.h"
104 #if !defined(NO_EXECUTE_PERMISSION)
105 STATIC GC_bool GC_pages_executable = TRUE;
107 STATIC GC_bool GC_pages_executable = FALSE;
109 #define IGNORE_PAGES_EXECUTABLE 1
110 /* Undefined on GC_pages_executable real use. */
112 #ifdef NEED_PROC_MAPS
113 /* We need to parse /proc/self/maps, either to find dynamic libraries, */
114 /* and/or to find the register backing store base (IA64). Do it once */
117 /* Repeatedly perform a read call until the buffer is filled or */
118 /* we encounter EOF. */
119 STATIC ssize_t GC_repeat_read(int fd, char *buf, size_t count)
124 ASSERT_CANCEL_DISABLED();
125 while (num_read < count) {
126 ssize_t result = READ(fd, buf + num_read, count - num_read);
128 if (result < 0) return result;
129 if (result == 0) break;
137 /* Determine the length of a file by incrementally reading it into a */
138 /* buffer. This would be silly to use it on a file supporting lseek, */
139 /* but Linux /proc files usually do not. */
140 STATIC size_t GC_get_file_len(int f)
144 # define GET_FILE_LEN_BUF_SZ 500
145 char buf[GET_FILE_LEN_BUF_SZ];
148 result = read(f, buf, GET_FILE_LEN_BUF_SZ);
149 if (result == -1) return 0;
151 } while (result > 0);
155 STATIC size_t GC_get_maps_len(void)
157 int f = open("/proc/self/maps", O_RDONLY);
159 if (f < 0) return 0; /* treat missing file as empty */
160 result = GC_get_file_len(f);
166 /* Copy the contents of /proc/self/maps to a buffer in our address */
167 /* space. Return the address of the buffer, or zero on failure. */
168 /* This code could be simplified if we could determine its size ahead */
170 GC_INNER char * GC_get_maps(void)
173 static char *maps_buf = NULL;
174 static size_t maps_buf_sz = 1;
177 size_t old_maps_size = 0;
180 /* The buffer is essentially static, so there must be a single client. */
181 GC_ASSERT(I_HOLD_LOCK());
183 /* Note that in the presence of threads, the maps file can */
184 /* essentially shrink asynchronously and unexpectedly as */
185 /* threads that we already think of as dead release their */
186 /* stacks. And there is no easy way to read the entire */
187 /* file atomically. This is arguably a misfeature of the */
188 /* /proc/.../maps interface. */
189 /* Since we expect the file can grow asynchronously in rare */
190 /* cases, it should suffice to first determine */
191 /* the size (using lseek or read), and then to reread the */
192 /* file. If the size is inconsistent we have to retry. */
193 /* This only matters with threads enabled, and if we use */
194 /* this to locate roots (not the default). */
197 /* Determine the initial size of /proc/self/maps. */
198 /* Note that lseek doesn't work, at least as of 2.6.15. */
199 maps_size = GC_get_maps_len();
200 if (0 == maps_size) return 0;
202 maps_size = 4000; /* Guess */
205 /* Read /proc/self/maps, growing maps_buf as necessary. */
206 /* Note that we may not allocate conventionally, and */
207 /* thus can't use stdio. */
211 while (maps_size >= maps_buf_sz) {
212 GC_scratch_recycle_no_gww(maps_buf, maps_buf_sz);
213 /* Grow only by powers of 2, since we leak "too small" buffers.*/
214 while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
215 maps_buf = GC_scratch_alloc(maps_buf_sz);
217 /* Recompute initial length, since we allocated. */
218 /* This can only happen a few times per program */
220 maps_size = GC_get_maps_len();
221 if (0 == maps_size) return 0;
223 if (maps_buf == 0) return 0;
225 GC_ASSERT(maps_buf_sz >= maps_size + 1);
226 f = open("/proc/self/maps", O_RDONLY);
227 if (-1 == f) return 0;
229 old_maps_size = maps_size;
233 result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
237 } while ((size_t)result == maps_buf_sz-1);
242 if (maps_size > old_maps_size) {
243 /* This might be caused by e.g. thread creation. */
244 WARN("Unexpected asynchronous /proc/self/maps growth"
245 " (to %" WARN_PRIdPTR " bytes)\n", maps_size);
248 } while (maps_size >= maps_buf_sz
250 || maps_size < old_maps_size
253 maps_buf[maps_size] = '\0';
258 * GC_parse_map_entry parses an entry from /proc/self/maps so we can
259 * locate all writable data segments that belong to shared libraries.
260 * The format of one of these entries and the fields we care about
262 * XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537 name of mapping...\n
263 * ^^^^^^^^ ^^^^^^^^ ^^^^ ^^
264 * start end prot maj_dev
266 * Note that since about august 2003 kernels, the columns no longer have
267 * fixed offsets on 64-bit kernels. Hence we no longer rely on fixed offsets
268 * anywhere, which is safer anyway.
271 /* Assign various fields of the first line in buf_ptr to (*start), */
272 /* (*end), (*prot), (*maj_dev) and (*mapping_name). mapping_name may */
273 /* be NULL. (*prot) and (*mapping_name) are assigned pointers into the */
274 /* original buffer. */
275 #if (defined(DYNAMIC_LOADING) && defined(USE_PROC_FOR_LIBRARIES)) \
276 || defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR) \
277 || defined(REDIRECT_MALLOC)
278 GC_INNER char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
279 char **prot, unsigned int *maj_dev,
282 unsigned char *start_start, *end_start, *maj_dev_start;
283 unsigned char *p; /* unsigned for isspace, isxdigit */
285 if (buf_ptr == NULL || *buf_ptr == '\0') {
289 p = (unsigned char *)buf_ptr;
290 while (isspace(*p)) ++p;
292 GC_ASSERT(isxdigit(*start_start));
293 *start = (ptr_t)strtoul((char *)start_start, (char **)&p, 16);
298 GC_ASSERT(isxdigit(*end_start));
299 *end = (ptr_t)strtoul((char *)end_start, (char **)&p, 16);
300 GC_ASSERT(isspace(*p));
302 while (isspace(*p)) ++p;
303 GC_ASSERT(*p == 'r' || *p == '-');
305 /* Skip past protection field to offset field */
306 while (!isspace(*p)) ++p;
307 while (isspace(*p)) p++;
308 GC_ASSERT(isxdigit(*p));
309 /* Skip past offset field, which we ignore */
310 while (!isspace(*p)) ++p;
311 while (isspace(*p)) p++;
313 GC_ASSERT(isxdigit(*maj_dev_start));
314 *maj_dev = strtoul((char *)maj_dev_start, NULL, 16);
316 if (mapping_name == 0) {
317 while (*p && *p++ != '\n');
319 while (*p && *p != '\n' && *p != '/' && *p != '[') p++;
320 *mapping_name = (char *)p;
321 while (*p && *p++ != '\n');
325 #endif /* REDIRECT_MALLOC || DYNAMIC_LOADING || IA64 || ... */
327 #if defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR)
328 /* Try to read the backing store base from /proc/self/maps. */
329 /* Return the bounds of the writable mapping with a 0 major device, */
330 /* which includes the address passed as data. */
331 /* Return FALSE if there is no such mapping. */
332 GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr, ptr_t *startp,
336 ptr_t my_start, my_end;
337 unsigned int maj_dev;
338 char *maps = GC_get_maps();
339 char *buf_ptr = maps;
341 if (0 == maps) return(FALSE);
343 buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
346 if (buf_ptr == NULL) return FALSE;
347 if (prot[1] == 'w' && maj_dev == 0) {
348 if ((word)my_end > (word)addr && (word)my_start <= (word)addr) {
357 #endif /* IA64 || INCLUDE_LINUX_THREAD_DESCR */
359 #if defined(REDIRECT_MALLOC)
360 /* Find the text(code) mapping for the library whose name, after */
361 /* stripping the directory part, starts with nm. */
362 GC_INNER GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp)
364 size_t nm_len = strlen(nm);
367 ptr_t my_start, my_end;
368 unsigned int maj_dev;
369 char *maps = GC_get_maps();
370 char *buf_ptr = maps;
372 if (0 == maps) return(FALSE);
374 buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
375 &prot, &maj_dev, &map_path);
377 if (buf_ptr == NULL) return FALSE;
378 if (prot[0] == 'r' && prot[1] == '-' && prot[2] == 'x') {
380 /* Set p to point just past last slash, if any. */
381 while (*p != '\0' && *p != '\n' && *p != ' ' && *p != '\t') ++p;
382 while (*p != '/' && (word)p >= (word)map_path) --p;
384 if (strncmp(nm, p, nm_len) == 0) {
393 #endif /* REDIRECT_MALLOC */
396 static ptr_t backing_store_base_from_proc(void)
398 ptr_t my_start, my_end;
399 if (!GC_enclosing_mapping(GC_save_regs_in_stack(), &my_start, &my_end)) {
400 GC_COND_LOG_PRINTF("Failed to find backing store base from /proc\n");
407 #endif /* NEED_PROC_MAPS */
409 #if defined(SEARCH_FOR_DATA_START)
410 /* The I386 case can be handled without a search. The Alpha case */
411 /* used to be handled differently as well, but the rules changed */
412 /* for recent Linux versions. This seems to be the easiest way to */
413 /* cover all versions. */
415 # if defined(LINUX) || defined(HURD)
416 /* Some Linux distributions arrange to define __data_start. Some */
417 /* define data_start as a weak symbol. The latter is technically */
418 /* broken, since the user program may define data_start, in which */
419 /* case we lose. Nonetheless, we try both, preferring __data_start.*/
420 /* We assume gcc-compatible pragmas. */
422 # pragma weak __data_start
423 # pragma weak data_start
424 extern int __data_start[], data_start[];
428 ptr_t GC_data_start = NULL;
430 GC_INNER void GC_init_linux_data_start(void)
432 ptr_t data_end = DATAEND;
434 # if (defined(LINUX) || defined(HURD)) && !defined(IGNORE_PROG_DATA_START)
435 /* Try the easy approaches first: */
436 if (COVERT_DATAFLOW(__data_start) != 0) {
437 GC_data_start = (ptr_t)(__data_start);
439 GC_data_start = (ptr_t)(data_start);
441 if (COVERT_DATAFLOW(GC_data_start) != 0) {
442 if ((word)GC_data_start > (word)data_end)
443 ABORT_ARG2("Wrong __data_start/_end pair",
444 ": %p .. %p", (void *)GC_data_start, (void *)data_end);
447 # ifdef DEBUG_ADD_DEL_ROOTS
448 GC_log_printf("__data_start not provided\n");
453 /* Not needed, avoids the SIGSEGV caused by */
454 /* GC_find_limit which complicates debugging. */
455 GC_data_start = data_end; /* set data root size to 0 */
459 GC_data_start = (ptr_t)GC_find_limit(data_end, FALSE);
461 #endif /* SEARCH_FOR_DATA_START */
465 # ifndef ECOS_GC_MEMORY_SIZE
466 # define ECOS_GC_MEMORY_SIZE (448 * 1024)
467 # endif /* ECOS_GC_MEMORY_SIZE */
469 /* TODO: This is a simple way of allocating memory which is */
470 /* compatible with ECOS early releases. Later releases use a more */
471 /* sophisticated means of allocating memory than this simple static */
472 /* allocator, but this method is at least bound to work. */
473 static char ecos_gc_memory[ECOS_GC_MEMORY_SIZE];
474 static char *ecos_gc_brk = ecos_gc_memory;
476 static void *tiny_sbrk(ptrdiff_t increment)
478 void *p = ecos_gc_brk;
479 ecos_gc_brk += increment;
480 if ((word)ecos_gc_brk > (word)(ecos_gc_memory + sizeof(ecos_gc_memory))) {
481 ecos_gc_brk -= increment;
486 # define sbrk tiny_sbrk
489 #if defined(NETBSD) && defined(__ELF__)
490 ptr_t GC_data_start = NULL;
493 extern char **environ;
496 GC_INNER void GC_init_netbsd_elf(void)
498 /* This may need to be environ, without the underscore, for */
500 GC_data_start = (ptr_t)GC_find_limit(&environ, FALSE);
504 #if defined(ADDRESS_SANITIZER) && (defined(UNIX_LIKE) \
505 || defined(NEED_FIND_LIMIT) || defined(MPROTECT_VDB)) \
506 && !defined(CUSTOM_ASAN_DEF_OPTIONS)
507 /* To tell ASan to allow GC to use its own SIGBUS/SEGV handlers. */
508 /* The function is exported just to be visible to ASan library. */
509 GC_API const char *__asan_default_options(void)
511 return "allow_user_segv_handler=1";
516 static struct sigaction old_segv_act;
517 STATIC JMP_BUF GC_jmp_buf_openbsd;
520 # include <sys/syscall.h>
522 extern sigset_t __syscall(quad_t, ...);
526 /* Don't use GC_find_limit() because siglongjmp() outside of the */
527 /* signal handler by-passes our userland pthreads lib, leaving */
528 /* SIGSEGV and SIGPROF masked. Instead, use this custom one that */
529 /* works-around the issues. */
531 STATIC void GC_fault_handler_openbsd(int sig GC_ATTR_UNUSED)
533 LONGJMP(GC_jmp_buf_openbsd, 1);
536 /* Return the first non-addressable location > p or bound. */
537 /* Requires the allocation lock. */
538 STATIC ptr_t GC_find_limit_openbsd(ptr_t p, ptr_t bound)
540 static volatile ptr_t result;
541 /* Safer if static, since otherwise it may not be */
542 /* preserved across the longjmp. Can safely be */
543 /* static since it's only called with the */
544 /* allocation lock held. */
546 struct sigaction act;
547 word pgsz = (word)sysconf(_SC_PAGESIZE);
549 GC_ASSERT((word)bound >= pgsz);
550 GC_ASSERT(I_HOLD_LOCK());
552 act.sa_handler = GC_fault_handler_openbsd;
553 sigemptyset(&act.sa_mask);
554 act.sa_flags = SA_NODEFER | SA_RESTART;
555 /* act.sa_restorer is deprecated and should not be initialized. */
556 sigaction(SIGSEGV, &act, &old_segv_act);
558 if (SETJMP(GC_jmp_buf_openbsd) == 0) {
559 result = (ptr_t)((word)p & ~(pgsz-1));
561 if ((word)result >= (word)bound - pgsz) {
565 result += pgsz; /* no overflow expected */
566 GC_noop1((word)(*result));
571 /* Due to the siglongjump we need to manually unmask SIGPROF. */
572 __syscall(SYS_sigprocmask, SIG_UNBLOCK, sigmask(SIGPROF));
575 sigaction(SIGSEGV, &old_segv_act, 0);
579 /* Return first addressable location > p or bound. */
580 /* Requires the allocation lock. */
581 STATIC ptr_t GC_skip_hole_openbsd(ptr_t p, ptr_t bound)
583 static volatile ptr_t result;
584 static volatile int firstpass;
586 struct sigaction act;
587 word pgsz = (word)sysconf(_SC_PAGESIZE);
589 GC_ASSERT((word)bound >= pgsz);
590 GC_ASSERT(I_HOLD_LOCK());
592 act.sa_handler = GC_fault_handler_openbsd;
593 sigemptyset(&act.sa_mask);
594 act.sa_flags = SA_NODEFER | SA_RESTART;
595 /* act.sa_restorer is deprecated and should not be initialized. */
596 sigaction(SIGSEGV, &act, &old_segv_act);
599 result = (ptr_t)((word)p & ~(pgsz-1));
600 if (SETJMP(GC_jmp_buf_openbsd) != 0 || firstpass) {
602 if ((word)result >= (word)bound - pgsz) {
605 result += pgsz; /* no overflow expected */
606 GC_noop1((word)(*result));
610 sigaction(SIGSEGV, &old_segv_act, 0);
619 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
622 unsigned short magic_number;
623 unsigned short padding[29];
627 #define E_MAGIC(x) (x).magic_number
628 #define EMAGIC 0x5A4D
629 #define E_LFANEW(x) (x).new_exe_offset
632 unsigned char magic_number[2];
633 unsigned char byte_order;
634 unsigned char word_order;
635 unsigned long exe_format_level;
638 unsigned long padding1[13];
639 unsigned long object_table_offset;
640 unsigned long object_count;
641 unsigned long padding2[31];
644 #define E32_MAGIC1(x) (x).magic_number[0]
645 #define E32MAGIC1 'L'
646 #define E32_MAGIC2(x) (x).magic_number[1]
647 #define E32MAGIC2 'X'
648 #define E32_BORDER(x) (x).byte_order
650 #define E32_WORDER(x) (x).word_order
652 #define E32_CPU(x) (x).cpu
654 #define E32_OBJTAB(x) (x).object_table_offset
655 #define E32_OBJCNT(x) (x).object_count
661 unsigned long pagemap;
662 unsigned long mapsize;
663 unsigned long reserved;
666 #define O32_FLAGS(x) (x).flags
667 #define OBJREAD 0x0001L
668 #define OBJWRITE 0x0002L
669 #define OBJINVALID 0x0080L
670 #define O32_SIZE(x) (x).size
671 #define O32_BASE(x) (x).base
673 # else /* IBM's compiler */
675 /* A kludge to get around what appears to be a header file bug */
677 # define WORD unsigned short
680 # define DWORD unsigned long
687 # endif /* __IBMC__ */
689 # define INCL_DOSEXCEPTIONS
690 # define INCL_DOSPROCESS
691 # define INCL_DOSERRORS
692 # define INCL_DOSMODULEMGR
693 # define INCL_DOSMEMMGR
698 /* Find the page size */
699 GC_INNER size_t GC_page_size = 0;
701 #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
702 # ifndef VER_PLATFORM_WIN32_CE
703 # define VER_PLATFORM_WIN32_CE 3
706 # if defined(MSWINCE) && defined(THREADS)
707 GC_INNER GC_bool GC_dont_query_stack_min = FALSE;
710 GC_INNER SYSTEM_INFO GC_sysinfo;
712 GC_INNER void GC_setpagesize(void)
714 GetSystemInfo(&GC_sysinfo);
715 # if defined(CYGWIN32) && (defined(MPROTECT_VDB) || defined(USE_MUNMAP))
716 /* Allocations made with mmap() are aligned to the allocation */
717 /* granularity, which (at least on 64-bit Windows OS) is not the */
718 /* same as the page size. Probably a separate variable could */
719 /* be added to distinguish the allocation granularity from the */
720 /* actual page size, but in practice there is no good reason to */
721 /* make allocations smaller than dwAllocationGranularity, so we */
722 /* just use it instead of the actual page size here (as Cygwin */
723 /* itself does in many cases). */
724 GC_page_size = (size_t)GC_sysinfo.dwAllocationGranularity;
725 GC_ASSERT(GC_page_size >= (size_t)GC_sysinfo.dwPageSize);
727 GC_page_size = (size_t)GC_sysinfo.dwPageSize;
729 # if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
731 OSVERSIONINFO verInfo;
732 /* Check the current WinCE version. */
733 verInfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
734 if (!GetVersionEx(&verInfo))
735 ABORT("GetVersionEx failed");
736 if (verInfo.dwPlatformId == VER_PLATFORM_WIN32_CE &&
737 verInfo.dwMajorVersion < 6) {
738 /* Only the first 32 MB of address space belongs to the */
739 /* current process (unless WinCE 6.0+ or emulation). */
740 GC_sysinfo.lpMaximumApplicationAddress = (LPVOID)((word)32 << 20);
742 /* On some old WinCE versions, it's observed that */
743 /* VirtualQuery calls don't work properly when used to */
744 /* get thread current stack committed minimum. */
745 if (verInfo.dwMajorVersion < 5)
746 GC_dont_query_stack_min = TRUE;
754 # define is_writable(prot) ((prot) == PAGE_READWRITE \
755 || (prot) == PAGE_WRITECOPY \
756 || (prot) == PAGE_EXECUTE_READWRITE \
757 || (prot) == PAGE_EXECUTE_WRITECOPY)
758 /* Return the number of bytes that are writable starting at p. */
759 /* The pointer p is assumed to be page aligned. */
760 /* If base is not 0, *base becomes the beginning of the */
761 /* allocation region containing p. */
762 STATIC word GC_get_writable_length(ptr_t p, ptr_t *base)
764 MEMORY_BASIC_INFORMATION buf;
768 result = VirtualQuery(p, &buf, sizeof(buf));
769 if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
770 if (base != 0) *base = (ptr_t)(buf.AllocationBase);
771 protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
772 if (!is_writable(protect)) {
775 if (buf.State != MEM_COMMIT) return(0);
776 return(buf.RegionSize);
779 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
784 /* Set page size if it is not ready (so client can use this */
785 /* function even before GC is initialized). */
786 if (!GC_page_size) GC_setpagesize();
788 trunc_sp = (ptr_t)((word)GC_approx_sp() & ~(GC_page_size - 1));
789 /* FIXME: This won't work if called from a deeply recursive */
790 /* client code (and the committed stack space has grown). */
791 size = GC_get_writable_length(trunc_sp, 0);
792 GC_ASSERT(size != 0);
793 sb -> mem_base = trunc_sp + size;
796 # else /* CYGWIN32 */
797 /* An alternate version for Cygwin (adapted from Dave Korn's */
798 /* gcc version of boehm-gc). */
799 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
802 sb -> mem_base = ((NT_TIB*)NtCurrentTeb())->StackBase;
806 __asm__ ("movl %%fs:4, %0"
808 sb -> mem_base = _tlsbase;
812 # endif /* CYGWIN32 */
813 # define HAVE_GET_STACK_BASE
816 GC_INNER void GC_setpagesize(void)
818 # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
819 GC_page_size = (size_t)GETPAGESIZE();
820 # if !defined(CPPCHECK)
821 if (0 == GC_page_size)
822 ABORT("getpagesize failed");
825 /* It's acceptable to fake it. */
826 GC_page_size = HBLKSIZE;
829 #endif /* !MSWIN32 */
832 # include <kernel/OS.h>
834 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
837 get_thread_info(find_thread(NULL),&th);
838 sb->mem_base = th.stack_end;
841 # define HAVE_GET_STACK_BASE
845 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
847 PTIB ptib; /* thread information block */
849 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
850 WARN("DosGetInfoBlocks failed\n", 0);
851 return GC_UNIMPLEMENTED;
853 sb->mem_base = ptib->tib_pstacklimit;
856 # define HAVE_GET_STACK_BASE
861 # include "extra/AmigaOS.c"
863 # define GET_MAIN_STACKBASE_SPECIAL
866 # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
868 typedef void (*GC_fault_handler_t)(int);
870 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
871 || defined(HAIKU) || defined(HURD) || defined(FREEBSD) \
873 static struct sigaction old_segv_act;
874 # if defined(_sigargs) /* !Irix6.x */ \
875 || defined(HURD) || defined(NETBSD) || defined(FREEBSD)
876 static struct sigaction old_bus_act;
879 static GC_fault_handler_t old_segv_handler;
881 static GC_fault_handler_t old_bus_handler;
885 GC_INNER void GC_set_and_save_fault_handler(GC_fault_handler_t h)
887 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
888 || defined(HAIKU) || defined(HURD) || defined(FREEBSD) \
890 struct sigaction act;
893 # ifdef SIGACTION_FLAGS_NODEFER_HACK
894 /* Was necessary for Solaris 2.3 and very temporary */
896 act.sa_flags = SA_RESTART | SA_NODEFER;
898 act.sa_flags = SA_RESTART;
901 (void) sigemptyset(&act.sa_mask);
902 /* act.sa_restorer is deprecated and should not be initialized. */
903 # ifdef GC_IRIX_THREADS
904 /* Older versions have a bug related to retrieving and */
905 /* and setting a handler at the same time. */
906 (void) sigaction(SIGSEGV, 0, &old_segv_act);
907 (void) sigaction(SIGSEGV, &act, 0);
909 (void) sigaction(SIGSEGV, &act, &old_segv_act);
910 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
911 || defined(HURD) || defined(NETBSD) || defined(FREEBSD)
912 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
913 /* Pthreads doesn't exist under Irix 5.x, so we */
914 /* don't have to worry in the threads case. */
915 (void) sigaction(SIGBUS, &act, &old_bus_act);
917 # endif /* !GC_IRIX_THREADS */
919 old_segv_handler = signal(SIGSEGV, h);
921 old_bus_handler = signal(SIGBUS, h);
924 # if defined(CPPCHECK) && defined(ADDRESS_SANITIZER)
925 GC_noop1((word)&__asan_default_options);
928 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
930 # if defined(NEED_FIND_LIMIT) \
931 || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS))
932 /* Some tools to implement HEURISTIC2 */
933 # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
935 GC_INNER JMP_BUF GC_jmp_buf;
937 STATIC void GC_fault_handler(int sig GC_ATTR_UNUSED)
939 LONGJMP(GC_jmp_buf, 1);
942 GC_INNER void GC_setup_temporary_fault_handler(void)
944 /* Handler is process-wide, so this should only happen in */
945 /* one thread at a time. */
946 GC_ASSERT(I_HOLD_LOCK());
947 GC_set_and_save_fault_handler(GC_fault_handler);
950 GC_INNER void GC_reset_fault_handler(void)
952 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
953 || defined(HAIKU) || defined(HURD) || defined(FREEBSD) \
955 (void) sigaction(SIGSEGV, &old_segv_act, 0);
956 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
957 || defined(HURD) || defined(NETBSD)
958 (void) sigaction(SIGBUS, &old_bus_act, 0);
961 (void) signal(SIGSEGV, old_segv_handler);
963 (void) signal(SIGBUS, old_bus_handler);
968 /* Return the first non-addressable location > p (up) or */
969 /* the smallest location q s.t. [q,p) is addressable (!up). */
970 /* We assume that p (up) or p-1 (!up) is addressable. */
971 /* Requires allocation lock. */
972 STATIC ptr_t GC_find_limit_with_bound(ptr_t p, GC_bool up, ptr_t bound)
974 static volatile ptr_t result;
975 /* Safer if static, since otherwise it may not be */
976 /* preserved across the longjmp. Can safely be */
977 /* static since it's only called with the */
978 /* allocation lock held. */
980 GC_ASSERT(up ? (word)bound >= MIN_PAGE_SIZE
981 : (word)bound <= ~(word)MIN_PAGE_SIZE);
982 GC_ASSERT(I_HOLD_LOCK());
983 GC_setup_temporary_fault_handler();
984 if (SETJMP(GC_jmp_buf) == 0) {
985 result = (ptr_t)(((word)(p))
986 & ~(MIN_PAGE_SIZE-1));
989 if ((word)result >= (word)bound - MIN_PAGE_SIZE) {
993 result += MIN_PAGE_SIZE; /* no overflow expected */
995 if ((word)result <= (word)bound + MIN_PAGE_SIZE) {
996 result = bound - MIN_PAGE_SIZE;
997 /* This is to compensate */
998 /* further result increment (we */
999 /* do not modify "up" variable */
1000 /* since it might be clobbered */
1001 /* by setjmp otherwise). */
1004 result -= MIN_PAGE_SIZE; /* no underflow expected */
1006 GC_noop1((word)(*result));
1009 GC_reset_fault_handler();
1011 result += MIN_PAGE_SIZE;
1016 void * GC_find_limit(void * p, int up)
1018 return GC_find_limit_with_bound((ptr_t)p, (GC_bool)up,
1019 up ? (ptr_t)GC_WORD_MAX : 0);
1021 # endif /* NEED_FIND_LIMIT || USE_PROC_FOR_LIBRARIES */
1023 #ifdef HPUX_STACKBOTTOM
1025 #include <sys/param.h>
1026 #include <sys/pstat.h>
1028 GC_INNER ptr_t GC_get_register_stack_base(void)
1030 struct pst_vm_status vm_status;
1033 while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
1034 if (vm_status.pst_type == PS_RSESTACK) {
1035 return (ptr_t) vm_status.pst_vaddr;
1039 /* old way to get the register stackbottom */
1040 return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
1041 & ~(BACKING_STORE_ALIGNMENT - 1));
1044 #endif /* HPUX_STACK_BOTTOM */
1046 #ifdef LINUX_STACKBOTTOM
1048 # include <sys/types.h>
1049 # include <sys/stat.h>
1051 # define STAT_SKIP 27 /* Number of fields preceding startstack */
1052 /* field in /proc/self/stat */
1054 # ifdef USE_LIBC_PRIVATES
1056 # pragma weak __libc_stack_end
1057 extern ptr_t __libc_stack_end;
1059 # pragma weak __libc_ia64_register_backing_store_base
1060 extern ptr_t __libc_ia64_register_backing_store_base;
1066 GC_INNER ptr_t GC_get_register_stack_base(void)
1070 # ifdef USE_LIBC_PRIVATES
1071 if (0 != &__libc_ia64_register_backing_store_base
1072 && 0 != __libc_ia64_register_backing_store_base) {
1073 /* Glibc 2.2.4 has a bug such that for dynamically linked */
1074 /* executables __libc_ia64_register_backing_store_base is */
1075 /* defined but uninitialized during constructor calls. */
1076 /* Hence we check for both nonzero address and value. */
1077 return __libc_ia64_register_backing_store_base;
1080 result = backing_store_base_from_proc();
1082 result = (ptr_t)GC_find_limit(GC_save_regs_in_stack(), FALSE);
1083 /* This works better than a constant displacement heuristic. */
1089 STATIC ptr_t GC_linux_main_stack_base(void)
1091 /* We read the stack bottom value from /proc/self/stat. We do this */
1092 /* using direct I/O system calls in order to avoid calling malloc */
1093 /* in case REDIRECT_MALLOC is defined. */
1095 /* Also defined in pthread_support.c. */
1096 # define STAT_BUF_SIZE 4096
1097 # define STAT_READ read
1099 /* Should probably call the real read, if read is wrapped. */
1100 char stat_buf[STAT_BUF_SIZE];
1103 int i, buf_offset = 0, len;
1105 /* First try the easy way. This should work for glibc 2.2 */
1106 /* This fails in a prelinked ("prelink" command) executable */
1107 /* since the correct value of __libc_stack_end never */
1108 /* becomes visible to us. The second test works around */
1110 # ifdef USE_LIBC_PRIVATES
1111 if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
1113 /* Some versions of glibc set the address 16 bytes too */
1114 /* low while the initialization code is running. */
1115 if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
1116 return __libc_stack_end + 0x10;
1117 } /* Otherwise it's not safe to add 16 bytes and we fall */
1118 /* back to using /proc. */
1119 # elif defined(SPARC)
1120 /* Older versions of glibc for 64-bit SPARC do not set this */
1121 /* variable correctly, it gets set to either zero or one. */
1122 if (__libc_stack_end != (ptr_t) (unsigned long)0x1)
1123 return __libc_stack_end;
1125 return __libc_stack_end;
1129 f = open("/proc/self/stat", O_RDONLY);
1131 ABORT("Couldn't read /proc/self/stat");
1132 len = STAT_READ(f, stat_buf, STAT_BUF_SIZE);
1135 /* Skip the required number of fields. This number is hopefully */
1136 /* constant across all Linux implementations. */
1137 for (i = 0; i < STAT_SKIP; ++i) {
1138 while (buf_offset < len && isspace(stat_buf[buf_offset++])) {
1141 while (buf_offset < len && !isspace(stat_buf[buf_offset++])) {
1146 while (buf_offset < len && isspace(stat_buf[buf_offset])) {
1149 /* Find the end of the number and cut the buffer there. */
1150 for (i = 0; buf_offset + i < len; i++) {
1151 if (!isdigit(stat_buf[buf_offset + i])) break;
1153 if (buf_offset + i >= len) ABORT("Could not parse /proc/self/stat");
1154 stat_buf[buf_offset + i] = '\0';
1156 result = (word)STRTOULL(&stat_buf[buf_offset], NULL, 10);
1157 if (result < 0x100000 || (result & (sizeof(word) - 1)) != 0)
1158 ABORT("Absurd stack bottom value");
1159 return (ptr_t)result;
1161 #endif /* LINUX_STACKBOTTOM */
1163 #ifdef FREEBSD_STACKBOTTOM
1164 /* This uses an undocumented sysctl call, but at least one expert */
1165 /* believes it will stay. */
1167 # include <unistd.h>
1168 # include <sys/types.h>
1169 # include <sys/sysctl.h>
1171 STATIC ptr_t GC_freebsd_main_stack_base(void)
1173 int nm[2] = {CTL_KERN, KERN_USRSTACK};
1175 size_t len = sizeof(ptr_t);
1176 int r = sysctl(nm, 2, &base, &len, NULL, 0);
1177 if (r) ABORT("Error getting main stack base");
1180 #endif /* FREEBSD_STACKBOTTOM */
1182 #if defined(ECOS) || defined(NOSYS)
1183 ptr_t GC_get_main_stack_base(void)
1187 # define GET_MAIN_STACKBASE_SPECIAL
1188 #elif defined(SYMBIAN)
1190 extern int GC_get_main_symbian_stack_base(void);
1193 ptr_t GC_get_main_stack_base(void)
1195 return (ptr_t)GC_get_main_symbian_stack_base();
1197 # define GET_MAIN_STACKBASE_SPECIAL
1198 #elif !defined(AMIGA) && !defined(HAIKU) && !defined(OS2) \
1199 && !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) \
1200 && !defined(GC_OPENBSD_THREADS) \
1201 && (!defined(GC_SOLARIS_THREADS) || defined(_STRICT_STDC))
1203 # if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
1204 && (defined(THREADS) || defined(USE_GET_STACKBASE_FOR_MAIN))
1205 # include <pthread.h>
1206 # ifdef HAVE_PTHREAD_NP_H
1207 # include <pthread_np.h> /* for pthread_attr_get_np() */
1209 # elif defined(DARWIN) && !defined(NO_PTHREAD_GET_STACKADDR_NP)
1210 /* We could use pthread_get_stackaddr_np even in case of a */
1211 /* single-threaded gclib (there is no -lpthread on Darwin). */
1212 # include <pthread.h>
1214 # define STACKBOTTOM (ptr_t)pthread_get_stackaddr_np(pthread_self())
1217 ptr_t GC_get_main_stack_base(void)
1220 # if (defined(HAVE_PTHREAD_ATTR_GET_NP) \
1221 || defined(HAVE_PTHREAD_GETATTR_NP)) \
1222 && (defined(USE_GET_STACKBASE_FOR_MAIN) \
1223 || (defined(THREADS) && !defined(REDIRECT_MALLOC)))
1224 pthread_attr_t attr;
1228 # ifdef HAVE_PTHREAD_ATTR_GET_NP
1229 if (pthread_attr_init(&attr) == 0
1230 && (pthread_attr_get_np(pthread_self(), &attr) == 0
1231 ? TRUE : (pthread_attr_destroy(&attr), FALSE)))
1232 # else /* HAVE_PTHREAD_GETATTR_NP */
1233 if (pthread_getattr_np(pthread_self(), &attr) == 0)
1236 if (pthread_attr_getstack(&attr, &stackaddr, &size) == 0
1237 && stackaddr != NULL) {
1238 (void)pthread_attr_destroy(&attr);
1239 # ifdef STACK_GROWS_DOWN
1240 stackaddr = (char *)stackaddr + size;
1242 return (ptr_t)stackaddr;
1244 (void)pthread_attr_destroy(&attr);
1246 WARN("pthread_getattr_np or pthread_attr_getstack failed"
1247 " for main thread\n", 0);
1250 result = STACKBOTTOM;
1253 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
1254 # ifdef STACK_GROWS_DOWN
1255 result = (ptr_t)(((word)GC_approx_sp() + STACKBOTTOM_ALIGNMENT_M1)
1256 & ~STACKBOTTOM_ALIGNMENT_M1);
1258 result = (ptr_t)((word)GC_approx_sp() & ~STACKBOTTOM_ALIGNMENT_M1);
1260 # elif defined(LINUX_STACKBOTTOM)
1261 result = GC_linux_main_stack_base();
1262 # elif defined(FREEBSD_STACKBOTTOM)
1263 result = GC_freebsd_main_stack_base();
1264 # elif defined(HEURISTIC2)
1266 ptr_t sp = GC_approx_sp();
1267 # ifdef STACK_GROWS_DOWN
1268 result = (ptr_t)GC_find_limit(sp, TRUE);
1269 # if defined(HEURISTIC2_LIMIT) && !defined(CPPCHECK)
1270 if ((word)result > (word)HEURISTIC2_LIMIT
1271 && (word)sp < (word)HEURISTIC2_LIMIT) {
1272 result = HEURISTIC2_LIMIT;
1276 result = (ptr_t)GC_find_limit(sp, FALSE);
1277 # if defined(HEURISTIC2_LIMIT) && !defined(CPPCHECK)
1278 if ((word)result < (word)HEURISTIC2_LIMIT
1279 && (word)sp > (word)HEURISTIC2_LIMIT) {
1280 result = HEURISTIC2_LIMIT;
1285 # elif defined(STACK_NOT_SCANNED) || defined(CPPCHECK)
1288 # error None of HEURISTIC* and *STACKBOTTOM defined!
1290 # if defined(STACK_GROWS_DOWN) && !defined(CPPCHECK)
1292 result = (ptr_t)(signed_word)(-sizeof(ptr_t));
1295 # if !defined(CPPCHECK)
1296 GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)result);
1300 # define GET_MAIN_STACKBASE_SPECIAL
1301 #endif /* !AMIGA, !HAIKU, !OPENBSD, !OS2, !Windows */
1303 #if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
1304 && defined(THREADS) && !defined(HAVE_GET_STACK_BASE)
1305 # include <pthread.h>
1306 # ifdef HAVE_PTHREAD_NP_H
1307 # include <pthread_np.h>
1310 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1312 pthread_attr_t attr;
1318 # ifdef HAVE_PTHREAD_ATTR_GET_NP
1319 if (pthread_attr_init(&attr) != 0)
1320 ABORT("pthread_attr_init failed");
1321 if (pthread_attr_get_np(pthread_self(), &attr) != 0) {
1322 WARN("pthread_attr_get_np failed\n", 0);
1323 (void)pthread_attr_destroy(&attr);
1324 return GC_UNIMPLEMENTED;
1326 # else /* HAVE_PTHREAD_GETATTR_NP */
1327 if (pthread_getattr_np(pthread_self(), &attr) != 0) {
1328 WARN("pthread_getattr_np failed\n", 0);
1329 return GC_UNIMPLEMENTED;
1332 if (pthread_attr_getstack(&attr, &(b -> mem_base), &size) != 0) {
1333 ABORT("pthread_attr_getstack failed");
1335 (void)pthread_attr_destroy(&attr);
1336 # ifdef STACK_GROWS_DOWN
1337 b -> mem_base = (char *)(b -> mem_base) + size;
1340 /* We could try backing_store_base_from_proc, but that's safe */
1341 /* only if no mappings are being asynchronously created. */
1342 /* Subtracting the size from the stack base doesn't work for at */
1343 /* least the main thread. */
1346 IF_CANCEL(int cancel_state;)
1350 DISABLE_CANCEL(cancel_state);
1351 bsp = GC_save_regs_in_stack();
1352 next_stack = GC_greatest_stack_base_below(bsp);
1353 if (0 == next_stack) {
1354 b -> reg_base = GC_find_limit(bsp, FALSE);
1356 /* Avoid walking backwards into preceding memory stack and */
1358 b -> reg_base = GC_find_limit_with_bound(bsp, FALSE, next_stack);
1360 RESTORE_CANCEL(cancel_state);
1366 # define HAVE_GET_STACK_BASE
1367 #endif /* THREADS && (HAVE_PTHREAD_ATTR_GET_NP || HAVE_PTHREAD_GETATTR_NP) */
1369 #if defined(GC_DARWIN_THREADS) && !defined(NO_PTHREAD_GET_STACKADDR_NP)
1370 # include <pthread.h>
1372 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1374 /* pthread_get_stackaddr_np() should return stack bottom (highest */
1375 /* stack address plus 1). */
1376 b->mem_base = pthread_get_stackaddr_np(pthread_self());
1377 GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)b->mem_base);
1380 # define HAVE_GET_STACK_BASE
1381 #endif /* GC_DARWIN_THREADS */
1383 #ifdef GC_OPENBSD_THREADS
1384 # include <sys/signal.h>
1385 # include <pthread.h>
1386 # include <pthread_np.h>
1388 /* Find the stack using pthread_stackseg_np(). */
1389 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
1392 if (pthread_stackseg_np(pthread_self(), &stack))
1393 ABORT("pthread_stackseg_np(self) failed");
1394 sb->mem_base = stack.ss_sp;
1397 # define HAVE_GET_STACK_BASE
1398 #endif /* GC_OPENBSD_THREADS */
1400 #if defined(GC_SOLARIS_THREADS) && !defined(_STRICT_STDC)
1402 # include <thread.h>
1403 # include <signal.h>
1404 # include <pthread.h>
1406 /* These variables are used to cache ss_sp value for the primordial */
1407 /* thread (it's better not to call thr_stksegment() twice for this */
1408 /* thread - see JDK bug #4352906). */
1409 static pthread_t stackbase_main_self = 0;
1410 /* 0 means stackbase_main_ss_sp value is unset. */
1411 static void *stackbase_main_ss_sp = NULL;
1413 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1416 pthread_t self = pthread_self();
1418 if (self == stackbase_main_self)
1420 /* If the client calls GC_get_stack_base() from the main thread */
1421 /* then just return the cached value. */
1422 b -> mem_base = stackbase_main_ss_sp;
1423 GC_ASSERT(b -> mem_base != NULL);
1427 if (thr_stksegment(&s)) {
1428 /* According to the manual, the only failure error code returned */
1429 /* is EAGAIN meaning "the information is not available due to the */
1430 /* thread is not yet completely initialized or it is an internal */
1431 /* thread" - this shouldn't happen here. */
1432 ABORT("thr_stksegment failed");
1434 /* s.ss_sp holds the pointer to the stack bottom. */
1435 GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)s.ss_sp);
1437 if (!stackbase_main_self && thr_main() != 0)
1439 /* Cache the stack bottom pointer for the primordial thread */
1440 /* (this is done during GC_init, so there is no race). */
1441 stackbase_main_ss_sp = s.ss_sp;
1442 stackbase_main_self = self;
1445 b -> mem_base = s.ss_sp;
1448 # define HAVE_GET_STACK_BASE
1449 #endif /* GC_SOLARIS_THREADS */
1451 #ifdef GC_RTEMS_PTHREADS
1452 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
1454 sb->mem_base = rtems_get_stack_bottom();
1457 # define HAVE_GET_STACK_BASE
1458 #endif /* GC_RTEMS_PTHREADS */
1460 #ifndef HAVE_GET_STACK_BASE
1461 # ifdef NEED_FIND_LIMIT
1462 /* Retrieve the stack bottom. */
1463 /* Using the GC_find_limit version is risky. */
1464 /* On IA64, for example, there is no guard page between the */
1465 /* stack of one thread and the register backing store of the */
1466 /* next. Thus this is likely to identify way too large a */
1467 /* "stack" and thus at least result in disastrous performance. */
1468 /* TODO: Implement better strategies here. */
1469 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1471 IF_CANCEL(int cancel_state;)
1475 DISABLE_CANCEL(cancel_state); /* May be unnecessary? */
1476 # ifdef STACK_GROWS_DOWN
1477 b -> mem_base = GC_find_limit(GC_approx_sp(), TRUE);
1479 b -> reg_base = GC_find_limit(GC_save_regs_in_stack(), FALSE);
1482 b -> mem_base = GC_find_limit(GC_approx_sp(), FALSE);
1484 RESTORE_CANCEL(cancel_state);
1489 GC_API int GC_CALL GC_get_stack_base(
1490 struct GC_stack_base *b GC_ATTR_UNUSED)
1492 # if defined(GET_MAIN_STACKBASE_SPECIAL) && !defined(THREADS) \
1494 b->mem_base = GC_get_main_stack_base();
1497 return GC_UNIMPLEMENTED;
1500 # endif /* !NEED_FIND_LIMIT */
1501 #endif /* !HAVE_GET_STACK_BASE */
1503 #ifndef GET_MAIN_STACKBASE_SPECIAL
1504 /* This is always called from the main thread. Default implementation. */
1505 ptr_t GC_get_main_stack_base(void)
1507 struct GC_stack_base sb;
1509 if (GC_get_stack_base(&sb) != GC_SUCCESS)
1510 ABORT("GC_get_stack_base failed");
1511 GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)sb.mem_base);
1512 return (ptr_t)sb.mem_base;
1514 #endif /* !GET_MAIN_STACKBASE_SPECIAL */
1516 /* Register static data segment(s) as roots. If more data segments are */
1517 /* added later then they need to be registered at that point (as we do */
1518 /* with SunOS dynamic loading), or GC_mark_roots needs to check for */
1519 /* them (as we do with PCR). Called with allocator lock held. */
1522 void GC_register_data_segments(void)
1526 HMODULE module_handle;
1527 # define PBUFSIZ 512
1528 UCHAR path[PBUFSIZ];
1530 struct exe_hdr hdrdos; /* MSDOS header. */
1531 struct e32_exe hdr386; /* Real header for my executable */
1532 struct o32_obj seg; /* Current segment */
1535 # if defined(CPPCHECK)
1536 hdrdos.padding[0] = 0; /* to prevent "field unused" warnings */
1537 hdr386.exe_format_level = 0;
1539 hdr386.padding1[0] = 0;
1540 hdr386.padding2[0] = 0;
1545 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
1546 ABORT("DosGetInfoBlocks failed");
1548 module_handle = ppib -> pib_hmte;
1549 if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
1550 ABORT("DosQueryModuleName failed");
1552 myexefile = fopen(path, "rb");
1553 if (myexefile == 0) {
1554 ABORT_ARG1("Failed to open executable", ": %s", path);
1556 if (fread((char *)(&hdrdos), 1, sizeof(hdrdos), myexefile)
1558 ABORT_ARG1("Could not read MSDOS header", " from: %s", path);
1560 if (E_MAGIC(hdrdos) != EMAGIC) {
1561 ABORT_ARG1("Bad DOS magic number", " in file: %s", path);
1563 if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
1564 ABORT_ARG1("Bad DOS magic number", " in file: %s", path);
1566 if (fread((char *)(&hdr386), 1, sizeof(hdr386), myexefile)
1568 ABORT_ARG1("Could not read OS/2 header", " from: %s", path);
1570 if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
1571 ABORT_ARG1("Bad OS/2 magic number", " in file: %s", path);
1573 if (E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
1574 ABORT_ARG1("Bad byte order in executable", " file: %s", path);
1576 if (E32_CPU(hdr386) == E32CPU286) {
1577 ABORT_ARG1("GC cannot handle 80286 executables", ": %s", path);
1579 if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
1581 ABORT_ARG1("Seek to object table failed", " in file: %s", path);
1583 for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
1585 if (fread((char *)(&seg), 1, sizeof(seg), myexefile) < sizeof(seg)) {
1586 ABORT_ARG1("Could not read obj table entry", " from file: %s", path);
1588 flags = O32_FLAGS(seg);
1589 if (!(flags & OBJWRITE)) continue;
1590 if (!(flags & OBJREAD)) continue;
1591 if (flags & OBJINVALID) {
1592 GC_err_printf("Object with invalid pages?\n");
1595 GC_add_roots_inner((ptr_t)O32_BASE(seg),
1596 (ptr_t)(O32_BASE(seg)+O32_SIZE(seg)), FALSE);
1598 (void)fclose(myexefile);
1603 # if defined(GWW_VDB)
1604 # ifndef MEM_WRITE_WATCH
1605 # define MEM_WRITE_WATCH 0x200000
1607 # ifndef WRITE_WATCH_FLAG_RESET
1608 # define WRITE_WATCH_FLAG_RESET 1
1611 /* Since we can't easily check whether ULONG_PTR and SIZE_T are */
1612 /* defined in Win32 basetsd.h, we define own ULONG_PTR. */
1613 # define GC_ULONG_PTR word
1615 typedef UINT (WINAPI * GetWriteWatch_type)(
1616 DWORD, PVOID, GC_ULONG_PTR /* SIZE_T */,
1617 PVOID *, GC_ULONG_PTR *, PULONG);
1618 static GetWriteWatch_type GetWriteWatch_func;
1619 static DWORD GetWriteWatch_alloc_flag;
1621 # define GC_GWW_AVAILABLE() (GetWriteWatch_func != NULL)
1623 static void detect_GetWriteWatch(void)
1625 static GC_bool done;
1630 # if defined(MPROTECT_VDB)
1632 char * str = GETENV("GC_USE_GETWRITEWATCH");
1633 # if defined(GC_PREFER_MPROTECT_VDB)
1634 if (str == NULL || (*str == '0' && *(str + 1) == '\0')) {
1635 /* GC_USE_GETWRITEWATCH is unset or set to "0". */
1636 done = TRUE; /* falling back to MPROTECT_VDB strategy. */
1637 /* This should work as if GWW_VDB is undefined. */
1641 if (str != NULL && *str == '0' && *(str + 1) == '\0') {
1642 /* GC_USE_GETWRITEWATCH is set "0". */
1643 done = TRUE; /* falling back to MPROTECT_VDB strategy. */
1650 # ifdef MSWINRT_FLAVOR
1652 MEMORY_BASIC_INFORMATION memInfo;
1653 SIZE_T result = VirtualQuery(GetProcAddress,
1654 &memInfo, sizeof(memInfo));
1655 if (result != sizeof(memInfo))
1656 ABORT("Weird VirtualQuery result");
1657 hK32 = (HMODULE)memInfo.AllocationBase;
1660 hK32 = GetModuleHandle(TEXT("kernel32.dll"));
1662 if (hK32 != (HMODULE)0 &&
1663 (GetWriteWatch_func = (GetWriteWatch_type)GetProcAddress(hK32,
1664 "GetWriteWatch")) != NULL) {
1665 /* Also check whether VirtualAlloc accepts MEM_WRITE_WATCH, */
1666 /* as some versions of kernel32.dll have one but not the */
1667 /* other, making the feature completely broken. */
1668 void * page = VirtualAlloc(NULL, GC_page_size,
1669 MEM_WRITE_WATCH | MEM_RESERVE,
1673 GC_ULONG_PTR count = 16;
1675 /* Check that it actually works. In spite of some */
1676 /* documentation it actually seems to exist on Win2K. */
1677 /* This test may be unnecessary, but ... */
1678 if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
1683 /* GetWriteWatch always fails. */
1684 GetWriteWatch_func = NULL;
1686 GetWriteWatch_alloc_flag = MEM_WRITE_WATCH;
1688 VirtualFree(page, 0 /* dwSize */, MEM_RELEASE);
1690 /* GetWriteWatch will be useless. */
1691 GetWriteWatch_func = NULL;
1694 # ifndef SMALL_CONFIG
1695 if (GetWriteWatch_func == NULL) {
1696 GC_COND_LOG_PRINTF("Did not find a usable GetWriteWatch()\n");
1698 GC_COND_LOG_PRINTF("Using GetWriteWatch()\n");
1705 # define GetWriteWatch_alloc_flag 0
1706 # endif /* !GWW_VDB */
1708 # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
1711 /* Unfortunately, we have to handle win32s very differently from NT, */
1712 /* Since VirtualQuery has very different semantics. In particular, */
1713 /* under win32s a VirtualQuery call on an unmapped page returns an */
1714 /* invalid result. Under NT, GC_register_data_segments is a no-op */
1715 /* and all real work is done by GC_register_dynamic_libraries. Under */
1716 /* win32s, we cannot find the data segments associated with dll's. */
1717 /* We register the main data segment here. */
1718 GC_INNER GC_bool GC_no_win32_dlls = FALSE;
1719 /* This used to be set for gcc, to avoid dealing with */
1720 /* the structured exception handling issues. But we now have */
1721 /* assembly code to do that right. */
1723 GC_INNER GC_bool GC_wnt = FALSE;
1724 /* This is a Windows NT derivative, i.e. NT, Win2K, XP or later. */
1726 GC_INNER void GC_init_win32(void)
1728 # if defined(_WIN64) || (defined(_MSC_VER) && _MSC_VER >= 1800)
1729 /* MS Visual Studio 2013 deprecates GetVersion, but on the other */
1730 /* hand it cannot be used to target pre-Win2K. */
1733 /* Set GC_wnt. If we're running under win32s, assume that no */
1734 /* DLLs will be loaded. I doubt anyone still runs win32s, but... */
1735 DWORD v = GetVersion();
1737 GC_wnt = !(v & 0x80000000);
1738 GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
1741 if (GC_no_win32_dlls) {
1742 /* Turn off unmapping for safety (since may not work well with */
1744 GC_unmap_threshold = 0;
1749 /* Return the smallest address a such that VirtualQuery */
1750 /* returns correct results for all addresses between a and start. */
1751 /* Assumes VirtualQuery returns correct information for start. */
1752 STATIC ptr_t GC_least_described_address(ptr_t start)
1754 MEMORY_BASIC_INFORMATION buf;
1758 limit = GC_sysinfo.lpMinimumApplicationAddress;
1759 p = (ptr_t)((word)start & ~(GC_page_size - 1));
1762 LPVOID q = (LPVOID)(p - GC_page_size);
1764 if ((word)q > (word)p /* underflow */ || (word)q < (word)limit) break;
1765 result = VirtualQuery(q, &buf, sizeof(buf));
1766 if (result != sizeof(buf) || buf.AllocationBase == 0) break;
1767 p = (ptr_t)(buf.AllocationBase);
1771 # endif /* MSWIN32 */
1773 # ifndef REDIRECT_MALLOC
1774 /* We maintain a linked list of AllocationBase values that we know */
1775 /* correspond to malloc heap sections. Currently this is only called */
1776 /* during a GC. But there is some hope that for long running */
1777 /* programs we will eventually see most heap sections. */
1779 /* In the long run, it would be more reliable to occasionally walk */
1780 /* the malloc heap with HeapWalk on the default heap. But that */
1781 /* apparently works only for NT-based Windows. */
1783 STATIC size_t GC_max_root_size = 100000; /* Appr. largest root size. */
1785 # ifdef USE_WINALLOC
1786 /* In the long run, a better data structure would also be nice ... */
1787 STATIC struct GC_malloc_heap_list {
1788 void * allocation_base;
1789 struct GC_malloc_heap_list *next;
1790 } *GC_malloc_heap_l = 0;
1792 /* Is p the base of one of the malloc heap sections we already know */
1794 STATIC GC_bool GC_is_malloc_heap_base(void *p)
1796 struct GC_malloc_heap_list *q = GC_malloc_heap_l;
1799 if (q -> allocation_base == p) return TRUE;
1805 STATIC void *GC_get_allocation_base(void *p)
1807 MEMORY_BASIC_INFORMATION buf;
1808 size_t result = VirtualQuery(p, &buf, sizeof(buf));
1809 if (result != sizeof(buf)) {
1810 ABORT("Weird VirtualQuery result");
1812 return buf.AllocationBase;
1815 GC_INNER void GC_add_current_malloc_heap(void)
1817 struct GC_malloc_heap_list *new_l = (struct GC_malloc_heap_list *)
1818 malloc(sizeof(struct GC_malloc_heap_list));
1821 if (NULL == new_l) return;
1822 candidate = GC_get_allocation_base(new_l);
1823 if (GC_is_malloc_heap_base(candidate)) {
1824 /* Try a little harder to find malloc heap. */
1825 size_t req_size = 10000;
1827 void *p = malloc(req_size);
1832 candidate = GC_get_allocation_base(p);
1835 } while (GC_is_malloc_heap_base(candidate)
1836 && req_size < GC_max_root_size/10 && req_size < 500000);
1837 if (GC_is_malloc_heap_base(candidate)) {
1842 GC_COND_LOG_PRINTF("Found new system malloc AllocationBase at %p\n",
1844 new_l -> allocation_base = candidate;
1845 new_l -> next = GC_malloc_heap_l;
1846 GC_malloc_heap_l = new_l;
1848 # endif /* USE_WINALLOC */
1850 # endif /* !REDIRECT_MALLOC */
1852 STATIC word GC_n_heap_bases = 0; /* See GC_heap_bases. */
1854 /* Is p the start of either the malloc heap, or of one of our */
1855 /* heap sections? */
1856 GC_INNER GC_bool GC_is_heap_base(void *p)
1859 # ifndef REDIRECT_MALLOC
1860 if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
1861 # ifdef USE_WINALLOC
1862 if (GC_is_malloc_heap_base(p)) return TRUE;
1865 for (i = 0; i < (int)GC_n_heap_bases; i++) {
1866 if (GC_heap_bases[i] == p) return TRUE;
1872 STATIC void GC_register_root_section(ptr_t static_root)
1874 MEMORY_BASIC_INFORMATION buf;
1879 if (!GC_no_win32_dlls) return;
1880 p = base = limit = GC_least_described_address(static_root);
1881 while ((word)p < (word)GC_sysinfo.lpMaximumApplicationAddress) {
1882 size_t result = VirtualQuery(p, &buf, sizeof(buf));
1886 if (result != sizeof(buf) || buf.AllocationBase == 0
1887 || GC_is_heap_base(buf.AllocationBase)) break;
1888 new_limit = (char *)p + buf.RegionSize;
1889 protect = buf.Protect;
1890 if (buf.State == MEM_COMMIT
1891 && is_writable(protect)) {
1892 if ((char *)p == limit) {
1895 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1900 if ((word)p > (word)new_limit /* overflow */) break;
1901 p = (LPVOID)new_limit;
1903 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1905 #endif /* MSWIN32 */
1907 void GC_register_data_segments(void)
1910 GC_register_root_section((ptr_t)&GC_pages_executable);
1911 /* any other GC global variable would fit too. */
1915 # else /* !OS2 && !Windows */
1917 # if (defined(SVR4) || defined(AIX) || defined(DGUX) \
1918 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1919 ptr_t GC_SysVGetDataStart(size_t max_page_size, ptr_t etext_addr)
1921 word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1922 & ~(word)(sizeof(word) - 1);
1923 /* etext rounded to word boundary */
1924 word next_page = ((text_end + (word)max_page_size - 1)
1925 & ~((word)max_page_size - 1));
1926 word page_offset = (text_end & ((word)max_page_size - 1));
1927 volatile ptr_t result = (char *)(next_page + page_offset);
1928 /* Note that this isn't equivalent to just adding */
1929 /* max_page_size to &etext if &etext is at a page boundary */
1931 GC_setup_temporary_fault_handler();
1932 if (SETJMP(GC_jmp_buf) == 0) {
1933 /* Try writing to the address. */
1934 # ifdef AO_HAVE_fetch_and_add
1935 volatile AO_t zero = 0;
1936 (void)AO_fetch_and_add((volatile AO_t *)result, zero);
1938 /* Fallback to non-atomic fetch-and-store. */
1940 # if defined(CPPCHECK)
1945 GC_reset_fault_handler();
1947 GC_reset_fault_handler();
1948 /* We got here via a longjmp. The address is not readable. */
1949 /* This is known to happen under Solaris 2.4 + gcc, which place */
1950 /* string constants in the text segment, but after etext. */
1951 /* Use plan B. Note that we now know there is a gap between */
1952 /* text and data segments, so plan A brought us something. */
1953 result = (char *)GC_find_limit(DATAEND, FALSE);
1955 return (/* no volatile */ ptr_t)result;
1959 #ifdef DATASTART_USES_BSDGETDATASTART
1960 /* Its unclear whether this should be identical to the above, or */
1961 /* whether it should apply to non-X86 architectures. */
1962 /* For now we don't assume that there is always an empty page after */
1963 /* etext. But in some cases there actually seems to be slightly more. */
1964 /* This also deals with holes between read-only data and writable data. */
1965 GC_INNER ptr_t GC_FreeBSDGetDataStart(size_t max_page_size,
1968 word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1969 & ~(word)(sizeof(word) - 1);
1970 /* etext rounded to word boundary */
1971 volatile word next_page = (text_end + (word)max_page_size - 1)
1972 & ~((word)max_page_size - 1);
1973 volatile ptr_t result = (ptr_t)text_end;
1974 GC_setup_temporary_fault_handler();
1975 if (SETJMP(GC_jmp_buf) == 0) {
1976 /* Try reading at the address. */
1977 /* This should happen before there is another thread. */
1978 for (; next_page < (word)DATAEND; next_page += (word)max_page_size)
1979 *(volatile char *)next_page;
1980 GC_reset_fault_handler();
1982 GC_reset_fault_handler();
1983 /* As above, we go to plan B */
1984 result = (ptr_t)GC_find_limit(DATAEND, FALSE);
1988 #endif /* DATASTART_USES_BSDGETDATASTART */
1992 # define GC_AMIGA_DS
1993 # include "extra/AmigaOS.c"
1996 #elif defined(OPENBSD)
1998 /* Depending on arch alignment, there can be multiple holes */
1999 /* between DATASTART and DATAEND. Scan in DATASTART .. DATAEND */
2000 /* and register each region. */
2001 void GC_register_data_segments(void)
2003 ptr_t region_start = DATASTART;
2005 if ((word)region_start - 1U >= (word)DATAEND)
2006 ABORT_ARG2("Wrong DATASTART/END pair",
2007 ": %p .. %p", (void *)region_start, (void *)DATAEND);
2009 ptr_t region_end = GC_find_limit_openbsd(region_start, DATAEND);
2011 GC_add_roots_inner(region_start, region_end, FALSE);
2012 if ((word)region_end >= (word)DATAEND)
2014 region_start = GC_skip_hole_openbsd(region_end, DATAEND);
2018 # else /* !OS2 && !Windows && !AMIGA && !OPENBSD */
2020 # if !defined(PCR) && !defined(MACOS) && defined(REDIRECT_MALLOC) \
2021 && defined(GC_SOLARIS_THREADS)
2023 extern caddr_t sbrk(int);
2027 void GC_register_data_segments(void)
2029 # if !defined(PCR) && !defined(MACOS)
2030 # if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
2031 /* As of Solaris 2.3, the Solaris threads implementation */
2032 /* allocates the data structure for the initial thread with */
2033 /* sbrk at process startup. It needs to be scanned, so that */
2034 /* we don't lose some malloc allocated data structures */
2035 /* hanging from it. We're on thin ice here ... */
2036 GC_ASSERT(DATASTART);
2038 ptr_t p = (ptr_t)sbrk(0);
2039 if ((word)DATASTART < (word)p)
2040 GC_add_roots_inner(DATASTART, p, FALSE);
2043 if ((word)DATASTART - 1U >= (word)DATAEND) {
2044 /* Subtract one to check also for NULL */
2045 /* without a compiler warning. */
2046 ABORT_ARG2("Wrong DATASTART/END pair",
2047 ": %p .. %p", (void *)DATASTART, (void *)DATAEND);
2049 GC_add_roots_inner(DATASTART, DATAEND, FALSE);
2050 # ifdef GC_HAVE_DATAREGION2
2051 if ((word)DATASTART2 - 1U >= (word)DATAEND2)
2052 ABORT_ARG2("Wrong DATASTART/END2 pair",
2053 ": %p .. %p", (void *)DATASTART2, (void *)DATAEND2);
2054 GC_add_roots_inner(DATASTART2, DATAEND2, FALSE);
2060 # if defined(THINK_C)
2061 extern void* GC_MacGetDataStart(void);
2062 /* globals begin above stack and end at a5. */
2063 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
2064 (ptr_t)LMGetCurrentA5(), FALSE);
2066 # if defined(__MWERKS__)
2068 extern void* GC_MacGetDataStart(void);
2069 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
2070 # if __option(far_data)
2071 extern void* GC_MacGetDataEnd(void);
2073 /* globals begin above stack and end at a5. */
2074 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
2075 (ptr_t)LMGetCurrentA5(), FALSE);
2076 /* MATTHEW: Handle Far Globals */
2077 # if __option(far_data)
2078 /* Far globals follow he QD globals: */
2079 GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
2080 (ptr_t)GC_MacGetDataEnd(), FALSE);
2083 extern char __data_start__[], __data_end__[];
2084 GC_add_roots_inner((ptr_t)&__data_start__,
2085 (ptr_t)&__data_end__, FALSE);
2086 # endif /* __POWERPC__ */
2087 # endif /* __MWERKS__ */
2088 # endif /* !THINK_C */
2092 /* Dynamic libraries are added at every collection, since they may */
2096 # endif /* !AMIGA */
2097 # endif /* !MSWIN32 && !MSWINCE */
2101 * Auxiliary routines for obtaining memory from OS.
2104 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
2105 && !defined(USE_WINALLOC) && !defined(MACOS) && !defined(DOS4GW) \
2106 && !defined(NINTENDO_SWITCH) && !defined(NONSTOP) \
2107 && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PS3) \
2108 && !defined(SN_TARGET_PSP2) && !defined(RTEMS) && !defined(__CC_ARM)
2110 # define SBRK_ARG_T ptrdiff_t
2112 #if defined(MMAP_SUPPORTED)
2114 #ifdef USE_MMAP_FIXED
2115 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
2116 /* Seems to yield better performance on Solaris 2, but can */
2117 /* be unreliable if something is already mapped at the address. */
2119 # define GC_MMAP_FLAGS MAP_PRIVATE
2122 #ifdef USE_MMAP_ANON
2124 # if defined(MAP_ANONYMOUS) && !defined(CPPCHECK)
2125 # define OPT_MAP_ANON MAP_ANONYMOUS
2127 # define OPT_MAP_ANON MAP_ANON
2130 static int zero_fd = -1;
2131 # define OPT_MAP_ANON 0
2134 # ifndef MSWIN_XBOX1
2135 # if defined(SYMBIAN) && !defined(USE_MMAP_ANON)
2137 extern char *GC_get_private_path_and_zero_file(void);
2141 STATIC ptr_t GC_unix_mmap_get_mem(size_t bytes)
2144 static ptr_t last_addr = HEAP_START;
2146 # ifndef USE_MMAP_ANON
2147 static GC_bool initialized = FALSE;
2149 if (!EXPECT(initialized, TRUE)) {
2151 char *path = GC_get_private_path_and_zero_file();
2153 zero_fd = open(path, O_RDWR | O_CREAT, 0644);
2157 zero_fd = open("/dev/zero", O_RDONLY);
2160 ABORT("Could not open /dev/zero");
2161 if (fcntl(zero_fd, F_SETFD, FD_CLOEXEC) == -1)
2162 WARN("Could not set FD_CLOEXEC for /dev/zero\n", 0);
2168 if (bytes & (GC_page_size - 1)) ABORT("Bad GET_MEM arg");
2169 result = mmap(last_addr, bytes, (PROT_READ | PROT_WRITE)
2170 | (GC_pages_executable ? PROT_EXEC : 0),
2171 GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
2172 # undef IGNORE_PAGES_EXECUTABLE
2174 if (EXPECT(MAP_FAILED == result, FALSE)) {
2175 if (HEAP_START == last_addr && GC_pages_executable && EACCES == errno)
2176 ABORT("Cannot allocate executable pages");
2179 last_addr = (ptr_t)(((word)result + bytes + GC_page_size - 1)
2180 & ~(GC_page_size - 1));
2181 # if !defined(LINUX)
2182 if (last_addr == 0) {
2183 /* Oops. We got the end of the address space. This isn't */
2184 /* usable by arbitrary C code, since one-past-end pointers */
2185 /* don't work, so we discard it and try again. */
2186 munmap(result, ~GC_page_size - (size_t)result + 1);
2187 /* Leave last page mapped, so we can't repeat. */
2188 return GC_unix_mmap_get_mem(bytes);
2191 GC_ASSERT(last_addr != 0);
2193 if (((word)result % HBLKSIZE) != 0)
2195 "GC_unix_get_mem: Memory returned by mmap is not aligned to HBLKSIZE.");
2196 return((ptr_t)result);
2198 # endif /* !MSWIN_XBOX1 */
2200 #endif /* MMAP_SUPPORTED */
2202 #if defined(USE_MMAP)
2203 ptr_t GC_unix_get_mem(size_t bytes)
2205 return GC_unix_mmap_get_mem(bytes);
2207 #else /* !USE_MMAP */
2209 STATIC ptr_t GC_unix_sbrk_get_mem(size_t bytes)
2213 /* Bare sbrk isn't thread safe. Play by malloc rules. */
2214 /* The equivalent may be needed on other systems as well. */
2218 ptr_t cur_brk = (ptr_t)sbrk(0);
2219 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
2221 if ((SBRK_ARG_T)bytes < 0) {
2222 result = 0; /* too big */
2226 if((ptr_t)sbrk((SBRK_ARG_T)GC_page_size - lsbs) == (ptr_t)(-1)) {
2231 # ifdef ADD_HEAP_GUARD_PAGES
2232 /* This is useful for catching severe memory overwrite problems that */
2233 /* span heap sections. It shouldn't otherwise be turned on. */
2235 ptr_t guard = (ptr_t)sbrk((SBRK_ARG_T)GC_page_size);
2236 if (mprotect(guard, GC_page_size, PROT_NONE) != 0)
2237 ABORT("ADD_HEAP_GUARD_PAGES: mprotect failed");
2239 # endif /* ADD_HEAP_GUARD_PAGES */
2240 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
2241 if (result == (ptr_t)(-1)) result = 0;
2250 ptr_t GC_unix_get_mem(size_t bytes)
2252 # if defined(MMAP_SUPPORTED)
2253 /* By default, we try both sbrk and mmap, in that order. */
2254 static GC_bool sbrk_failed = FALSE;
2257 if (GC_pages_executable) {
2258 /* If the allocated memory should have the execute permission */
2259 /* then sbrk() cannot be used. */
2260 return GC_unix_mmap_get_mem(bytes);
2262 if (!sbrk_failed) result = GC_unix_sbrk_get_mem(bytes);
2265 result = GC_unix_mmap_get_mem(bytes);
2268 /* Try sbrk again, in case sbrk memory became available. */
2269 result = GC_unix_sbrk_get_mem(bytes);
2272 # else /* !MMAP_SUPPORTED */
2273 return GC_unix_sbrk_get_mem(bytes);
2277 #endif /* !USE_MMAP */
2283 void * os2_alloc(size_t bytes)
2287 if (DosAllocMem(&result, bytes, (PAG_READ | PAG_WRITE | PAG_COMMIT)
2288 | (GC_pages_executable ? PAG_EXECUTE : 0))
2292 /* FIXME: What's the purpose of this recursion? (Probably, if */
2293 /* DosAllocMem returns memory at 0 address then just retry once.) */
2294 if (result == 0) return(os2_alloc(bytes));
2301 ptr_t GC_durango_get_mem(size_t bytes)
2303 if (0 == bytes) return NULL;
2304 return (ptr_t)VirtualAlloc(NULL, bytes, MEM_COMMIT | MEM_TOP_DOWN,
2307 #elif defined(MSWINCE)
2308 ptr_t GC_wince_get_mem(size_t bytes)
2310 ptr_t result = 0; /* initialized to prevent warning. */
2313 bytes = ROUNDUP_PAGESIZE(bytes);
2315 /* Try to find reserved, uncommitted pages */
2316 for (i = 0; i < GC_n_heap_bases; i++) {
2317 if (((word)(-(signed_word)GC_heap_lengths[i])
2318 & (GC_sysinfo.dwAllocationGranularity-1))
2320 result = GC_heap_bases[i] + GC_heap_lengths[i];
2325 if (i == GC_n_heap_bases) {
2326 /* Reserve more pages */
2328 SIZET_SAT_ADD(bytes, (size_t)GC_sysinfo.dwAllocationGranularity-1)
2329 & ~((size_t)GC_sysinfo.dwAllocationGranularity-1);
2330 /* If we ever support MPROTECT_VDB here, we will probably need to */
2331 /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
2332 /* never spans regions. It seems to be OK for a VirtualFree */
2333 /* argument to span regions, so we should be OK for now. */
2334 result = (ptr_t) VirtualAlloc(NULL, res_bytes,
2335 MEM_RESERVE | MEM_TOP_DOWN,
2336 GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2338 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2339 /* If I read the documentation correctly, this can */
2340 /* only happen if HBLKSIZE > 64 KB or not a power of 2. */
2341 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
2342 if (result == NULL) return NULL;
2343 GC_heap_bases[GC_n_heap_bases] = result;
2344 GC_heap_lengths[GC_n_heap_bases] = 0;
2349 result = (ptr_t) VirtualAlloc(result, bytes, MEM_COMMIT,
2350 GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2352 # undef IGNORE_PAGES_EXECUTABLE
2354 if (result != NULL) {
2355 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2356 GC_heap_lengths[i] += bytes;
2362 #elif (defined(USE_WINALLOC) && !defined(MSWIN_XBOX1)) || defined(CYGWIN32)
2364 # ifdef USE_GLOBAL_ALLOC
2365 # define GLOBAL_ALLOC_TEST 1
2367 # define GLOBAL_ALLOC_TEST GC_no_win32_dlls
2370 # if (defined(GC_USE_MEM_TOP_DOWN) && defined(USE_WINALLOC)) \
2371 || defined(CPPCHECK)
2372 DWORD GC_mem_top_down = MEM_TOP_DOWN;
2373 /* Use GC_USE_MEM_TOP_DOWN for better 64-bit */
2374 /* testing. Otherwise all addresses tend to */
2375 /* end up in first 4 GB, hiding bugs. */
2377 # define GC_mem_top_down 0
2378 # endif /* !GC_USE_MEM_TOP_DOWN */
2380 ptr_t GC_win32_get_mem(size_t bytes)
2384 # ifndef USE_WINALLOC
2385 result = GC_unix_get_mem(bytes);
2387 # if defined(MSWIN32) && !defined(MSWINRT_FLAVOR)
2388 if (GLOBAL_ALLOC_TEST) {
2389 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
2390 /* There are also unconfirmed rumors of other */
2391 /* problems, so we dodge the issue. */
2392 result = (ptr_t)GlobalAlloc(0, SIZET_SAT_ADD(bytes, HBLKSIZE));
2393 /* Align it at HBLKSIZE boundary. */
2394 result = (ptr_t)(((word)result + HBLKSIZE - 1)
2395 & ~(word)(HBLKSIZE - 1));
2399 /* VirtualProtect only works on regions returned by a */
2400 /* single VirtualAlloc call. Thus we allocate one */
2401 /* extra page, which will prevent merging of blocks */
2402 /* in separate regions, and eliminate any temptation */
2403 /* to call VirtualProtect on a range spanning regions. */
2404 /* This wastes a small amount of memory, and risks */
2405 /* increased fragmentation. But better alternatives */
2406 /* would require effort. */
2407 # ifdef MPROTECT_VDB
2408 /* We can't check for GC_incremental here (because */
2409 /* GC_enable_incremental() might be called some time */
2410 /* later after the GC initialization). */
2412 # define VIRTUAL_ALLOC_PAD (GC_GWW_AVAILABLE() ? 0 : 1)
2414 # define VIRTUAL_ALLOC_PAD 1
2417 # define VIRTUAL_ALLOC_PAD 0
2419 /* Pass the MEM_WRITE_WATCH only if GetWriteWatch-based */
2420 /* VDBs are enabled and the GetWriteWatch function is */
2421 /* available. Otherwise we waste resources or possibly */
2422 /* cause VirtualAlloc to fail (observed in Windows 2000 */
2424 result = (ptr_t) VirtualAlloc(NULL,
2425 SIZET_SAT_ADD(bytes, VIRTUAL_ALLOC_PAD),
2426 GetWriteWatch_alloc_flag
2427 | (MEM_COMMIT | MEM_RESERVE)
2429 GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2431 # undef IGNORE_PAGES_EXECUTABLE
2433 # endif /* USE_WINALLOC */
2434 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2435 /* If I read the documentation correctly, this can */
2436 /* only happen if HBLKSIZE > 64 KB or not a power of 2. */
2437 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
2438 if (0 != result) GC_heap_bases[GC_n_heap_bases++] = result;
2442 GC_API void GC_CALL GC_win32_free_heap(void)
2444 # ifndef MSWINRT_FLAVOR
2446 if (GLOBAL_ALLOC_TEST)
2449 while (GC_n_heap_bases-- > 0) {
2451 /* FIXME: Is it OK to use non-GC free() here? */
2453 GlobalFree(GC_heap_bases[GC_n_heap_bases]);
2455 GC_heap_bases[GC_n_heap_bases] = 0;
2461 /* Avoiding VirtualAlloc leak. */
2462 while (GC_n_heap_bases > 0) {
2463 VirtualFree(GC_heap_bases[--GC_n_heap_bases], 0, MEM_RELEASE);
2464 GC_heap_bases[GC_n_heap_bases] = 0;
2468 #endif /* USE_WINALLOC || CYGWIN32 */
2471 # define GC_AMIGA_AM
2472 # include "extra/AmigaOS.c"
2477 # include <stdlib.h>
2478 ptr_t GC_haiku_get_mem(size_t bytes)
2482 GC_ASSERT(GC_page_size != 0);
2483 if (posix_memalign(&mem, GC_page_size, bytes) == 0)
2491 /* For now, this only works on Win32/WinCE and some Unix-like */
2492 /* systems. If you have something else, don't define */
2495 #if !defined(NN_PLATFORM_CTR) && !defined(MSWIN32) && !defined(MSWINCE) \
2496 && !defined(MSWIN_XBOX1)
2497 # include <unistd.h>
2498 # ifdef SN_TARGET_PS3
2499 # include <sys/memory.h>
2501 # include <sys/mman.h>
2503 # include <sys/stat.h>
2504 # include <sys/types.h>
2507 /* Compute a page aligned starting address for the unmap */
2508 /* operation on a block of size bytes starting at start. */
2509 /* Return 0 if the block is too small to make this feasible. */
2510 STATIC ptr_t GC_unmap_start(ptr_t start, size_t bytes)
2512 ptr_t result = (ptr_t)(((word)start + GC_page_size - 1)
2513 & ~(GC_page_size - 1));
2515 if ((word)(result + GC_page_size) > (word)(start + bytes)) return 0;
2519 /* Compute end address for an unmap operation on the indicated */
2521 STATIC ptr_t GC_unmap_end(ptr_t start, size_t bytes)
2523 return (ptr_t)((word)(start + bytes) & ~(GC_page_size - 1));
2526 /* Under Win32/WinCE we commit (map) and decommit (unmap) */
2527 /* memory using VirtualAlloc and VirtualFree. These functions */
2528 /* work on individual allocations of virtual memory, made */
2529 /* previously using VirtualAlloc with the MEM_RESERVE flag. */
2530 /* The ranges we need to (de)commit may span several of these */
2531 /* allocations; therefore we use VirtualQuery to check */
2532 /* allocation lengths, and split up the range as necessary. */
2534 /* We assume that GC_remap is called on exactly the same range */
2535 /* as a previous call to GC_unmap. It is safe to consistently */
2536 /* round the endpoints in both places. */
2537 GC_INNER void GC_unmap(ptr_t start, size_t bytes)
2539 ptr_t start_addr = GC_unmap_start(start, bytes);
2540 ptr_t end_addr = GC_unmap_end(start, bytes);
2541 word len = end_addr - start_addr;
2543 if (0 == start_addr) return;
2544 # ifdef USE_WINALLOC
2546 MEMORY_BASIC_INFORMATION mem_info;
2549 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2550 != sizeof(mem_info))
2551 ABORT("Weird VirtualQuery result");
2552 free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2553 if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
2554 ABORT("VirtualFree failed");
2555 GC_unmapped_bytes += free_len;
2556 start_addr += free_len;
2559 # elif defined(SN_TARGET_PS3)
2560 ps3_free_mem(start_addr, len);
2562 /* We immediately remap it to prevent an intervening mmap from */
2563 /* accidentally grabbing the same address space. */
2565 # if defined(AIX) || defined(CYGWIN32)
2566 /* On AIX, mmap(PROT_NONE) fails with ENOMEM unless the */
2567 /* environment variable XPG_SUS_ENV is set to ON. */
2568 /* On Cygwin, calling mmap() with the new protection flags on */
2569 /* an existing memory map with MAP_FIXED is broken. */
2570 /* However, calling mprotect() on the given address range */
2571 /* with PROT_NONE seems to work fine. */
2572 if (mprotect(start_addr, len, PROT_NONE))
2573 ABORT("mprotect(PROT_NONE) failed");
2575 void * result = mmap(start_addr, len, PROT_NONE,
2576 MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2577 zero_fd, 0/* offset */);
2579 if (result != (void *)start_addr)
2580 ABORT("mmap(PROT_NONE) failed");
2581 # if defined(CPPCHECK) || defined(LINT2)
2582 /* Explicitly store the resource handle to a global variable. */
2583 GC_noop1((word)result);
2585 # endif /* !CYGWIN32 */
2587 GC_unmapped_bytes += len;
2591 GC_INNER void GC_remap(ptr_t start, size_t bytes)
2593 ptr_t start_addr = GC_unmap_start(start, bytes);
2594 ptr_t end_addr = GC_unmap_end(start, bytes);
2595 word len = end_addr - start_addr;
2596 if (0 == start_addr) return;
2598 /* FIXME: Handle out-of-memory correctly (at least for Win32) */
2599 # ifdef USE_WINALLOC
2601 MEMORY_BASIC_INFORMATION mem_info;
2605 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2606 != sizeof(mem_info))
2607 ABORT("Weird VirtualQuery result");
2608 alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2609 result = (ptr_t)VirtualAlloc(start_addr, alloc_len, MEM_COMMIT,
2611 ? PAGE_EXECUTE_READWRITE
2613 if (result != start_addr) {
2614 if (GetLastError() == ERROR_NOT_ENOUGH_MEMORY ||
2615 GetLastError() == ERROR_OUTOFMEMORY) {
2616 ABORT("Not enough memory to process remapping");
2618 ABORT("VirtualAlloc remapping failed");
2622 GC_noop1((word)result);
2624 GC_unmapped_bytes -= alloc_len;
2625 start_addr += alloc_len;
2629 /* It was already remapped with PROT_NONE. */
2631 # if defined(NACL) || defined(NETBSD)
2632 /* NaCl does not expose mprotect, but mmap should work fine. */
2633 /* In case of NetBSD, mprotect fails (unlike mmap) even */
2634 /* without PROT_EXEC if PaX MPROTECT feature is enabled. */
2635 void *result = mmap(start_addr, len, (PROT_READ | PROT_WRITE)
2636 | (GC_pages_executable ? PROT_EXEC : 0),
2637 MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2638 zero_fd, 0 /* offset */);
2639 if (result != (void *)start_addr)
2640 ABORT("mmap as mprotect failed");
2641 # if defined(CPPCHECK) || defined(LINT2)
2642 GC_noop1((word)result);
2645 if (mprotect(start_addr, len, (PROT_READ | PROT_WRITE)
2646 | (GC_pages_executable ? PROT_EXEC : 0)) != 0) {
2647 ABORT_ARG3("mprotect remapping failed",
2648 " at %p (length %lu), errcode= %d",
2649 (void *)start_addr, (unsigned long)len, errno);
2653 # undef IGNORE_PAGES_EXECUTABLE
2654 GC_unmapped_bytes -= len;
2658 /* Two adjacent blocks have already been unmapped and are about to */
2659 /* be merged. Unmap the whole block. This typically requires */
2660 /* that we unmap a small section in the middle that was not previously */
2661 /* unmapped due to alignment constraints. */
2662 GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
2665 ptr_t start1_addr = GC_unmap_start(start1, bytes1);
2666 ptr_t end1_addr = GC_unmap_end(start1, bytes1);
2667 ptr_t start2_addr = GC_unmap_start(start2, bytes2);
2668 ptr_t start_addr = end1_addr;
2669 ptr_t end_addr = start2_addr;
2672 GC_ASSERT(start1 + bytes1 == start2);
2673 if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
2674 if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
2675 if (0 == start_addr) return;
2676 len = end_addr - start_addr;
2677 # ifdef USE_WINALLOC
2679 MEMORY_BASIC_INFORMATION mem_info;
2682 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2683 != sizeof(mem_info))
2684 ABORT("Weird VirtualQuery result");
2685 free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2686 if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
2687 ABORT("VirtualFree failed");
2688 GC_unmapped_bytes += free_len;
2689 start_addr += free_len;
2694 /* Immediately remap as above. */
2695 # if defined(AIX) || defined(CYGWIN32)
2696 if (mprotect(start_addr, len, PROT_NONE))
2697 ABORT("mprotect(PROT_NONE) failed");
2699 void * result = mmap(start_addr, len, PROT_NONE,
2700 MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2701 zero_fd, 0/* offset */);
2703 if (result != (void *)start_addr)
2704 ABORT("mmap(PROT_NONE) failed");
2705 # if defined(CPPCHECK) || defined(LINT2)
2706 GC_noop1((word)result);
2708 # endif /* !CYGWIN32 */
2709 GC_unmapped_bytes += len;
2714 #endif /* USE_MUNMAP */
2716 /* Routine for pushing any additional roots. In THREADS */
2717 /* environment, this is also responsible for marking from */
2718 /* thread stacks. */
2720 GC_push_other_roots_proc GC_push_other_roots = 0;
2724 PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
2726 struct PCR_ThCtl_TInfoRep info;
2729 info.ti_stkLow = info.ti_stkHi = 0;
2730 result = PCR_ThCtl_GetInfo(t, &info);
2731 GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
2735 /* Push the contents of an old object. We treat this as stack */
2736 /* data only because that makes it robust against mark stack */
2738 PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
2740 GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
2741 return(PCR_ERes_okay);
2744 extern struct PCR_MM_ProcsRep * GC_old_allocator;
2745 /* defined in pcr_interface.c. */
2747 STATIC void GC_CALLBACK GC_default_push_other_roots(void)
2749 /* Traverse data allocated by previous memory managers. */
2750 if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
2753 ABORT("Old object enumeration failed");
2755 /* Traverse all thread stacks. */
2757 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
2758 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
2759 ABORT("Thread stack marking failed");
2765 # if defined(NN_PLATFORM_CTR) || defined(NINTENDO_SWITCH) \
2766 || defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
2767 STATIC void GC_CALLBACK GC_default_push_other_roots(void)
2769 GC_push_all_stacks();
2773 # ifdef SN_TARGET_PS3
2774 STATIC void GC_CALLBACK GC_default_push_other_roots(void)
2776 ABORT("GC_default_push_other_roots is not implemented");
2779 void GC_push_thread_structures(void)
2781 ABORT("GC_push_thread_structures is not implemented");
2783 # endif /* SN_TARGET_PS3 */
2785 GC_push_other_roots_proc GC_push_other_roots = GC_default_push_other_roots;
2786 #endif /* THREADS */
2788 GC_API void GC_CALL GC_set_push_other_roots(GC_push_other_roots_proc fn)
2790 GC_push_other_roots = fn;
2793 GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void)
2795 return GC_push_other_roots;
2799 * Routines for accessing dirty bits on virtual pages.
2800 * There are six ways to maintain this information:
2801 * DEFAULT_VDB: A simple dummy implementation that treats every page
2802 * as possibly dirty. This makes incremental collection
2803 * useless, but the implementation is still correct.
2804 * Manual VDB: Stacks and static data are always considered dirty.
2805 * Heap pages are considered dirty if GC_dirty(p) has been
2806 * called on some pointer p pointing to somewhere inside
2807 * an object on that page. A GC_dirty() call on a large
2808 * object directly dirties only a single page, but for the
2809 * manual VDB we are careful to treat an object with a dirty
2810 * page as completely dirty.
2811 * In order to avoid races, an object must be marked dirty
2812 * after it is written, and a reference to the object
2813 * must be kept on a stack or in a register in the interim.
2814 * With threads enabled, an object directly reachable from the
2815 * stack at the time of a collection is treated as dirty.
2816 * In single-threaded mode, it suffices to ensure that no
2817 * collection can take place between the pointer assignment
2818 * and the GC_dirty() call.
2819 * PCR_VDB: Use PPCRs virtual dirty bit facility.
2820 * PROC_VDB: Use the /proc facility for reading dirty bits. Only
2821 * works under some SVR4 variants. Even then, it may be
2822 * too slow to be entirely satisfactory. Requires reading
2823 * dirty bits for entire address space. Implementations tend
2824 * to assume that the client is a (slow) debugger.
2825 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
2826 * dirtied pages. The implementation (and implementability)
2827 * is highly system dependent. This usually fails when system
2828 * calls write to a protected page. We prevent the read system
2829 * call from doing so. It is the clients responsibility to
2830 * make sure that other system calls are similarly protected
2831 * or write only to the stack.
2832 * GWW_VDB: Use the Win32 GetWriteWatch functions, if available, to
2833 * read dirty bits. In case it is not available (because we
2834 * are running on Windows 95, Windows 2000 or earlier),
2835 * MPROTECT_VDB may be defined as a fallback strategy.
2838 #if (defined(CHECKSUMS) && defined(GWW_VDB)) || defined(PROC_VDB)
2839 /* Add all pages in pht2 to pht1. */
2840 STATIC void GC_or_pages(page_hash_table pht1, page_hash_table pht2)
2843 for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2845 #endif /* CHECKSUMS && GWW_VDB || PROC_VDB */
2849 # define GC_GWW_BUF_LEN (MAXHINCR * HBLKSIZE / 4096 /* X86 page size */)
2850 /* Still susceptible to overflow, if there are very large allocations, */
2851 /* and everything is dirty. */
2852 static PVOID gww_buf[GC_GWW_BUF_LEN];
2854 # ifndef MPROTECT_VDB
2855 # define GC_gww_dirty_init GC_dirty_init
2858 GC_INNER GC_bool GC_gww_dirty_init(void)
2860 detect_GetWriteWatch();
2861 return GC_GWW_AVAILABLE();
2864 GC_INLINE void GC_gww_read_dirty(GC_bool output_unneeded)
2868 if (!output_unneeded)
2869 BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
2871 for (i = 0; i != GC_n_heap_sects; ++i) {
2875 PVOID * pages = gww_buf;
2878 count = GC_GWW_BUF_LEN;
2879 /* GetWriteWatch is documented as returning non-zero when it */
2880 /* fails, but the documentation doesn't explicitly say why it */
2881 /* would fail or what its behavior will be if it fails. It */
2882 /* does appear to fail, at least on recent Win2K instances, if */
2883 /* the underlying memory was not allocated with the appropriate */
2884 /* flag. This is common if GC_enable_incremental is called */
2885 /* shortly after GC initialization. To avoid modifying the */
2886 /* interface, we silently work around such a failure, it only */
2887 /* affects the initial (small) heap allocation. If there are */
2888 /* more dirty pages than will fit in the buffer, this is not */
2889 /* treated as a failure; we must check the page count in the */
2890 /* loop condition. Since each partial call will reset the */
2891 /* status of some pages, this should eventually terminate even */
2892 /* in the overflow case. */
2893 if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
2894 GC_heap_sects[i].hs_start,
2895 GC_heap_sects[i].hs_bytes,
2899 static int warn_count = 0;
2900 struct hblk * start = (struct hblk *)GC_heap_sects[i].hs_start;
2901 static struct hblk *last_warned = 0;
2902 size_t nblocks = divHBLKSZ(GC_heap_sects[i].hs_bytes);
2904 if (i != 0 && last_warned != start && warn_count++ < 5) {
2905 last_warned = start;
2906 WARN("GC_gww_read_dirty unexpectedly failed at %p: "
2907 "Falling back to marking all pages dirty\n", start);
2909 if (!output_unneeded) {
2912 for (j = 0; j < nblocks; ++j) {
2913 word hash = PHT_HASH(start + j);
2914 set_pht_entry_from_index(GC_grungy_pages, hash);
2917 count = 1; /* Done with this section. */
2918 } else /* succeeded */ if (!output_unneeded) {
2919 PVOID * pages_end = pages + count;
2921 while (pages != pages_end) {
2922 struct hblk * h = (struct hblk *) *pages++;
2923 struct hblk * h_end = (struct hblk *) ((char *) h + page_size);
2925 set_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h));
2926 } while ((word)(++h) < (word)h_end);
2929 } while (count == GC_GWW_BUF_LEN);
2930 /* FIXME: It's unclear from Microsoft's documentation if this loop */
2931 /* is useful. We suspect the call just fails if the buffer fills */
2932 /* up. But that should still be handled correctly. */
2936 GC_ASSERT(!output_unneeded);
2937 GC_or_pages(GC_written_pages, GC_grungy_pages);
2941 # define GC_GWW_AVAILABLE() FALSE
2942 #endif /* !GWW_VDB */
2945 /* All of the following assume the allocation lock is held. */
2947 /* The client asserts that unallocated pages in the heap are never */
2950 /* Initialize virtual dirty bit implementation. */
2951 GC_INNER GC_bool GC_dirty_init(void)
2953 GC_VERBOSE_LOG_PRINTF("Initializing DEFAULT_VDB...\n");
2954 /* GC_dirty_pages and GC_grungy_pages are already cleared. */
2957 #endif /* DEFAULT_VDB */
2959 #ifndef GC_DISABLE_INCREMENTAL
2960 # if !defined(THREADS) || defined(HAVE_LOCKFREE_AO_OR)
2961 # define async_set_pht_entry_from_index(db, index) \
2962 set_pht_entry_from_index_concurrent(db, index)
2963 # elif defined(AO_HAVE_test_and_set_acquire)
2964 /* We need to lock around the bitmap update (in the write fault */
2965 /* handler or GC_dirty) in order to avoid the risk of losing a bit. */
2966 /* We do this with a test-and-set spin lock if possible. */
2967 GC_INNER volatile AO_TS_t GC_fault_handler_lock = AO_TS_INITIALIZER;
2969 static void async_set_pht_entry_from_index(volatile page_hash_table db,
2972 GC_acquire_dirty_lock();
2973 set_pht_entry_from_index(db, index);
2974 GC_release_dirty_lock();
2977 # error No test_and_set operation: Introduces a race.
2978 # endif /* THREADS && !AO_HAVE_test_and_set_acquire */
2979 #endif /* !GC_DISABLE_INCREMENTAL */
2983 * This implementation maintains dirty bits itself by catching write
2984 * faults and keeping track of them. We assume nobody else catches
2985 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
2986 * This means that clients must ensure that system calls don't write
2987 * to the write-protected heap. Probably the best way to do this is to
2988 * ensure that system calls write at most to pointer-free objects in the
2989 * heap, and do even that only if we are on a platform on which those
2990 * are not protected. Another alternative is to wrap system calls
2991 * (see example for read below), but the current implementation holds
2993 * We assume the page size is a multiple of HBLKSIZE.
2994 * We prefer them to be the same. We avoid protecting pointer-free
2995 * objects only if they are the same.
2998 /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
2999 decrease the likelihood of some of the problems described below. */
3000 # include <mach/vm_map.h>
3001 STATIC mach_port_t GC_task_self = 0;
3002 # define PROTECT(addr,len) \
3003 if (vm_protect(GC_task_self, (vm_address_t)(addr), (vm_size_t)(len), \
3004 FALSE, VM_PROT_READ \
3005 | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
3006 == KERN_SUCCESS) {} else ABORT("vm_protect(PROTECT) failed")
3007 # define UNPROTECT(addr,len) \
3008 if (vm_protect(GC_task_self, (vm_address_t)(addr), (vm_size_t)(len), \
3009 FALSE, (VM_PROT_READ | VM_PROT_WRITE) \
3010 | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
3011 == KERN_SUCCESS) {} else ABORT("vm_protect(UNPROTECT) failed")
3013 # elif !defined(USE_WINALLOC)
3014 # include <sys/mman.h>
3015 # include <signal.h>
3016 # if !defined(CYGWIN32) && !defined(HAIKU)
3017 # include <sys/syscall.h>
3020 # define PROTECT(addr, len) \
3021 if (mprotect((caddr_t)(addr), (size_t)(len), \
3023 | (GC_pages_executable ? PROT_EXEC : 0)) >= 0) { \
3024 } else ABORT("mprotect failed")
3025 # define UNPROTECT(addr, len) \
3026 if (mprotect((caddr_t)(addr), (size_t)(len), \
3027 (PROT_READ | PROT_WRITE) \
3028 | (GC_pages_executable ? PROT_EXEC : 0)) >= 0) { \
3029 } else ABORT(GC_pages_executable ? \
3030 "un-mprotect executable page failed" \
3031 " (probably disabled by OS)" : \
3032 "un-mprotect failed")
3033 # undef IGNORE_PAGES_EXECUTABLE
3035 # else /* USE_WINALLOC */
3037 # include <signal.h>
3040 static DWORD protect_junk;
3041 # define PROTECT(addr, len) \
3042 if (VirtualProtect((addr), (len), \
3043 GC_pages_executable ? PAGE_EXECUTE_READ : \
3046 } else ABORT_ARG1("VirtualProtect failed", \
3047 ": errcode= 0x%X", (unsigned)GetLastError())
3048 # define UNPROTECT(addr, len) \
3049 if (VirtualProtect((addr), (len), \
3050 GC_pages_executable ? PAGE_EXECUTE_READWRITE : \
3053 } else ABORT("un-VirtualProtect failed")
3054 # endif /* USE_WINALLOC */
3056 # if defined(MSWIN32)
3057 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_HNDLR_PTR;
3059 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER)((signed_word)-1)
3060 # elif defined(MSWINCE)
3061 typedef LONG (WINAPI *SIG_HNDLR_PTR)(struct _EXCEPTION_POINTERS *);
3063 # define SIG_DFL (SIG_HNDLR_PTR) (-1)
3064 # elif defined(DARWIN)
3065 typedef void (* SIG_HNDLR_PTR)();
3067 typedef void (* SIG_HNDLR_PTR)(int, siginfo_t *, void *);
3068 typedef void (* PLAIN_HNDLR_PTR)(int);
3071 # if defined(__GLIBC__)
3072 # if __GLIBC__ < 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ < 2
3073 # error glibc too old?
3078 STATIC SIG_HNDLR_PTR GC_old_segv_handler = 0;
3079 /* Also old MSWIN32 ACCESS_VIOLATION filter */
3080 # if defined(FREEBSD) || defined(HPUX) || defined(HURD) || defined(LINUX)
3081 STATIC SIG_HNDLR_PTR GC_old_bus_handler = 0;
3083 STATIC GC_bool GC_old_bus_handler_used_si = FALSE;
3086 # if !defined(MSWIN32) && !defined(MSWINCE)
3087 STATIC GC_bool GC_old_segv_handler_used_si = FALSE;
3088 # endif /* !MSWIN32 */
3089 #endif /* !DARWIN */
3092 /* This function is used only by the fault handler. Potential data */
3093 /* race between this function and GC_install_header, GC_remove_header */
3094 /* should not be harmful because the added or removed header should */
3095 /* be already unprotected. */
3096 GC_ATTR_NO_SANITIZE_THREAD
3097 static GC_bool is_header_found_async(void *addr)
3101 GET_HDR((ptr_t)addr, result);
3102 return result != NULL;
3104 return HDR_INNER(addr) != NULL;
3108 # define is_header_found_async(addr) (HDR(addr) != NULL)
3109 #endif /* !THREADS */
3113 # if !defined(MSWIN32) && !defined(MSWINCE)
3115 # if defined(FREEBSD) || defined(HURD) || defined(HPUX)
3116 # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
3118 # define SIG_OK (sig == SIGSEGV)
3119 /* Catch SIGSEGV but ignore SIGBUS. */
3121 # if defined(FREEBSD)
3122 # ifndef SEGV_ACCERR
3123 # define SEGV_ACCERR 2
3125 # if defined(AARCH64) || defined(ARM32) || defined(MIPS)
3126 # define CODE_OK (si -> si_code == SEGV_ACCERR)
3127 # elif defined(POWERPC)
3128 # define AIM /* Pretend that we're AIM. */
3129 # include <machine/trap.h>
3130 # define CODE_OK (si -> si_code == EXC_DSI \
3131 || si -> si_code == SEGV_ACCERR)
3133 # define CODE_OK (si -> si_code == BUS_PAGE_FAULT \
3134 || si -> si_code == SEGV_ACCERR)
3136 # elif defined(OSF1)
3137 # define CODE_OK (si -> si_code == 2 /* experimentally determined */)
3138 # elif defined(IRIX5)
3139 # define CODE_OK (si -> si_code == EACCES)
3140 # elif defined(CYGWIN32) || defined(HAIKU) || defined(HURD)
3141 # define CODE_OK TRUE
3142 # elif defined(LINUX)
3143 # define CODE_OK TRUE
3144 /* Empirically c.trapno == 14, on IA32, but is that useful? */
3145 /* Should probably consider alignment issues on other */
3146 /* architectures. */
3147 # elif defined(HPUX)
3148 # define CODE_OK (si -> si_code == SEGV_ACCERR \
3149 || si -> si_code == BUS_ADRERR \
3150 || si -> si_code == BUS_UNKNOWN \
3151 || si -> si_code == SEGV_UNKNOWN \
3152 || si -> si_code == BUS_OBJERR)
3153 # elif defined(SUNOS5SIGS)
3154 # define CODE_OK (si -> si_code == SEGV_ACCERR)
3156 # ifndef NO_GETCONTEXT
3157 # include <ucontext.h>
3159 STATIC void GC_write_fault_handler(int sig, siginfo_t *si, void *raw_sc)
3161 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode \
3162 == STATUS_ACCESS_VIOLATION)
3163 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] \
3164 == 1) /* Write fault */
3165 STATIC LONG WINAPI GC_write_fault_handler(
3166 struct _EXCEPTION_POINTERS *exc_info)
3167 # endif /* MSWIN32 || MSWINCE */
3169 # if !defined(MSWIN32) && !defined(MSWINCE)
3170 char *addr = (char *)si->si_addr;
3172 char * addr = (char *) (exc_info -> ExceptionRecord
3173 -> ExceptionInformation[1]);
3176 if (SIG_OK && CODE_OK) {
3177 struct hblk * h = (struct hblk *)((word)addr & ~(GC_page_size-1));
3178 GC_bool in_allocd_block;
3185 /* Address is only within the correct physical page. */
3186 in_allocd_block = FALSE;
3187 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
3188 if (is_header_found_async(&h[i])) {
3189 in_allocd_block = TRUE;
3194 in_allocd_block = is_header_found_async(addr);
3196 if (!in_allocd_block) {
3197 /* FIXME - We should make sure that we invoke the */
3198 /* old handler with the appropriate calling */
3199 /* sequence, which often depends on SA_SIGINFO. */
3201 /* Heap blocks now begin and end on page boundaries */
3202 SIG_HNDLR_PTR old_handler;
3204 # if defined(MSWIN32) || defined(MSWINCE)
3205 old_handler = GC_old_segv_handler;
3209 # if defined(FREEBSD) || defined(HURD) || defined(HPUX)
3210 if (sig == SIGBUS) {
3211 old_handler = GC_old_bus_handler;
3212 used_si = GC_old_bus_handler_used_si;
3216 old_handler = GC_old_segv_handler;
3217 used_si = GC_old_segv_handler_used_si;
3221 if (old_handler == (SIG_HNDLR_PTR)(signed_word)SIG_DFL) {
3222 # if !defined(MSWIN32) && !defined(MSWINCE)
3223 ABORT_ARG1("Unexpected bus error or segmentation fault",
3224 " at %p", (void *)addr);
3226 return(EXCEPTION_CONTINUE_SEARCH);
3230 * FIXME: This code should probably check if the
3231 * old signal handler used the traditional style and
3232 * if so call it using that style.
3234 # if defined(MSWIN32) || defined(MSWINCE)
3235 return((*old_handler)(exc_info));
3238 ((SIG_HNDLR_PTR)old_handler) (sig, si, raw_sc);
3240 /* FIXME: should pass nonstandard args as well. */
3241 ((PLAIN_HNDLR_PTR)(signed_word)old_handler)(sig);
3246 UNPROTECT(h, GC_page_size);
3247 /* We need to make sure that no collection occurs between */
3248 /* the UNPROTECT and the setting of the dirty bit. Otherwise */
3249 /* a write by a third thread might go unnoticed. Reversing */
3250 /* the order is just as bad, since we would end up unprotecting */
3251 /* a page in a GC cycle during which it's not marked. */
3252 /* Currently we do this by disabling the thread stopping */
3253 /* signals while this handler is running. An alternative might */
3254 /* be to record the fact that we're about to unprotect, or */
3255 /* have just unprotected a page in the GC's thread structure, */
3256 /* and then to have the thread stopping code set the dirty */
3257 /* flag, if necessary. */
3258 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
3259 word index = PHT_HASH(h+i);
3261 async_set_pht_entry_from_index(GC_dirty_pages, index);
3263 /* The write may not take place before dirty bits are read. */
3264 /* But then we'll fault again ... */
3265 # if defined(MSWIN32) || defined(MSWINCE)
3266 return(EXCEPTION_CONTINUE_EXECUTION);
3271 # if defined(MSWIN32) || defined(MSWINCE)
3272 return EXCEPTION_CONTINUE_SEARCH;
3274 ABORT_ARG1("Unexpected bus error or segmentation fault",
3275 " at %p", (void *)addr);
3279 # if defined(GC_WIN32_THREADS) && !defined(CYGWIN32)
3280 GC_INNER void GC_set_write_fault_handler(void)
3282 SetUnhandledExceptionFilter(GC_write_fault_handler);
3285 #endif /* !DARWIN */
3287 #if !defined(DARWIN)
3288 GC_INNER GC_bool GC_dirty_init(void)
3290 # if !defined(MSWIN32) && !defined(MSWINCE)
3291 struct sigaction act, oldact;
3292 act.sa_flags = SA_RESTART | SA_SIGINFO;
3293 act.sa_sigaction = GC_write_fault_handler;
3294 (void)sigemptyset(&act.sa_mask);
3295 # if defined(THREADS) && !defined(GC_OPENBSD_UTHREADS) \
3296 && !defined(GC_WIN32_THREADS) && !defined(NACL)
3297 /* Arrange to postpone the signal while we are in a write fault */
3298 /* handler. This effectively makes the handler atomic w.r.t. */
3299 /* stopping the world for GC. */
3300 (void)sigaddset(&act.sa_mask, GC_get_suspend_signal());
3302 # endif /* !MSWIN32 */
3303 GC_VERBOSE_LOG_PRINTF(
3304 "Initializing mprotect virtual dirty bit implementation\n");
3305 if (GC_page_size % HBLKSIZE != 0) {
3306 ABORT("Page size not multiple of HBLKSIZE");
3308 # if !defined(MSWIN32) && !defined(MSWINCE)
3309 /* act.sa_restorer is deprecated and should not be initialized. */
3310 # if defined(GC_IRIX_THREADS)
3311 sigaction(SIGSEGV, 0, &oldact);
3312 sigaction(SIGSEGV, &act, 0);
3315 int res = sigaction(SIGSEGV, &act, &oldact);
3316 if (res != 0) ABORT("Sigaction failed");
3319 if (oldact.sa_flags & SA_SIGINFO) {
3320 GC_old_segv_handler = oldact.sa_sigaction;
3321 GC_old_segv_handler_used_si = TRUE;
3323 GC_old_segv_handler = (SIG_HNDLR_PTR)(signed_word)oldact.sa_handler;
3324 GC_old_segv_handler_used_si = FALSE;
3326 if (GC_old_segv_handler == (SIG_HNDLR_PTR)(signed_word)SIG_IGN) {
3327 WARN("Previously ignored segmentation violation!?\n", 0);
3328 GC_old_segv_handler = (SIG_HNDLR_PTR)(signed_word)SIG_DFL;
3330 if (GC_old_segv_handler != (SIG_HNDLR_PTR)(signed_word)SIG_DFL) {
3331 GC_VERBOSE_LOG_PRINTF("Replaced other SIGSEGV handler\n");
3333 # if defined(HPUX) || defined(LINUX) || defined(HURD) \
3334 || (defined(FREEBSD) && (defined(__GLIBC__) || defined(SUNOS5SIGS)))
3335 sigaction(SIGBUS, &act, &oldact);
3336 if ((oldact.sa_flags & SA_SIGINFO) != 0) {
3337 GC_old_bus_handler = oldact.sa_sigaction;
3338 # if !defined(LINUX)
3339 GC_old_bus_handler_used_si = TRUE;
3342 GC_old_bus_handler = (SIG_HNDLR_PTR)(signed_word)oldact.sa_handler;
3344 if (GC_old_bus_handler == (SIG_HNDLR_PTR)(signed_word)SIG_IGN) {
3345 WARN("Previously ignored bus error!?\n", 0);
3346 # if !defined(LINUX)
3347 GC_old_bus_handler = (SIG_HNDLR_PTR)(signed_word)SIG_DFL;
3349 /* GC_old_bus_handler is not used by GC_write_fault_handler. */
3351 } else if (GC_old_bus_handler != (SIG_HNDLR_PTR)(signed_word)SIG_DFL) {
3352 GC_VERBOSE_LOG_PRINTF("Replaced other SIGBUS handler\n");
3354 # endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
3355 # endif /* ! MS windows */
3356 # if defined(GWW_VDB)
3357 if (GC_gww_dirty_init())
3360 # if defined(MSWIN32)
3361 GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
3362 if (GC_old_segv_handler != NULL) {
3363 GC_COND_LOG_PRINTF("Replaced other UnhandledExceptionFilter\n");
3365 GC_old_segv_handler = SIG_DFL;
3367 # elif defined(MSWINCE)
3368 /* MPROTECT_VDB is unsupported for WinCE at present. */
3369 /* FIXME: implement it (if possible). */
3371 # if defined(CPPCHECK) && defined(ADDRESS_SANITIZER)
3372 GC_noop1((word)&__asan_default_options);
3376 #endif /* !DARWIN */
3378 GC_API int GC_CALL GC_incremental_protection_needs(void)
3380 GC_ASSERT(GC_is_initialized);
3382 if (GC_page_size == HBLKSIZE) {
3383 return GC_PROTECTS_POINTER_HEAP;
3385 return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
3388 #define HAVE_INCREMENTAL_PROTECTION_NEEDS
3390 #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
3391 #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
3393 STATIC void GC_protect_heap(void)
3396 GC_bool protect_all =
3397 (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
3399 for (i = 0; i < GC_n_heap_sects; i++) {
3400 ptr_t start = GC_heap_sects[i].hs_start;
3401 size_t len = GC_heap_sects[i].hs_bytes;
3404 PROTECT(start, len);
3406 struct hblk * current;
3407 struct hblk * current_start; /* Start of block to be protected. */
3408 struct hblk * limit;
3410 GC_ASSERT(PAGE_ALIGNED(len));
3411 GC_ASSERT(PAGE_ALIGNED(start));
3412 current_start = current = (struct hblk *)start;
3413 limit = (struct hblk *)(start + len);
3414 while ((word)current < (word)limit) {
3419 GC_ASSERT(PAGE_ALIGNED(current));
3420 GET_HDR(current, hhdr);
3421 if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
3422 /* This can happen only if we're at the beginning of a */
3423 /* heap segment, and a block spans heap segments. */
3424 /* We will handle that block as part of the preceding */
3426 GC_ASSERT(current_start == current);
3427 current_start = ++current;
3430 if (HBLK_IS_FREE(hhdr)) {
3431 GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
3432 nhblks = divHBLKSZ(hhdr -> hb_sz);
3433 is_ptrfree = TRUE; /* dirty on alloc */
3435 nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
3436 is_ptrfree = IS_PTRFREE(hhdr);
3439 if ((word)current_start < (word)current) {
3440 PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
3442 current_start = (current += nhblks);
3447 if ((word)current_start < (word)current) {
3448 PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
3455 * Acquiring the allocation lock here is dangerous, since this
3456 * can be called from within GC_call_with_alloc_lock, and the cord
3457 * package does so. On systems that allow nested lock acquisition, this
3461 /* We no longer wrap read by default, since that was causing too many */
3462 /* problems. It is preferred that the client instead avoids writing */
3463 /* to the write-protected heap with a system call. */
3464 #endif /* MPROTECT_VDB */
3467 /* This implementation assumes a Solaris 2.X like /proc */
3468 /* pseudo-file-system from which we can read page modified bits. This */
3469 /* facility is far from optimal (e.g. we would like to get the info for */
3470 /* only some of the address space), but it avoids intercepting system */
3474 # include <sys/types.h>
3475 # include <sys/signal.h>
3476 # include <sys/syscall.h>
3477 # include <sys/stat.h>
3479 # ifdef GC_NO_SYS_FAULT_H
3480 /* This exists only to check PROC_VDB code compilation (on Linux). */
3481 # define PG_MODIFIED 1
3482 struct prpageheader {
3483 int dummy[2]; /* pr_tstamp */
3484 unsigned long pr_nmap;
3485 unsigned long pr_npage;
3490 char dummy1[64+8]; /* pr_mapname, pr_offset */
3492 unsigned pr_pagesize;
3496 # include <sys/fault.h>
3497 # include <sys/procfs.h>
3500 # define INITIAL_BUF_SZ 16384
3501 STATIC size_t GC_proc_buf_size = INITIAL_BUF_SZ;
3502 STATIC char *GC_proc_buf = NULL;
3503 STATIC int GC_proc_fd = 0;
3505 GC_INNER GC_bool GC_dirty_init(void)
3509 if (GC_bytes_allocd != 0 || GC_bytes_allocd_before_gc != 0) {
3510 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3511 GC_VERBOSE_LOG_PRINTF(
3512 "Allocated %lu bytes: all pages may have been written\n",
3513 (unsigned long)(GC_bytes_allocd + GC_bytes_allocd_before_gc));
3516 (void)snprintf(buf, sizeof(buf), "/proc/%ld/pagedata", (long)getpid());
3517 buf[sizeof(buf) - 1] = '\0';
3518 GC_proc_fd = open(buf, O_RDONLY);
3519 if (GC_proc_fd < 0) {
3520 WARN("/proc open failed; cannot enable GC incremental mode\n", 0);
3523 if (syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC) == -1)
3524 WARN("Could not set FD_CLOEXEC for /proc\n", 0);
3526 GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
3527 if (GC_proc_buf == NULL)
3528 ABORT("Insufficient space for /proc read");
3532 GC_INLINE void GC_proc_read_dirty(GC_bool output_unneeded)
3536 char * bufp = GC_proc_buf;
3539 BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
3540 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3541 /* Retry with larger buffer. */
3542 size_t new_size = 2 * GC_proc_buf_size;
3545 WARN("/proc read failed: GC_proc_buf_size = %" WARN_PRIdPTR "\n",
3546 (signed_word)GC_proc_buf_size);
3547 new_buf = GC_scratch_alloc(new_size);
3549 GC_scratch_recycle_no_gww(bufp, GC_proc_buf_size);
3550 GC_proc_buf = bufp = new_buf;
3551 GC_proc_buf_size = new_size;
3553 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3554 WARN("Insufficient space for /proc read\n", 0);
3556 if (!output_unneeded)
3557 memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
3558 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3563 /* Copy dirty bits into GC_grungy_pages */
3564 nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
3565 # ifdef DEBUG_DIRTY_BITS
3566 GC_log_printf("Proc VDB read: pr_nmap= %u, pr_npage= %lu\n",
3567 nmaps, ((struct prpageheader *)bufp)->pr_npage);
3569 # if defined(GC_NO_SYS_FAULT_H) && defined(CPPCHECK)
3570 GC_noop1(((struct prpageheader *)bufp)->dummy[0]);
3572 bufp += sizeof(struct prpageheader);
3573 for (i = 0; i < nmaps; i++) {
3574 struct prasmap * map = (struct prasmap *)bufp;
3575 ptr_t vaddr = (ptr_t)(map -> pr_vaddr);
3576 unsigned long npages = map -> pr_npage;
3577 unsigned pagesize = map -> pr_pagesize;
3580 # if defined(GC_NO_SYS_FAULT_H) && defined(CPPCHECK)
3581 GC_noop1(map->dummy1[0] + map->dummy2[0]);
3583 # ifdef DEBUG_DIRTY_BITS
3585 "pr_vaddr= %p, npage= %lu, mflags= 0x%x, pagesize= 0x%x\n",
3586 (void *)vaddr, npages, map->pr_mflags, pagesize);
3589 bufp += sizeof(struct prasmap);
3590 limit = vaddr + pagesize * npages;
3591 for (; (word)vaddr < (word)limit; vaddr += pagesize) {
3592 if ((*bufp++) & PG_MODIFIED) {
3594 ptr_t next_vaddr = vaddr + pagesize;
3595 # ifdef DEBUG_DIRTY_BITS
3596 GC_log_printf("dirty page at: %p\n", (void *)vaddr);
3598 for (h = (struct hblk *)vaddr;
3599 (word)h < (word)next_vaddr; h++) {
3600 word index = PHT_HASH(h);
3602 set_pht_entry_from_index(GC_grungy_pages, index);
3606 bufp = (char *)(((word)bufp + (sizeof(long)-1))
3607 & ~(word)(sizeof(long)-1));
3609 # ifdef DEBUG_DIRTY_BITS
3610 GC_log_printf("Proc VDB read done\n");
3613 /* Update GC_written_pages (even if output_unneeded). */
3614 GC_or_pages(GC_written_pages, GC_grungy_pages);
3618 #endif /* PROC_VDB */
3622 # include "vd/PCR_VD.h"
3624 # define NPAGES (32*1024) /* 128 MB */
3626 PCR_VD_DB GC_grungy_bits[NPAGES];
3628 STATIC ptr_t GC_vd_base = NULL;
3629 /* Address corresponding to GC_grungy_bits[0] */
3630 /* HBLKSIZE aligned. */
3632 GC_INNER GC_bool GC_dirty_init(void)
3634 /* For the time being, we assume the heap generally grows up */
3635 GC_vd_base = GC_heap_sects[0].hs_start;
3636 if (GC_vd_base == 0) {
3637 ABORT("Bad initial heap segment");
3639 if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
3641 ABORT("Dirty bit initialization failed");
3645 #endif /* PCR_VDB */
3647 #ifndef GC_DISABLE_INCREMENTAL
3648 GC_INNER GC_bool GC_manual_vdb = FALSE;
3650 /* Manually mark the page containing p as dirty. Logically, this */
3651 /* dirties the entire object. */
3652 GC_INNER void GC_dirty_inner(const void *p)
3654 word index = PHT_HASH(p);
3656 # if defined(MPROTECT_VDB)
3657 /* Do not update GC_dirty_pages if it should be followed by the */
3658 /* page unprotection. */
3659 GC_ASSERT(GC_manual_vdb);
3661 async_set_pht_entry_from_index(GC_dirty_pages, index);
3664 /* Retrieve system dirty bits for the heap to a local buffer (unless */
3665 /* output_unneeded). Restore the systems notion of which pages are */
3666 /* dirty. We assume that either the world is stopped or it is OK to */
3667 /* lose dirty bits while it's happening (as in GC_enable_incremental).*/
3668 GC_INNER void GC_read_dirty(GC_bool output_unneeded)
3671 # if defined(MPROTECT_VDB)
3672 || !GC_GWW_AVAILABLE()
3675 if (!output_unneeded)
3676 BCOPY((/* no volatile */ void *)GC_dirty_pages, GC_grungy_pages,
3677 sizeof(GC_dirty_pages));
3678 BZERO((/* no volatile */ void *)GC_dirty_pages,
3679 sizeof(GC_dirty_pages));
3680 # ifdef MPROTECT_VDB
3688 GC_gww_read_dirty(output_unneeded);
3689 # elif defined(PROC_VDB)
3690 GC_proc_read_dirty(output_unneeded);
3691 # elif defined(PCR_VDB)
3692 /* lazily enable dirty bits on newly added heap sects */
3694 static int onhs = 0;
3695 int nhs = GC_n_heap_sects;
3696 for (; onhs < nhs; onhs++) {
3697 PCR_VD_WriteProtectEnable(
3698 GC_heap_sects[onhs].hs_start,
3699 GC_heap_sects[onhs].hs_bytes);
3702 if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
3704 ABORT("Dirty bit read failed");
3709 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
3710 /* If the actual page size is different, this returns TRUE if any */
3711 /* of the pages overlapping h are dirty. This routine may err on the */
3712 /* side of labeling pages as dirty (and this implementation does). */
3713 GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
3716 if (!GC_manual_vdb) {
3717 if ((word)h < (word)GC_vd_base
3718 || (word)h >= (word)(GC_vd_base + NPAGES * HBLKSIZE)) {
3721 return GC_grungy_bits[h-(struct hblk*)GC_vd_base] & PCR_VD_DB_dirtyBit;
3723 # elif defined(DEFAULT_VDB)
3727 return NULL == HDR(h)
3728 || get_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h));
3731 # if defined(CHECKSUMS) || defined(PROC_VDB)
3732 /* Could any valid GC heap pointer ever have been written to this page? */
3733 GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h)
3735 # if defined(GWW_VDB) || defined(PROC_VDB)
3736 # ifdef MPROTECT_VDB
3737 if (!GC_GWW_AVAILABLE())
3740 return NULL == HDR(h)
3741 || get_pht_entry_from_index(GC_written_pages, PHT_HASH(h));
3743 /* TODO: implement me for MANUAL_VDB. */
3748 # endif /* CHECKSUMS || PROC_VDB */
3750 /* We expect block h to be written shortly. Ensure that all pages */
3751 /* containing any part of the n hblks starting at h are no longer */
3752 /* protected. If is_ptrfree is false, also ensure that they will */
3753 /* subsequently appear to be dirty. Not allowed to call GC_printf */
3754 /* (and the friends) here, see Win32 GC_stop_world for the details. */
3755 GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
3760 if (!GC_auto_incremental)
3762 PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
3763 PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
3764 # elif defined(MPROTECT_VDB)
3765 struct hblk * h_trunc; /* Truncated to page boundary */
3766 struct hblk * h_end; /* Page boundary following block end */
3767 struct hblk * current;
3769 if (!GC_auto_incremental || GC_GWW_AVAILABLE())
3771 h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
3772 h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size - 1)
3773 & ~(GC_page_size - 1));
3774 if (h_end == h_trunc + 1 &&
3775 get_pht_entry_from_index(GC_dirty_pages, PHT_HASH(h_trunc))) {
3776 /* already marked dirty, and hence unprotected. */
3779 for (current = h_trunc; (word)current < (word)h_end; ++current) {
3780 word index = PHT_HASH(current);
3782 if (!is_ptrfree || (word)current < (word)h
3783 || (word)current >= (word)(h + nblocks)) {
3784 async_set_pht_entry_from_index(GC_dirty_pages, index);
3787 UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
3789 /* Ignore write hints. They don't help us here. */
3790 (void)h; (void)nblocks; (void)is_ptrfree;
3793 #endif /* !GC_DISABLE_INCREMENTAL */
3795 #if defined(MPROTECT_VDB) && defined(DARWIN)
3796 /* The following sources were used as a "reference" for this exception
3798 1. Apple's mach/xnu documentation
3799 2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
3800 omnigroup's macosx-dev list.
3801 www.omnigroup.com/mailman/archive/macosx-dev/2000-June/014178.html
3802 3. macosx-nat.c from Apple's GDB source code.
3805 /* The bug that caused all this trouble should now be fixed. This should
3806 eventually be removed if all goes well. */
3808 /* #define BROKEN_EXCEPTION_HANDLING */
3810 #include <mach/mach.h>
3811 #include <mach/mach_error.h>
3812 #include <mach/exception.h>
3813 #include <mach/task.h>
3814 #include <pthread.h>
3818 /* Some of the following prototypes are missing in any header, although */
3819 /* they are documented. Some are in mach/exc.h file. */
3821 exc_server(mach_msg_header_t *, mach_msg_header_t *);
3823 extern kern_return_t
3824 exception_raise(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
3825 exception_data_t, mach_msg_type_number_t);
3827 extern kern_return_t
3828 exception_raise_state(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
3829 exception_data_t, mach_msg_type_number_t,
3830 thread_state_flavor_t*, thread_state_t,
3831 mach_msg_type_number_t, thread_state_t,
3832 mach_msg_type_number_t*);
3834 extern kern_return_t
3835 exception_raise_state_identity(mach_port_t, mach_port_t, mach_port_t,
3836 exception_type_t, exception_data_t,
3837 mach_msg_type_number_t, thread_state_flavor_t*,
3838 thread_state_t, mach_msg_type_number_t,
3839 thread_state_t, mach_msg_type_number_t*);
3841 GC_API_OSCALL kern_return_t
3842 catch_exception_raise(mach_port_t exception_port, mach_port_t thread,
3843 mach_port_t task, exception_type_t exception,
3844 exception_data_t code,
3845 mach_msg_type_number_t code_count);
3847 GC_API_OSCALL kern_return_t
3848 catch_exception_raise_state(mach_port_name_t exception_port,
3849 int exception, exception_data_t code,
3850 mach_msg_type_number_t codeCnt, int flavor,
3851 thread_state_t old_state, int old_stateCnt,
3852 thread_state_t new_state, int new_stateCnt);
3854 GC_API_OSCALL kern_return_t
3855 catch_exception_raise_state_identity(mach_port_name_t exception_port,
3856 mach_port_t thread, mach_port_t task, int exception,
3857 exception_data_t code, mach_msg_type_number_t codeCnt,
3858 int flavor, thread_state_t old_state, int old_stateCnt,
3859 thread_state_t new_state, int new_stateCnt);
3863 /* These should never be called, but just in case... */
3864 GC_API_OSCALL kern_return_t
3865 catch_exception_raise_state(mach_port_name_t exception_port GC_ATTR_UNUSED,
3866 int exception GC_ATTR_UNUSED, exception_data_t code GC_ATTR_UNUSED,
3867 mach_msg_type_number_t codeCnt GC_ATTR_UNUSED, int flavor GC_ATTR_UNUSED,
3868 thread_state_t old_state GC_ATTR_UNUSED, int old_stateCnt GC_ATTR_UNUSED,
3869 thread_state_t new_state GC_ATTR_UNUSED, int new_stateCnt GC_ATTR_UNUSED)
3871 ABORT_RET("Unexpected catch_exception_raise_state invocation");
3872 return(KERN_INVALID_ARGUMENT);
3875 GC_API_OSCALL kern_return_t
3876 catch_exception_raise_state_identity(
3877 mach_port_name_t exception_port GC_ATTR_UNUSED,
3878 mach_port_t thread GC_ATTR_UNUSED, mach_port_t task GC_ATTR_UNUSED,
3879 int exception GC_ATTR_UNUSED, exception_data_t code GC_ATTR_UNUSED,
3880 mach_msg_type_number_t codeCnt GC_ATTR_UNUSED, int flavor GC_ATTR_UNUSED,
3881 thread_state_t old_state GC_ATTR_UNUSED, int old_stateCnt GC_ATTR_UNUSED,
3882 thread_state_t new_state GC_ATTR_UNUSED, int new_stateCnt GC_ATTR_UNUSED)
3884 ABORT_RET("Unexpected catch_exception_raise_state_identity invocation");
3885 return(KERN_INVALID_ARGUMENT);
3888 #define MAX_EXCEPTION_PORTS 16
3891 mach_msg_type_number_t count;
3892 exception_mask_t masks[MAX_EXCEPTION_PORTS];
3893 exception_handler_t ports[MAX_EXCEPTION_PORTS];
3894 exception_behavior_t behaviors[MAX_EXCEPTION_PORTS];
3895 thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
3898 STATIC struct ports_s {
3899 void (*volatile os_callback[3])(void);
3900 mach_port_t exception;
3901 # if defined(THREADS)
3906 /* This is to prevent stripping these routines as dead. */
3907 (void (*)(void))catch_exception_raise,
3908 (void (*)(void))catch_exception_raise_state,
3909 (void (*)(void))catch_exception_raise_state_identity
3912 0, /* for 'exception' */
3918 mach_msg_header_t head;
3925 } GC_mprotect_state_t;
3928 /* FIXME: 1 and 2 seem to be safe to use in the msgh_id field, but it */
3929 /* is not documented. Use the source and see if they should be OK. */
3931 # define ID_RESUME 2
3933 /* This value is only used on the reply port. */
3936 STATIC GC_mprotect_state_t GC_mprotect_state = GC_MP_NORMAL;
3938 /* The following should ONLY be called when the world is stopped. */
3939 STATIC void GC_mprotect_thread_notify(mach_msg_id_t id)
3943 mach_msg_trailer_t trailer;
3945 mach_msg_return_t r;
3948 buf.msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
3949 buf.msg.head.msgh_size = sizeof(buf.msg);
3950 buf.msg.head.msgh_remote_port = GC_ports.exception;
3951 buf.msg.head.msgh_local_port = MACH_PORT_NULL;
3952 buf.msg.head.msgh_id = id;
3954 r = mach_msg(&buf.msg.head, MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_LARGE,
3955 sizeof(buf.msg), sizeof(buf), GC_ports.reply,
3956 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
3957 if (r != MACH_MSG_SUCCESS)
3958 ABORT("mach_msg failed in GC_mprotect_thread_notify");
3959 if (buf.msg.head.msgh_id != ID_ACK)
3960 ABORT("Invalid ack in GC_mprotect_thread_notify");
3963 /* Should only be called by the mprotect thread */
3964 STATIC void GC_mprotect_thread_reply(void)
3967 mach_msg_return_t r;
3970 msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
3971 msg.head.msgh_size = sizeof(msg);
3972 msg.head.msgh_remote_port = GC_ports.reply;
3973 msg.head.msgh_local_port = MACH_PORT_NULL;
3974 msg.head.msgh_id = ID_ACK;
3976 r = mach_msg(&msg.head, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
3977 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
3978 if (r != MACH_MSG_SUCCESS)
3979 ABORT("mach_msg failed in GC_mprotect_thread_reply");
3982 GC_INNER void GC_mprotect_stop(void)
3984 GC_mprotect_thread_notify(ID_STOP);
3987 GC_INNER void GC_mprotect_resume(void)
3989 GC_mprotect_thread_notify(ID_RESUME);
3993 /* The compiler should optimize away any GC_mprotect_state computations */
3994 # define GC_mprotect_state GC_MP_NORMAL
3995 #endif /* !THREADS */
3998 mach_msg_header_t head;
4003 mach_msg_header_t head;
4004 mach_msg_body_t msgh_body;
4008 STATIC void *GC_mprotect_thread(void *arg)
4010 mach_msg_return_t r;
4011 /* These two structures contain some private kernel data. We don't */
4012 /* need to access any of it so we don't bother defining a proper */
4013 /* struct. The correct definitions are in the xnu source code. */
4014 struct mp_reply_s reply;
4015 struct mp_msg_s msg;
4018 if ((word)arg == GC_WORD_MAX) return 0; /* to prevent a compiler warning */
4019 # if defined(CPPCHECK)
4020 reply.data[0] = 0; /* to prevent "field unused" warnings */
4024 # if defined(THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
4025 GC_darwin_register_mach_handler_thread(mach_thread_self());
4029 r = mach_msg(&msg.head, MACH_RCV_MSG | MACH_RCV_LARGE |
4030 (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
4031 0, sizeof(msg), GC_ports.exception,
4032 GC_mprotect_state == GC_MP_DISCARDING ? 0
4033 : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
4034 id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;
4036 # if defined(THREADS)
4037 if(GC_mprotect_state == GC_MP_DISCARDING) {
4038 if(r == MACH_RCV_TIMED_OUT) {
4039 GC_mprotect_state = GC_MP_STOPPED;
4040 GC_mprotect_thread_reply();
4043 if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
4044 ABORT("Out of order mprotect thread request");
4046 # endif /* THREADS */
4048 if (r != MACH_MSG_SUCCESS) {
4049 ABORT_ARG2("mach_msg failed",
4050 ": errcode= %d (%s)", (int)r, mach_error_string(r));
4054 # if defined(THREADS)
4056 if(GC_mprotect_state != GC_MP_NORMAL)
4057 ABORT("Called mprotect_stop when state wasn't normal");
4058 GC_mprotect_state = GC_MP_DISCARDING;
4061 if(GC_mprotect_state != GC_MP_STOPPED)
4062 ABORT("Called mprotect_resume when state wasn't stopped");
4063 GC_mprotect_state = GC_MP_NORMAL;
4064 GC_mprotect_thread_reply();
4066 # endif /* THREADS */
4068 /* Handle the message (calls catch_exception_raise) */
4069 if(!exc_server(&msg.head, &reply.head))
4070 ABORT("exc_server failed");
4071 /* Send the reply */
4072 r = mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0,
4073 MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
4075 if(r != MACH_MSG_SUCCESS) {
4076 /* This will fail if the thread dies, but the thread */
4077 /* shouldn't die... */
4078 # ifdef BROKEN_EXCEPTION_HANDLING
4079 GC_err_printf("mach_msg failed with %d %s while sending "
4080 "exc reply\n", (int)r, mach_error_string(r));
4082 ABORT("mach_msg failed while sending exception reply");
4089 /* All this SIGBUS code shouldn't be necessary. All protection faults should
4090 be going through the mach exception handler. However, it seems a SIGBUS is
4091 occasionally sent for some unknown reason. Even more odd, it seems to be
4092 meaningless and safe to ignore. */
4093 #ifdef BROKEN_EXCEPTION_HANDLING
4095 /* Updates to this aren't atomic, but the SIGBUS'es seem pretty rare. */
4096 /* Even if this doesn't get updated property, it isn't really a problem. */
4097 STATIC int GC_sigbus_count = 0;
4099 STATIC void GC_darwin_sigbus(int num, siginfo_t *sip, void *context)
4102 ABORT("Got a non-sigbus signal in the sigbus handler");
4104 /* Ugh... some seem safe to ignore, but too many in a row probably means
4105 trouble. GC_sigbus_count is reset for each mach exception that is
4107 if (GC_sigbus_count >= 8) {
4108 ABORT("Got more than 8 SIGBUSs in a row!");
4111 WARN("Ignoring SIGBUS\n", 0);
4114 #endif /* BROKEN_EXCEPTION_HANDLING */
4116 GC_INNER GC_bool GC_dirty_init(void)
4121 pthread_attr_t attr;
4122 exception_mask_t mask;
4124 # ifdef CAN_HANDLE_FORK
4125 if (GC_handle_fork) {
4126 /* To both support GC incremental mode and GC functions usage in */
4127 /* the forked child, pthread_atfork should be used to install */
4128 /* handlers that switch off GC_incremental in the child */
4129 /* gracefully (unprotecting all pages and clearing */
4130 /* GC_mach_handler_thread). For now, we just disable incremental */
4131 /* mode if fork() handling is requested by the client. */
4132 WARN("Can't turn on GC incremental mode as fork()"
4133 " handling requested\n", 0);
4138 GC_VERBOSE_LOG_PRINTF("Initializing mach/darwin mprotect"
4139 " virtual dirty bit implementation\n");
4140 # ifdef BROKEN_EXCEPTION_HANDLING
4141 WARN("Enabling workarounds for various darwin "
4142 "exception handling bugs\n", 0);
4144 if (GC_page_size % HBLKSIZE != 0) {
4145 ABORT("Page size not multiple of HBLKSIZE");
4148 GC_task_self = me = mach_task_self();
4150 r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.exception);
4151 /* TODO: WARN and return FALSE in case of a failure. */
4152 if (r != KERN_SUCCESS)
4153 ABORT("mach_port_allocate failed (exception port)");
4155 r = mach_port_insert_right(me, GC_ports.exception, GC_ports.exception,
4156 MACH_MSG_TYPE_MAKE_SEND);
4157 if (r != KERN_SUCCESS)
4158 ABORT("mach_port_insert_right failed (exception port)");
4160 # if defined(THREADS)
4161 r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.reply);
4162 if(r != KERN_SUCCESS)
4163 ABORT("mach_port_allocate failed (reply port)");
4166 /* The exceptions we want to catch */
4167 mask = EXC_MASK_BAD_ACCESS;
4169 r = task_get_exception_ports(me, mask, GC_old_exc_ports.masks,
4170 &GC_old_exc_ports.count, GC_old_exc_ports.ports,
4171 GC_old_exc_ports.behaviors,
4172 GC_old_exc_ports.flavors);
4173 if (r != KERN_SUCCESS)
4174 ABORT("task_get_exception_ports failed");
4176 r = task_set_exception_ports(me, mask, GC_ports.exception, EXCEPTION_DEFAULT,
4177 GC_MACH_THREAD_STATE);
4178 if (r != KERN_SUCCESS)
4179 ABORT("task_set_exception_ports failed");
4180 if (pthread_attr_init(&attr) != 0)
4181 ABORT("pthread_attr_init failed");
4182 if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
4183 ABORT("pthread_attr_setdetachedstate failed");
4185 # undef pthread_create
4186 /* This will call the real pthread function, not our wrapper */
4187 if (pthread_create(&thread, &attr, GC_mprotect_thread, NULL) != 0)
4188 ABORT("pthread_create failed");
4189 (void)pthread_attr_destroy(&attr);
4191 /* Setup the sigbus handler for ignoring the meaningless SIGBUSs */
4192 # ifdef BROKEN_EXCEPTION_HANDLING
4194 struct sigaction sa, oldsa;
4195 sa.sa_handler = (SIG_HNDLR_PTR)GC_darwin_sigbus;
4196 sigemptyset(&sa.sa_mask);
4197 sa.sa_flags = SA_RESTART|SA_SIGINFO;
4198 /* sa.sa_restorer is deprecated and should not be initialized. */
4199 if (sigaction(SIGBUS, &sa, &oldsa) < 0)
4200 ABORT("sigaction failed");
4201 if (oldsa.sa_handler != (SIG_HNDLR_PTR)(signed_word)SIG_DFL) {
4202 GC_VERBOSE_LOG_PRINTF("Replaced other SIGBUS handler\n");
4205 # endif /* BROKEN_EXCEPTION_HANDLING */
4206 # if defined(CPPCHECK)
4207 GC_noop1((word)GC_ports.os_callback[0]);
4212 /* The source code for Apple's GDB was used as a reference for the */
4213 /* exception forwarding code. This code is similar to be GDB code only */
4214 /* because there is only one way to do it. */
4215 STATIC kern_return_t GC_forward_exception(mach_port_t thread, mach_port_t task,
4216 exception_type_t exception,
4217 exception_data_t data,
4218 mach_msg_type_number_t data_count)
4223 exception_behavior_t behavior;
4224 thread_state_flavor_t flavor;
4226 thread_state_data_t thread_state;
4227 mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;
4229 for (i=0; i < GC_old_exc_ports.count; i++)
4230 if (GC_old_exc_ports.masks[i] & (1 << exception))
4232 if (i == GC_old_exc_ports.count)
4233 ABORT("No handler for exception!");
4235 port = GC_old_exc_ports.ports[i];
4236 behavior = GC_old_exc_ports.behaviors[i];
4237 flavor = GC_old_exc_ports.flavors[i];
4239 if (behavior == EXCEPTION_STATE || behavior == EXCEPTION_STATE_IDENTITY) {
4240 r = thread_get_state(thread, flavor, thread_state, &thread_state_count);
4241 if(r != KERN_SUCCESS)
4242 ABORT("thread_get_state failed in forward_exception");
4246 case EXCEPTION_STATE:
4247 r = exception_raise_state(port, thread, task, exception, data, data_count,
4248 &flavor, thread_state, thread_state_count,
4249 thread_state, &thread_state_count);
4251 case EXCEPTION_STATE_IDENTITY:
4252 r = exception_raise_state_identity(port, thread, task, exception, data,
4253 data_count, &flavor, thread_state,
4254 thread_state_count, thread_state,
4255 &thread_state_count);
4257 /* case EXCEPTION_DEFAULT: */ /* default signal handlers */
4258 default: /* user-supplied signal handlers */
4259 r = exception_raise(port, thread, task, exception, data, data_count);
4262 if (behavior == EXCEPTION_STATE || behavior == EXCEPTION_STATE_IDENTITY) {
4263 r = thread_set_state(thread, flavor, thread_state, thread_state_count);
4264 if (r != KERN_SUCCESS)
4265 ABORT("thread_set_state failed in forward_exception");
4270 #define FWD() GC_forward_exception(thread, task, exception, code, code_count)
4273 # define DARWIN_EXC_STATE ARM_EXCEPTION_STATE
4274 # define DARWIN_EXC_STATE_COUNT ARM_EXCEPTION_STATE_COUNT
4275 # define DARWIN_EXC_STATE_T arm_exception_state_t
4276 # define DARWIN_EXC_STATE_DAR THREAD_FLD_NAME(far)
4277 #elif defined(AARCH64)
4278 # define DARWIN_EXC_STATE ARM_EXCEPTION_STATE64
4279 # define DARWIN_EXC_STATE_COUNT ARM_EXCEPTION_STATE64_COUNT
4280 # define DARWIN_EXC_STATE_T arm_exception_state64_t
4281 # define DARWIN_EXC_STATE_DAR THREAD_FLD_NAME(far)
4282 #elif defined(POWERPC)
4283 # if CPP_WORDSZ == 32
4284 # define DARWIN_EXC_STATE PPC_EXCEPTION_STATE
4285 # define DARWIN_EXC_STATE_COUNT PPC_EXCEPTION_STATE_COUNT
4286 # define DARWIN_EXC_STATE_T ppc_exception_state_t
4288 # define DARWIN_EXC_STATE PPC_EXCEPTION_STATE64
4289 # define DARWIN_EXC_STATE_COUNT PPC_EXCEPTION_STATE64_COUNT
4290 # define DARWIN_EXC_STATE_T ppc_exception_state64_t
4292 # define DARWIN_EXC_STATE_DAR THREAD_FLD_NAME(dar)
4293 #elif defined(I386) || defined(X86_64)
4294 # if CPP_WORDSZ == 32
4295 # if defined(i386_EXCEPTION_STATE_COUNT) \
4296 && !defined(x86_EXCEPTION_STATE32_COUNT)
4297 /* Use old naming convention for 32-bit x86. */
4298 # define DARWIN_EXC_STATE i386_EXCEPTION_STATE
4299 # define DARWIN_EXC_STATE_COUNT i386_EXCEPTION_STATE_COUNT
4300 # define DARWIN_EXC_STATE_T i386_exception_state_t
4302 # define DARWIN_EXC_STATE x86_EXCEPTION_STATE32
4303 # define DARWIN_EXC_STATE_COUNT x86_EXCEPTION_STATE32_COUNT
4304 # define DARWIN_EXC_STATE_T x86_exception_state32_t
4307 # define DARWIN_EXC_STATE x86_EXCEPTION_STATE64
4308 # define DARWIN_EXC_STATE_COUNT x86_EXCEPTION_STATE64_COUNT
4309 # define DARWIN_EXC_STATE_T x86_exception_state64_t
4311 # define DARWIN_EXC_STATE_DAR THREAD_FLD_NAME(faultvaddr)
4312 #elif !defined(CPPCHECK)
4313 # error FIXME for non-arm/ppc/x86 darwin
4316 /* This violates the namespace rules but there isn't anything that can */
4317 /* be done about it. The exception handling stuff is hard coded to */
4318 /* call this. catch_exception_raise, catch_exception_raise_state and */
4319 /* and catch_exception_raise_state_identity are called from OS. */
4320 GC_API_OSCALL kern_return_t
4321 catch_exception_raise(mach_port_t exception_port GC_ATTR_UNUSED,
4322 mach_port_t thread, mach_port_t task GC_ATTR_UNUSED,
4323 exception_type_t exception, exception_data_t code,
4324 mach_msg_type_number_t code_count GC_ATTR_UNUSED)
4328 thread_state_flavor_t flavor = DARWIN_EXC_STATE;
4329 mach_msg_type_number_t exc_state_count = DARWIN_EXC_STATE_COUNT;
4330 DARWIN_EXC_STATE_T exc_state;
4332 if (exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
4333 # ifdef DEBUG_EXCEPTION_HANDLING
4334 /* We aren't interested, pass it on to the old handler */
4335 GC_log_printf("Exception: 0x%x Code: 0x%x 0x%x in catch...\n",
4336 exception, code_count > 0 ? code[0] : -1,
4337 code_count > 1 ? code[1] : -1);
4342 r = thread_get_state(thread, flavor, (natural_t*)&exc_state,
4344 if(r != KERN_SUCCESS) {
4345 /* The thread is supposed to be suspended while the exception */
4346 /* handler is called. This shouldn't fail. */
4347 # ifdef BROKEN_EXCEPTION_HANDLING
4348 GC_err_printf("thread_get_state failed in catch_exception_raise\n");
4349 return KERN_SUCCESS;
4351 ABORT("thread_get_state failed in catch_exception_raise");
4355 /* This is the address that caused the fault */
4356 addr = (char*) exc_state.DARWIN_EXC_STATE_DAR;
4357 if (!is_header_found_async(addr)) {
4358 /* Ugh... just like the SIGBUS problem above, it seems we get */
4359 /* a bogus KERN_PROTECTION_FAILURE every once and a while. We wait */
4360 /* till we get a bunch in a row before doing anything about it. */
4361 /* If a "real" fault ever occurs it'll just keep faulting over and */
4362 /* over and we'll hit the limit pretty quickly. */
4363 # ifdef BROKEN_EXCEPTION_HANDLING
4364 static char *last_fault;
4365 static int last_fault_count;
4367 if(addr != last_fault) {
4369 last_fault_count = 0;
4371 if(++last_fault_count < 32) {
4372 if(last_fault_count == 1)
4373 WARN("Ignoring KERN_PROTECTION_FAILURE at %p\n", addr);
4374 return KERN_SUCCESS;
4377 GC_err_printf("Unexpected KERN_PROTECTION_FAILURE at %p; aborting...\n",
4379 /* Can't pass it along to the signal handler because that is */
4380 /* ignoring SIGBUS signals. We also shouldn't call ABORT here as */
4381 /* signals don't always work too well from the exception handler. */
4383 # else /* BROKEN_EXCEPTION_HANDLING */
4384 /* Pass it along to the next exception handler
4385 (which should call SIGBUS/SIGSEGV) */
4387 # endif /* !BROKEN_EXCEPTION_HANDLING */
4390 # ifdef BROKEN_EXCEPTION_HANDLING
4391 /* Reset the number of consecutive SIGBUSs */
4392 GC_sigbus_count = 0;
4395 if (GC_mprotect_state == GC_MP_NORMAL) { /* common case */
4396 struct hblk * h = (struct hblk*)((word)addr & ~(GC_page_size-1));
4399 UNPROTECT(h, GC_page_size);
4400 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
4401 word index = PHT_HASH(h+i);
4402 async_set_pht_entry_from_index(GC_dirty_pages, index);
4404 } else if (GC_mprotect_state == GC_MP_DISCARDING) {
4405 /* Lie to the thread for now. No sense UNPROTECT()ing the memory
4406 when we're just going to PROTECT() it again later. The thread
4407 will just fault again once it resumes */
4409 /* Shouldn't happen, i don't think */
4410 GC_err_printf("KERN_PROTECTION_FAILURE while world is stopped\n");
4413 return KERN_SUCCESS;
4417 #ifndef NO_DESC_CATCH_EXCEPTION_RAISE
4418 /* These symbols should have REFERENCED_DYNAMICALLY (0x10) bit set to */
4419 /* let strip know they are not to be stripped. */
4420 __asm__(".desc _catch_exception_raise, 0x10");
4421 __asm__(".desc _catch_exception_raise_state, 0x10");
4422 __asm__(".desc _catch_exception_raise_state_identity, 0x10");
4425 #endif /* DARWIN && MPROTECT_VDB */
4427 #ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
4428 GC_API int GC_CALL GC_incremental_protection_needs(void)
4430 GC_ASSERT(GC_is_initialized);
4431 return GC_PROTECTS_NONE;
4433 #endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
4436 /* Undo sbrk() redirection. */
4440 /* If value is non-zero then allocate executable memory. */
4441 GC_API void GC_CALL GC_set_pages_executable(int value)
4443 GC_ASSERT(!GC_is_initialized);
4444 /* Even if IGNORE_PAGES_EXECUTABLE is defined, GC_pages_executable is */
4445 /* touched here to prevent a compiler warning. */
4446 GC_pages_executable = (GC_bool)(value != 0);
4449 /* Returns non-zero if the GC-allocated memory is executable. */
4450 /* GC_get_pages_executable is defined after all the places */
4451 /* where GC_get_pages_executable is undefined. */
4452 GC_API int GC_CALL GC_get_pages_executable(void)
4454 # ifdef IGNORE_PAGES_EXECUTABLE
4455 return 1; /* Always allocate executable memory. */
4457 return (int)GC_pages_executable;
4461 /* Call stack save code for debugging. Should probably be in */
4462 /* mach_dep.c, but that requires reorganization. */
4464 /* I suspect the following works for most X86 *nix variants, so */
4465 /* long as the frame pointer is explicitly stored. In the case of gcc, */
4466 /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
4467 #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
4468 # include <features.h>
4471 struct frame *fr_savfp;
4474 long fr_arg[NARGS]; /* All the arguments go here. */
4481 # include <features.h>
4483 # if defined(SAVE_CALL_CHAIN)
4487 struct frame *fr_savfp;
4496 # elif defined (DRSNX)
4497 # include <sys/sparc/frame.h>
4498 # elif defined(OPENBSD)
4500 # elif defined(FREEBSD) || defined(NETBSD)
4501 # include <machine/frame.h>
4503 # include <sys/frame.h>
4506 # error We only know how to get the first 6 arguments
4510 #ifdef NEED_CALLINFO
4511 /* Fill in the pc and argument information for up to NFRAMES of my */
4512 /* callers. Ignore my frame and my callers frame. */
4515 # include <unistd.h>
4518 #endif /* NEED_CALLINFO */
4520 #if defined(GC_HAVE_BUILTIN_BACKTRACE)
4522 # include "private/msvc_dbg.h"
4524 # include <execinfo.h>
4528 #ifdef SAVE_CALL_CHAIN
4530 #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
4531 && defined(GC_HAVE_BUILTIN_BACKTRACE)
4533 #ifdef REDIRECT_MALLOC
4534 /* Deal with possible malloc calls in backtrace by omitting */
4535 /* the infinitely recursing backtrace. */
4537 __thread /* If your compiler doesn't understand this */
4538 /* you could use something like pthread_getspecific. */
4540 GC_bool GC_in_save_callers = FALSE;
4543 GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
4545 void * tmp_info[NFRAMES + 1];
4547 # define IGNORE_FRAMES 1
4549 /* We retrieve NFRAMES+1 pc values, but discard the first, since it */
4550 /* points to our own frame. */
4551 # ifdef REDIRECT_MALLOC
4552 if (GC_in_save_callers) {
4553 info[0].ci_pc = (word)(&GC_save_callers);
4554 for (i = 1; i < NFRAMES; ++i) info[i].ci_pc = 0;
4557 GC_in_save_callers = TRUE;
4560 GC_ASSERT(I_HOLD_LOCK());
4561 /* backtrace may call dl_iterate_phdr which is also */
4562 /* used by GC_register_dynamic_libraries, and */
4563 /* dl_iterate_phdr is not guaranteed to be reentrant. */
4565 GC_STATIC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
4566 npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
4567 if (npcs > IGNORE_FRAMES)
4568 BCOPY(&tmp_info[IGNORE_FRAMES], info,
4569 (npcs - IGNORE_FRAMES) * sizeof(void *));
4570 for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
4571 # ifdef REDIRECT_MALLOC
4572 GC_in_save_callers = FALSE;
4576 #else /* No builtin backtrace; do it ourselves */
4578 #if (defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD)) && defined(SPARC)
4579 # define FR_SAVFP fr_fp
4580 # define FR_SAVPC fr_pc
4582 # define FR_SAVFP fr_savfp
4583 # define FR_SAVPC fr_savpc
4586 #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
4592 GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
4594 struct frame *frame;
4598 /* We assume this is turned on only with gcc as the compiler. */
4599 asm("movl %%ebp,%0" : "=r"(frame));
4602 frame = (struct frame *)GC_save_regs_in_stack();
4603 fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
4606 for (; !((word)fp HOTTER_THAN (word)frame)
4608 && !((word)GC_stackbottom HOTTER_THAN (word)fp)
4609 # elif defined(STACK_GROWS_UP)
4612 && nframes < NFRAMES;
4613 fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
4618 info[nframes].ci_pc = fp->FR_SAVPC;
4620 for (i = 0; i < NARGS; i++) {
4621 info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
4623 # endif /* NARGS > 0 */
4625 if (nframes < NFRAMES) info[nframes].ci_pc = 0;
4628 #endif /* No builtin backtrace */
4630 #endif /* SAVE_CALL_CHAIN */
4632 #ifdef NEED_CALLINFO
4634 /* Print info to stderr. We do NOT hold the allocation lock */
4635 GC_INNER void GC_print_callers(struct callinfo info[NFRAMES])
4638 static int reentry_count = 0;
4641 /* FIXME: This should probably use a different lock, so that we */
4642 /* become callable with or without the allocation lock. */
4648 GC_err_printf("\tCaller at allocation:\n");
4650 GC_err_printf("\tCall chain at allocation:\n");
4652 for (i = 0; i < NFRAMES; i++) {
4653 # if defined(LINUX) && !defined(SMALL_CONFIG)
4654 GC_bool stop = FALSE;
4657 if (0 == info[i].ci_pc)
4663 GC_err_printf("\t\targs: ");
4664 for (j = 0; j < NARGS; j++) {
4665 if (j != 0) GC_err_printf(", ");
4666 GC_err_printf("%d (0x%X)", ~(info[i].ci_arg[j]),
4667 ~(info[i].ci_arg[j]));
4669 GC_err_printf("\n");
4672 if (reentry_count > 1) {
4673 /* We were called during an allocation during */
4674 /* a previous GC_print_callers call; punt. */
4675 GC_err_printf("\t\t##PC##= 0x%lx\n",
4676 (unsigned long)info[i].ci_pc);
4682 # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
4683 && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
4685 backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
4686 if (sym_name != NULL) {
4691 (void)snprintf(buf, sizeof(buf), "##PC##= 0x%lx",
4692 (unsigned long)info[i].ci_pc);
4693 buf[sizeof(buf) - 1] = '\0';
4696 # if defined(LINUX) && !defined(SMALL_CONFIG)
4697 /* Try for a line number. */
4701 static char exe_name[EXE_SZ];
4703 char cmd_buf[CMD_SZ];
4704 # define RESULT_SZ 200
4705 static char result_buf[RESULT_SZ];
4708 # define PRELOAD_SZ 200
4709 char preload_buf[PRELOAD_SZ];
4710 static GC_bool found_exe_name = FALSE;
4711 static GC_bool will_fail = FALSE;
4713 /* Try to get it via a hairy and expensive scheme. */
4714 /* First we get the name of the executable: */
4717 if (!found_exe_name) {
4718 int ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
4720 if (ret_code < 0 || ret_code >= EXE_SZ
4721 || exe_name[0] != '/') {
4722 will_fail = TRUE; /* Don't try again. */
4725 exe_name[ret_code] = '\0';
4726 found_exe_name = TRUE;
4728 /* Then we use popen to start addr2line -e <exe> <addr> */
4729 /* There are faster ways to do this, but hopefully this */
4730 /* isn't time critical. */
4731 (void)snprintf(cmd_buf, sizeof(cmd_buf),
4732 "/usr/bin/addr2line -f -e %s 0x%lx",
4733 exe_name, (unsigned long)info[i].ci_pc);
4734 cmd_buf[sizeof(cmd_buf) - 1] = '\0';
4735 old_preload = GETENV("LD_PRELOAD");
4736 if (0 != old_preload) {
4737 size_t old_len = strlen(old_preload);
4738 if (old_len >= PRELOAD_SZ) {
4742 BCOPY(old_preload, preload_buf, old_len + 1);
4743 unsetenv ("LD_PRELOAD");
4745 pipe = popen(cmd_buf, "r");
4746 if (0 != old_preload
4747 && 0 != setenv ("LD_PRELOAD", preload_buf, 0)) {
4748 WARN("Failed to reset LD_PRELOAD\n", 0);
4754 result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe);
4756 if (0 == result_len) {
4760 if (result_buf[result_len - 1] == '\n') --result_len;
4761 result_buf[result_len] = 0;
4762 if (result_buf[0] == '?'
4763 || (result_buf[result_len-2] == ':'
4764 && result_buf[result_len-1] == '0'))
4766 /* Get rid of embedded newline, if any. Test for "main" */
4768 char * nl = strchr(result_buf, '\n');
4770 && (word)nl < (word)(result_buf + result_len)) {
4773 if (strncmp(result_buf, "main",
4775 ? (size_t)((word)nl /* a cppcheck workaround */
4776 - COVERT_DATAFLOW(result_buf))
4777 : result_len) == 0) {
4781 if (result_len < RESULT_SZ - 25) {
4782 /* Add in hex address */
4783 (void)snprintf(&result_buf[result_len],
4784 sizeof(result_buf) - result_len,
4785 " [0x%lx]", (unsigned long)info[i].ci_pc);
4786 result_buf[sizeof(result_buf) - 1] = '\0';
4788 # if defined(CPPCHECK)
4789 GC_noop1((unsigned char)name[0]);
4790 /* name computed previously is discarded */
4795 GC_err_printf("\t\t%s\n", name);
4796 # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
4797 && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
4798 if (sym_name != NULL)
4799 free(sym_name); /* May call GC_[debug_]free; that's OK */
4802 # if defined(LINUX) && !defined(SMALL_CONFIG)
4812 #endif /* NEED_CALLINFO */
4814 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
4815 /* Dump /proc/self/maps to GC_stderr, to enable looking up names for */
4816 /* addresses in FIND_LEAK output. */
4817 void GC_print_address_map(void)
4821 GC_err_printf("---------- Begin address map ----------\n");
4822 maps = GC_get_maps();
4823 GC_err_puts(maps != NULL ? maps : "Failed to get map!\n");
4824 GC_err_printf("---------- End address map ----------\n");
4826 #endif /* LINUX && ELF */