2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 #include "private/gc_priv.h"
19 #if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
20 && !defined(MSWINCE) && !defined(SN_TARGET_ORBIS) \
21 && !defined(SN_TARGET_PSP2) && !defined(__CC_ARM)
22 # include <sys/types.h>
23 # if !defined(MSWIN32) && !defined(MSWIN_XBOX1)
29 #if defined(MSWINCE) || defined(SN_TARGET_PS3)
30 # define SIGSEGV 0 /* value is irrelevant */
35 #if defined(UNIX_LIKE) || defined(CYGWIN32) || defined(NACL) \
40 #if defined(LINUX) || defined(LINUX_STACKBOTTOM)
44 /* Blatantly OS dependent routines, except for those that are related */
45 /* to dynamic loading. */
49 # include "extra/AmigaOS.c"
54 # include <Processes.h>
59 # include <malloc.h> /* for locking */
62 #if defined(MMAP_SUPPORTED) || defined(ADD_HEAP_GUARD_PAGES)
63 # if defined(USE_MUNMAP) && !defined(USE_MMAP) && !defined(CPPCHECK)
64 # error Invalid config: USE_MUNMAP requires USE_MMAP
66 # include <sys/types.h>
67 # include <sys/mman.h>
68 # include <sys/stat.h>
73 /* for get_etext and friends */
74 # include <mach-o/getsect.h>
78 /* Apparently necessary for djgpp 2.01. May cause problems with */
80 typedef long unsigned int caddr_t;
84 # include "il/PCR_IL.h"
85 # include "th/PCR_ThCtl.h"
86 # include "mm/PCR_MM.h"
89 #if defined(GC_DARWIN_THREADS) && defined(MPROTECT_VDB)
90 /* Declare GC_mprotect_stop and GC_mprotect_resume as extern "C". */
91 # include "private/darwin_stop_world.h"
94 #if !defined(NO_EXECUTE_PERMISSION)
/* Whether heap pages are mapped with execute permission.  Defaults to   */
/* TRUE only when NO_EXECUTE_PERMISSION is not defined at build time.    */
95 STATIC GC_bool GC_pages_executable = TRUE;
97 STATIC GC_bool GC_pages_executable = FALSE;
/* Marker macro: code that really consults GC_pages_executable is        */
/* expected to #undef this first (see comment on the next line).         */
99 #define IGNORE_PAGES_EXECUTABLE 1
100 /* Undefined on GC_pages_executable real use. */
102 #ifdef NEED_PROC_MAPS
103 /* We need to parse /proc/self/maps, either to find dynamic libraries, */
104 /* and/or to find the register backing store base (IA64). Do it once */
/* NOTE(review): this file is a fragmented numbered listing -- the       */
/* embedded original line numbers jump (e.g. 109 -> 114), so several     */
/* statements of each function (declarations, braces, returns) are not  */
/* visible here.  Comments below state only what the visible lines show. */
107 /* Repeatedly perform a read call until the buffer is filled or */
108 /* we encounter EOF. */
/* Loops calling READ() until "count" bytes are accumulated; propagates */
/* a negative result (read error) immediately and stops at EOF (0).     */
109 STATIC ssize_t GC_repeat_read(int fd, char *buf, size_t count)
114 ASSERT_CANCEL_DISABLED();
115 while (num_read < count) {
116 ssize_t result = READ(fd, buf + num_read, count - num_read);
118 if (result < 0) return result;
119 if (result == 0) break;
127 /* Determine the length of a file by incrementally reading it into a */
128 /* buffer. This would be silly to use it on a file supporting lseek, */
129 /* but Linux /proc files usually do not. */
/* Reads in GET_FILE_LEN_BUF_SZ chunks; a read error (-1) yields 0.     */
130 STATIC size_t GC_get_file_len(int f)
134 # define GET_FILE_LEN_BUF_SZ 500
135 char buf[GET_FILE_LEN_BUF_SZ];
138 result = read(f, buf, GET_FILE_LEN_BUF_SZ);
139 if (result == -1) return 0;
141 } while (result > 0);
/* Returns the current byte length of /proc/self/maps, or 0 when the    */
/* file cannot be opened.                                               */
145 STATIC size_t GC_get_maps_len(void)
147 int f = open("/proc/self/maps", O_RDONLY);
149 if (f < 0) return 0; /* treat missing file as empty */
150 result = GC_get_file_len(f);
156 /* Copy the contents of /proc/self/maps to a buffer in our address */
157 /* space. Return the address of the buffer, or zero on failure. */
158 /* This code could be simplified if we could determine its size ahead */
/* The returned buffer is static and reused; caller must hold the       */
/* allocation lock (asserted below), so there is a single client.       */
160 GC_INNER char * GC_get_maps(void)
163 static char *maps_buf = NULL;
164 static size_t maps_buf_sz = 1;
167 size_t old_maps_size = 0;
170 /* The buffer is essentially static, so there must be a single client. */
171 GC_ASSERT(I_HOLD_LOCK());
173 /* Note that in the presence of threads, the maps file can */
174 /* essentially shrink asynchronously and unexpectedly as */
175 /* threads that we already think of as dead release their */
176 /* stacks. And there is no easy way to read the entire */
177 /* file atomically. This is arguably a misfeature of the */
178 /* /proc/.../maps interface. */
179 /* Since we expect the file can grow asynchronously in rare */
180 /* cases, it should suffice to first determine */
181 /* the size (using lseek or read), and then to reread the */
182 /* file. If the size is inconsistent we have to retry. */
183 /* This only matters with threads enabled, and if we use */
184 /* this to locate roots (not the default). */
187 /* Determine the initial size of /proc/self/maps. */
188 /* Note that lseek doesn't work, at least as of 2.6.15. */
189 maps_size = GC_get_maps_len();
190 if (0 == maps_size) return 0;
192 maps_size = 4000; /* Guess */
195 /* Read /proc/self/maps, growing maps_buf as necessary. */
196 /* Note that we may not allocate conventionally, and */
197 /* thus can't use stdio. */
/* Grow the scratch buffer until it can hold the whole file plus a NUL. */
201 while (maps_size >= maps_buf_sz) {
202 GC_scratch_recycle_no_gww(maps_buf, maps_buf_sz);
203 /* Grow only by powers of 2, since we leak "too small" buffers.*/
204 while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
205 maps_buf = GC_scratch_alloc(maps_buf_sz);
207 /* Recompute initial length, since we allocated. */
208 /* This can only happen a few times per program */
210 maps_size = GC_get_maps_len();
211 if (0 == maps_size) return 0;
213 if (maps_buf == 0) return 0;
215 GC_ASSERT(maps_buf_sz >= maps_size + 1);
216 f = open("/proc/self/maps", O_RDONLY);
217 if (-1 == f) return 0;
219 old_maps_size = maps_size;
223 result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
227 } while ((size_t)result == maps_buf_sz-1);
/* If the file grew between the pre-scan and the read, warn and let the */
/* enclosing do/while (partially visible below) retry.                  */
232 if (maps_size > old_maps_size) {
233 /* This might be caused by e.g. thread creation. */
234 WARN("Unexpected asynchronous /proc/self/maps growth"
235 " (to %" WARN_PRIdPTR " bytes)\n", maps_size);
238 } while (maps_size >= maps_buf_sz
240 || maps_size < old_maps_size
243 maps_buf[maps_size] = '\0';
248 * GC_parse_map_entry parses an entry from /proc/self/maps so we can
249 * locate all writable data segments that belong to shared libraries.
250 * The format of one of these entries and the fields we care about
252 * XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537 name of mapping...\n
253 * ^^^^^^^^ ^^^^^^^^ ^^^^ ^^
254 * start end prot maj_dev
256 * Note that since about august 2003 kernels, the columns no longer have
257 * fixed offsets on 64-bit kernels. Hence we no longer rely on fixed offsets
258 * anywhere, which is safer anyway.
261 /* Assign various fields of the first line in buf_ptr to (*start), */
262 /* (*end), (*prot), (*maj_dev) and (*mapping_name). mapping_name may */
263 /* be NULL. (*prot) and (*mapping_name) are assigned pointers into the */
264 /* original buffer. */
265 #if (defined(DYNAMIC_LOADING) && defined(USE_PROC_FOR_LIBRARIES)) \
266 || defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR) \
267 || defined(REDIRECT_MALLOC)
/* Returns a pointer past the parsed line (for iterating over entries); */
/* NULL/empty input is handled by the guard below.  Note: this listing  */
/* omits some lines (original numbering jumps), e.g. the assignments of */
/* start_start/end_start/maj_dev_start are not visible here.            */
268 GC_INNER char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
269 char **prot, unsigned int *maj_dev,
272 unsigned char *start_start, *end_start, *maj_dev_start;
273 unsigned char *p; /* unsigned for isspace, isxdigit */
275 if (buf_ptr == NULL || *buf_ptr == '\0') {
279 p = (unsigned char *)buf_ptr;
/* Parse the hexadecimal "start-end" address range.                     */
280 while (isspace(*p)) ++p;
282 GC_ASSERT(isxdigit(*start_start));
283 *start = (ptr_t)strtoul((char *)start_start, (char **)&p, 16);
288 GC_ASSERT(isxdigit(*end_start));
289 *end = (ptr_t)strtoul((char *)end_start, (char **)&p, 16);
290 GC_ASSERT(isspace(*p));
/* The protection field starts with 'r' or '-'.                         */
292 while (isspace(*p)) ++p;
293 GC_ASSERT(*p == 'r' || *p == '-');
295 /* Skip past protection field to offset field */
296 while (!isspace(*p)) ++p;
297 while (isspace(*p)) p++;
298 GC_ASSERT(isxdigit(*p));
299 /* Skip past offset field, which we ignore */
300 while (!isspace(*p)) ++p;
301 while (isspace(*p)) p++;
/* Major device number is hexadecimal (the "30" in "30:05" above).      */
303 GC_ASSERT(isxdigit(*maj_dev_start));
304 *maj_dev = strtoul((char *)maj_dev_start, NULL, 16);
/* Either skip to the next line, or also record where the mapping name  */
/* (a '/' path or a '[' pseudo-name) begins within the buffer.          */
306 if (mapping_name == 0) {
307 while (*p && *p++ != '\n');
309 while (*p && *p != '\n' && *p != '/' && *p != '[') p++;
310 *mapping_name = (char *)p;
311 while (*p && *p++ != '\n');
315 #endif /* REDIRECT_MALLOC || DYNAMIC_LOADING || IA64 || ... */
317 #if defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR)
318 /* Try to read the backing store base from /proc/self/maps. */
319 /* Return the bounds of the writable mapping with a 0 major device, */
320 /* which includes the address passed as data. */
321 /* Return FALSE if there is no such mapping. */
/* Iterates over GC_get_maps() output looking for a writable,           */
/* anonymous (maj_dev == 0) mapping that contains addr.                 */
322 GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr, ptr_t *startp,
326 ptr_t my_start, my_end;
327 unsigned int maj_dev;
328 char *maps = GC_get_maps();
329 char *buf_ptr = maps;
331 if (0 == maps) return(FALSE);
333 buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
336 if (buf_ptr == NULL) return FALSE;
337 if (prot[1] == 'w' && maj_dev == 0) {
338 if ((word)my_end > (word)addr && (word)my_start <= (word)addr) {
347 #endif /* IA64 || INCLUDE_LINUX_THREAD_DESCR */
349 #if defined(REDIRECT_MALLOC)
350 /* Find the text(code) mapping for the library whose name, after */
351 /* stripping the directory part, starts with nm. */
/* Scans map entries for an "r-x" (text) mapping whose basename has nm  */
/* as a prefix (strncmp with nm_len below).                             */
352 GC_INNER GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp)
354 size_t nm_len = strlen(nm);
357 ptr_t my_start, my_end;
358 unsigned int maj_dev;
359 char *maps = GC_get_maps();
360 char *buf_ptr = maps;
362 if (0 == maps) return(FALSE);
364 buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
365 &prot, &maj_dev, &map_path);
367 if (buf_ptr == NULL) return FALSE;
368 if (prot[0] == 'r' && prot[1] == '-' && prot[2] == 'x') {
370 /* Set p to point just past last slash, if any. */
371 while (*p != '\0' && *p != '\n' && *p != ' ' && *p != '\t') ++p;
372 while (*p != '/' && (word)p >= (word)map_path) --p;
374 if (strncmp(nm, p, nm_len) == 0) {
383 #endif /* REDIRECT_MALLOC */
/* IA64 helper: locates the register backing store base by asking       */
/* GC_enclosing_mapping() for the mapping containing the address        */
/* returned by GC_save_regs_in_stack(); logs on failure.                */
386 static ptr_t backing_store_base_from_proc(void)
388 ptr_t my_start, my_end;
389 if (!GC_enclosing_mapping(GC_save_regs_in_stack(), &my_start, &my_end)) {
390 GC_COND_LOG_PRINTF("Failed to find backing store base from /proc\n");
397 #endif /* NEED_PROC_MAPS */
399 #if defined(SEARCH_FOR_DATA_START)
400 /* The I386 case can be handled without a search. The Alpha case */
401 /* used to be handled differently as well, but the rules changed */
402 /* for recent Linux versions. This seems to be the easiest way to */
403 /* cover all versions. */
405 # if defined(LINUX) || defined(HURD)
406 /* Some Linux distributions arrange to define __data_start. Some */
407 /* define data_start as a weak symbol. The latter is technically */
408 /* broken, since the user program may define data_start, in which */
409 /* case we lose. Nonetheless, we try both, preferring __data_start.*/
410 /* We assume gcc-compatible pragmas. */
412 # pragma weak __data_start
413 # pragma weak data_start
414 extern int __data_start[], data_start[];
/* Start of the process data segment, as the GC sees it.                */
418 ptr_t GC_data_start = NULL;
/* Compute GC_data_start: prefer __data_start, then data_start, then    */
/* (per the lines below) fall back to a search via GC_find_limit, or    */
/* to an empty data root when that fallback is compiled out.            */
420 GC_INNER void GC_init_linux_data_start(void)
422 ptr_t data_end = DATAEND;
424 # if (defined(LINUX) || defined(HURD)) && !defined(IGNORE_PROG_DATA_START)
425 /* Try the easy approaches first: */
426 if (COVERT_DATAFLOW(__data_start) != 0) {
427 GC_data_start = (ptr_t)(__data_start);
429 GC_data_start = (ptr_t)(data_start);
/* Sanity-check the obtained value against DATAEND.                     */
431 if (COVERT_DATAFLOW(GC_data_start) != 0) {
432 if ((word)GC_data_start > (word)data_end)
433 ABORT_ARG2("Wrong __data_start/_end pair",
434 ": %p .. %p", (void *)GC_data_start, (void *)data_end);
437 # ifdef DEBUG_ADD_DEL_ROOTS
438 GC_log_printf("__data_start not provided\n");
443 /* Not needed, avoids the SIGSEGV caused by */
444 /* GC_find_limit which complicates debugging. */
445 GC_data_start = data_end; /* set data root size to 0 */
/* Last resort: search downward from data_end for the segment start.    */
449 GC_data_start = (ptr_t)GC_find_limit(data_end, FALSE);
451 #endif /* SEARCH_FOR_DATA_START */
455 # ifndef ECOS_GC_MEMORY_SIZE
456 # define ECOS_GC_MEMORY_SIZE (448 * 1024)
457 # endif /* ECOS_GC_MEMORY_SIZE */
459 /* TODO: This is a simple way of allocating memory which is */
460 /* compatible with ECOS early releases. Later releases use a more */
461 /* sophisticated means of allocating memory than this simple static */
462 /* allocator, but this method is at least bound to work. */
/* Bump-pointer allocator over a fixed static arena.                    */
463 static char ecos_gc_memory[ECOS_GC_MEMORY_SIZE];
464 static char *ecos_gc_brk = ecos_gc_memory;
/* sbrk() replacement for ECOS: advances the break pointer within the   */
/* static arena; on overflow the increment is undone (the failure       */
/* return path is not visible in this listing -- numbering jumps        */
/* 471 -> 476).                                                         */
466 static void *tiny_sbrk(ptrdiff_t increment)
468 void *p = ecos_gc_brk;
469 ecos_gc_brk += increment;
470 if ((word)ecos_gc_brk > (word)(ecos_gc_memory + sizeof(ecos_gc_memory))) {
471 ecos_gc_brk -= increment;
476 # define sbrk tiny_sbrk
479 #if defined(NETBSD) && defined(__ELF__)
480 ptr_t GC_data_start = NULL;
483 extern char **environ;
/* NetBSD/ELF: find the data segment start by searching downward from   */
/* the address of "environ" (a data-segment symbol).                    */
486 GC_INNER void GC_init_netbsd_elf(void)
488 /* This may need to be environ, without the underscore, for */
490 GC_data_start = (ptr_t)GC_find_limit(&environ, FALSE);
494 #if defined(ADDRESS_SANITIZER) && (defined(UNIX_LIKE) \
495 || defined(NEED_FIND_LIMIT) || defined(MPROTECT_VDB)) \
496 && !defined(CUSTOM_ASAN_DEF_OPTIONS)
497 /* To tell ASan to allow GC to use its own SIGBUS/SEGV handlers. */
498 /* The function is exported just to be visible to ASan library. */
499 GC_API const char *__asan_default_options(void)
501 return "allow_user_segv_handler=1";
/* Saved SIGSEGV disposition, restored after each probing routine.      */
506 static struct sigaction old_segv_act;
507 STATIC JMP_BUF GC_jmp_buf_openbsd;
510 # include <sys/syscall.h>
512 extern sigset_t __syscall(quad_t, ...);
516 /* Don't use GC_find_limit() because siglongjmp() outside of the */
517 /* signal handler by-passes our userland pthreads lib, leaving */
518 /* SIGSEGV and SIGPROF masked. Instead, use this custom one that */
519 /* works-around the issues. */
/* SIGSEGV handler used while probing memory: escape via longjmp.       */
521 STATIC void GC_fault_handler_openbsd(int sig GC_ATTR_UNUSED)
523 LONGJMP(GC_jmp_buf_openbsd, 1);
526 /* Return the first non-addressable location > p or bound. */
527 /* Requires the allocation lock. */
/* Probes page by page (GC_noop1 dereference) until a fault occurs or   */
/* bound is reached; faults land in GC_fault_handler_openbsd above.     */
528 STATIC ptr_t GC_find_limit_openbsd(ptr_t p, ptr_t bound)
530 static volatile ptr_t result;
531 /* Safer if static, since otherwise it may not be */
532 /* preserved across the longjmp. Can safely be */
533 /* static since it's only called with the */
534 /* allocation lock held. */
536 struct sigaction act;
537 word pgsz = (word)sysconf(_SC_PAGESIZE);
539 GC_ASSERT((word)bound >= pgsz);
540 GC_ASSERT(I_HOLD_LOCK());
542 act.sa_handler = GC_fault_handler_openbsd;
543 sigemptyset(&act.sa_mask);
544 act.sa_flags = SA_NODEFER | SA_RESTART;
545 /* act.sa_restorer is deprecated and should not be initialized. */
546 sigaction(SIGSEGV, &act, &old_segv_act);
548 if (SETJMP(GC_jmp_buf_openbsd) == 0) {
/* Start at the page containing p, then touch successive pages.         */
549 result = (ptr_t)((word)p & ~(pgsz-1));
551 if ((word)result >= (word)bound - pgsz) {
555 result += pgsz; /* no overflow expected */
556 GC_noop1((word)(*result));
561 /* Due to the siglongjump we need to manually unmask SIGPROF. */
562 __syscall(SYS_sigprocmask, SIG_UNBLOCK, sigmask(SIGPROF));
565 sigaction(SIGSEGV, &old_segv_act, 0);
569 /* Return first addressable location > p or bound. */
570 /* Requires the allocation lock. */
/* Inverse of GC_find_limit_openbsd: skips over an unmapped hole by     */
/* probing pages until one is addressable (the fault path re-enters     */
/* via SETJMP != 0 and advances; "firstpass" forces one advance).       */
571 STATIC ptr_t GC_skip_hole_openbsd(ptr_t p, ptr_t bound)
573 static volatile ptr_t result;
574 static volatile int firstpass;
576 struct sigaction act;
577 word pgsz = (word)sysconf(_SC_PAGESIZE);
579 GC_ASSERT((word)bound >= pgsz);
580 GC_ASSERT(I_HOLD_LOCK());
582 act.sa_handler = GC_fault_handler_openbsd;
583 sigemptyset(&act.sa_mask);
584 act.sa_flags = SA_NODEFER | SA_RESTART;
585 /* act.sa_restorer is deprecated and should not be initialized. */
586 sigaction(SIGSEGV, &act, &old_segv_act);
589 result = (ptr_t)((word)p & ~(pgsz-1));
590 if (SETJMP(GC_jmp_buf_openbsd) != 0 || firstpass) {
592 if ((word)result >= (word)bound - pgsz) {
595 result += pgsz; /* no overflow expected */
596 GC_noop1((word)(*result));
600 sigaction(SIGSEGV, &old_segv_act, 0);
/* OS/2 executable header layouts, used (presumably by root-finding     */
/* code outside this excerpt -- TODO confirm) to parse the program's    */
/* own EXE image.  Struct names/opening braces are missing from this    */
/* listing (original numbering jumps).                                  */
609 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
/* Old-style DOS EXE header: magic is 'MZ' (0x5A4D, see EMAGIC).        */
612 unsigned short magic_number;
613 unsigned short padding[29];
617 #define E_MAGIC(x) (x).magic_number
618 #define EMAGIC 0x5A4D
619 #define E_LFANEW(x) (x).new_exe_offset
/* LX (32-bit OS/2) header: magic is "LX" (E32MAGIC1/E32MAGIC2).        */
622 unsigned char magic_number[2];
623 unsigned char byte_order;
624 unsigned char word_order;
625 unsigned long exe_format_level;
628 unsigned long padding1[13];
629 unsigned long object_table_offset;
630 unsigned long object_count;
631 unsigned long padding2[31];
634 #define E32_MAGIC1(x) (x).magic_number[0]
635 #define E32MAGIC1 'L'
636 #define E32_MAGIC2(x) (x).magic_number[1]
637 #define E32MAGIC2 'X'
638 #define E32_BORDER(x) (x).byte_order
640 #define E32_WORDER(x) (x).word_order
642 #define E32_CPU(x) (x).cpu
644 #define E32_OBJTAB(x) (x).object_table_offset
645 #define E32_OBJCNT(x) (x).object_count
/* LX object-table entry fields and flag bits.                          */
651 unsigned long pagemap;
652 unsigned long mapsize;
653 unsigned long reserved;
656 #define O32_FLAGS(x) (x).flags
657 #define OBJREAD 0x0001L
658 #define OBJWRITE 0x0002L
659 #define OBJINVALID 0x0080L
660 #define O32_SIZE(x) (x).size
661 #define O32_BASE(x) (x).base
663 # else /* IBM's compiler */
665 /* A kludge to get around what appears to be a header file bug */
667 # define WORD unsigned short
670 # define DWORD unsigned long
677 # endif /* __IBMC__ */
/* Request the OS/2 toolkit headers we need before including os2.h.     */
679 # define INCL_DOSEXCEPTIONS
680 # define INCL_DOSPROCESS
681 # define INCL_DOSERRORS
682 # define INCL_DOSMODULEMGR
683 # define INCL_DOSMEMMGR
688 /* Find the page size */
/* Page size used throughout the collector; 0 until GC_setpagesize().   */
689 GC_INNER size_t GC_page_size = 0;
691 #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
692 # ifndef VER_PLATFORM_WIN32_CE
693 # define VER_PLATFORM_WIN32_CE 3
696 # if defined(MSWINCE) && defined(THREADS)
/* Set below for old WinCE where VirtualQuery on stacks misbehaves.     */
697 GC_INNER GC_bool GC_dont_query_stack_min = FALSE;
700 GC_INNER SYSTEM_INFO GC_sysinfo;
/* Windows flavor: cache SYSTEM_INFO and derive GC_page_size from it,   */
/* with WinCE-specific adjustments below.                               */
702 GC_INNER void GC_setpagesize(void)
704 GetSystemInfo(&GC_sysinfo);
705 # if defined(CYGWIN32) && (defined(MPROTECT_VDB) || defined(USE_MUNMAP))
706 /* Allocations made with mmap() are aligned to the allocation */
707 /* granularity, which (at least on 64-bit Windows OS) is not the */
708 /* same as the page size. Probably a separate variable could */
709 /* be added to distinguish the allocation granularity from the */
710 /* actual page size, but in practice there is no good reason to */
711 /* make allocations smaller than dwAllocationGranularity, so we */
712 /* just use it instead of the actual page size here (as Cygwin */
713 /* itself does in many cases). */
714 GC_page_size = (size_t)GC_sysinfo.dwAllocationGranularity;
715 GC_ASSERT(GC_page_size >= (size_t)GC_sysinfo.dwPageSize);
717 GC_page_size = (size_t)GC_sysinfo.dwPageSize;
719 # if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
721 OSVERSIONINFO verInfo;
722 /* Check the current WinCE version. */
723 verInfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
724 if (!GetVersionEx(&verInfo))
725 ABORT("GetVersionEx failed");
726 if (verInfo.dwPlatformId == VER_PLATFORM_WIN32_CE &&
727 verInfo.dwMajorVersion < 6) {
728 /* Only the first 32 MB of address space belongs to the */
729 /* current process (unless WinCE 6.0+ or emulation). */
730 GC_sysinfo.lpMaximumApplicationAddress = (LPVOID)((word)32 << 20);
732 /* On some old WinCE versions, it's observed that */
733 /* VirtualQuery calls don't work properly when used to */
734 /* get thread current stack committed minimum. */
735 if (verInfo.dwMajorVersion < 5)
736 GC_dont_query_stack_min = TRUE;
/* True for any Windows page protection that permits writing           */
/* (PAGE_GUARD/PAGE_NOCACHE bits are masked off before the check).     */
744 # define is_writable(prot) ((prot) == PAGE_READWRITE \
745 || (prot) == PAGE_WRITECOPY \
746 || (prot) == PAGE_EXECUTE_READWRITE \
747 || (prot) == PAGE_EXECUTE_WRITECOPY)
748 /* Return the number of bytes that are writable starting at p. */
749 /* The pointer p is assumed to be page aligned. */
750 /* If base is not 0, *base becomes the beginning of the */
751 /* allocation region containing p. */
/* Implemented with a single VirtualQuery; returns 0 for non-writable   */
/* or non-committed regions.                                            */
752 STATIC word GC_get_writable_length(ptr_t p, ptr_t *base)
754 MEMORY_BASIC_INFORMATION buf;
758 result = VirtualQuery(p, &buf, sizeof(buf));
759 if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
760 if (base != 0) *base = (ptr_t)(buf.AllocationBase);
761 protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
762 if (!is_writable(protect)) {
765 if (buf.State != MEM_COMMIT) return(0);
766 return(buf.RegionSize);
/* Win32 variant: stack base = top of the writable region containing    */
/* the (page-truncated) current stack pointer.                          */
769 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
774 /* Set page size if it is not ready (so client can use this */
775 /* function even before GC is initialized). */
776 if (!GC_page_size) GC_setpagesize();
778 trunc_sp = (ptr_t)((word)GC_approx_sp() & ~(GC_page_size - 1));
779 /* FIXME: This won't work if called from a deeply recursive */
780 /* client code (and the committed stack space has grown). */
781 size = GC_get_writable_length(trunc_sp, 0);
782 GC_ASSERT(size != 0);
783 sb -> mem_base = trunc_sp + size;
786 # else /* CYGWIN32 */
787 /* An alternate version for Cygwin (adapted from Dave Korn's */
788 /* gcc version of boehm-gc). */
/* Cygwin variant: read StackBase out of the Thread Information Block,  */
/* either via NtCurrentTeb() or (x86) directly from %fs:4.              */
789 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
792 sb -> mem_base = ((NT_TIB*)NtCurrentTeb())->StackBase;
796 __asm__ ("movl %%fs:4, %0"
798 sb -> mem_base = _tlsbase;
802 # endif /* CYGWIN32 */
803 # define HAVE_GET_STACK_BASE
/* Non-Windows flavor: use the OS page size when a virtual-dirty-bit    */
/* or mmap strategy needs it; otherwise any value works, so fake it     */
/* with HBLKSIZE.                                                       */
806 GC_INNER void GC_setpagesize(void)
808 # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
809 GC_page_size = (size_t)GETPAGESIZE();
810 # if !defined(CPPCHECK)
811 if (0 == GC_page_size)
812 ABORT("getpagesize failed");
815 /* It's acceptable to fake it. */
816 GC_page_size = HBLKSIZE;
819 #endif /* !MSWIN32 */
822 # include <kernel/OS.h>
/* Haiku/BeOS variant: stack base comes from the kernel thread info.    */
824 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
827 get_thread_info(find_thread(NULL),&th);
828 sb->mem_base = th.stack_end;
831 # define HAVE_GET_STACK_BASE
/* OS/2 variant: stack limit is read from the Thread Information Block. */
835 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
837 PTIB ptib; /* thread information block */
839 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
840 WARN("DosGetInfoBlocks failed\n", 0);
841 return GC_UNIMPLEMENTED;
843 sb->mem_base = ptib->tib_pstacklimit;
846 # define HAVE_GET_STACK_BASE
/* Amiga support lives in a separately included file.                   */
851 # include "extra/AmigaOS.c"
853 # define GET_MAIN_STACKBASE_SPECIAL
856 # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
858 typedef void (*GC_fault_handler_t)(int);
/* Previous handlers, saved so GC_reset_fault_handler() can restore     */
/* them.  sigaction-based platforms save struct sigaction; others keep  */
/* the plain signal() handler pointers.                                 */
860 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
861 || defined(HAIKU) || defined(HURD) || defined(FREEBSD) \
863 static struct sigaction old_segv_act;
864 # if defined(_sigargs) /* !Irix6.x */ \
865 || defined(HURD) || defined(NETBSD) || defined(FREEBSD)
866 static struct sigaction old_bus_act;
869 static GC_fault_handler_t old_segv_handler;
871 static GC_fault_handler_t old_bus_handler;
/* Install h as the SIGSEGV (and, on some platforms, SIGBUS) handler,   */
/* saving the previous dispositions in the statics above.               */
875 GC_INNER void GC_set_and_save_fault_handler(GC_fault_handler_t h)
877 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
878 || defined(HAIKU) || defined(HURD) || defined(FREEBSD) \
880 struct sigaction act;
883 # ifdef SIGACTION_FLAGS_NODEFER_HACK
884 /* Was necessary for Solaris 2.3 and very temporary */
886 act.sa_flags = SA_RESTART | SA_NODEFER;
888 act.sa_flags = SA_RESTART;
891 (void) sigemptyset(&act.sa_mask);
892 /* act.sa_restorer is deprecated and should not be initialized. */
893 # ifdef GC_IRIX_THREADS
894 /* Older versions have a bug related to retrieving and */
895 /* and setting a handler at the same time. */
896 (void) sigaction(SIGSEGV, 0, &old_segv_act);
897 (void) sigaction(SIGSEGV, &act, 0);
899 (void) sigaction(SIGSEGV, &act, &old_segv_act);
900 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
901 || defined(HURD) || defined(NETBSD) || defined(FREEBSD)
902 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
903 /* Pthreads doesn't exist under Irix 5.x, so we */
904 /* don't have to worry in the threads case. */
905 (void) sigaction(SIGBUS, &act, &old_bus_act);
907 # endif /* !GC_IRIX_THREADS */
/* Fallback path for platforms using plain signal().                    */
909 old_segv_handler = signal(SIGSEGV, h);
911 old_bus_handler = signal(SIGBUS, h);
914 # if defined(CPPCHECK) && defined(ADDRESS_SANITIZER)
/* Keep __asan_default_options referenced so cppcheck sees it used.     */
915 GC_noop1((word)&__asan_default_options);
918 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
920 # if defined(NEED_FIND_LIMIT) \
921 || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS))
922 /* Some tools to implement HEURISTIC2 */
923 # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
925 GC_INNER JMP_BUF GC_jmp_buf;
/* Temporary SIGSEGV handler for memory probing: escape via longjmp.    */
927 STATIC void GC_fault_handler(int sig GC_ATTR_UNUSED)
929 LONGJMP(GC_jmp_buf, 1);
/* Install the probing fault handler (saves previous disposition).      */
932 GC_INNER void GC_setup_temporary_fault_handler(void)
934 /* Handler is process-wide, so this should only happen in */
935 /* one thread at a time. */
936 GC_ASSERT(I_HOLD_LOCK());
937 GC_set_and_save_fault_handler(GC_fault_handler);
/* Restore the handlers saved by GC_set_and_save_fault_handler().       */
940 GC_INNER void GC_reset_fault_handler(void)
942 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
943 || defined(HAIKU) || defined(HURD) || defined(FREEBSD) \
945 (void) sigaction(SIGSEGV, &old_segv_act, 0);
946 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
947 || defined(HURD) || defined(NETBSD)
948 (void) sigaction(SIGBUS, &old_bus_act, 0);
951 (void) signal(SIGSEGV, old_segv_handler);
953 (void) signal(SIGBUS, old_bus_handler);
958 /* Return the first non-addressable location > p (up) or */
959 /* the smallest location q s.t. [q,p) is addressable (!up). */
960 /* We assume that p (up) or p-1 (!up) is addressable. */
961 /* Requires allocation lock. */
/* Probes MIN_PAGE_SIZE steps in the chosen direction, dereferencing    */
/* each candidate via GC_noop1 until a SIGSEGV lands in GC_jmp_buf.     */
962 STATIC ptr_t GC_find_limit_with_bound(ptr_t p, GC_bool up, ptr_t bound)
964 static volatile ptr_t result;
965 /* Safer if static, since otherwise it may not be */
966 /* preserved across the longjmp. Can safely be */
967 /* static since it's only called with the */
968 /* allocation lock held. */
970 GC_ASSERT(up ? (word)bound >= MIN_PAGE_SIZE
971 : (word)bound <= ~(word)MIN_PAGE_SIZE);
972 GC_ASSERT(I_HOLD_LOCK());
973 GC_setup_temporary_fault_handler();
974 if (SETJMP(GC_jmp_buf) == 0) {
975 result = (ptr_t)(((word)(p))
976 & ~(MIN_PAGE_SIZE-1));
/* Upward scan: stop just before overshooting bound.                    */
979 if ((word)result >= (word)bound - MIN_PAGE_SIZE) {
983 result += MIN_PAGE_SIZE; /* no overflow expected */
/* Downward scan: clamp near bound before the final decrement.          */
985 if ((word)result <= (word)bound + MIN_PAGE_SIZE) {
986 result = bound - MIN_PAGE_SIZE;
987 /* This is to compensate */
988 /* further result increment (we */
989 /* do not modify "up" variable */
990 /* since it might be clobbered */
991 /* by setjmp otherwise). */
994 result -= MIN_PAGE_SIZE; /* no underflow expected */
996 GC_noop1((word)(*result));
999 GC_reset_fault_handler();
1001 result += MIN_PAGE_SIZE;
/* Public wrapper: unbounded search (to GC_WORD_MAX upward, 0 downward). */
1006 void * GC_find_limit(void * p, int up)
1008 return GC_find_limit_with_bound((ptr_t)p, (GC_bool)up,
1009 up ? (ptr_t)GC_WORD_MAX : 0);
1011 # endif /* NEED_FIND_LIMIT || USE_PROC_FOR_LIBRARIES */
1013 #ifdef HPUX_STACKBOTTOM
1015 #include <sys/param.h>
1016 #include <sys/pstat.h>
/* HP-UX/IA64: ask pstat for the RSE (register stack engine) backing    */
/* store region; fall back to an alignment heuristic below.             */
1018 GC_INNER ptr_t GC_get_register_stack_base(void)
1020 struct pst_vm_status vm_status;
1023 while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
1024 if (vm_status.pst_type == PS_RSESTACK) {
1025 return (ptr_t) vm_status.pst_vaddr;
1029 /* old way to get the register stackbottom */
1030 return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
1031 & ~(BACKING_STORE_ALIGNMENT - 1));
/* NOTE(review): comment says HPUX_STACK_BOTTOM but the guard opened    */
/* with HPUX_STACKBOTTOM -- the trailing comment is misspelled.         */
1034 #endif /* HPUX_STACK_BOTTOM */
1036 #ifdef LINUX_STACKBOTTOM
1038 # include <sys/types.h>
1039 # include <sys/stat.h>
1041 # define STAT_SKIP 27 /* Number of fields preceding startstack */
1042 /* field in /proc/self/stat */
1044 # ifdef USE_LIBC_PRIVATES
/* Weak references to glibc internals; their addresses may be 0 when    */
/* the symbols are absent, hence the double checks below.               */
1046 # pragma weak __libc_stack_end
1047 extern ptr_t __libc_stack_end;
1049 # pragma weak __libc_ia64_register_backing_store_base
1050 extern ptr_t __libc_ia64_register_backing_store_base;
/* IA64/Linux: prefer the glibc-private variable, then /proc, then a    */
/* GC_find_limit search from GC_save_regs_in_stack().                   */
1056 GC_INNER ptr_t GC_get_register_stack_base(void)
1060 # ifdef USE_LIBC_PRIVATES
1061 if (0 != &__libc_ia64_register_backing_store_base
1062 && 0 != __libc_ia64_register_backing_store_base) {
1063 /* Glibc 2.2.4 has a bug such that for dynamically linked */
1064 /* executables __libc_ia64_register_backing_store_base is */
1065 /* defined but uninitialized during constructor calls. */
1066 /* Hence we check for both nonzero address and value. */
1067 return __libc_ia64_register_backing_store_base;
1070 result = backing_store_base_from_proc();
1072 result = (ptr_t)GC_find_limit(GC_save_regs_in_stack(), FALSE);
1073 /* This works better than a constant displacement heuristic. */
/* Determine the main thread's stack bottom on Linux, preferring the    */
/* glibc-private __libc_stack_end and falling back to parsing the       */
/* "startstack" field (field STAT_SKIP+1) of /proc/self/stat.           */
1079 STATIC ptr_t GC_linux_main_stack_base(void)
1081 /* We read the stack bottom value from /proc/self/stat. We do this */
1082 /* using direct I/O system calls in order to avoid calling malloc */
1083 /* in case REDIRECT_MALLOC is defined. */
1085 /* Also defined in pthread_support.c. */
1086 # define STAT_BUF_SIZE 4096
1087 # define STAT_READ read
1089 /* Should probably call the real read, if read is wrapped. */
1090 char stat_buf[STAT_BUF_SIZE];
1093 int i, buf_offset = 0, len;
1095 /* First try the easy way. This should work for glibc 2.2 */
1096 /* This fails in a prelinked ("prelink" command) executable */
1097 /* since the correct value of __libc_stack_end never */
1098 /* becomes visible to us. The second test works around */
1100 # ifdef USE_LIBC_PRIVATES
1101 if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
1103 /* Some versions of glibc set the address 16 bytes too */
1104 /* low while the initialization code is running. */
1105 if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
1106 return __libc_stack_end + 0x10;
1107 } /* Otherwise it's not safe to add 16 bytes and we fall */
1108 /* back to using /proc. */
1109 # elif defined(SPARC)
1110 /* Older versions of glibc for 64-bit SPARC do not set this */
1111 /* variable correctly, it gets set to either zero or one. */
1112 if (__libc_stack_end != (ptr_t) (unsigned long)0x1)
1113 return __libc_stack_end;
1115 return __libc_stack_end;
/* Fallback: read and hand-parse /proc/self/stat (no stdio, no malloc). */
1119 f = open("/proc/self/stat", O_RDONLY);
1121 ABORT("Couldn't read /proc/self/stat");
1122 len = STAT_READ(f, stat_buf, STAT_BUF_SIZE);
1125 /* Skip the required number of fields. This number is hopefully */
1126 /* constant across all Linux implementations. */
1127 for (i = 0; i < STAT_SKIP; ++i) {
1128 while (buf_offset < len && isspace(stat_buf[buf_offset++])) {
1131 while (buf_offset < len && !isspace(stat_buf[buf_offset++])) {
1136 while (buf_offset < len && isspace(stat_buf[buf_offset])) {
1139 /* Find the end of the number and cut the buffer there. */
1140 for (i = 0; buf_offset + i < len; i++) {
1141 if (!isdigit(stat_buf[buf_offset + i])) break;
1143 if (buf_offset + i >= len) ABORT("Could not parse /proc/self/stat");
1144 stat_buf[buf_offset + i] = '\0';
1146 result = (word)STRTOULL(&stat_buf[buf_offset], NULL, 10);
/* Sanity check: the stack bottom must be reasonably high and           */
/* word-aligned.                                                        */
1147 if (result < 0x100000 || (result & (sizeof(word) - 1)) != 0)
1148 ABORT("Absurd stack bottom value");
1149 return (ptr_t)result;
1151 #endif /* LINUX_STACKBOTTOM */
1153 #ifdef FREEBSD_STACKBOTTOM
1154 /* This uses an undocumented sysctl call, but at least one expert */
1155 /* believes it will stay. */
1157 # include <unistd.h>
1158 # include <sys/types.h>
1159 # include <sys/sysctl.h>
/* Query kern.usrstack via sysctl to obtain the main stack base;        */
/* aborts on sysctl failure.                                            */
1161 STATIC ptr_t GC_freebsd_main_stack_base(void)
1163 int nm[2] = {CTL_KERN, KERN_USRSTACK};
1165 size_t len = sizeof(ptr_t);
1166 int r = sysctl(nm, 2, &base, &len, NULL, 0);
1167 if (r) ABORT("Error getting main stack base");
1170 #endif /* FREEBSD_STACKBOTTOM */
1172 #if defined(ECOS) || defined(NOSYS)
/* Trivial platform-specific stack-base variants; each defines          */
/* GET_MAIN_STACKBASE_SPECIAL to suppress the generic implementation.   */
1173 ptr_t GC_get_main_stack_base(void)
1177 # define GET_MAIN_STACKBASE_SPECIAL
1178 #elif defined(SYMBIAN)
1180 extern int GC_get_main_symbian_stack_base(void);
/* Symbian: delegate to the platform-specific helper.                   */
1183 ptr_t GC_get_main_stack_base(void)
1185 return (ptr_t)GC_get_main_symbian_stack_base();
1187 # define GET_MAIN_STACKBASE_SPECIAL
1188 #elif !defined(AMIGA) && !defined(HAIKU) && !defined(OS2) \
1189 && !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) \
1190 && !defined(GC_OPENBSD_THREADS) \
1191 && (!defined(GC_SOLARIS_THREADS) || defined(_STRICT_STDC))
1193 # if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
1194 && (defined(THREADS) || defined(USE_GET_STACKBASE_FOR_MAIN))
1195 # include <pthread.h>
1196 # ifdef HAVE_PTHREAD_NP_H
1197 # include <pthread_np.h> /* for pthread_attr_get_np() */
1199 # elif defined(DARWIN) && !defined(NO_PTHREAD_GET_STACKADDR_NP)
1200 /* We could use pthread_get_stackaddr_np even in case of a */
1201 /* single-threaded gclib (there is no -lpthread on Darwin). */
1202 # include <pthread.h>
1204 # define STACKBOTTOM (ptr_t)pthread_get_stackaddr_np(pthread_self())
/* Compute the main thread's stack bottom by whichever strategy the  */
/* build configuration selects: pthread attribute query, a fixed     */
/* STACKBOTTOM, page-alignment heuristic (HEURISTIC1, presumably --  */
/* the #ifdef line is outside this excerpt), /proc parsing, sysctl,  */
/* or fault-probing (HEURISTIC2).  NOTE(review): this excerpt is     */
/* gapped; several braces/#else/#endif lines are not visible here.   */
1207 ptr_t GC_get_main_stack_base(void)
1210 # if (defined(HAVE_PTHREAD_ATTR_GET_NP) \
1211 || defined(HAVE_PTHREAD_GETATTR_NP)) \
1212 && (defined(USE_GET_STACKBASE_FOR_MAIN) \
1213 || (defined(THREADS) && !defined(REDIRECT_MALLOC)))
1214 pthread_attr_t attr;
1218 # ifdef HAVE_PTHREAD_ATTR_GET_NP
/* BSD flavor: attr must be initialized first; on failure of        */
/* pthread_attr_get_np() the attr is destroyed and we fall through. */
1219 if (pthread_attr_init(&attr) == 0
1220 && (pthread_attr_get_np(pthread_self(), &attr) == 0
1221 ? TRUE : (pthread_attr_destroy(&attr), FALSE)))
1222 # else /* HAVE_PTHREAD_GETATTR_NP */
1223 if (pthread_getattr_np(pthread_self(), &attr) == 0)
/* pthread_attr_getstack() yields the lowest stack address; for a   */
/* downward-growing stack the bottom is stackaddr + size.           */
1226 if (pthread_attr_getstack(&attr, &stackaddr, &size) == 0
1227 && stackaddr != NULL) {
1228 (void)pthread_attr_destroy(&attr);
1229 # ifdef STACK_GROWS_DOWN
1230 stackaddr = (char *)stackaddr + size;
1232 return (ptr_t)stackaddr;
1234 (void)pthread_attr_destroy(&attr);
1236 WARN("pthread_getattr_np or pthread_attr_getstack failed"
1237 " for main thread\n", 0);
/* Fall back to the configured strategies below. */
1240 result = STACKBOTTOM;
/* Heuristic: round the current stack pointer to a STACK_GRAN       */
/* boundary in the direction of stack growth.                       */
1243 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
1244 # ifdef STACK_GROWS_DOWN
1245 result = (ptr_t)(((word)GC_approx_sp() + STACKBOTTOM_ALIGNMENT_M1)
1246 & ~STACKBOTTOM_ALIGNMENT_M1);
1248 result = (ptr_t)((word)GC_approx_sp() & ~STACKBOTTOM_ALIGNMENT_M1);
1250 # elif defined(LINUX_STACKBOTTOM)
1251 result = GC_linux_main_stack_base();
1252 # elif defined(FREEBSD_STACKBOTTOM)
1253 result = GC_freebsd_main_stack_base();
1254 # elif defined(HEURISTIC2)
/* Probe for the end of the stack region by taking faults; clamp to */
/* HEURISTIC2_LIMIT if the probe overshoots past it.                */
1256 ptr_t sp = GC_approx_sp();
1257 # ifdef STACK_GROWS_DOWN
1258 result = (ptr_t)GC_find_limit(sp, TRUE);
1259 # if defined(HEURISTIC2_LIMIT) && !defined(CPPCHECK)
1260 if ((word)result > (word)HEURISTIC2_LIMIT
1261 && (word)sp < (word)HEURISTIC2_LIMIT) {
1262 result = HEURISTIC2_LIMIT;
1266 result = (ptr_t)GC_find_limit(sp, FALSE);
1267 # if defined(HEURISTIC2_LIMIT) && !defined(CPPCHECK)
1268 if ((word)result < (word)HEURISTIC2_LIMIT
1269 && (word)sp > (word)HEURISTIC2_LIMIT) {
1270 result = HEURISTIC2_LIMIT;
1275 # elif defined(STACK_NOT_SCANNED) || defined(CPPCHECK)
1278 # error None of HEURISTIC* and *STACKBOTTOM defined!
/* -sizeof(ptr_t) serves as the "top of address space" sentinel     */
/* when no better bound is available.                               */
1280 # if defined(STACK_GROWS_DOWN) && !defined(CPPCHECK)
1282 result = (ptr_t)(signed_word)(-sizeof(ptr_t));
1285 # if !defined(CPPCHECK)
1286 GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)result);
1290 # define GET_MAIN_STACKBASE_SPECIAL
1291 #endif /* !AMIGA, !HAIKU, !OPENBSD, !OS2, !Windows */
1293 #if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
1294 && defined(THREADS) && !defined(HAVE_GET_STACK_BASE)
1295 # include <pthread.h>
1296 # ifdef HAVE_PTHREAD_NP_H
1297 # include <pthread_np.h>
/* Fill b with the current thread's stack bounds using the          */
/* non-portable pthread attribute API; returns GC_UNIMPLEMENTED if  */
/* the query fails.  NOTE(review): excerpt is gapped (some braces   */
/* and local declarations, e.g. `size`, are outside this view).     */
1300 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1302 pthread_attr_t attr;
1308 # ifdef HAVE_PTHREAD_ATTR_GET_NP
/* BSD flavor: pthread_attr_get_np() requires an initialized attr. */
1309 if (pthread_attr_init(&attr) != 0)
1310 ABORT("pthread_attr_init failed");
1311 if (pthread_attr_get_np(pthread_self(), &attr) != 0) {
1312 WARN("pthread_attr_get_np failed\n", 0);
1313 (void)pthread_attr_destroy(&attr);
1314 return GC_UNIMPLEMENTED;
1316 # else /* HAVE_PTHREAD_GETATTR_NP */
/* glibc flavor: pthread_getattr_np() fills attr directly. */
1317 if (pthread_getattr_np(pthread_self(), &attr) != 0) {
1318 WARN("pthread_getattr_np failed\n", 0);
1319 return GC_UNIMPLEMENTED;
1322 if (pthread_attr_getstack(&attr, &(b -> mem_base), &size) != 0) {
1323 ABORT("pthread_attr_getstack failed");
1325 (void)pthread_attr_destroy(&attr);
1326 # ifdef STACK_GROWS_DOWN
/* getstack returns the lowest address; the GC wants the bottom     */
/* (highest address), hence the +size adjustment.                   */
1327 b -> mem_base = (char *)(b -> mem_base) + size;
1330 /* We could try backing_store_base_from_proc, but that's safe */
1331 /* only if no mappings are being asynchronously created. */
1332 /* Subtracting the size from the stack base doesn't work for at */
1333 /* least the main thread. */
/* Register-backing-store base (IA64, presumably -- the guarding    */
/* #ifdef is outside this excerpt): bound the backward scan by the  */
/* nearest known stack below bsp so we do not walk into it.         */
1336 IF_CANCEL(int cancel_state;)
1340 DISABLE_CANCEL(cancel_state);
1341 bsp = GC_save_regs_in_stack();
1342 next_stack = GC_greatest_stack_base_below(bsp);
1343 if (0 == next_stack) {
1344 b -> reg_base = GC_find_limit(bsp, FALSE);
1346 /* Avoid walking backwards into preceding memory stack and */
1348 b -> reg_base = GC_find_limit_with_bound(bsp, FALSE, next_stack);
1350 RESTORE_CANCEL(cancel_state);
1356 # define HAVE_GET_STACK_BASE
1357 #endif /* THREADS && (HAVE_PTHREAD_ATTR_GET_NP || HAVE_PTHREAD_GETATTR_NP) */
1359 #if defined(GC_DARWIN_THREADS) && !defined(NO_PTHREAD_GET_STACKADDR_NP)
1360 # include <pthread.h>
/* Darwin: the stack bottom is available directly from              */
/* pthread_get_stackaddr_np() (no -lpthread needed on Darwin).      */
1362 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1364 /* pthread_get_stackaddr_np() should return stack bottom (highest */
1365 /* stack address plus 1). */
1366 b->mem_base = pthread_get_stackaddr_np(pthread_self());
/* Sanity check: the current stack pointer must lie within the      */
/* reported stack (i.e. be "hotter" than its bottom).               */
1367 GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)b->mem_base);
1370 # define HAVE_GET_STACK_BASE
1371 #endif /* GC_DARWIN_THREADS */
1373 #ifdef GC_OPENBSD_THREADS
1374 # include <sys/signal.h>
1375 # include <pthread.h>
1376 # include <pthread_np.h>
1378 /* Find the stack using pthread_stackseg_np(). */
/* OpenBSD: query the kernel-maintained stack segment via           */
/* pthread_stackseg_np(); ss_sp is the stack bottom.                */
1379 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
1382 if (pthread_stackseg_np(pthread_self(), &stack))
1383 ABORT("pthread_stackseg_np(self) failed");
1384 sb->mem_base = stack.ss_sp;
1387 # define HAVE_GET_STACK_BASE
1388 #endif /* GC_OPENBSD_THREADS */
1390 #if defined(GC_SOLARIS_THREADS) && !defined(_STRICT_STDC)
1392 # include <thread.h>
1393 # include <signal.h>
1394 # include <pthread.h>
1396 /* These variables are used to cache ss_sp value for the primordial */
1397 /* thread (it's better not to call thr_stksegment() twice for this */
1398 /* thread - see JDK bug #4352906). */
1399 static pthread_t stackbase_main_self = 0;
1400 /* 0 means stackbase_main_ss_sp value is unset. */
1401 static void *stackbase_main_ss_sp = NULL;
/* Solaris: obtain the stack bottom via thr_stksegment(), caching   */
/* the result for the primordial thread (see JDK bug note above).   */
1403 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1406 pthread_t self = pthread_self();
1408 if (self == stackbase_main_self)
1410 /* If the client calls GC_get_stack_base() from the main thread */
1411 /* then just return the cached value. */
1412 b -> mem_base = stackbase_main_ss_sp;
1413 GC_ASSERT(b -> mem_base != NULL);
1417 if (thr_stksegment(&s)) {
1418 /* According to the manual, the only failure error code returned */
1419 /* is EAGAIN meaning "the information is not available due to the */
1420 /* thread is not yet completely initialized or it is an internal */
1421 /* thread" - this shouldn't happen here. */
1422 ABORT("thr_stksegment failed");
1424 /* s.ss_sp holds the pointer to the stack bottom. */
1425 GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)s.ss_sp);
/* Cache only for the primordial thread (thr_main() != 0); done     */
/* during GC_init, so no race per the comment below.                */
1427 if (!stackbase_main_self && thr_main() != 0)
1429 /* Cache the stack bottom pointer for the primordial thread */
1430 /* (this is done during GC_init, so there is no race). */
1431 stackbase_main_ss_sp = s.ss_sp;
1432 stackbase_main_self = self;
1435 b -> mem_base = s.ss_sp;
1438 # define HAVE_GET_STACK_BASE
1439 #endif /* GC_SOLARIS_THREADS */
1441 #ifdef GC_RTEMS_PTHREADS
/* RTEMS: the stack bottom is supplied by the RTEMS runtime.        */
1442 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
1444 sb->mem_base = rtems_get_stack_bottom();
1447 # define HAVE_GET_STACK_BASE
1448 #endif /* GC_RTEMS_PTHREADS */
1450 #ifndef HAVE_GET_STACK_BASE
1451 # ifdef NEED_FIND_LIMIT
1452 /* Retrieve the stack bottom. */
1453 /* Using the GC_find_limit version is risky. */
1454 /* On IA64, for example, there is no guard page between the */
1455 /* stack of one thread and the register backing store of the */
1456 /* next. Thus this is likely to identify way too large a */
1457 /* "stack" and thus at least result in disastrous performance. */
1458 /* TODO: Implement better strategies here. */
/* Fallback: probe for the stack bounds by taking faults            */
/* (GC_find_limit).  See the warning above about IA64 -- the probe  */
/* may vastly overestimate the stack.  Cancellation is disabled so  */
/* the fault handler is not interrupted mid-probe.                  */
1459 GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1461 IF_CANCEL(int cancel_state;)
1465 DISABLE_CANCEL(cancel_state); /* May be unnecessary? */
1466 # ifdef STACK_GROWS_DOWN
1467 b -> mem_base = GC_find_limit(GC_approx_sp(), TRUE);
/* NOTE(review): the reg_base probe is presumably IA64-only; its    */
/* guarding #ifdef lies outside this excerpt.                       */
1469 b -> reg_base = GC_find_limit(GC_save_regs_in_stack(), FALSE);
1472 b -> mem_base = GC_find_limit(GC_approx_sp(), FALSE);
1474 RESTORE_CANCEL(cancel_state);
/* Last-resort variant: for a single-threaded build with a special  */
/* main-stack routine, reuse it; otherwise report the operation as  */
/* unimplemented.                                                   */
1479 GC_API int GC_CALL GC_get_stack_base(
1480 struct GC_stack_base *b GC_ATTR_UNUSED)
1482 # if defined(GET_MAIN_STACKBASE_SPECIAL) && !defined(THREADS) \
1484 b->mem_base = GC_get_main_stack_base();
1487 return GC_UNIMPLEMENTED;
1490 # endif /* !NEED_FIND_LIMIT */
1491 #endif /* !HAVE_GET_STACK_BASE */
1493 #ifndef GET_MAIN_STACKBASE_SPECIAL
1494 /* This is always called from the main thread. Default implementation. */
1495 ptr_t GC_get_main_stack_base(void)
1497 struct GC_stack_base sb;
1499 if (GC_get_stack_base(&sb) != GC_SUCCESS)
1500 ABORT("GC_get_stack_base failed");
1501 GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)sb.mem_base);
1502 return (ptr_t)sb.mem_base;
1504 #endif /* !GET_MAIN_STACKBASE_SPECIAL */
1506 /* Register static data segment(s) as roots. If more data segments are */
1507 /* added later then they need to be registered at that point (as we do */
1508 /* with SunOS dynamic loading), or GC_mark_roots needs to check for */
1509 /* them (as we do with PCR). Called with allocator lock held. */
/* OS/2: locate the running executable, parse its LX (e32) header,  */
/* and register every readable+writable object table segment as a   */
/* GC root.  Aborts on any parse failure.                           */
1512 void GC_register_data_segments(void)
1516 HMODULE module_handle;
1517 # define PBUFSIZ 512
1518 UCHAR path[PBUFSIZ];
1520 struct exe_hdr hdrdos; /* MSDOS header. */
1521 struct e32_exe hdr386; /* Real header for my executable */
1522 struct o32_obj seg; /* Current segment */
1525 # if defined(CPPCHECK)
1526 hdrdos.padding[0] = 0; /* to prevent "field unused" warnings */
1527 hdr386.exe_format_level = 0;
1529 hdr386.padding1[0] = 0;
1530 hdr386.padding2[0] = 0;
/* Find our own executable's path via the process info block. */
1535 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
1536 ABORT("DosGetInfoBlocks failed");
1538 module_handle = ppib -> pib_hmte;
1539 if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
1540 ABORT("DosQueryModuleName failed");
1542 myexefile = fopen(path, "rb");
1543 if (myexefile == 0) {
1544 ABORT_ARG1("Failed to open executable", ": %s", path);
1546 if (fread((char *)(&hdrdos), 1, sizeof(hdrdos), myexefile)
1548 ABORT_ARG1("Could not read MSDOS header", " from: %s", path);
1550 if (E_MAGIC(hdrdos) != EMAGIC) {
1551 ABORT_ARG1("Bad DOS magic number", " in file: %s", path);
/* NOTE(review): this error message duplicates the previous one,    */
/* though the failure here is actually a seek error, not a magic-   */
/* number mismatch; consider a distinct message upstream.           */
1553 if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
1554 ABORT_ARG1("Bad DOS magic number", " in file: %s", path);
1556 if (fread((char *)(&hdr386), 1, sizeof(hdr386), myexefile)
1558 ABORT_ARG1("Could not read OS/2 header", " from: %s", path);
1560 if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
1561 ABORT_ARG1("Bad OS/2 magic number", " in file: %s", path);
1563 if (E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
1564 ABORT_ARG1("Bad byte order in executable", " file: %s", path);
1566 if (E32_CPU(hdr386) == E32CPU286) {
1567 ABORT_ARG1("GC cannot handle 80286 executables", ": %s", path);
1569 if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
1571 ABORT_ARG1("Seek to object table failed", " in file: %s", path);
/* Walk the object table; only read+write segments can contain      */
/* mutable pointers, so everything else is skipped.                 */
1573 for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
1575 if (fread((char *)(&seg), 1, sizeof(seg), myexefile) < sizeof(seg)) {
1576 ABORT_ARG1("Could not read obj table entry", " from file: %s", path);
1578 flags = O32_FLAGS(seg);
1579 if (!(flags & OBJWRITE)) continue;
1580 if (!(flags & OBJREAD)) continue;
1581 if (flags & OBJINVALID) {
1582 GC_err_printf("Object with invalid pages?\n");
1585 GC_add_roots_inner((ptr_t)O32_BASE(seg),
1586 (ptr_t)(O32_BASE(seg)+O32_SIZE(seg)), FALSE);
1588 (void)fclose(myexefile);
1593 # if defined(GWW_VDB)
1594 # ifndef MEM_WRITE_WATCH
1595 # define MEM_WRITE_WATCH 0x200000
1597 # ifndef WRITE_WATCH_FLAG_RESET
1598 # define WRITE_WATCH_FLAG_RESET 1
1601 /* Since we can't easily check whether ULONG_PTR and SIZE_T are */
1602 /* defined in Win32 basetsd.h, we define own ULONG_PTR. */
1603 # define GC_ULONG_PTR word
/* Dynamically-resolved pointer to kernel32's GetWriteWatch(), plus */
/* the VirtualAlloc flag to request write-watched pages.  Both stay */
/* zero/NULL when the feature is unavailable.                       */
1605 typedef UINT (WINAPI * GetWriteWatch_type)(
1606 DWORD, PVOID, GC_ULONG_PTR /* SIZE_T */,
1607 PVOID *, GC_ULONG_PTR *, PULONG);
1608 static GetWriteWatch_type GetWriteWatch_func;
1609 static DWORD GetWriteWatch_alloc_flag;
1611 # define GC_GWW_AVAILABLE() (GetWriteWatch_func != NULL)
/* Probe at runtime whether GetWriteWatch() is both present in      */
/* kernel32 and actually functional (some kernels export it but     */
/* reject MEM_WRITE_WATCH).  Sets GetWriteWatch_func and            */
/* GetWriteWatch_alloc_flag on success; may be vetoed by the        */
/* GC_USE_GETWRITEWATCH environment variable when MPROTECT_VDB is   */
/* available as a fallback.                                         */
1613 static void detect_GetWriteWatch(void)
1615 static GC_bool done;
1620 # if defined(MPROTECT_VDB)
1622 char * str = GETENV("GC_USE_GETWRITEWATCH");
1623 # if defined(GC_PREFER_MPROTECT_VDB)
1624 if (str == NULL || (*str == '0' && *(str + 1) == '\0')) {
1625 /* GC_USE_GETWRITEWATCH is unset or set to "0". */
1626 done = TRUE; /* falling back to MPROTECT_VDB strategy. */
1627 /* This should work as if GWW_VDB is undefined. */
1631 if (str != NULL && *str == '0' && *(str + 1) == '\0') {
1632 /* GC_USE_GETWRITEWATCH is set "0". */
1633 done = TRUE; /* falling back to MPROTECT_VDB strategy. */
1640 # ifdef MSWINRT_FLAVOR
/* WinRT has no GetModuleHandle; recover kernel32's base from the   */
/* allocation containing GetProcAddress itself.                     */
1642 MEMORY_BASIC_INFORMATION memInfo;
1643 SIZE_T result = VirtualQuery(GetProcAddress,
1644 &memInfo, sizeof(memInfo));
1645 if (result != sizeof(memInfo))
1646 ABORT("Weird VirtualQuery result");
1647 hK32 = (HMODULE)memInfo.AllocationBase;
1650 hK32 = GetModuleHandle(TEXT("kernel32.dll"));
1652 if (hK32 != (HMODULE)0 &&
1653 (GetWriteWatch_func = (GetWriteWatch_type)GetProcAddress(hK32,
1654 "GetWriteWatch")) != NULL) {
1655 /* Also check whether VirtualAlloc accepts MEM_WRITE_WATCH, */
1656 /* as some versions of kernel32.dll have one but not the */
1657 /* other, making the feature completely broken. */
1658 void * page = VirtualAlloc(NULL, GC_page_size,
1659 MEM_WRITE_WATCH | MEM_RESERVE,
1663 GC_ULONG_PTR count = 16;
1665 /* Check that it actually works. In spite of some */
1666 /* documentation it actually seems to exist on Win2K. */
1667 /* This test may be unnecessary, but ... */
1668 if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
1673 /* GetWriteWatch always fails. */
1674 GetWriteWatch_func = NULL;
1676 GetWriteWatch_alloc_flag = MEM_WRITE_WATCH;
1678 VirtualFree(page, 0 /* dwSize */, MEM_RELEASE);
1680 /* GetWriteWatch will be useless. */
1681 GetWriteWatch_func = NULL;
1684 # ifndef SMALL_CONFIG
1685 if (GetWriteWatch_func == NULL) {
1686 GC_COND_LOG_PRINTF("Did not find a usable GetWriteWatch()\n");
1688 GC_COND_LOG_PRINTF("Using GetWriteWatch()\n");
1695 # define GetWriteWatch_alloc_flag 0
1696 # endif /* !GWW_VDB */
1698 # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
1701 /* Unfortunately, we have to handle win32s very differently from NT, */
1702 /* Since VirtualQuery has very different semantics. In particular, */
1703 /* under win32s a VirtualQuery call on an unmapped page returns an */
1704 /* invalid result. Under NT, GC_register_data_segments is a no-op */
1705 /* and all real work is done by GC_register_dynamic_libraries. Under */
1706 /* win32s, we cannot find the data segments associated with dll's. */
1707 /* We register the main data segment here. */
1708 GC_INNER GC_bool GC_no_win32_dlls = FALSE;
1709 /* This used to be set for gcc, to avoid dealing with */
1710 /* the structured exception handling issues. But we now have */
1711 /* assembly code to do that right. */
1713 GC_INNER GC_bool GC_wnt = FALSE;
1714 /* This is a Windows NT derivative, i.e. NT, Win2K, XP or later. */
/* One-time Win32 initialization: determine whether we run on an    */
/* NT-derived Windows (GC_wnt) vs. win32s (GC_no_win32_dlls), and   */
/* disable unmapping in the latter case.                            */
1716 GC_INNER void GC_init_win32(void)
1718 # if defined(_WIN64) || (defined(_MSC_VER) && _MSC_VER >= 1800)
1719 /* MS Visual Studio 2013 deprecates GetVersion, but on the other */
1720 /* hand it cannot be used to target pre-Win2K. */
1723 /* Set GC_wnt. If we're running under win32s, assume that no */
1724 /* DLLs will be loaded. I doubt anyone still runs win32s, but... */
1725 DWORD v = GetVersion();
/* High bit set means Win9x/win32s family; low byte <= 3 then       */
/* distinguishes win32s, per the legacy GetVersion encoding.        */
1727 GC_wnt = !(v & 0x80000000);
1728 GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
1731 if (GC_no_win32_dlls) {
1732 /* Turn off unmapping for safety (since may not work well with */
1734 GC_unmap_threshold = 0;
1739 /* Return the smallest address a such that VirtualQuery */
1740 /* returns correct results for all addresses between a and start. */
1741 /* Assumes VirtualQuery returns correct information for start. */
/* Walk backwards, one region at a time, from `start` to the lowest */
/* address for which VirtualQuery still reports a valid allocation; */
/* stops at the minimum application address or at an undescribed    */
/* page.  (Win32s support; see header comment above.)               */
1742 STATIC ptr_t GC_least_described_address(ptr_t start)
1744 MEMORY_BASIC_INFORMATION buf;
1748 limit = GC_sysinfo.lpMinimumApplicationAddress;
1749 p = (ptr_t)((word)start & ~(GC_page_size - 1));
/* Probe the page just below the current candidate. */
1752 LPVOID q = (LPVOID)(p - GC_page_size);
1754 if ((word)q > (word)p /* underflow */ || (word)q < (word)limit) break;
1755 result = VirtualQuery(q, &buf, sizeof(buf));
1756 if (result != sizeof(buf) || buf.AllocationBase == 0) break;
/* Jump directly to the base of the allocation containing q. */
1757 p = (ptr_t)(buf.AllocationBase);
1761 # endif /* MSWIN32 */
1763 # ifndef REDIRECT_MALLOC
1764 /* We maintain a linked list of AllocationBase values that we know */
1765 /* correspond to malloc heap sections. Currently this is only called */
1766 /* during a GC. But there is some hope that for long running */
1767 /* programs we will eventually see most heap sections. */
1769 /* In the long run, it would be more reliable to occasionally walk */
1770 /* the malloc heap with HeapWalk on the default heap. But that */
1771 /* apparently works only for NT-based Windows. */
1773 STATIC size_t GC_max_root_size = 100000; /* Appr. largest root size. */
1775 # ifdef USE_WINALLOC
1776 /* In the long run, a better data structure would also be nice ... */
/* Singly-linked list of AllocationBase values known to belong to   */
/* the system malloc heap (see discussion above).                   */
1777 STATIC struct GC_malloc_heap_list {
1778 void * allocation_base;
1779 struct GC_malloc_heap_list *next;
1780 } *GC_malloc_heap_l = 0;
/* Linear search of the list above for base address p.              */
1784 STATIC GC_bool GC_is_malloc_heap_base(void *p)
1786 struct GC_malloc_heap_list *q = GC_malloc_heap_l;
1789 if (q -> allocation_base == p) return TRUE;
1795 STATIC void *GC_get_allocation_base(void *p)
1797 MEMORY_BASIC_INFORMATION buf;
1798 size_t result = VirtualQuery(p, &buf, sizeof(buf));
1799 if (result != sizeof(buf)) {
1800 ABORT("Weird VirtualQuery result");
1802 return buf.AllocationBase;
/* Record the AllocationBase of the current system malloc heap in   */
/* GC_malloc_heap_l.  If the first probe lands in an already-known  */
/* section, retry with geometrically growing allocations (bounded   */
/* by GC_max_root_size/10 and 500000) to force malloc into a new    */
/* section.  Best-effort: silently returns on allocation failure.   */
1805 GC_INNER void GC_add_current_malloc_heap(void)
1807 struct GC_malloc_heap_list *new_l = (struct GC_malloc_heap_list *)
1808 malloc(sizeof(struct GC_malloc_heap_list));
1811 if (NULL == new_l) return;
/* The list node itself came from malloc, so its region is a        */
/* candidate malloc-heap section.                                   */
1812 candidate = GC_get_allocation_base(new_l);
1813 if (GC_is_malloc_heap_base(candidate)) {
1814 /* Try a little harder to find malloc heap. */
1815 size_t req_size = 10000;
1817 void *p = malloc(req_size);
1822 candidate = GC_get_allocation_base(p);
1825 } while (GC_is_malloc_heap_base(candidate)
1826 && req_size < GC_max_root_size/10 && req_size < 500000);
/* NOTE(review): per the gapped excerpt, the gave-up path           */
/* (candidate still known) presumably frees new_l and returns.      */
1827 if (GC_is_malloc_heap_base(candidate)) {
1832 GC_COND_LOG_PRINTF("Found new system malloc AllocationBase at %p\n",
1834 new_l -> allocation_base = candidate;
1835 new_l -> next = GC_malloc_heap_l;
1836 GC_malloc_heap_l = new_l;
1838 # endif /* USE_WINALLOC */
1840 # endif /* !REDIRECT_MALLOC */
1842 STATIC word GC_n_heap_bases = 0; /* See GC_heap_bases. */
1844 /* Is p the start of either the malloc heap, or of one of our */
1845 /* heap sections? */
/* True iff p is the base of the system malloc heap (when tracked)  */
/* or of one of our own heap sections in GC_heap_bases[].           */
1846 GC_INNER GC_bool GC_is_heap_base(void *p)
1849 # ifndef REDIRECT_MALLOC
/* Opportunistically keep GC_max_root_size current; it bounds the   */
/* retry loop in GC_add_current_malloc_heap().                      */
1850 if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
1851 # ifdef USE_WINALLOC
1852 if (GC_is_malloc_heap_base(p)) return TRUE;
1855 for (i = 0; i < (int)GC_n_heap_bases; i++) {
1856 if (GC_heap_bases[i] == p) return TRUE;
/* Win32s only (no-op unless GC_no_win32_dlls): scan outward from   */
/* static_root with VirtualQuery, coalescing maximal runs of        */
/* committed writable pages into root intervals registered via      */
/* GC_add_roots_inner.  Stops at undescribed memory or at one of    */
/* our own heap sections.                                           */
1862 STATIC void GC_register_root_section(ptr_t static_root)
1864 MEMORY_BASIC_INFORMATION buf;
1869 if (!GC_no_win32_dlls) return;
1870 p = base = limit = GC_least_described_address(static_root);
1871 while ((word)p < (word)GC_sysinfo.lpMaximumApplicationAddress) {
1872 size_t result = VirtualQuery(p, &buf, sizeof(buf));
1876 if (result != sizeof(buf) || buf.AllocationBase == 0
1877 || GC_is_heap_base(buf.AllocationBase)) break;
1878 new_limit = (char *)p + buf.RegionSize;
1879 protect = buf.Protect;
/* Extend the current [base, limit) run while the region remains    */
/* committed and writable; otherwise flush the run.                 */
1880 if (buf.State == MEM_COMMIT
1881 && is_writable(protect)) {
1882 if ((char *)p == limit) {
1885 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1890 if ((word)p > (word)new_limit /* overflow */) break;
1891 p = (LPVOID)new_limit;
/* Flush any run still pending when the scan terminates. */
1893 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1895 #endif /* MSWIN32 */
/* Win32: register the main program's static data by scanning       */
/* around one known GC global.  On NT-based Windows this is         */
/* effectively a no-op (GC_register_root_section returns early).    */
1897 void GC_register_data_segments(void)
1900 GC_register_root_section((ptr_t)&GC_pages_executable);
1901 /* any other GC global variable would fit too. */
1905 # else /* !OS2 && !Windows */
1907 # if (defined(SVR4) || defined(AIX) || defined(DGUX) \
1908 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
/* SVR4-style search for the start of the writable data segment:    */
/* guess the page past etext (preserving the in-page offset) and    */
/* verify writability with a temporary fault handler; if the probe  */
/* faults, fall back to scanning down from DATAEND (plan B).        */
1909 ptr_t GC_SysVGetDataStart(size_t max_page_size, ptr_t etext_addr)
1911 word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1912 & ~(word)(sizeof(word) - 1);
1913 /* etext rounded to word boundary */
1914 word next_page = ((text_end + (word)max_page_size - 1)
1915 & ~((word)max_page_size - 1));
1916 word page_offset = (text_end & ((word)max_page_size - 1));
/* Keep etext's in-page offset: data may start mid-page. */
1917 volatile ptr_t result = (char *)(next_page + page_offset);
1918 /* Note that this isn't equivalent to just adding */
1919 /* max_page_size to &etext if &etext is at a page boundary */
1921 GC_setup_temporary_fault_handler();
1922 if (SETJMP(GC_jmp_buf) == 0) {
1923 /* Try writing to the address. */
1924 # ifdef AO_HAVE_fetch_and_add
/* Atomic read-modify-write of zero: harmless if another thread     */
/* races, but still faults on a read-only page.                     */
1925 volatile AO_t zero = 0;
1926 (void)AO_fetch_and_add((volatile AO_t *)result, zero);
1928 /* Fallback to non-atomic fetch-and-store. */
1930 # if defined(CPPCHECK)
1935 GC_reset_fault_handler();
1937 GC_reset_fault_handler();
1938 /* We got here via a longjmp. The address is not readable. */
1939 /* This is known to happen under Solaris 2.4 + gcc, which place */
1940 /* string constants in the text segment, but after etext. */
1941 /* Use plan B. Note that we now know there is a gap between */
1942 /* text and data segments, so plan A brought us something. */
1943 result = (char *)GC_find_limit(DATAEND, FALSE);
1945 return (/* no volatile */ ptr_t)result;
1949 #ifdef DATASTART_USES_BSDGETDATASTART
1950 /* Its unclear whether this should be identical to the above, or */
1951 /* whether it should apply to non-X86 architectures. */
1952 /* For now we don't assume that there is always an empty page after */
1953 /* etext. But in some cases there actually seems to be slightly more. */
1954 /* This also deals with holes between read-only data and writable data. */
/* FreeBSD variant of the data-start search: probe pages by reading */
/* (not writing) from the page past etext up to DATAEND under a     */
/* temporary fault handler; on a fault, fall back to scanning down  */
/* from DATAEND.  Handles holes between read-only and writable      */
/* data (see header comment above).                                 */
1955 GC_INNER ptr_t GC_FreeBSDGetDataStart(size_t max_page_size,
1958 word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1959 & ~(word)(sizeof(word) - 1);
1960 /* etext rounded to word boundary */
1961 volatile word next_page = (text_end + (word)max_page_size - 1)
1962 & ~((word)max_page_size - 1);
1963 volatile ptr_t result = (ptr_t)text_end;
1964 GC_setup_temporary_fault_handler();
1965 if (SETJMP(GC_jmp_buf) == 0) {
1966 /* Try reading at the address. */
1967 /* This should happen before there is another thread. */
1968 for (; next_page < (word)DATAEND; next_page += (word)max_page_size)
1969 *(volatile char *)next_page;
1970 GC_reset_fault_handler();
1972 GC_reset_fault_handler();
1973 /* As above, we go to plan B */
1974 result = (ptr_t)GC_find_limit(DATAEND, FALSE);
1978 #endif /* DATASTART_USES_BSDGETDATASTART */
1982 # define GC_AMIGA_DS
1983 # include "extra/AmigaOS.c"
1986 #elif defined(OPENBSD)
1988 /* Depending on arch alignment, there can be multiple holes */
1989 /* between DATASTART and DATAEND. Scan in DATASTART .. DATAEND */
1990 /* and register each region. */
/* OpenBSD: the data area may contain alignment holes; register     */
/* each contiguous accessible region in [DATASTART, DATAEND)        */
/* separately, skipping holes between them.                         */
1991 void GC_register_data_segments(void)
1993 ptr_t region_start = DATASTART;
/* The -1U trick also rejects a NULL DATASTART without a compiler   */
/* warning (cf. the generic version below).                         */
1995 if ((word)region_start - 1U >= (word)DATAEND)
1996 ABORT_ARG2("Wrong DATASTART/END pair",
1997 ": %p .. %p", (void *)region_start, (void *)DATAEND);
1999 ptr_t region_end = GC_find_limit_openbsd(region_start, DATAEND);
2001 GC_add_roots_inner(region_start, region_end, FALSE);
2002 if ((word)region_end >= (word)DATAEND)
2004 region_start = GC_skip_hole_openbsd(region_end, DATAEND);
2008 # else /* !OS2 && !Windows && !AMIGA && !OPENBSD */
2010 # if !defined(PCR) && !defined(MACOS) && defined(REDIRECT_MALLOC) \
2011 && defined(GC_SOLARIS_THREADS)
2013 extern caddr_t sbrk(int);
/* Generic (non-OS/2, non-Windows, non-Amiga, non-OpenBSD) root     */
/* registration: validate and register [DATASTART, DATAEND), an     */
/* optional second data region, plus Solaris-threads and classic    */
/* MacOS special cases.                                             */
2017 void GC_register_data_segments(void)
2019 # if !defined(PCR) && !defined(MACOS)
2020 # if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
2021 /* As of Solaris 2.3, the Solaris threads implementation */
2022 /* allocates the data structure for the initial thread with */
2023 /* sbrk at process startup. It needs to be scanned, so that */
2024 /* we don't lose some malloc allocated data structures */
2025 /* hanging from it. We're on thin ice here ... */
2026 GC_ASSERT(DATASTART);
2028 ptr_t p = (ptr_t)sbrk(0);
2029 if ((word)DATASTART < (word)p)
2030 GC_add_roots_inner(DATASTART, p, FALSE);
2033 if ((word)DATASTART - 1U >= (word)DATAEND) {
2034 /* Subtract one to check also for NULL */
2035 /* without a compiler warning. */
2036 ABORT_ARG2("Wrong DATASTART/END pair",
2037 ": %p .. %p", (void *)DATASTART, (void *)DATAEND);
2039 GC_add_roots_inner(DATASTART, DATAEND, FALSE);
2040 # ifdef GC_HAVE_DATAREGION2
2041 if ((word)DATASTART2 - 1U >= (word)DATAEND2)
2042 ABORT_ARG2("Wrong DATASTART/END2 pair",
2043 ": %p .. %p", (void *)DATASTART2, (void *)DATAEND2);
2044 GC_add_roots_inner(DATASTART2, DATAEND2, FALSE);
/* Classic MacOS: globals live between GC_MacGetDataStart() and A5  */
/* (plus "far" globals for CodeWarrior when far_data is enabled).   */
2050 # if defined(THINK_C)
2051 extern void* GC_MacGetDataStart(void);
2052 /* globals begin above stack and end at a5. */
2053 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
2054 (ptr_t)LMGetCurrentA5(), FALSE);
2056 # if defined(__MWERKS__)
2058 extern void* GC_MacGetDataStart(void);
2059 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
2060 # if __option(far_data)
2061 extern void* GC_MacGetDataEnd(void);
2063 /* globals begin above stack and end at a5. */
2064 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
2065 (ptr_t)LMGetCurrentA5(), FALSE);
2066 /* MATTHEW: Handle Far Globals */
2067 # if __option(far_data)
2068 /* Far globals follow he QD globals: */
2069 GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
2070 (ptr_t)GC_MacGetDataEnd(), FALSE);
2073 extern char __data_start__[], __data_end__[];
2074 GC_add_roots_inner((ptr_t)&__data_start__,
2075 (ptr_t)&__data_end__, FALSE);
2076 # endif /* __POWERPC__ */
2077 # endif /* __MWERKS__ */
2078 # endif /* !THINK_C */
2082 /* Dynamic libraries are added at every collection, since they may */
2086 # endif /* !AMIGA */
2087 # endif /* !MSWIN32 && !MSWINCE */
2091 * Auxiliary routines for obtaining memory from OS.
2094 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
2095 && !defined(USE_WINALLOC) && !defined(MACOS) && !defined(DOS4GW) \
2096 && !defined(NINTENDO_SWITCH) && !defined(NONSTOP) \
2097 && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PS3) \
2098 && !defined(SN_TARGET_PSP2) && !defined(RTEMS) && !defined(__CC_ARM)
2100 # define SBRK_ARG_T ptrdiff_t
2102 #if defined(MMAP_SUPPORTED)
2104 #ifdef USE_MMAP_FIXED
2105 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
2106 /* Seems to yield better performance on Solaris 2, but can */
2107 /* be unreliable if something is already mapped at the address. */
2109 # define GC_MMAP_FLAGS MAP_PRIVATE
2112 #ifdef USE_MMAP_ANON
2114 # if defined(MAP_ANONYMOUS) && !defined(CPPCHECK)
2115 # define OPT_MAP_ANON MAP_ANONYMOUS
2117 # define OPT_MAP_ANON MAP_ANON
2120 static int zero_fd = -1;
2121 # define OPT_MAP_ANON 0
2124 # ifndef MSWIN_XBOX1
2125 # if defined(SYMBIAN) && !defined(USE_MMAP_ANON)
2127 extern char *GC_get_private_path_and_zero_file(void);
/* Obtain `bytes` (a multiple of the page size) of memory from the  */
/* OS via mmap, hinting successive allocations at increasing        */
/* addresses (last_addr).  Result is HBLKSIZE-aligned or the GC     */
/* aborts.  Returns NULL on mmap failure (per the gapped excerpt,   */
/* presumably -- the failure return line is not visible here).      */
2131 STATIC ptr_t GC_unix_mmap_get_mem(size_t bytes)
2134 static ptr_t last_addr = HEAP_START;
2136 # ifndef USE_MMAP_ANON
/* Without anonymous mappings we map /dev/zero (or a private zero   */
/* file on Symbian); the descriptor is opened once and kept.        */
2137 static GC_bool initialized = FALSE;
2139 if (!EXPECT(initialized, TRUE)) {
2141 char *path = GC_get_private_path_and_zero_file();
2143 zero_fd = open(path, O_RDWR | O_CREAT, 0644);
2147 zero_fd = open("/dev/zero", O_RDONLY);
2150 ABORT("Could not open /dev/zero");
/* Avoid leaking the descriptor into exec'd children. */
2151 if (fcntl(zero_fd, F_SETFD, FD_CLOEXEC) == -1)
2152 WARN("Could not set FD_CLOEXEC for /dev/zero\n", 0);
2158 if (bytes & (GC_page_size - 1)) ABORT("Bad GET_MEM arg");
2159 result = mmap(last_addr, bytes, (PROT_READ | PROT_WRITE)
2160 | (GC_pages_executable ? PROT_EXEC : 0),
2161 GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
2162 # undef IGNORE_PAGES_EXECUTABLE
2164 if (EXPECT(MAP_FAILED == result, FALSE)) {
2165 if (HEAP_START == last_addr && GC_pages_executable && EACCES == errno)
2166 ABORT("Cannot allocate executable pages");
/* Round the hint up so the next request is placed above this one. */
2169 last_addr = (ptr_t)(((word)result + bytes + GC_page_size - 1)
2170 & ~(GC_page_size - 1));
2171 # if !defined(LINUX)
2172 if (last_addr == 0) {
2173 /* Oops. We got the end of the address space. This isn't */
2174 /* usable by arbitrary C code, since one-past-end pointers */
2175 /* don't work, so we discard it and try again. */
2176 munmap(result, ~GC_page_size - (size_t)result + 1);
2177 /* Leave last page mapped, so we can't repeat. */
2178 return GC_unix_mmap_get_mem(bytes);
2181 GC_ASSERT(last_addr != 0);
2183 if (((word)result % HBLKSIZE) != 0)
2185 "GC_unix_get_mem: Memory returned by mmap is not aligned to HBLKSIZE.");
2186 return((ptr_t)result);
2188 # endif /* !MSWIN_XBOX1 */
2190 #endif /* MMAP_SUPPORTED */
2192 #if defined(USE_MMAP)
2193 ptr_t GC_unix_get_mem(size_t bytes)
2195 return GC_unix_mmap_get_mem(bytes);
2197 #else /* !USE_MMAP */
/* Obtain `bytes` from the OS via sbrk, first aligning the break to */
/* a page boundary and optionally inserting a PROT_NONE guard page. */
/* Returns 0 on failure or if `bytes` does not fit in SBRK_ARG_T.   */
2199 STATIC ptr_t GC_unix_sbrk_get_mem(size_t bytes)
2203 /* Bare sbrk isn't thread safe. Play by malloc rules. */
2204 /* The equivalent may be needed on other systems as well. */
2208 ptr_t cur_brk = (ptr_t)sbrk(0);
2209 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
/* Guard against a size that becomes negative when converted to the */
/* signed sbrk argument type.                                       */
2211 if ((SBRK_ARG_T)bytes < 0) {
2212 result = 0; /* too big */
/* Advance the break to the next page boundary first. */
2216 if((ptr_t)sbrk((SBRK_ARG_T)GC_page_size - lsbs) == (ptr_t)(-1)) {
2221 # ifdef ADD_HEAP_GUARD_PAGES
2222 /* This is useful for catching severe memory overwrite problems that */
2223 /* span heap sections. It shouldn't otherwise be turned on. */
2225 ptr_t guard = (ptr_t)sbrk((SBRK_ARG_T)GC_page_size);
2226 if (mprotect(guard, GC_page_size, PROT_NONE) != 0)
2227 ABORT("ADD_HEAP_GUARD_PAGES: mprotect failed");
2229 # endif /* ADD_HEAP_GUARD_PAGES */
2230 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
2231 if (result == (ptr_t)(-1)) result = 0;
/* Default (non-USE_MMAP) allocator: try sbrk first, then mmap,     */
/* then sbrk again (memory may have become available); remembers a  */
/* prior sbrk failure to skip straight to mmap next time.           */
2240 ptr_t GC_unix_get_mem(size_t bytes)
2242 # if defined(MMAP_SUPPORTED)
2243 /* By default, we try both sbrk and mmap, in that order. */
2244 static GC_bool sbrk_failed = FALSE;
2247 if (GC_pages_executable) {
2248 /* If the allocated memory should have the execute permission */
2249 /* then sbrk() cannot be used. */
2250 return GC_unix_mmap_get_mem(bytes);
2252 if (!sbrk_failed) result = GC_unix_sbrk_get_mem(bytes);
2255 result = GC_unix_mmap_get_mem(bytes);
2258 /* Try sbrk again, in case sbrk memory became available. */
2259 result = GC_unix_sbrk_get_mem(bytes);
2262 # else /* !MMAP_SUPPORTED */
2263 return GC_unix_sbrk_get_mem(bytes);
2267 #endif /* !USE_MMAP */
/* OS/2: allocate committed read/write (optionally executable)      */
/* memory via DosAllocMem.  NOTE(review): per the FIXME below, a    */
/* zero result triggers an unbounded retry via recursion; the       */
/* intent (retry once?) should be confirmed upstream.               */
2273 void * os2_alloc(size_t bytes)
2277 if (DosAllocMem(&result, bytes, (PAG_READ | PAG_WRITE | PAG_COMMIT)
2278 | (GC_pages_executable ? PAG_EXECUTE : 0))
2282 /* FIXME: What's the purpose of this recursion? (Probably, if */
2283 /* DosAllocMem returns memory at 0 address then just retry once.) */
2284 if (result == 0) return(os2_alloc(bytes));
/* Xbox One (Durango): commit memory top-down via VirtualAlloc;     */
/* a zero-byte request yields NULL rather than a zero-size mapping. */
2291 ptr_t GC_durango_get_mem(size_t bytes)
2293 if (0 == bytes) return NULL;
2294 return (ptr_t)VirtualAlloc(NULL, bytes, MEM_COMMIT | MEM_TOP_DOWN,
2297 #elif defined(MSWINCE)
/* WinCE: commit `bytes` (rounded to page size) from an existing    */
/* reserved-but-uncommitted heap section if one has room; otherwise */
/* reserve a new allocation-granularity-aligned section first and   */
/* record it in GC_heap_bases/GC_heap_lengths.                      */
2298 ptr_t GC_wince_get_mem(size_t bytes)
2300 ptr_t result = 0; /* initialized to prevent warning. */
2303 bytes = ROUNDUP_PAGESIZE(bytes);
2305 /* Try to find reserved, uncommitted pages */
2306 for (i = 0; i < GC_n_heap_bases; i++) {
/* The negated length modulo the granularity is the space left      */
/* before the section's next granularity boundary.                  */
2307 if (((word)(-(signed_word)GC_heap_lengths[i])
2308 & (GC_sysinfo.dwAllocationGranularity-1))
2310 result = GC_heap_bases[i] + GC_heap_lengths[i];
2315 if (i == GC_n_heap_bases) {
2316 /* Reserve more pages */
2318 SIZET_SAT_ADD(bytes, (size_t)GC_sysinfo.dwAllocationGranularity-1)
2319 & ~((size_t)GC_sysinfo.dwAllocationGranularity-1);
2320 /* If we ever support MPROTECT_VDB here, we will probably need to */
2321 /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
2322 /* never spans regions. It seems to be OK for a VirtualFree */
2323 /* argument to span regions, so we should be OK for now. */
2324 result = (ptr_t) VirtualAlloc(NULL, res_bytes,
2325 MEM_RESERVE | MEM_TOP_DOWN,
2326 GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2328 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2329 /* If I read the documentation correctly, this can */
2330 /* only happen if HBLKSIZE > 64 KB or not a power of 2. */
2331 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
2332 if (result == NULL) return NULL;
2333 GC_heap_bases[GC_n_heap_bases] = result;
2334 GC_heap_lengths[GC_n_heap_bases] = 0;
/* Commit the requested pages inside the (new or existing) section. */
2339 result = (ptr_t) VirtualAlloc(result, bytes, MEM_COMMIT,
2340 GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2342 # undef IGNORE_PAGES_EXECUTABLE
2344 if (result != NULL) {
2345 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2346 GC_heap_lengths[i] += bytes;
2352 #elif (defined(USE_WINALLOC) && !defined(MSWIN_XBOX1)) || defined(CYGWIN32)
2354 # ifdef USE_GLOBAL_ALLOC
2355 # define GLOBAL_ALLOC_TEST 1
2357 # define GLOBAL_ALLOC_TEST GC_no_win32_dlls
2360 # if (defined(GC_USE_MEM_TOP_DOWN) && defined(USE_WINALLOC)) \
2361 || defined(CPPCHECK)
2362 DWORD GC_mem_top_down = MEM_TOP_DOWN;
2363 /* Use GC_USE_MEM_TOP_DOWN for better 64-bit */
2364 /* testing. Otherwise all addresses tend to */
2365 /* end up in first 4 GB, hiding bugs. */
2367 # define GC_mem_top_down 0
2368 # endif /* !GC_USE_MEM_TOP_DOWN */
/* Win32/Cygwin: get a new HBLKSIZE-aligned heap section.  Depending   */
/* on configuration this forwards to GC_unix_get_mem (Cygwin without   */
/* USE_WINALLOC), GlobalAlloc (when Win32 DLL quirks require it), or   */
/* VirtualAlloc.  Successful results are recorded in GC_heap_bases.    */
2370 ptr_t GC_win32_get_mem(size_t bytes)
2374 # ifndef USE_WINALLOC
2375 result = GC_unix_get_mem(bytes);
2377 # if defined(MSWIN32) && !defined(MSWINRT_FLAVOR)
2378 if (GLOBAL_ALLOC_TEST) {
2379 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
2380 /* There are also unconfirmed rumors of other */
2381 /* problems, so we dodge the issue. */
2382 result = (ptr_t)GlobalAlloc(0, SIZET_SAT_ADD(bytes, HBLKSIZE));
2383 /* Align it at HBLKSIZE boundary. */
2384 result = (ptr_t)(((word)result + HBLKSIZE - 1)
2385 & ~(word)(HBLKSIZE - 1));
2389 /* VirtualProtect only works on regions returned by a */
2390 /* single VirtualAlloc call. Thus we allocate one */
2391 /* extra page, which will prevent merging of blocks */
2392 /* in separate regions, and eliminate any temptation */
2393 /* to call VirtualProtect on a range spanning regions. */
2394 /* This wastes a small amount of memory, and risks */
2395 /* increased fragmentation. But better alternatives */
2396 /* would require effort. */
2397 # ifdef MPROTECT_VDB
2398 /* We can't check for GC_incremental here (because */
2399 /* GC_enable_incremental() might be called some time */
2400 /* later after the GC initialization). */
2402 # define VIRTUAL_ALLOC_PAD (GC_GWW_AVAILABLE() ? 0 : 1)
2404 # define VIRTUAL_ALLOC_PAD 1
2407 # define VIRTUAL_ALLOC_PAD 0
2409 /* Pass the MEM_WRITE_WATCH only if GetWriteWatch-based */
2410 /* VDBs are enabled and the GetWriteWatch function is */
2411 /* available. Otherwise we waste resources or possibly */
2412 /* cause VirtualAlloc to fail (observed in Windows 2000 */
2414 result = (ptr_t) VirtualAlloc(NULL,
2415 SIZET_SAT_ADD(bytes, VIRTUAL_ALLOC_PAD),
2416 GetWriteWatch_alloc_flag
2417 | (MEM_COMMIT | MEM_RESERVE)
2419 GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2421 # undef IGNORE_PAGES_EXECUTABLE
2423 # endif /* USE_WINALLOC */
2424 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2425 /* If I read the documentation correctly, this can */
2426 /* only happen if HBLKSIZE > 64 KB or not a power of 2. */
2427 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
2428 if (0 != result) GC_heap_bases[GC_n_heap_bases++] = result;
/* Release every heap section recorded in GC_heap_bases, using the     */
/* deallocator matching how it was obtained (GlobalFree vs.            */
/* VirtualFree), and clear the table entries.                          */
2432 GC_API void GC_CALL GC_win32_free_heap(void)
2434 # ifndef MSWINRT_FLAVOR
2436 if (GLOBAL_ALLOC_TEST)
2439 while (GC_n_heap_bases-- > 0) {
2441 /* FIXME: Is it OK to use non-GC free() here? */
2443 GlobalFree(GC_heap_bases[GC_n_heap_bases]);
2445 GC_heap_bases[GC_n_heap_bases] = 0;
2451 /* Avoiding VirtualAlloc leak. */
2452 while (GC_n_heap_bases > 0) {
2453 VirtualFree(GC_heap_bases[--GC_n_heap_bases], 0, MEM_RELEASE);
2454 GC_heap_bases[GC_n_heap_bases] = 0;
2458 #endif /* USE_WINALLOC || CYGWIN32 */
2461 # define GC_AMIGA_AM
2462 # include "extra/AmigaOS.c"
2467 # include <stdlib.h>
/* Haiku: get page-aligned heap memory via posix_memalign.             */
/* Requires GC_page_size to have been initialized.                     */
2468 ptr_t GC_haiku_get_mem(size_t bytes)
2472 GC_ASSERT(GC_page_size != 0);
2473 if (posix_memalign(&mem, GC_page_size, bytes) == 0)
2481 /* For now, this only works on Win32/WinCE and some Unix-like */
2482 /* systems. If you have something else, don't define */
2485 #if !defined(NN_PLATFORM_CTR) && !defined(MSWIN32) && !defined(MSWINCE) \
2486 && !defined(MSWIN_XBOX1)
2487 # include <unistd.h>
2488 # ifdef SN_TARGET_PS3
2489 # include <sys/memory.h>
2491 # include <sys/mman.h>
2493 # include <sys/stat.h>
2494 # include <sys/types.h>
2497 /* Compute a page aligned starting address for the unmap */
2498 /* operation on a block of size bytes starting at start. */
2499 /* Return 0 if the block is too small to make this feasible. */
2500 STATIC ptr_t GC_unmap_start(ptr_t start, size_t bytes)
     /* Round start up to the next page boundary.                     */
2502 ptr_t result = (ptr_t)(((word)start + GC_page_size - 1)
2503 & ~(GC_page_size - 1));
     /* Fail if less than one whole page fits inside the block.       */
2505 if ((word)(result + GC_page_size) > (word)(start + bytes)) return 0;
2509 /* Compute end address for an unmap operation on the indicated */
2511 STATIC ptr_t GC_unmap_end(ptr_t start, size_t bytes)
     /* Round the block's end down to a page boundary.                */
2513 return (ptr_t)((word)(start + bytes) & ~(GC_page_size - 1));
2516 /* Under Win32/WinCE we commit (map) and decommit (unmap) */
2517 /* memory using VirtualAlloc and VirtualFree. These functions */
2518 /* work on individual allocations of virtual memory, made */
2519 /* previously using VirtualAlloc with the MEM_RESERVE flag. */
2520 /* The ranges we need to (de)commit may span several of these */
2521 /* allocations; therefore we use VirtualQuery to check */
2522 /* allocation lengths, and split up the range as necessary. */
2524 /* We assume that GC_remap is called on exactly the same range */
2525 /* as a previous call to GC_unmap. It is safe to consistently */
2526 /* round the endpoints in both places. */
/* Unmap (decommit) the page-aligned interior of [start, start+bytes). */
/* No-op when the block is too small to contain a whole page.          */
/* GC_unmapped_bytes is updated by the amount actually released.       */
2527 GC_INNER void GC_unmap(ptr_t start, size_t bytes)
2529 ptr_t start_addr = GC_unmap_start(start, bytes);
2530 ptr_t end_addr = GC_unmap_end(start, bytes);
2531 word len = end_addr - start_addr;
2533 if (0 == start_addr) return;
2534 # ifdef USE_WINALLOC
2536 MEMORY_BASIC_INFORMATION mem_info;
     /* The range may span several VirtualAlloc regions; query each   */
     /* region's length and decommit it piecewise.                    */
2539 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2540 != sizeof(mem_info))
2541 ABORT("Weird VirtualQuery result");
2542 free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2543 if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
2544 ABORT("VirtualFree failed");
2545 GC_unmapped_bytes += free_len;
2546 start_addr += free_len;
2549 # elif defined(SN_TARGET_PS3)
2550 ps3_free_mem(start_addr, len);
2552 /* We immediately remap it to prevent an intervening mmap from */
2553 /* accidentally grabbing the same address space. */
2555 # if defined(AIX) || defined(CYGWIN32)
2556 /* On AIX, mmap(PROT_NONE) fails with ENOMEM unless the */
2557 /* environment variable XPG_SUS_ENV is set to ON. */
2558 /* On Cygwin, calling mmap() with the new protection flags on */
2559 /* an existing memory map with MAP_FIXED is broken. */
2560 /* However, calling mprotect() on the given address range */
2561 /* with PROT_NONE seems to work fine. */
2562 if (mprotect(start_addr, len, PROT_NONE))
2563 ABORT("mprotect(PROT_NONE) failed");
2565 void * result = mmap(start_addr, len, PROT_NONE,
2566 MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2567 zero_fd, 0/* offset */);
2569 if (result != (void *)start_addr)
2570 ABORT("mmap(PROT_NONE) failed");
2571 # if defined(CPPCHECK) || defined(LINT2)
2572 /* Explicitly store the resource handle to a global variable. */
2573 GC_noop1((word)result);
2575 # endif /* !CYGWIN32 */
2577 GC_unmapped_bytes += len;
/* Re-establish read/write access to a range previously passed to      */
/* GC_unmap (same start/bytes — endpoints are rounded identically).    */
/* GC_unmapped_bytes is decreased by the amount recommitted.           */
2581 GC_INNER void GC_remap(ptr_t start, size_t bytes)
2583 ptr_t start_addr = GC_unmap_start(start, bytes);
2584 ptr_t end_addr = GC_unmap_end(start, bytes);
2585 word len = end_addr - start_addr;
2586 if (0 == start_addr) return;
2588 /* FIXME: Handle out-of-memory correctly (at least for Win32) */
2589 # ifdef USE_WINALLOC
2591 MEMORY_BASIC_INFORMATION mem_info;
     /* Recommit region by region, as in GC_unmap.                    */
2595 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2596 != sizeof(mem_info))
2597 ABORT("Weird VirtualQuery result");
2598 alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2599 result = (ptr_t)VirtualAlloc(start_addr, alloc_len, MEM_COMMIT,
2601 ? PAGE_EXECUTE_READWRITE
2603 if (result != start_addr) {
2604 if (GetLastError() == ERROR_NOT_ENOUGH_MEMORY ||
2605 GetLastError() == ERROR_OUTOFMEMORY) {
2606 ABORT("Not enough memory to process remapping");
2608 ABORT("VirtualAlloc remapping failed");
2612 GC_noop1((word)result);
2614 GC_unmapped_bytes -= alloc_len;
2615 start_addr += alloc_len;
2619 /* It was already remapped with PROT_NONE. */
2621 # if defined(NACL) || defined(NETBSD)
2622 /* NaCl does not expose mprotect, but mmap should work fine. */
2623 /* In case of NetBSD, mprotect fails (unlike mmap) even */
2624 /* without PROT_EXEC if PaX MPROTECT feature is enabled. */
2625 void *result = mmap(start_addr, len, (PROT_READ | PROT_WRITE)
2626 | (GC_pages_executable ? PROT_EXEC : 0),
2627 MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2628 zero_fd, 0 /* offset */);
2629 if (result != (void *)start_addr)
2630 ABORT("mmap as mprotect failed");
2631 # if defined(CPPCHECK) || defined(LINT2)
2632 GC_noop1((word)result);
2635 if (mprotect(start_addr, len, (PROT_READ | PROT_WRITE)
2636 | (GC_pages_executable ? PROT_EXEC : 0)) != 0) {
2637 ABORT_ARG3("mprotect remapping failed",
2638 " at %p (length %lu), errcode= %d",
2639 (void *)start_addr, (unsigned long)len, errno);
2643 # undef IGNORE_PAGES_EXECUTABLE
2644 GC_unmapped_bytes -= len;
2648 /* Two adjacent blocks have already been unmapped and are about to */
2649 /* be merged. Unmap the whole block. This typically requires */
2650 /* that we unmap a small section in the middle that was not previously */
2651 /* unmapped due to alignment constraints. */
/* Unmap the page-aligned gap between two adjacent, already-unmapped   */
/* blocks that are about to be merged (start1+bytes1 must equal        */
/* start2).  The endpoints are recomputed over the combined block      */
/* when either piece was too small to round on its own.                */
2652 GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
2655 ptr_t start1_addr = GC_unmap_start(start1, bytes1);
2656 ptr_t end1_addr = GC_unmap_end(start1, bytes1);
2657 ptr_t start2_addr = GC_unmap_start(start2, bytes2);
2658 ptr_t start_addr = end1_addr;
2659 ptr_t end_addr = start2_addr;
2662 GC_ASSERT(start1 + bytes1 == start2);
2663 if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
2664 if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
2665 if (0 == start_addr) return;
2666 len = end_addr - start_addr;
2667 # ifdef USE_WINALLOC
2669 MEMORY_BASIC_INFORMATION mem_info;
2672 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2673 != sizeof(mem_info))
2674 ABORT("Weird VirtualQuery result");
2675 free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2676 if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
2677 ABORT("VirtualFree failed");
2678 GC_unmapped_bytes += free_len;
2679 start_addr += free_len;
2684 /* Immediately remap as above. */
2685 # if defined(AIX) || defined(CYGWIN32)
2686 if (mprotect(start_addr, len, PROT_NONE))
2687 ABORT("mprotect(PROT_NONE) failed");
2689 void * result = mmap(start_addr, len, PROT_NONE,
2690 MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2691 zero_fd, 0/* offset */);
2693 if (result != (void *)start_addr)
2694 ABORT("mmap(PROT_NONE) failed");
2695 # if defined(CPPCHECK) || defined(LINT2)
2696 GC_noop1((word)result);
2698 # endif /* !CYGWIN32 */
2699 GC_unmapped_bytes += len;
2704 #endif /* USE_MUNMAP */
2706 /* Routine for pushing any additional roots. In THREADS */
2707 /* environment, this is also responsible for marking from */
2708 /* thread stacks. */
2710 GC_push_other_roots_proc GC_push_other_roots = 0;
/* PCR: push the stack of one thread, using the bounds reported by     */
/* PCR_ThCtl_GetInfo.                                                  */
2714 PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
2716 struct PCR_ThCtl_TInfoRep info;
2719 info.ti_stkLow = info.ti_stkHi = 0;
2720 result = PCR_ThCtl_GetInfo(t, &info);
2721 GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
2725 /* Push the contents of an old object. We treat this as stack */
2726 /* data only because that makes it robust against mark stack */
/* PCR: push one object inherited from a previous allocator.  Pushed   */
/* as stack data (see the comment above) for mark-stack robustness.    */
2728 PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
2730 GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
2731 return(PCR_ERes_okay);
2734 extern struct PCR_MM_ProcsRep * GC_old_allocator;
2735 /* defined in pcr_interface.c. */
2737 STATIC void GC_CALLBACK GC_default_push_other_roots(void)
2739 /* Traverse data allocated by previous memory managers. */
2740 if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
2743 ABORT("Old object enumeration failed");
2745 /* Traverse all thread stacks. */
2747 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
2748 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
2749 ABORT("Thread stack marking failed");
2755 # if defined(NN_PLATFORM_CTR) || defined(NINTENDO_SWITCH) \
2756 || defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
     /* Threaded platforms: all extra roots live on thread stacks.    */
2757 STATIC void GC_CALLBACK GC_default_push_other_roots(void)
2759 GC_push_all_stacks();
2763 # ifdef SN_TARGET_PS3
     /* PS3: stubs only — threads support is not implemented here.    */
2764 STATIC void GC_CALLBACK GC_default_push_other_roots(void)
2766 ABORT("GC_default_push_other_roots is not implemented");
2769 void GC_push_thread_structures(void)
2771 ABORT("GC_push_thread_structures is not implemented");
2773 # endif /* SN_TARGET_PS3 */
2775 GC_push_other_roots_proc GC_push_other_roots = GC_default_push_other_roots;
2776 #endif /* THREADS */
/* Public setter for the extra-roots callback.                         */
2778 GC_API void GC_CALL GC_set_push_other_roots(GC_push_other_roots_proc fn)
2780 GC_push_other_roots = fn;
/* Public getter for the extra-roots callback.                         */
2783 GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void)
2785 return GC_push_other_roots;
2789 * Routines for accessing dirty bits on virtual pages.
2790 * There are six ways to maintain this information:
2791 * DEFAULT_VDB: A simple dummy implementation that treats every page
2792 * as possibly dirty. This makes incremental collection
2793 * useless, but the implementation is still correct.
2794 * Manual VDB: Stacks and static data are always considered dirty.
2795 * Heap pages are considered dirty if GC_dirty(p) has been
2796 * called on some pointer p pointing to somewhere inside
2797 * an object on that page. A GC_dirty() call on a large
2798 * object directly dirties only a single page, but for the
2799 * manual VDB we are careful to treat an object with a dirty
2800 * page as completely dirty.
2801 * In order to avoid races, an object must be marked dirty
2802 * after it is written, and a reference to the object
2803 * must be kept on a stack or in a register in the interim.
2804 * With threads enabled, an object directly reachable from the
2805 * stack at the time of a collection is treated as dirty.
2806 * In single-threaded mode, it suffices to ensure that no
2807 * collection can take place between the pointer assignment
2808 * and the GC_dirty() call.
2809 * PCR_VDB: Use PPCR's virtual dirty bit facility.
2810 * PROC_VDB: Use the /proc facility for reading dirty bits. Only
2811 * works under some SVR4 variants. Even then, it may be
2812 * too slow to be entirely satisfactory. Requires reading
2813 * dirty bits for entire address space. Implementations tend
2814 * to assume that the client is a (slow) debugger.
2815 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
2816 * dirtied pages. The implementation (and implementability)
2817 * is highly system dependent. This usually fails when system
2818 * calls write to a protected page. We prevent the read system
2819 * call from doing so. It is the client's responsibility to
2820 * make sure that other system calls are similarly protected
2821 * or write only to the stack.
2822 * GWW_VDB: Use the Win32 GetWriteWatch functions, if available, to
2823 * read dirty bits. In case it is not available (because we
2824 * are running on Windows 95, Windows 2000 or earlier),
2825 * MPROTECT_VDB may be defined as a fallback strategy.
2828 #if (defined(CHECKSUMS) && defined(GWW_VDB)) || defined(PROC_VDB)
2829 /* Add all pages in pht2 to pht1. */
     /* Bitwise-OR every word of page hash table pht2 into pht1.      */
2830 STATIC void GC_or_pages(page_hash_table pht1, page_hash_table pht2)
2833 for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2835 #endif /* CHECKSUMS && GWW_VDB || PROC_VDB */
2839 # define GC_GWW_BUF_LEN (MAXHINCR * HBLKSIZE / 4096 /* X86 page size */)
2840 /* Still susceptible to overflow, if there are very large allocations, */
2841 /* and everything is dirty. */
2842 static PVOID gww_buf[GC_GWW_BUF_LEN];
2844 # ifndef MPROTECT_VDB
2845 # define GC_gww_dirty_init GC_dirty_init
     /* Probe for the GetWriteWatch API; report whether the           */
     /* GetWriteWatch-based dirty-bit implementation is usable.       */
2848 GC_INNER GC_bool GC_gww_dirty_init(void)
2850 detect_GetWriteWatch();
2851 return GC_GWW_AVAILABLE();
/* Collect dirty-page information for all heap sections via            */
/* GetWriteWatch (resetting the watch state), recording results in     */
/* GC_grungy_pages unless output_unneeded.  On per-section failure,    */
/* conservatively marks every page of that section dirty.              */
2854 GC_INLINE void GC_gww_read_dirty(GC_bool output_unneeded)
2858 if (!output_unneeded)
2859 BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
2861 for (i = 0; i != GC_n_heap_sects; ++i) {
2865 PVOID * pages = gww_buf;
2868 count = GC_GWW_BUF_LEN;
2869 /* GetWriteWatch is documented as returning non-zero when it */
2870 /* fails, but the documentation doesn't explicitly say why it */
2871 /* would fail or what its behavior will be if it fails. It */
2872 /* does appear to fail, at least on recent Win2K instances, if */
2873 /* the underlying memory was not allocated with the appropriate */
2874 /* flag. This is common if GC_enable_incremental is called */
2875 /* shortly after GC initialization. To avoid modifying the */
2876 /* interface, we silently work around such a failure, it only */
2877 /* affects the initial (small) heap allocation. If there are */
2878 /* more dirty pages than will fit in the buffer, this is not */
2879 /* treated as a failure; we must check the page count in the */
2880 /* loop condition. Since each partial call will reset the */
2881 /* status of some pages, this should eventually terminate even */
2882 /* in the overflow case. */
2883 if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
2884 GC_heap_sects[i].hs_start,
2885 GC_heap_sects[i].hs_bytes,
2889 static int warn_count = 0;
2890 struct hblk * start = (struct hblk *)GC_heap_sects[i].hs_start;
2891 static struct hblk *last_warned = 0;
2892 size_t nblocks = divHBLKSZ(GC_heap_sects[i].hs_bytes);
2894 if (i != 0 && last_warned != start && warn_count++ < 5) {
2895 last_warned = start;
2896 WARN("GC_gww_read_dirty unexpectedly failed at %p: "
2897 "Falling back to marking all pages dirty\n", start);
2899 if (!output_unneeded) {
2902 for (j = 0; j < nblocks; ++j) {
2903 word hash = PHT_HASH(start + j);
2904 set_pht_entry_from_index(GC_grungy_pages, hash);
2907 count = 1; /* Done with this section. */
2908 } else /* succeeded */ if (!output_unneeded) {
2909 PVOID * pages_end = pages + count;
2911 while (pages != pages_end) {
2912 struct hblk * h = (struct hblk *) *pages++;
2913 struct hblk * h_end = (struct hblk *) ((char *) h + page_size);
2915 set_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h));
2916 } while ((word)(++h) < (word)h_end);
2919 } while (count == GC_GWW_BUF_LEN);
2920 /* FIXME: It's unclear from Microsoft's documentation if this loop */
2921 /* is useful. We suspect the call just fails if the buffer fills */
2922 /* up. But that should still be handled correctly. */
2926 GC_ASSERT(!output_unneeded);
2927 GC_or_pages(GC_written_pages, GC_grungy_pages);
2931 # define GC_GWW_AVAILABLE() FALSE
2932 #endif /* !GWW_VDB */
2935 /* All of the following assume the allocation lock is held. */
2937 /* The client asserts that unallocated pages in the heap are never */
2940 /* Initialize virtual dirty bit implementation. */
/* DEFAULT_VDB: trivial initialization — nothing to set up, since      */
/* every page is treated as possibly dirty.                            */
2941 GC_INNER GC_bool GC_dirty_init(void)
2943 GC_VERBOSE_LOG_PRINTF("Initializing DEFAULT_VDB...\n");
2944 /* GC_dirty_pages and GC_grungy_pages are already cleared. */
2947 #endif /* DEFAULT_VDB */
2949 #ifndef GC_DISABLE_INCREMENTAL
2950 # if !defined(THREADS) || defined(HAVE_LOCKFREE_AO_OR)
2951 # define async_set_pht_entry_from_index(db, index) \
2952 set_pht_entry_from_index_concurrent(db, index)
2953 # elif defined(AO_HAVE_test_and_set_acquire)
2954 /* We need to lock around the bitmap update (in the write fault */
2955 /* handler or GC_dirty) in order to avoid the risk of losing a bit. */
2956 /* We do this with a test-and-set spin lock if possible. */
2957 GC_INNER volatile AO_TS_t GC_fault_handler_lock = AO_TS_INITIALIZER;
     /* Set a page-hash-table bit under the fault-handler spin lock,  */
     /* so concurrent updates cannot lose a bit.                      */
2959 static void async_set_pht_entry_from_index(volatile page_hash_table db,
2962 GC_acquire_dirty_lock();
2963 set_pht_entry_from_index(db, index);
2964 GC_release_dirty_lock();
2967 # error No test_and_set operation: Introduces a race.
2968 # endif /* THREADS && !AO_HAVE_test_and_set_acquire */
2969 #endif /* !GC_DISABLE_INCREMENTAL */
2973 * This implementation maintains dirty bits itself by catching write
2974 * faults and keeping track of them. We assume nobody else catches
2975 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
2976 * This means that clients must ensure that system calls don't write
2977 * to the write-protected heap. Probably the best way to do this is to
2978 * ensure that system calls write at most to pointer-free objects in the
2979 * heap, and do even that only if we are on a platform on which those
2980 * are not protected. Another alternative is to wrap system calls
2981 * (see example for read below), but the current implementation holds
2983 * We assume the page size is a multiple of HBLKSIZE.
2984 * We prefer them to be the same. We avoid protecting pointer-free
2985 * objects only if they are the same.
2988 /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
2989 decrease the likelihood of some of the problems described below. */
2990 # include <mach/vm_map.h>
2991 STATIC mach_port_t GC_task_self = 0;
2992 # define PROTECT(addr,len) \
2993 if (vm_protect(GC_task_self, (vm_address_t)(addr), (vm_size_t)(len), \
2994 FALSE, VM_PROT_READ \
2995 | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
2996 == KERN_SUCCESS) {} else ABORT("vm_protect(PROTECT) failed")
2997 # define UNPROTECT(addr,len) \
2998 if (vm_protect(GC_task_self, (vm_address_t)(addr), (vm_size_t)(len), \
2999 FALSE, (VM_PROT_READ | VM_PROT_WRITE) \
3000 | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
3001 == KERN_SUCCESS) {} else ABORT("vm_protect(UNPROTECT) failed")
3003 # elif !defined(USE_WINALLOC)
3004 # include <sys/mman.h>
3005 # include <signal.h>
3006 # if !defined(CYGWIN32) && !defined(HAIKU)
3007 # include <sys/syscall.h>
3010 # define PROTECT(addr, len) \
3011 if (mprotect((caddr_t)(addr), (size_t)(len), \
3013 | (GC_pages_executable ? PROT_EXEC : 0)) >= 0) { \
3014 } else ABORT("mprotect failed")
3015 # define UNPROTECT(addr, len) \
3016 if (mprotect((caddr_t)(addr), (size_t)(len), \
3017 (PROT_READ | PROT_WRITE) \
3018 | (GC_pages_executable ? PROT_EXEC : 0)) >= 0) { \
3019 } else ABORT(GC_pages_executable ? \
3020 "un-mprotect executable page failed" \
3021 " (probably disabled by OS)" : \
3022 "un-mprotect failed")
3023 # undef IGNORE_PAGES_EXECUTABLE
3025 # else /* USE_WINALLOC */
3027 # include <signal.h>
3030 static DWORD protect_junk;
3031 # define PROTECT(addr, len) \
3032 if (VirtualProtect((addr), (len), \
3033 GC_pages_executable ? PAGE_EXECUTE_READ : \
3036 } else ABORT_ARG1("VirtualProtect failed", \
3037 ": errcode= 0x%X", (unsigned)GetLastError())
3038 # define UNPROTECT(addr, len) \
3039 if (VirtualProtect((addr), (len), \
3040 GC_pages_executable ? PAGE_EXECUTE_READWRITE : \
3043 } else ABORT("un-VirtualProtect failed")
3044 # endif /* USE_WINALLOC */
3046 # if defined(MSWIN32)
3047 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_HNDLR_PTR;
3049 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER)((signed_word)-1)
3050 # elif defined(MSWINCE)
3051 typedef LONG (WINAPI *SIG_HNDLR_PTR)(struct _EXCEPTION_POINTERS *);
3053 # define SIG_DFL (SIG_HNDLR_PTR) (-1)
3054 # elif defined(DARWIN)
3055 typedef void (* SIG_HNDLR_PTR)();
3057 typedef void (* SIG_HNDLR_PTR)(int, siginfo_t *, void *);
3058 typedef void (* PLAIN_HNDLR_PTR)(int);
3061 # if defined(__GLIBC__)
3062 # if __GLIBC__ < 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ < 2
3063 # error glibc too old?
3068 STATIC SIG_HNDLR_PTR GC_old_segv_handler = 0;
3069 /* Also old MSWIN32 ACCESS_VIOLATION filter */
3070 # if defined(FREEBSD) || defined(HPUX) || defined(HURD) || defined(LINUX)
3071 STATIC SIG_HNDLR_PTR GC_old_bus_handler = 0;
3073 STATIC GC_bool GC_old_bus_handler_used_si = FALSE;
3076 # if !defined(MSWIN32) && !defined(MSWINCE)
3077 STATIC GC_bool GC_old_segv_handler_used_si = FALSE;
3078 # endif /* !MSWIN32 */
3079 #endif /* !DARWIN */
3082 /* This function is used only by the fault handler. Potential data */
3083 /* race between this function and GC_install_header, GC_remove_header */
3084 /* should not be harmful because the added or removed header should */
3085 /* be already unprotected. */
3086 GC_ATTR_NO_SANITIZE_THREAD
3087 static GC_bool is_header_found_async(void *addr)
     /* True if addr lies in a block with a GC header, i.e. inside    */
     /* the collected heap.  Safe to call from the fault handler.     */
3091 GET_HDR((ptr_t)addr, result);
3092 return result != NULL;
3094 return HDR_INNER(addr) != NULL;
3098 # define is_header_found_async(addr) (HDR(addr) != NULL)
3099 #endif /* !THREADS */
3103 # if !defined(MSWIN32) && !defined(MSWINCE)
3105 # if defined(FREEBSD) || defined(HURD) || defined(HPUX)
3106 # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
3108 # define SIG_OK (sig == SIGSEGV)
3109 /* Catch SIGSEGV but ignore SIGBUS. */
3111 # if defined(FREEBSD)
3112 # ifndef SEGV_ACCERR
3113 # define SEGV_ACCERR 2
3115 # if defined(AARCH64) || defined(ARM32) || defined(MIPS)
3116 # define CODE_OK (si -> si_code == SEGV_ACCERR)
3117 # elif defined(POWERPC)
3118 # define AIM /* Pretend that we're AIM. */
3119 # include <machine/trap.h>
3120 # define CODE_OK (si -> si_code == EXC_DSI \
3121 || si -> si_code == SEGV_ACCERR)
3123 # define CODE_OK (si -> si_code == BUS_PAGE_FAULT \
3124 || si -> si_code == SEGV_ACCERR)
3126 # elif defined(OSF1)
3127 # define CODE_OK (si -> si_code == 2 /* experimentally determined */)
3128 # elif defined(IRIX5)
3129 # define CODE_OK (si -> si_code == EACCES)
3130 # elif defined(CYGWIN32) || defined(HAIKU) || defined(HURD)
3131 # define CODE_OK TRUE
3132 # elif defined(LINUX)
3133 # define CODE_OK TRUE
3134 /* Empirically c.trapno == 14, on IA32, but is that useful? */
3135 /* Should probably consider alignment issues on other */
3136 /* architectures. */
3137 # elif defined(HPUX)
3138 # define CODE_OK (si -> si_code == SEGV_ACCERR \
3139 || si -> si_code == BUS_ADRERR \
3140 || si -> si_code == BUS_UNKNOWN \
3141 || si -> si_code == SEGV_UNKNOWN \
3142 || si -> si_code == BUS_OBJERR)
3143 # elif defined(SUNOS5SIGS)
3144 # define CODE_OK (si -> si_code == SEGV_ACCERR)
3146 # ifndef NO_GETCONTEXT
3147 # include <ucontext.h>
/* The write-fault handler for MPROTECT_VDB.  On a write fault inside  */
/* the collected heap: unprotect the containing page(s), record them   */
/* in GC_dirty_pages, and resume the faulting instruction.  Faults     */
/* outside the heap are forwarded to the previously installed handler  */
/* (or abort when that handler was SIG_DFL).                           */
3149 STATIC void GC_write_fault_handler(int sig, siginfo_t *si, void *raw_sc)
3151 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode \
3152 == STATUS_ACCESS_VIOLATION)
3153 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] \
3154 == 1) /* Write fault */
3155 STATIC LONG WINAPI GC_write_fault_handler(
3156 struct _EXCEPTION_POINTERS *exc_info)
3157 # endif /* MSWIN32 || MSWINCE */
3159 # if !defined(MSWIN32) && !defined(MSWINCE)
3160 char *addr = (char *)si->si_addr;
3162 char * addr = (char *) (exc_info -> ExceptionRecord
3163 -> ExceptionInformation[1]);
3166 if (SIG_OK && CODE_OK) {
3167 struct hblk * h = (struct hblk *)((word)addr & ~(GC_page_size-1));
3168 GC_bool in_allocd_block;
3175 /* Address is only within the correct physical page. */
3176 in_allocd_block = FALSE;
3177 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
3178 if (is_header_found_async(&h[i])) {
3179 in_allocd_block = TRUE;
3184 in_allocd_block = is_header_found_async(addr);
3186 if (!in_allocd_block) {
3187 /* FIXME - We should make sure that we invoke the */
3188 /* old handler with the appropriate calling */
3189 /* sequence, which often depends on SA_SIGINFO. */
3191 /* Heap blocks now begin and end on page boundaries */
3192 SIG_HNDLR_PTR old_handler;
3194 # if defined(MSWIN32) || defined(MSWINCE)
3195 old_handler = GC_old_segv_handler;
3199 # if defined(FREEBSD) || defined(HURD) || defined(HPUX)
3200 if (sig == SIGBUS) {
3201 old_handler = GC_old_bus_handler;
3202 used_si = GC_old_bus_handler_used_si;
3206 old_handler = GC_old_segv_handler;
3207 used_si = GC_old_segv_handler_used_si;
3211 if (old_handler == (SIG_HNDLR_PTR)(signed_word)SIG_DFL) {
3212 # if !defined(MSWIN32) && !defined(MSWINCE)
3213 ABORT_ARG1("Unexpected bus error or segmentation fault",
3214 " at %p", (void *)addr);
3216 return(EXCEPTION_CONTINUE_SEARCH);
3220 * FIXME: This code should probably check if the
3221 * old signal handler used the traditional style and
3222 * if so call it using that style.
3224 # if defined(MSWIN32) || defined(MSWINCE)
3225 return((*old_handler)(exc_info));
3228 ((SIG_HNDLR_PTR)old_handler) (sig, si, raw_sc);
3230 /* FIXME: should pass nonstandard args as well. */
3231 ((PLAIN_HNDLR_PTR)(signed_word)old_handler)(sig);
3236 UNPROTECT(h, GC_page_size);
3237 /* We need to make sure that no collection occurs between */
3238 /* the UNPROTECT and the setting of the dirty bit. Otherwise */
3239 /* a write by a third thread might go unnoticed. Reversing */
3240 /* the order is just as bad, since we would end up unprotecting */
3241 /* a page in a GC cycle during which it's not marked. */
3242 /* Currently we do this by disabling the thread stopping */
3243 /* signals while this handler is running. An alternative might */
3244 /* be to record the fact that we're about to unprotect, or */
3245 /* have just unprotected a page in the GC's thread structure, */
3246 /* and then to have the thread stopping code set the dirty */
3247 /* flag, if necessary. */
3248 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
3249 word index = PHT_HASH(h+i);
3251 async_set_pht_entry_from_index(GC_dirty_pages, index);
3253 /* The write may not take place before dirty bits are read. */
3254 /* But then we'll fault again ... */
3255 # if defined(MSWIN32) || defined(MSWINCE)
3256 return(EXCEPTION_CONTINUE_EXECUTION);
3261 # if defined(MSWIN32) || defined(MSWINCE)
3262 return EXCEPTION_CONTINUE_SEARCH;
3264 ABORT_ARG1("Unexpected bus error or segmentation fault",
3265 " at %p", (void *)addr);
3269 # if defined(GC_WIN32_THREADS) && !defined(CYGWIN32)
     /* Win32 threads: install the fault handler as the process-wide  */
     /* unhandled-exception filter.                                   */
3270 GC_INNER void GC_set_write_fault_handler(void)
3272 SetUnhandledExceptionFilter(GC_write_fault_handler);
3275 #endif /* !DARWIN */
3277 #if !defined(DARWIN)
/* MPROTECT_VDB initialization: install GC_write_fault_handler for     */
/* SIGSEGV (and SIGBUS where applicable), remembering any previous     */
/* handler so unrelated faults can be forwarded.  On Win32 this uses   */
/* SetUnhandledExceptionFilter; GWW is preferred when available.       */
3278 GC_INNER GC_bool GC_dirty_init(void)
3280 # if !defined(MSWIN32) && !defined(MSWINCE)
3281 struct sigaction act, oldact;
3282 act.sa_flags = SA_RESTART | SA_SIGINFO;
3283 act.sa_sigaction = GC_write_fault_handler;
3284 (void)sigemptyset(&act.sa_mask);
3285 # if defined(THREADS) && !defined(GC_OPENBSD_UTHREADS) \
3286 && !defined(GC_WIN32_THREADS) && !defined(NACL)
3287 /* Arrange to postpone the signal while we are in a write fault */
3288 /* handler. This effectively makes the handler atomic w.r.t. */
3289 /* stopping the world for GC. */
3290 (void)sigaddset(&act.sa_mask, GC_get_suspend_signal())
3292 # endif /* !MSWIN32 */
3293 GC_VERBOSE_LOG_PRINTF(
3294 "Initializing mprotect virtual dirty bit implementation\n");
3295 if (GC_page_size % HBLKSIZE != 0) {
3296 ABORT("Page size not multiple of HBLKSIZE");
3298 # if !defined(MSWIN32) && !defined(MSWINCE)
3299 /* act.sa_restorer is deprecated and should not be initialized. */
3300 # if defined(GC_IRIX_THREADS)
3301 sigaction(SIGSEGV, 0, &oldact);
3302 sigaction(SIGSEGV, &act, 0);
3305 int res = sigaction(SIGSEGV, &act, &oldact);
3306 if (res != 0) ABORT("Sigaction failed");
     /* Remember the displaced SIGSEGV handler and whether it used    */
     /* the SA_SIGINFO (three-argument) convention.                   */
3309 if (oldact.sa_flags & SA_SIGINFO) {
3310 GC_old_segv_handler = oldact.sa_sigaction;
3311 GC_old_segv_handler_used_si = TRUE;
3313 GC_old_segv_handler = (SIG_HNDLR_PTR)(signed_word)oldact.sa_handler;
3314 GC_old_segv_handler_used_si = FALSE;
3316 if (GC_old_segv_handler == (SIG_HNDLR_PTR)(signed_word)SIG_IGN) {
3317 WARN("Previously ignored segmentation violation!?\n", 0);
3318 GC_old_segv_handler = (SIG_HNDLR_PTR)(signed_word)SIG_DFL;
3320 if (GC_old_segv_handler != (SIG_HNDLR_PTR)(signed_word)SIG_DFL) {
3321 GC_VERBOSE_LOG_PRINTF("Replaced other SIGSEGV handler\n");
3323 # if defined(HPUX) || defined(LINUX) || defined(HURD) \
3324 || (defined(FREEBSD) && (defined(__GLIBC__) || defined(SUNOS5SIGS)))
3325 sigaction(SIGBUS, &act, &oldact);
3326 if ((oldact.sa_flags & SA_SIGINFO) != 0) {
3327 GC_old_bus_handler = oldact.sa_sigaction;
3328 # if !defined(LINUX)
3329 GC_old_bus_handler_used_si = TRUE;
3332 GC_old_bus_handler = (SIG_HNDLR_PTR)(signed_word)oldact.sa_handler;
3334 if (GC_old_bus_handler == (SIG_HNDLR_PTR)(signed_word)SIG_IGN) {
3335 WARN("Previously ignored bus error!?\n", 0);
3336 # if !defined(LINUX)
3337 GC_old_bus_handler = (SIG_HNDLR_PTR)(signed_word)SIG_DFL;
3339 /* GC_old_bus_handler is not used by GC_write_fault_handler. */
3341 } else if (GC_old_bus_handler != (SIG_HNDLR_PTR)(signed_word)SIG_DFL) {
3342 GC_VERBOSE_LOG_PRINTF("Replaced other SIGBUS handler\n");
3344 # endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
3345 # endif /* ! MS windows */
3346 # if defined(GWW_VDB)
3347 if (GC_gww_dirty_init())
3350 # if defined(MSWIN32)
3351 GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
3352 if (GC_old_segv_handler != NULL) {
3353 GC_COND_LOG_PRINTF("Replaced other UnhandledExceptionFilter\n");
3355 GC_old_segv_handler = SIG_DFL;
3357 # elif defined(MSWINCE)
3358 /* MPROTECT_VDB is unsupported for WinCE at present. */
3359 /* FIXME: implement it (if possible). */
3361 # if defined(CPPCHECK) && defined(ADDRESS_SANITIZER)
3362 GC_noop1((word)&__asan_default_options);
3366 #endif /* !DARWIN */
/* Report which parts of the heap the incremental collector will      */
/* write-protect. If a page holds exactly one heap block, pointer-free */
/* blocks can be left unprotected; otherwise ptr-free blocks sharing a */
/* page with pointer-containing ones must be protected too.           */
/* NOTE(review): excerpt is line-sampled; braces/else lines missing.  */
3368 GC_API int GC_CALL GC_incremental_protection_needs(void)
3370 GC_ASSERT(GC_is_initialized);
3372 if (GC_page_size == HBLKSIZE) {
3373 return GC_PROTECTS_POINTER_HEAP;
3375 return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
3378 #define HAVE_INCREMENTAL_PROTECTION_NEEDS
/* A block is pointer-free iff its header descriptor is 0. */
3380 #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
/* True iff x is aligned to the (power-of-two) VM page size. */
3381 #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
/* Write-protect the heap so subsequent writes fault and set dirty    */
/* bits. If ptr-free blocks need not be protected, walk each heap     */
/* section block-by-block and protect only runs of pointer-containing */
/* (or free) blocks; otherwise protect each whole section.            */
/* NOTE(review): excerpt is line-sampled; some declarations (i, hhdr, */
/* nhblks, is_ptrfree) and braces are missing below.                  */
3383 STATIC void GC_protect_heap(void)
3386 GC_bool protect_all =
3387 (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP))
3389 for (i = 0; i < GC_n_heap_sects; i++) {
3390 ptr_t start = GC_heap_sects[i].hs_start;
3391 size_t len = GC_heap_sects[i].hs_bytes;
3394 PROTECT(start, len);
3396 struct hblk * current;
3397 struct hblk * current_start; /* Start of block to be protected. */
3398 struct hblk * limit;
3400 GC_ASSERT(PAGE_ALIGNED(len));
3401 GC_ASSERT(PAGE_ALIGNED(start));
3402 current_start = current = (struct hblk *)start;
3403 limit = (struct hblk *)(start + len);
3404 while ((word)current < (word)limit) {
3409 GC_ASSERT(PAGE_ALIGNED(current));
3410 GET_HDR(current, hhdr);
3411 if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
3412 /* This can happen only if we're at the beginning of a */
3413 /* heap segment, and a block spans heap segments. */
3414 /* We will handle that block as part of the preceding */
3416 GC_ASSERT(current_start == current);
3417 current_start = ++current;
3420 if (HBLK_IS_FREE(hhdr)) {
3421 GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
3422 nhblks = divHBLKSZ(hhdr -> hb_sz);
3423 is_ptrfree = TRUE; /* dirty on alloc */
3425 nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
3426 is_ptrfree = IS_PTRFREE(hhdr);
/* A ptr-free block ends the current run: protect what accumulated. */
3429 if ((word)current_start < (word)current) {
3430 PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
3432 current_start = (current += nhblks);
/* Protect any trailing run at the end of the section. */
3437 if ((word)current_start < (word)current) {
3438 PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
3445 * Acquiring the allocation lock here is dangerous, since this
3446 * can be called from within GC_call_with_alloc_lock, and the cord
3447 * package does so. On systems that allow nested lock acquisition, this
3451 /* We no longer wrap read by default, since that was causing too many */
3452 /* problems. It is preferred that the client instead avoids writing */
3453 /* to the write-protected heap with a system call. */
3454 #endif /* MPROTECT_VDB */
3457 /* This implementation assumes a Solaris 2.X like /proc */
3458 /* pseudo-file-system from which we can read page modified bits. This */
3459 /* facility is far from optimal (e.g. we would like to get the info for */
3460 /* only some of the address space), but it avoids intercepting system */
3464 # include <sys/types.h>
3465 # include <sys/signal.h>
3466 # include <sys/syscall.h>
3467 # include <sys/stat.h>
3469 # ifdef GC_NO_SYS_FAULT_H
3470 /* This exists only to check PROC_VDB code compilation (on Linux). */
3471 # define PG_MODIFIED 1
/* Minimal stand-ins for the Solaris /proc pagedata structures.       */
/* NOTE(review): excerpt is line-sampled; some struct members/braces  */
/* (and the start of struct prasmap) are missing below.               */
3472 struct prpageheader {
3473 int dummy[2]; /* pr_tstamp */
3474 unsigned long pr_nmap;
3475 unsigned long pr_npage;
3480 char dummy1[64+8]; /* pr_mapname, pr_offset */
3482 unsigned pr_pagesize;
3486 # include <sys/fault.h>
3487 # include <sys/procfs.h>
/* PROC_VDB state: grow-on-demand buffer for /proc pagedata reads and */
/* the file descriptor of /proc/<pid>/pagedata.                       */
3490 # define INITIAL_BUF_SZ 16384
3491 STATIC size_t GC_proc_buf_size = INITIAL_BUF_SZ;
3492 STATIC char *GC_proc_buf = NULL;
3493 STATIC int GC_proc_fd = 0;
/* Initialize the /proc-based dirty-bit implementation: open the      */
/* pagedata pseudo-file and allocate the read buffer.                 */
/* NOTE(review): excerpt is line-sampled; braces, the local 'buf'     */
/* declaration and return statements are missing below.               */
3495 GC_INNER GC_bool GC_dirty_init(void)
3499 if (GC_bytes_allocd != 0 || GC_bytes_allocd_before_gc != 0) {
/* Allocation happened before we could track it: conservatively mark */
/* every page as ever-written. */
3500 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3501 GC_VERBOSE_LOG_PRINTF(
3502 "Allocated %lu bytes: all pages may have been written\n",
3503 (unsigned long)(GC_bytes_allocd + GC_bytes_allocd_before_gc));
3506 (void)snprintf(buf, sizeof(buf), "/proc/%ld/pagedata", (long)getpid());
3507 buf[sizeof(buf) - 1] = '\0';
3508 GC_proc_fd = open(buf, O_RDONLY);
3509 if (GC_proc_fd < 0) {
3510 WARN("/proc open failed; cannot enable GC incremental mode\n", 0);
/* Avoid leaking the fd into children across exec. */
3513 if (syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC) == -1)
3514 WARN("Could not set FD_CLOEXEC for /proc\n", 0);
3516 GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
3517 if (GC_proc_buf == NULL)
3518 ABORT("Insufficient space for /proc read");
/* Read page-modification data from /proc into GC_proc_buf and fold   */
/* the per-page "modified" bits into GC_grungy_pages (skipped when    */
/* output_unneeded) and GC_written_pages. On a short read, retry once */
/* with a doubled buffer; if that also fails, conservatively mark all */
/* pages dirty/written.                                               */
/* NOTE(review): excerpt is line-sampled; locals (i, nmaps, new_buf,  */
/* h, limit), braces and #endif lines are missing below.              */
3522 GC_INLINE void GC_proc_read_dirty(GC_bool output_unneeded)
3526 char * bufp = GC_proc_buf;
3529 BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
3530 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3531 /* Retry with larger buffer. */
3532 size_t new_size = 2 * GC_proc_buf_size;
3535 WARN("/proc read failed: GC_proc_buf_size = %" WARN_PRIdPTR "\n",
3536 (signed_word)GC_proc_buf_size);
3537 new_buf = GC_scratch_alloc(new_size);
3539 GC_scratch_recycle_no_gww(bufp, GC_proc_buf_size);
3540 GC_proc_buf = bufp = new_buf;
3541 GC_proc_buf_size = new_size;
3543 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3544 WARN("Insufficient space for /proc read\n", 0);
/* Fallback: treat everything as dirty rather than lose updates. */
3546 if (!output_unneeded)
3547 memset(GC_grungy_pages, 0xff, sizeof (page_hash_table))
3548 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3553 /* Copy dirty bits into GC_grungy_pages */
3554 nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
3555 # ifdef DEBUG_DIRTY_BITS
3556 GC_log_printf("Proc VDB read: pr_nmap= %u, pr_npage= %lu\n",
3557 nmaps, ((struct prpageheader *)bufp)->pr_npage);
3559 # if defined(GC_NO_SYS_FAULT_H) && defined(CPPCHECK)
3560 GC_noop1(((struct prpageheader *)bufp)->dummy[0]);
3562 bufp += sizeof(struct prpageheader);
/* One prasmap header per mapping, followed by one byte per page. */
3563 for (i = 0; i < nmaps; i++) {
3564 struct prasmap * map = (struct prasmap *)bufp;
3565 ptr_t vaddr = (ptr_t)(map -> pr_vaddr);
3566 unsigned long npages = map -> pr_npage;
3567 unsigned pagesize = map -> pr_pagesize;
3570 # if defined(GC_NO_SYS_FAULT_H) && defined(CPPCHECK)
3571 GC_noop1(map->dummy1[0] + map->dummy2[0]);
3573 # ifdef DEBUG_DIRTY_BITS
3575 "pr_vaddr= %p, npage= %lu, mflags= 0x%x, pagesize= 0x%x\n",
3576 (void *)vaddr, npages, map->pr_mflags, pagesize);
3579 bufp += sizeof(struct prasmap);
3580 limit = vaddr + pagesize * npages;
3581 for (; (word)vaddr < (word)limit; vaddr += pagesize) {
3582 if ((*bufp++) & PG_MODIFIED) {
3584 ptr_t next_vaddr = vaddr + pagesize;
3585 # ifdef DEBUG_DIRTY_BITS
3586 GC_log_printf("dirty page at: %p\n", (void *)vaddr);
/* Mark every heap block overlapping this OS page as grungy. */
3588 for (h = (struct hblk *)vaddr;
3589 (word)h < (word)next_vaddr; h++) {
3590 word index = PHT_HASH(h);
3592 set_pht_entry_from_index(GC_grungy_pages, index);
/* Per-page bytes are padded to a long boundary between mappings. */
3596 bufp = (char *)(((word)bufp + (sizeof(long)-1))
3597 & ~(word)(sizeof(long)-1));
3599 # ifdef DEBUG_DIRTY_BITS
3600 GC_log_printf("Proc VDB read done\n");
3603 /* Update GC_written_pages (even if output_unneeded). */
3604 GC_or_pages(GC_written_pages, GC_grungy_pages);
3608 #endif /* PROC_VDB */
3612 # include "vd/PCR_VD.h"
/* PCR_VDB: fixed-size dirty-bit window covering NPAGES heap blocks   */
/* starting at GC_vd_base.                                            */
3614 # define NPAGES (32*1024) /* 128 MB */
3616 PCR_VD_DB GC_grungy_bits[NPAGES];
3618 STATIC ptr_t GC_vd_base = NULL;
3619 /* Address corresponding to GC_grungy_bits[0] */
3620 /* HBLKSIZE aligned. */
/* Initialize the PCR virtual-dirty-bit facility over the window      */
/* [GC_vd_base, GC_vd_base + NPAGES*HBLKSIZE).                        */
/* NOTE(review): excerpt is line-sampled; braces, the PCR_VD_Start    */
/* result comparison and the return are missing below.                */
3622 GC_INNER GC_bool GC_dirty_init(void)
3624 /* For the time being, we assume the heap generally grows up */
3625 GC_vd_base = GC_heap_sects[0].hs_start;
3626 if (GC_vd_base == 0) {
3627 ABORT("Bad initial heap segment");
3629 if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
3631 ABORT("Dirty bit initialization failed");
3635 #endif /* PCR_VDB */
3637 #ifndef GC_DISABLE_INCREMENTAL
/* TRUE when the client marks pages dirty manually (GC_dirty_inner)   */
/* instead of relying on OS page-fault tracking.                      */
3638 GC_INNER GC_bool GC_manual_vdb = FALSE;
3640 /* Manually mark the page containing p as dirty. Logically, this */
3641 /* dirties the entire object. */
3642 GC_INNER void GC_dirty_inner(const void *p)
3644 word index = PHT_HASH(p);
3646 # if defined(MPROTECT_VDB)
3647 /* Do not update GC_dirty_pages if it should be followed by the */
3648 /* page unprotection. */
3649 GC_ASSERT(GC_manual_vdb);
3651 async_set_pht_entry_from_index(GC_dirty_pages, index);
3654 /* Retrieve system dirty bits for the heap to a local buffer (unless */
3655 /* output_unneeded). Restore the system's notion of which pages are */
3656 /* dirty. We assume that either the world is stopped or it is OK to */
3657 /* lose dirty bits while it's happening (as in GC_enable_incremental).*/
/* NOTE(review): excerpt is line-sampled; the surrounding #if         */
/* conditions, braces and GC_protect_heap call (if any) are partly    */
/* missing below.                                                     */
3658 GC_INNER void GC_read_dirty(GC_bool output_unneeded)
3661 # if defined(MPROTECT_VDB)
3662 || !GC_GWW_AVAILABLE()
/* Snapshot the fault-collected dirty set, then clear it for the next */
/* collection cycle. */
3665 if (!output_unneeded)
3666 BCOPY((/* no volatile */ void *)GC_dirty_pages, GC_grungy_pages,
3667 sizeof(GC_dirty_pages));
3668 BZERO((/* no volatile */ void *)GC_dirty_pages,
3669 sizeof(GC_dirty_pages));
3670 # ifdef MPROTECT_VDB
3678 GC_gww_read_dirty(output_unneeded);
3679 # elif defined(PROC_VDB)
3680 GC_proc_read_dirty(output_unneeded);
3681 # elif defined(PCR_VDB)
3682 /* lazily enable dirty bits on newly added heap sects */
3684 static int onhs = 0;
3685 int nhs = GC_n_heap_sects;
3686 for (; onhs < nhs; onhs++) {
3687 PCR_VD_WriteProtectEnable(
3688 GC_heap_sects[onhs].hs_start,
3689 GC_heap_sects[onhs].hs_bytes);
3692 if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
3694 ABORT("Dirty bit read failed");
3699 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
3700 /* If the actual page size is different, this returns TRUE if any */
3701 /* of the pages overlapping h are dirty. This routine may err on the */
3702 /* side of labeling pages as dirty (and this implementation does). */
/* NOTE(review): excerpt is line-sampled; the per-VDB #if structure   */
/* and some returns/braces are missing below.                         */
3703 GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
3706 if (!GC_manual_vdb) {
/* PCR_VDB: blocks outside the tracked window have no dirty bit. */
3707 if ((word)h < (word)GC_vd_base
3708 || (word)h >= (word)(GC_vd_base + NPAGES * HBLKSIZE)) {
3711 return GC_grungy_bits[h-(struct hblk*)GC_vd_base] & PCR_VD_DB_dirtyBit;
3713 # elif defined(DEFAULT_VDB)
/* A block with no header yet is conservatively treated as dirty. */
3717 return NULL == HDR(h)
3718 || get_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h));
3721 # if defined(CHECKSUMS) || defined(PROC_VDB)
3722 /* Could any valid GC heap pointer ever have been written to this page? */
/* NOTE(review): excerpt is line-sampled; braces and the non-GWW/PROC */
/* fallback return are missing below.                                 */
3723 GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h)
3725 # if defined(GWW_VDB) || defined(PROC_VDB)
3726 # ifdef MPROTECT_VDB
3727 if (!GC_GWW_AVAILABLE())
/* Headerless blocks are conservatively reported as ever-written. */
3730 return NULL == HDR(h)
3731 || get_pht_entry_from_index(GC_written_pages, PHT_HASH(h));
3733 /* TODO: implement me for MANUAL_VDB. */
3738 # endif /* CHECKSUMS || PROC_VDB */
3740 /* We expect block h to be written shortly. Ensure that all pages */
3741 /* containing any part of the n hblks starting at h are no longer */
3742 /* protected. If is_ptrfree is false, also ensure that they will */
3743 /* subsequently appear to be dirty. Not allowed to call GC_printf */
3744 /* (and the friends) here, see Win32 GC_stop_world for the details. */
/* NOTE(review): excerpt is line-sampled; the parameter list tail,    */
/* per-VDB #if structure, braces and returns are partly missing.      */
3745 GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
3750 if (!GC_auto_incremental)
/* PCR_VDB: toggling protection leaves the pages writable and dirty. */
3752 PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
3753 PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
3754 # elif defined(MPROTECT_VDB)
3755 struct hblk * h_trunc; /* Truncated to page boundary */
3756 struct hblk * h_end; /* Page boundary following block end */
3757 struct hblk * current;
3759 if (!GC_auto_incremental || GC_GWW_AVAILABLE())
3761 h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
3762 h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size - 1)
3763 & ~(GC_page_size - 1));
/* Single-page case: if already flagged dirty, it is already unprotected. */
3764 if (h_end == h_trunc + 1 &&
3765 get_pht_entry_from_index(GC_dirty_pages, PHT_HASH(h_trunc))) {
3766 /* already marked dirty, and hence unprotected. */
3769 for (current = h_trunc; (word)current < (word)h_end; ++current) {
3770 word index = PHT_HASH(current);
/* For a ptr-free block only the partial boundary pages need marking. */
3772 if (!is_ptrfree || (word)current < (word)h
3773 || (word)current >= (word)(h + nblocks)) {
3774 async_set_pht_entry_from_index(GC_dirty_pages, index);
3777 UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
3779 /* Ignore write hints. They don't help us here. */
3780 (void)h; (void)nblocks; (void)is_ptrfree;
3783 #endif /* !GC_DISABLE_INCREMENTAL */
3785 #if defined(MPROTECT_VDB) && defined(DARWIN)
3786 /* The following sources were used as a "reference" for this exception
3788 1. Apple's mach/xnu documentation
3789 2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
3790 omnigroup's macosx-dev list.
3791 www.omnigroup.com/mailman/archive/macosx-dev/2000-June/014178.html
3792 3. macosx-nat.c from Apple's GDB source code.
3795 /* The bug that caused all this trouble should now be fixed. This should
3796 eventually be removed if all goes well. */
3798 /* #define BROKEN_EXCEPTION_HANDLING */
3800 #include <mach/mach.h>
3801 #include <mach/mach_error.h>
3802 #include <mach/exception.h>
3803 #include <mach/task.h>
3804 #include <pthread.h>
3808 /* Some of the following prototypes are missing in any header, although */
3809 /* they are documented. Some are in mach/exc.h file. */
3811 exc_server(mach_msg_header_t *, mach_msg_header_t *);
3813 extern kern_return_t
3814 exception_raise(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
3815 exception_data_t, mach_msg_type_number_t);
3817 extern kern_return_t
3818 exception_raise_state(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
3819 exception_data_t, mach_msg_type_number_t,
3820 thread_state_flavor_t*, thread_state_t,
3821 mach_msg_type_number_t, thread_state_t,
3822 mach_msg_type_number_t*);
3824 extern kern_return_t
3825 exception_raise_state_identity(mach_port_t, mach_port_t, mach_port_t,
3826 exception_type_t, exception_data_t,
3827 mach_msg_type_number_t, thread_state_flavor_t*,
3828 thread_state_t, mach_msg_type_number_t,
3829 thread_state_t, mach_msg_type_number_t*);
3831 GC_API_OSCALL kern_return_t
3832 catch_exception_raise(mach_port_t exception_port, mach_port_t thread,
3833 mach_port_t task, exception_type_t exception,
3834 exception_data_t code,
3835 mach_msg_type_number_t code_count);
3837 GC_API_OSCALL kern_return_t
3838 catch_exception_raise_state(mach_port_name_t exception_port,
3839 int exception, exception_data_t code,
3840 mach_msg_type_number_t codeCnt, int flavor,
3841 thread_state_t old_state, int old_stateCnt,
3842 thread_state_t new_state, int new_stateCnt);
3844 GC_API_OSCALL kern_return_t
3845 catch_exception_raise_state_identity(mach_port_name_t exception_port,
3846 mach_port_t thread, mach_port_t task, int exception,
3847 exception_data_t code, mach_msg_type_number_t codeCnt,
3848 int flavor, thread_state_t old_state, int old_stateCnt,
3849 thread_state_t new_state, int new_stateCnt);
3853 /* These should never be called, but just in case... */
/* Stub: the GC registers EXCEPTION_DEFAULT behavior only, so the OS  */
/* should never invoke the state-based callback.                      */
/* NOTE(review): excerpt is line-sampled; body braces are missing.    */
3854 GC_API_OSCALL kern_return_t
3855 catch_exception_raise_state(mach_port_name_t exception_port GC_ATTR_UNUSED,
3856 int exception GC_ATTR_UNUSED, exception_data_t code GC_ATTR_UNUSED,
3857 mach_msg_type_number_t codeCnt GC_ATTR_UNUSED, int flavor GC_ATTR_UNUSED,
3858 thread_state_t old_state GC_ATTR_UNUSED, int old_stateCnt GC_ATTR_UNUSED,
3859 thread_state_t new_state GC_ATTR_UNUSED, int new_stateCnt GC_ATTR_UNUSED)
3861 ABORT_RET("Unexpected catch_exception_raise_state invocation");
3862 return(KERN_INVALID_ARGUMENT);
/* Stub companion to catch_exception_raise_state; also never expected */
/* to be invoked by the OS (only EXCEPTION_DEFAULT is registered).    */
/* NOTE(review): excerpt is line-sampled; body braces are missing.    */
3865 GC_API_OSCALL kern_return_t
3866 catch_exception_raise_state_identity(
3867 mach_port_name_t exception_port GC_ATTR_UNUSED,
3868 mach_port_t thread GC_ATTR_UNUSED, mach_port_t task GC_ATTR_UNUSED,
3869 int exception GC_ATTR_UNUSED, exception_data_t code GC_ATTR_UNUSED,
3870 mach_msg_type_number_t codeCnt GC_ATTR_UNUSED, int flavor GC_ATTR_UNUSED,
3871 thread_state_t old_state GC_ATTR_UNUSED, int old_stateCnt GC_ATTR_UNUSED,
3872 thread_state_t new_state GC_ATTR_UNUSED, int new_stateCnt GC_ATTR_UNUSED)
3874 ABORT_RET("Unexpected catch_exception_raise_state_identity invocation");
3875 return(KERN_INVALID_ARGUMENT);
3878 #define MAX_EXCEPTION_PORTS 16
/* Saved task exception ports displaced by the GC's own handler, so   */
/* unrelated exceptions can be forwarded (see GC_forward_exception).  */
/* NOTE(review): excerpt is line-sampled; struct/initializer headers  */
/* and closing braces are missing around the fragments below.         */
3881 mach_msg_type_number_t count;
3882 exception_mask_t masks[MAX_EXCEPTION_PORTS];
3883 exception_handler_t ports[MAX_EXCEPTION_PORTS];
3884 exception_behavior_t behaviors[MAX_EXCEPTION_PORTS];
3885 thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
/* GC_ports: the GC's own exception (and, with THREADS, reply) ports, */
/* plus function pointers kept live so the linker cannot strip the    */
/* catch_exception_raise* entry points.                               */
3888 STATIC struct ports_s {
3889 void (*volatile os_callback[3])(void);
3890 mach_port_t exception;
3891 # if defined(THREADS)
3896 /* This is to prevent stripping these routines as dead. */
3897 (void (*)(void))catch_exception_raise,
3898 (void (*)(void))catch_exception_raise_state,
3899 (void (*)(void))catch_exception_raise_state_identity
3902 0, /* for 'exception' */
/* Message/state plumbing for the dedicated mprotect handler thread.  */
/* NOTE(review): excerpt is line-sampled; the struct/enum wrappers    */
/* around these fragments are missing.                                */
3908 mach_msg_header_t head;
3915 } GC_mprotect_state_t;
3918 /* FIXME: 1 and 2 seem to be safe to use in the msgh_id field, but it */
3919 /* is not documented. Use the source and see if they should be OK. */
3921 # define ID_RESUME 2
3923 /* This value is only used on the reply port. */
3926 STATIC GC_mprotect_state_t GC_mprotect_state = GC_MP_NORMAL;
3928 /* The following should ONLY be called when the world is stopped. */
/* Send control message 'id' (ID_STOP/ID_RESUME) to the handler       */
/* thread's exception port and wait for its ID_ACK on the reply port. */
/* NOTE(review): excerpt is line-sampled; the union/buf declaration   */
/* and braces are partly missing below.                               */
3929 STATIC void GC_mprotect_thread_notify(mach_msg_id_t id)
3933 mach_msg_trailer_t trailer;
3935 mach_msg_return_t r;
3938 buf.msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
3939 buf.msg.head.msgh_size = sizeof(buf.msg);
3940 buf.msg.head.msgh_remote_port = GC_ports.exception;
3941 buf.msg.head.msgh_local_port = MACH_PORT_NULL;
3942 buf.msg.head.msgh_id = id;
/* Combined send+receive: block until the handler thread acknowledges. */
3944 r = mach_msg(&buf.msg.head, MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_LARGE,
3945 sizeof(buf.msg), sizeof(buf), GC_ports.reply,
3946 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
3947 if (r != MACH_MSG_SUCCESS)
3948 ABORT("mach_msg failed in GC_mprotect_thread_notify");
3949 if (buf.msg.head.msgh_id != ID_ACK)
3950 ABORT("Invalid ack in GC_mprotect_thread_notify");
3953 /* Should only be called by the mprotect thread */
/* Acknowledge a stop/resume request by sending ID_ACK to the reply   */
/* port that GC_mprotect_thread_notify is waiting on.                 */
/* NOTE(review): excerpt is line-sampled; the msg declaration and     */
/* braces are partly missing below.                                   */
3954 STATIC void GC_mprotect_thread_reply(void)
3957 mach_msg_return_t r;
3960 msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
3961 msg.head.msgh_size = sizeof(msg);
3962 msg.head.msgh_remote_port = GC_ports.reply;
3963 msg.head.msgh_local_port = MACH_PORT_NULL;
3964 msg.head.msgh_id = ID_ACK;
3966 r = mach_msg(&msg.head, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
3967 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
3968 if (r != MACH_MSG_SUCCESS)
3969 ABORT("mach_msg failed in GC_mprotect_thread_reply");
/* Tell the handler thread to start discarding (GC_mprotect_stop) or  */
/* resume handling (GC_mprotect_resume) protection faults; both block */
/* until acknowledged.                                                */
/* NOTE(review): excerpt is line-sampled; function braces missing.    */
3972 GC_INNER void GC_mprotect_stop(void)
3974 GC_mprotect_thread_notify(ID_STOP);
3977 GC_INNER void GC_mprotect_resume(void)
3979 GC_mprotect_thread_notify(ID_RESUME);
3983 /* The compiler should optimize away any GC_mprotect_state computations */
3984 # define GC_mprotect_state GC_MP_NORMAL
3985 #endif /* !THREADS */
/* Opaque receive buffers for exception messages and replies.         */
/* NOTE(review): excerpt is line-sampled; the struct mp_reply_s /     */
/* mp_msg_s wrappers around these members are incomplete.             */
3988 mach_msg_header_t head;
3993 mach_msg_header_t head;
3994 mach_msg_body_t msgh_body;
/* Body of the dedicated Mach exception-handling thread: loop forever */
/* receiving exception (or ID_STOP/ID_RESUME control) messages on     */
/* GC_ports.exception, dispatch them through exc_server (which calls  */
/* catch_exception_raise), and send the reply.                        */
/* NOTE(review): excerpt is line-sampled; locals (id), braces, and    */
/* several control-flow lines are missing below.                      */
3998 STATIC void *GC_mprotect_thread(void *arg)
4000 mach_msg_return_t r;
4001 /* These two structures contain some private kernel data. We don't */
4002 /* need to access any of it so we don't bother defining a proper */
4003 /* struct. The correct definitions are in the xnu source code. */
4004 struct mp_reply_s reply;
4005 struct mp_msg_s msg;
4008 if ((word)arg == GC_WORD_MAX) return 0; /* to prevent a compiler warning */
4009 # if defined(CPPCHECK)
4010 reply.data[0] = 0; /* to prevent "field unused" warnings */
4014 # if defined(THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
4015 GC_darwin_register_mach_handler_thread(mach_thread_self());
/* While discarding, poll with a zero timeout so a quiet period ends  */
/* the discarding phase via MACH_RCV_TIMED_OUT.                       */
4019 r = mach_msg(&msg.head, MACH_RCV_MSG | MACH_RCV_LARGE |
4020 (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
4021 0, sizeof(msg), GC_ports.exception,
4022 GC_mprotect_state == GC_MP_DISCARDING ? 0
4023 : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
4024 id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;
4026 # if defined(THREADS)
4027 if(GC_mprotect_state == GC_MP_DISCARDING) {
4028 if(r == MACH_RCV_TIMED_OUT) {
4029 GC_mprotect_state = GC_MP_STOPPED;
4030 GC_mprotect_thread_reply();
4033 if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
4034 ABORT("Out of order mprotect thread request");
4036 # endif /* THREADS */
4038 if (r != MACH_MSG_SUCCESS) {
4039 ABORT_ARG2("mach_msg failed",
4040 ": errcode= %d (%s)", (int)r, mach_error_string(r));
/* Control messages: validate the state transition before switching. */
4044 # if defined(THREADS)
4046 if(GC_mprotect_state != GC_MP_NORMAL)
4047 ABORT("Called mprotect_stop when state wasn't normal");
4048 GC_mprotect_state = GC_MP_DISCARDING;
4051 if(GC_mprotect_state != GC_MP_STOPPED)
4052 ABORT("Called mprotect_resume when state wasn't stopped");
4053 GC_mprotect_state = GC_MP_NORMAL;
4054 GC_mprotect_thread_reply();
4056 # endif /* THREADS */
4058 /* Handle the message (calls catch_exception_raise) */
4059 if(!exc_server(&msg.head, &reply.head))
4060 ABORT("exc_server failed");
4061 /* Send the reply */
4062 r = mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0,
4063 MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
4065 if(r != MACH_MSG_SUCCESS) {
4066 /* This will fail if the thread dies, but the thread */
4067 /* shouldn't die... */
4068 # ifdef BROKEN_EXCEPTION_HANDLING
4069 GC_err_printf("mach_msg failed with %d %s while sending "
4070 "exc reply\n", (int)r, mach_error_string(r));
4072 ABORT("mach_msg failed while sending exception reply");
4079 /* All this SIGBUS code shouldn't be necessary. All protection faults should
4080 be going through the mach exception handler. However, it seems a SIGBUS is
4081 occasionally sent for some unknown reason. Even more odd, it seems to be
4082 meaningless and safe to ignore. */
4083 #ifdef BROKEN_EXCEPTION_HANDLING
4085 /* Updates to this aren't atomic, but the SIGBUS'es seem pretty rare. */
/* Even if this doesn't get updated properly, it isn't really a problem. */
3087 is intentionally absent -- see NOTE below.
/* Counts consecutive spurious SIGBUS signals; reset in               */
/* catch_exception_raise on each mach exception handled.              */
3087 STATIC int GC_sigbus_count = 0;
/* Signal handler that ignores the spurious SIGBUS signals observed   */
/* under the broken Darwin exception handling, aborting only after 8  */
/* in a row.                                                          */
/* NOTE(review): excerpt is line-sampled; braces and the 'num' check  */
/* context are partly missing below.                                  */
4089 STATIC void GC_darwin_sigbus(int num, siginfo_t *sip, void *context)
4092 ABORT("Got a non-sigbus signal in the sigbus handler");
4094 /* Ugh... some seem safe to ignore, but too many in a row probably means
4095 trouble. GC_sigbus_count is reset for each mach exception that is
4097 if (GC_sigbus_count >= 8) {
4098 ABORT("Got more than 8 SIGBUSs in a row!");
4101 WARN("Ignoring SIGBUS\n", 0);
4104 #endif /* BROKEN_EXCEPTION_HANDLING */
/* Darwin mprotect VDB initialization: allocate the Mach exception    */
/* (and reply) ports, save then replace the task's EXC_BAD_ACCESS     */
/* ports, spawn the detached GC_mprotect_thread, and (under           */
/* BROKEN_EXCEPTION_HANDLING) install a SIGBUS-ignoring handler.      */
/* NOTE(review): excerpt is line-sampled; locals (r, me, thread),     */
/* braces, returns and some #endif lines are missing below.           */
4106 GC_INNER GC_bool GC_dirty_init(void)
4111 pthread_attr_t attr;
4112 exception_mask_t mask;
4114 # ifdef CAN_HANDLE_FORK
4115 if (GC_handle_fork) {
4116 /* To both support GC incremental mode and GC functions usage in */
4117 /* the forked child, pthread_atfork should be used to install */
4118 /* handlers that switch off GC_incremental in the child */
4119 /* gracefully (unprotecting all pages and clearing */
4120 /* GC_mach_handler_thread). For now, we just disable incremental */
4121 /* mode if fork() handling is requested by the client. */
4122 WARN("Can't turn on GC incremental mode as fork()"
4123 " handling requested\n", 0);
4128 GC_VERBOSE_LOG_PRINTF("Initializing mach/darwin mprotect"
4129 " virtual dirty bit implementation\n");
4130 # ifdef BROKEN_EXCEPTION_HANDLING
4131 WARN("Enabling workarounds for various darwin "
4132 "exception handling bugs\n", 0);
4134 if (GC_page_size % HBLKSIZE != 0) {
4135 ABORT("Page size not multiple of HBLKSIZE");
4138 GC_task_self = me = mach_task_self();
4140 r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.exception);
4141 /* TODO: WARN and return FALSE in case of a failure. */
4142 if (r != KERN_SUCCESS)
4143 ABORT("mach_port_allocate failed (exception port)");
4145 r = mach_port_insert_right(me, GC_ports.exception, GC_ports.exception,
4146 MACH_MSG_TYPE_MAKE_SEND);
4147 if (r != KERN_SUCCESS)
4148 ABORT("mach_port_insert_right failed (exception port)");
4150 # if defined(THREADS)
4151 r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.reply);
4152 if(r != KERN_SUCCESS)
4153 ABORT("mach_port_allocate failed (reply port)");
4156 /* The exceptions we want to catch */
4157 mask = EXC_MASK_BAD_ACCESS;
/* Save the old ports so non-GC faults can be forwarded later. */
4159 r = task_get_exception_ports(me, mask, GC_old_exc_ports.masks,
4160 &GC_old_exc_ports.count, GC_old_exc_ports.ports,
4161 GC_old_exc_ports.behaviors,
4162 GC_old_exc_ports.flavors);
4163 if (r != KERN_SUCCESS)
4164 ABORT("task_get_exception_ports failed");
4166 r = task_set_exception_ports(me, mask, GC_ports.exception, EXCEPTION_DEFAULT,
4167 GC_MACH_THREAD_STATE);
4168 if (r != KERN_SUCCESS)
4169 ABORT("task_set_exception_ports failed");
4170 if (pthread_attr_init(&attr) != 0)
4171 ABORT("pthread_attr_init failed");
4172 if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
4173 ABORT("pthread_attr_setdetachedstate failed");
4175 # undef pthread_create
4176 /* This will call the real pthread function, not our wrapper */
4177 if (pthread_create(&thread, &attr, GC_mprotect_thread, NULL) != 0)
4178 ABORT("pthread_create failed");
4179 (void)pthread_attr_destroy(&attr);
4181 /* Setup the sigbus handler for ignoring the meaningless SIGBUSs */
4182 # ifdef BROKEN_EXCEPTION_HANDLING
4184 struct sigaction sa, oldsa;
4185 sa.sa_handler = (SIG_HNDLR_PTR)GC_darwin_sigbus;
4186 sigemptyset(&sa.sa_mask);
4187 sa.sa_flags = SA_RESTART|SA_SIGINFO;
4188 /* sa.sa_restorer is deprecated and should not be initialized. */
4189 if (sigaction(SIGBUS, &sa, &oldsa) < 0)
4190 ABORT("sigaction failed");
4191 if (oldsa.sa_handler != (SIG_HNDLR_PTR)(signed_word)SIG_DFL) {
4192 GC_VERBOSE_LOG_PRINTF("Replaced other SIGBUS handler\n");
4195 # endif /* BROKEN_EXCEPTION_HANDLING */
4196 # if defined(CPPCHECK)
4197 GC_noop1((word)GC_ports.os_callback[0]);
4202 /* The source code for Apple's GDB was used as a reference for the */
4203 /* exception forwarding code. This code is similar to the GDB code only */
4204 /* because there is only one way to do it. */
/* Forward an exception the GC does not handle to whichever handler   */
/* was registered before GC_dirty_init replaced the task's exception  */
/* ports, honoring that handler's behavior (state/identity/default).  */
/* NOTE(review): excerpt is line-sampled; locals (i, r, port),        */
/* braces, the switch header and the final return are missing below.  */
4205 STATIC kern_return_t GC_forward_exception(mach_port_t thread, mach_port_t task,
4206 exception_type_t exception,
4207 exception_data_t data,
4208 mach_msg_type_number_t data_count)
4213 exception_behavior_t behavior;
4214 thread_state_flavor_t flavor;
4216 thread_state_data_t thread_state;
4217 mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;
/* Locate the saved port whose mask covers this exception type. */
4219 for (i=0; i < GC_old_exc_ports.count; i++)
4220 if (GC_old_exc_ports.masks[i] & (1 << exception))
4222 if (i == GC_old_exc_ports.count)
4223 ABORT("No handler for exception!");
4225 port = GC_old_exc_ports.ports[i];
4226 behavior = GC_old_exc_ports.behaviors[i];
4227 flavor = GC_old_exc_ports.flavors[i];
4229 if (behavior == EXCEPTION_STATE || behavior == EXCEPTION_STATE_IDENTITY) {
4230 r = thread_get_state(thread, flavor, thread_state, &thread_state_count);
4231 if(r != KERN_SUCCESS)
4232 ABORT("thread_get_state failed in forward_exception");
4236 case EXCEPTION_STATE:
4237 r = exception_raise_state(port, thread, task, exception, data, data_count,
4238 &flavor, thread_state, thread_state_count,
4239 thread_state, &thread_state_count);
4241 case EXCEPTION_STATE_IDENTITY:
4242 r = exception_raise_state_identity(port, thread, task, exception, data,
4243 data_count, &flavor, thread_state,
4244 thread_state_count, thread_state,
4245 &thread_state_count);
4247 /* case EXCEPTION_DEFAULT: */ /* default signal handlers */
4248 default: /* user-supplied signal handlers */
4249 r = exception_raise(port, thread, task, exception, data, data_count);
/* State-based handlers may have modified the thread state: apply it. */
4252 if (behavior == EXCEPTION_STATE || behavior == EXCEPTION_STATE_IDENTITY) {
4253 r = thread_set_state(thread, flavor, thread_state, thread_state_count);
4254 if (r != KERN_SUCCESS)
4255 ABORT("thread_set_state failed in forward_exception");
/* Shorthand used by catch_exception_raise to delegate to the saved   */
/* handler.                                                           */
4260 #define FWD() GC_forward_exception(thread, task, exception, code, code_count)
/* Per-architecture selection of the Mach exception thread-state      */
/* flavor, its element count, its C type, and the field that holds    */
/* the faulting data address.                                         */
/* NOTE(review): excerpt is line-sampled; some #if/#elif/#endif lines */
/* of this chain are missing below.                                   */
4263 # define DARWIN_EXC_STATE ARM_EXCEPTION_STATE
4264 # define DARWIN_EXC_STATE_COUNT ARM_EXCEPTION_STATE_COUNT
4265 # define DARWIN_EXC_STATE_T arm_exception_state_t
4266 # define DARWIN_EXC_STATE_DAR THREAD_FLD_NAME(far)
4267 #elif defined(AARCH64)
4268 # define DARWIN_EXC_STATE ARM_EXCEPTION_STATE64
4269 # define DARWIN_EXC_STATE_COUNT ARM_EXCEPTION_STATE64_COUNT
4270 # define DARWIN_EXC_STATE_T arm_exception_state64_t
4271 # define DARWIN_EXC_STATE_DAR THREAD_FLD_NAME(far)
4272 #elif defined(POWERPC)
4273 # if CPP_WORDSZ == 32
4274 # define DARWIN_EXC_STATE PPC_EXCEPTION_STATE
4275 # define DARWIN_EXC_STATE_COUNT PPC_EXCEPTION_STATE_COUNT
4276 # define DARWIN_EXC_STATE_T ppc_exception_state_t
4278 # define DARWIN_EXC_STATE PPC_EXCEPTION_STATE64
4279 # define DARWIN_EXC_STATE_COUNT PPC_EXCEPTION_STATE64_COUNT
4280 # define DARWIN_EXC_STATE_T ppc_exception_state64_t
4282 # define DARWIN_EXC_STATE_DAR THREAD_FLD_NAME(dar)
4283 #elif defined(I386) || defined(X86_64)
4284 # if CPP_WORDSZ == 32
4285 # if defined(i386_EXCEPTION_STATE_COUNT) \
4286 && !defined(x86_EXCEPTION_STATE32_COUNT)
4287 /* Use old naming convention for 32-bit x86. */
4288 # define DARWIN_EXC_STATE i386_EXCEPTION_STATE
4289 # define DARWIN_EXC_STATE_COUNT i386_EXCEPTION_STATE_COUNT
4290 # define DARWIN_EXC_STATE_T i386_exception_state_t
4292 # define DARWIN_EXC_STATE x86_EXCEPTION_STATE32
4293 # define DARWIN_EXC_STATE_COUNT x86_EXCEPTION_STATE32_COUNT
4294 # define DARWIN_EXC_STATE_T x86_exception_state32_t
4297 # define DARWIN_EXC_STATE x86_EXCEPTION_STATE64
4298 # define DARWIN_EXC_STATE_COUNT x86_EXCEPTION_STATE64_COUNT
4299 # define DARWIN_EXC_STATE_T x86_exception_state64_t
4301 # define DARWIN_EXC_STATE_DAR THREAD_FLD_NAME(faultvaddr)
4302 #elif !defined(CPPCHECK)
4303 # error FIXME for non-arm/ppc/x86 darwin
4306 /* This violates the namespace rules but there isn't anything that can */
4307 /* be done about it. The exception handling stuff is hard coded to */
4308 /* call this. catch_exception_raise, catch_exception_raise_state and */
4309 /* catch_exception_raise_state_identity are called from OS. */
/* Core Darwin fault handler, invoked via exc_server: for a           */
/* KERN_PROTECTION_FAILURE inside the GC heap, unprotect the faulting */
/* page and mark its heap blocks dirty; anything else is forwarded to */
/* the previously installed handler (FWD).                            */
/* NOTE(review): excerpt is line-sampled; locals (r, addr, i), braces */
/* and several FWD()/return lines are missing below.                  */
4310 GC_API_OSCALL kern_return_t
4311 catch_exception_raise(mach_port_t exception_port GC_ATTR_UNUSED,
4312 mach_port_t thread, mach_port_t task GC_ATTR_UNUSED,
4313 exception_type_t exception, exception_data_t code,
4314 mach_msg_type_number_t code_count GC_ATTR_UNUSED)
4318 thread_state_flavor_t flavor = DARWIN_EXC_STATE;
4319 mach_msg_type_number_t exc_state_count = DARWIN_EXC_STATE_COUNT;
4320 DARWIN_EXC_STATE_T exc_state;
4322 if (exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
4323 # ifdef DEBUG_EXCEPTION_HANDLING
4324 /* We aren't interested, pass it on to the old handler */
4325 GC_log_printf("Exception: 0x%x Code: 0x%x 0x%x in catch...\n",
4326 exception, code_count > 0 ? code[0] : -1,
4327 code_count > 1 ? code[1] : -1);
4332 r = thread_get_state(thread, flavor, (natural_t*)&exc_state,
4334 if(r != KERN_SUCCESS) {
4335 /* The thread is supposed to be suspended while the exception */
4336 /* handler is called. This shouldn't fail. */
4337 # ifdef BROKEN_EXCEPTION_HANDLING
4338 GC_err_printf("thread_get_state failed in catch_exception_raise\n");
4339 return KERN_SUCCESS;
4341 ABORT("thread_get_state failed in catch_exception_raise");
4345 /* This is the address that caused the fault */
4346 addr = (char*) exc_state.DARWIN_EXC_STATE_DAR;
4347 if (!is_header_found_async(addr)) {
4348 /* Ugh... just like the SIGBUS problem above, it seems we get */
4349 /* a bogus KERN_PROTECTION_FAILURE every once in a while. We wait */
4350 /* till we get a bunch in a row before doing anything about it. */
4351 /* If a "real" fault ever occurs it'll just keep faulting over and */
4352 /* over and we'll hit the limit pretty quickly. */
4353 # ifdef BROKEN_EXCEPTION_HANDLING
4354 static char *last_fault;
4355 static int last_fault_count;
4357 if(addr != last_fault) {
4359 last_fault_count = 0;
4361 if(++last_fault_count < 32) {
4362 if(last_fault_count == 1)
4363 WARN("Ignoring KERN_PROTECTION_FAILURE at %p\n", addr);
4364 return KERN_SUCCESS;
4367 GC_err_printf("Unexpected KERN_PROTECTION_FAILURE at %p; aborting...\n",
4369 /* Can't pass it along to the signal handler because that is */
4370 /* ignoring SIGBUS signals. We also shouldn't call ABORT here as */
4371 /* signals don't always work too well from the exception handler. */
4373 # else /* BROKEN_EXCEPTION_HANDLING */
4374 /* Pass it along to the next exception handler
4375 (which should call SIGBUS/SIGSEGV) */
4377 # endif /* !BROKEN_EXCEPTION_HANDLING */
4380 # ifdef BROKEN_EXCEPTION_HANDLING
4381 /* Reset the number of consecutive SIGBUSs */
4382 GC_sigbus_count = 0;
4385 if (GC_mprotect_state == GC_MP_NORMAL) { /* common case */
4386 struct hblk * h = (struct hblk*)((word)addr & ~(GC_page_size-1));
4389 UNPROTECT(h, GC_page_size);
/* Mark every heap block on the unprotected page as dirty. */
4390 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
4391 word index = PHT_HASH(h+i);
4392 async_set_pht_entry_from_index(GC_dirty_pages, index);
4394 } else if (GC_mprotect_state == GC_MP_DISCARDING) {
4395 /* Lie to the thread for now. No sense UNPROTECT()ing the memory
4396 when we're just going to PROTECT() it again later. The thread
4397 will just fault again once it resumes */
4399 /* Shouldn't happen, I don't think */
4400 GC_err_printf("KERN_PROTECTION_FAILURE while world is stopped\n");
4403 return KERN_SUCCESS;
/* Mark the Mach exception-handler entry points so that the Darwin */
/* "strip" tool preserves them in stripped binaries; the OS resolves */
/* them dynamically by name. */
4407 #ifndef NO_DESC_CATCH_EXCEPTION_RAISE
4408 /* These symbols should have REFERENCED_DYNAMICALLY (0x10) bit set to */
4409 /* let strip know they are not to be stripped. */
4410 __asm__(".desc _catch_exception_raise, 0x10");
4411 __asm__(".desc _catch_exception_raise_state, 0x10");
4412 __asm__(".desc _catch_exception_raise_state_identity, 0x10");
4415 #endif /* DARWIN && MPROTECT_VDB */
/* Default implementation for platforms without their own version: */
/* report that the client needs no special page protection to use */
/* incremental collection (returns GC_PROTECTS_NONE). */
4417 #ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
4418 GC_API int GC_CALL GC_incremental_protection_needs(void)
4420 GC_ASSERT(GC_is_initialized);
4421 return GC_PROTECTS_NONE;
4423 #endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
4426 /* Undo sbrk() redirection. */
4430 /* If value is non-zero then allocate executable memory. */
/* Records the client's preference in GC_pages_executable. Must be */
/* called before GC initialization (enforced by the assertion). */
4431 GC_API void GC_CALL GC_set_pages_executable(int value)
4433 GC_ASSERT(!GC_is_initialized);
4434 /* Even if IGNORE_PAGES_EXECUTABLE is defined, GC_pages_executable is */
4435 /* touched here to prevent a compiler warning. */
4436 GC_pages_executable = (GC_bool)(value != 0);
4439 /* Returns non-zero if the GC-allocated memory is executable. */
4440 /* GC_get_pages_executable is defined after all the places */
4441 /* where GC_get_pages_executable is undefined. */
4442 GC_API int GC_CALL GC_get_pages_executable(void)
4444 # ifdef IGNORE_PAGES_EXECUTABLE
4445 return 1; /* Always allocate executable memory. */
/* Otherwise report the value previously set via */
/* GC_set_pages_executable (or the build-time default). */
4447 return (int)GC_pages_executable;
4451 /* Call stack save code for debugging. Should probably be in */
4452 /* mach_dep.c, but that requires reorganization. */
4454 /* I suspect the following works for most X86 *nix variants, so */
4455 /* long as the frame pointer is explicitly stored. In the case of gcc, */
4456 /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
/* Per-platform definition of "struct frame": either declared here */
/* (x86/Linux layout with saved frame pointer and argument slots) or */
/* pulled in from the system's <machine/frame.h> / <sys/frame.h>. */
4457 #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
4458 # include <features.h>
4461 struct frame *fr_savfp;
4464 long fr_arg[NARGS]; /* All the arguments go here. */
4471 # include <features.h>
4473 # if defined(SAVE_CALL_CHAIN)
4477 struct frame *fr_savfp;
4486 # elif defined (DRSNX)
4487 # include <sys/sparc/frame.h>
4488 # elif defined(OPENBSD)
4490 # elif defined(FREEBSD) || defined(NETBSD)
4491 # include <machine/frame.h>
4493 # include <sys/frame.h>
/* NOTE(review): presumably guards against NARGS > 6, for which the */
/* fixed argument-register/slot scheme breaks down -- confirm. */
4496 # error We only know how to get the first 6 arguments
4500 #ifdef NEED_CALLINFO
4501 /* Fill in the pc and argument information for up to NFRAMES of my */
4502 /* callers. Ignore my frame and my callers frame. */
4505 # include <unistd.h>
4508 #endif /* NEED_CALLINFO */
/* Pick the backtrace provider: MSVC debug helpers on Windows, */
/* otherwise the glibc-style <execinfo.h> backtrace() family. */
4510 #if defined(GC_HAVE_BUILTIN_BACKTRACE)
4512 # include "private/msvc_dbg.h"
4514 # include <execinfo.h>
4518 #ifdef SAVE_CALL_CHAIN
/* Use the builtin backtrace() only when callinfo entries are */
/* layout-compatible with a plain array of pc values (no argument */
/* slots, no padding). */
4520 #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
4521 && defined(GC_HAVE_BUILTIN_BACKTRACE)
4523 #ifdef REDIRECT_MALLOC
4524 /* Deal with possible malloc calls in backtrace by omitting */
4525 /* the infinitely recursing backtrace. */
/* Per-thread reentry flag: set while backtrace() runs so that a */
/* redirected malloc inside it does not recurse into GC_save_callers. */
4527 __thread /* If your compiler doesn't understand this */
4528 /* you could use something like pthread_getspecific. */
4530 GC_bool GC_in_save_callers = FALSE;
/* Record up to NFRAMES caller pc values into info[] using the */
/* builtin backtrace(); unused trailing entries are zeroed. */
4533 GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
4535 void * tmp_info[NFRAMES + 1];
4537 # define IGNORE_FRAMES 1
4539 /* We retrieve NFRAMES+1 pc values, but discard the first, since it */
4540 /* points to our own frame. */
4541 # ifdef REDIRECT_MALLOC
/* Reentered via a redirected malloc inside backtrace(): record a */
/* sentinel (our own address) and bail out instead of recursing. */
4542 if (GC_in_save_callers) {
4543 info[0].ci_pc = (word)(&GC_save_callers);
4544 for (i = 1; i < NFRAMES; ++i) info[i].ci_pc = 0;
4547 GC_in_save_callers = TRUE;
4550 GC_ASSERT(I_HOLD_LOCK());
4551 /* backtrace may call dl_iterate_phdr which is also */
4552 /* used by GC_register_dynamic_libraries, and */
4553 /* dl_iterate_phdr is not guaranteed to be reentrant. */
/* The BCOPY below relies on callinfo being exactly one pointer. */
4555 GC_STATIC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
4556 npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
4557 if (npcs > IGNORE_FRAMES)
4558 BCOPY(&tmp_info[IGNORE_FRAMES], info,
4559 (npcs - IGNORE_FRAMES) * sizeof(void *));
/* Zero-fill any entries backtrace() did not supply. */
4560 for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
4561 # ifdef REDIRECT_MALLOC
4562 GC_in_save_callers = FALSE;
4566 #else /* No builtin backtrace; do it ourselves */
/* Map the generic FR_SAVFP/FR_SAVPC accessors onto the field names */
/* used by this platform's struct frame. */
4568 #if (defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD)) && defined(SPARC)
4569 # define FR_SAVFP fr_fp
4570 # define FR_SAVPC fr_pc
4572 # define FR_SAVFP fr_savfp
4573 # define FR_SAVPC fr_savpc
/* 64-bit SPARC stores frame pointers with a bias that must be added */
/* back before dereferencing (see BIAS usage below). */
4576 #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
/* Manual frame-pointer walk used when no builtin backtrace is */
/* available: follow saved frame pointers up the stack, recording */
/* each frame's saved pc (and, if NARGS > 0, its argument words) */
/* into info[] until NFRAMES frames or the stack bottom is reached. */
4582 GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
4584 struct frame *frame;
4588 /* We assume this is turned on only with gcc as the compiler. */
/* x86: read the current frame pointer directly from %ebp. */
4589 asm("movl %%ebp,%0" : "=r"(frame));
/* SPARC: flush register windows to the stack first, then start */
/* from the saved frame pointer (BIAS corrects the 64-bit offset). */
4592 frame = (struct frame *)GC_save_regs_in_stack();
4593 fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
/* Walk towards the stack bottom; stop if the chain leaves the */
/* valid stack region or NFRAMES entries have been filled. */
4596 for (; !((word)fp HOTTER_THAN (word)frame)
4598 && !((word)GC_stackbottom HOTTER_THAN (word)fp)
4599 # elif defined(STACK_GROWS_UP)
4602 && nframes < NFRAMES;
4603 fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
4608 info[nframes].ci_pc = fp->FR_SAVPC;
/* Arguments are stored bit-inverted; GC_print_callers undoes the */
/* inversion when printing (see ~ there as well). */
4610 for (i = 0; i < NARGS; i++) {
4611 info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
4613 # endif /* NARGS > 0 */
/* Terminate the list if fewer than NFRAMES frames were found. */
4615 if (nframes < NFRAMES) info[nframes].ci_pc = 0;
4622 #ifdef NEED_CALLINFO
4624 /* Print info to stderr. We do NOT hold the allocation lock */
/* Pretty-print a saved call chain: one line per frame, with */
/* symbol names via backtrace_symbols() where available and, on */
/* Linux, source file:line via an external addr2line invocation. */
4625 GC_INNER void GC_print_callers(struct callinfo info[NFRAMES])
/* Guards against recursive invocation (e.g. if printing itself */
/* triggers an allocation that wants to print a call chain). */
4628 static int reentry_count = 0;
4631 /* FIXME: This should probably use a different lock, so that we */
4632 /* become callable with or without the allocation lock. */
4638 GC_err_printf("\tCaller at allocation:\n");
4640 GC_err_printf("\tCall chain at allocation:\n");
4642 for (i = 0; i < NFRAMES; i++) {
4643 # if defined(LINUX) && !defined(SMALL_CONFIG)
4644 GC_bool stop = FALSE;
/* A zero pc terminates the saved chain. */
4647 if (0 == info[i].ci_pc)
/* Arguments were saved bit-inverted; ~ restores the real value. */
4653 GC_err_printf("\t\targs: ");
4654 for (j = 0; j < NARGS; j++) {
4655 if (j != 0) GC_err_printf(", ");
4656 GC_err_printf("%d (0x%X)", ~(info[i].ci_arg[j]),
4657 ~(info[i].ci_arg[j]));
4659 GC_err_printf("\n");
4662 if (reentry_count > 1) {
4663 /* We were called during an allocation during */
4664 /* a previous GC_print_callers call; punt. */
4665 GC_err_printf("\t\t##PC##= 0x%lx\n",
4666 (unsigned long)info[i].ci_pc);
/* Preferred path: ask backtrace_symbols() for a symbolic name; */
/* fall back to printing the raw pc in a local buffer. */
4672 # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
4673 && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
4675 backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
4676 if (sym_name != NULL) {
4681 (void)snprintf(buf, sizeof(buf), "##PC##= 0x%lx",
4682 (unsigned long)info[i].ci_pc);
4683 buf[sizeof(buf) - 1] = '\0';
4686 # if defined(LINUX) && !defined(SMALL_CONFIG)
4687 /* Try for a line number. */
4691 static char exe_name[EXE_SZ];
4693 char cmd_buf[CMD_SZ];
4694 # define RESULT_SZ 200
4695 static char result_buf[RESULT_SZ];
4698 # define PRELOAD_SZ 200
4699 char preload_buf[PRELOAD_SZ];
/* Cached across calls: the executable path only needs resolving */
/* once, and a prior failure disables further attempts. */
4700 static GC_bool found_exe_name = FALSE;
4701 static GC_bool will_fail = FALSE;
4703 /* Try to get it via a hairy and expensive scheme. */
4704 /* First we get the name of the executable: */
4707 if (!found_exe_name) {
4708 int ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
4710 if (ret_code < 0 || ret_code >= EXE_SZ
4711 || exe_name[0] != '/') {
4712 will_fail = TRUE; /* Don't try again. */
/* readlink() does not NUL-terminate; do it ourselves. */
4715 exe_name[ret_code] = '\0';
4716 found_exe_name = TRUE;
4718 /* Then we use popen to start addr2line -e <exe> <addr> */
4719 /* There are faster ways to do this, but hopefully this */
4720 /* isn't time critical. */
4721 (void)snprintf(cmd_buf, sizeof(cmd_buf),
4722 "/usr/bin/addr2line -f -e %s 0x%lx",
4723 exe_name, (unsigned long)info[i].ci_pc);
4724 cmd_buf[sizeof(cmd_buf) - 1] = '\0';
/* Temporarily drop LD_PRELOAD so the child addr2line process is */
/* not run with the GC (or anything else) preloaded into it. */
4725 old_preload = GETENV("LD_PRELOAD");
4726 if (0 != old_preload) {
4727 size_t old_len = strlen(old_preload);
4728 if (old_len >= PRELOAD_SZ) {
4732 BCOPY(old_preload, preload_buf, old_len + 1);
4733 unsetenv ("LD_PRELOAD");
4735 pipe = popen(cmd_buf, "r");
4736 if (0 != old_preload
4737 && 0 != setenv ("LD_PRELOAD", preload_buf, 0)) {
4738 WARN("Failed to reset LD_PRELOAD\n", 0);
4744 result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe);
4746 if (0 == result_len) {
/* Trim a trailing newline and NUL-terminate the captured output. */
4750 if (result_buf[result_len - 1] == '\n') --result_len;
4751 result_buf[result_len] = 0;
/* "??" or a ":0" line number means addr2line could not resolve */
/* the address; keep the symbolic fallback instead. */
4752 if (result_buf[0] == '?'
4753 || (result_buf[result_len-2] == ':'
4754 && result_buf[result_len-1] == '0'))
4756 /* Get rid of embedded newline, if any. Test for "main" */
4758 char * nl = strchr(result_buf, '\n');
4760 && (word)nl < (word)(result_buf + result_len)) {
/* Compare only the first line (the function name) to "main". */
4763 if (strncmp(result_buf, "main",
4765 ? (size_t)((word)nl /* a cppcheck workaround */
4766 - COVERT_DATAFLOW(result_buf))
4767 : result_len) == 0) {
4771 if (result_len < RESULT_SZ - 25) {
4772 /* Add in hex address */
4773 (void)snprintf(&result_buf[result_len],
4774 sizeof(result_buf) - result_len,
4775 " [0x%lx]", (unsigned long)info[i].ci_pc);
4776 result_buf[sizeof(result_buf) - 1] = '\0';
4778 # if defined(CPPCHECK)
4779 GC_noop1((unsigned char)name[0]);
4780 /* name computed previously is discarded */
4785 GC_err_printf("\t\t%s\n", name);
4786 # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
4787 && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
4788 if (sym_name != NULL)
4789 free(sym_name); /* May call GC_[debug_]free; that's OK */
4792 # if defined(LINUX) && !defined(SMALL_CONFIG)
4802 #endif /* NEED_CALLINFO */
4804 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
4805 /* Dump /proc/self/maps to GC_stderr, to enable looking up names for */
4806 /* addresses in FIND_LEAK output. */
4807 void GC_print_address_map(void)
4811 GC_err_printf("---------- Begin address map ----------\n");
/* GC_get_maps() returns the maps text, or NULL on failure. */
4812 maps = GC_get_maps();
4813 GC_err_puts(maps != NULL ? maps : "Failed to get map!\n");
4814 GC_err_printf("---------- End address map ----------\n");
4816 #endif /* LINUX && ELF */