From 06009b0a0858b7367fd6cc2ed879c48593f4d5ea Mon Sep 17 00:00:00 2001 From: Ivan Maidanski Date: Wed, 15 Aug 2018 00:50:59 +0300 Subject: [PATCH] New API to turn on manual VDB at runtime Manual VDB is now enabled by GC_set_manual_vdb_allowed(1) if called before entering the GC incremental mode. * alloc.c (GC_allocobj): Expand TRUE_INCREMENTAL macro. * darwin_stop_world.c [MPROTECT_VDB] (GC_stop_world, GC_start_world): Use GC_auto_incremental instead of GC_incremental. * mark.c (alloc_mark_stack): Likewise. * mark.c [PROC_VDB] (GC_push_all): Likewise. * mark.c [!NEED_FIXUP_POINTER && THREADS && MPROTECT_VDB] (GC_push_all_stack): Likewise. * pthread_support.c [CAN_HANDLE_FORK && GC_DARWIN_THREADS && MPROTECT_VDB] (GC_atfork_prepare): Likewise. * win32_threads.c [MPROTECT_VDB && !CYGWIN32] (GC_register_my_thread_inner): Likewise. * win32_threads.c [MPROTECT_VDB] (UNPROTECT_THREAD): Likewise. * doc/gcdescr.md (Generational Collection and Dirty Bits): Update documentation for the manual VDB. * include/gc.h (GC_end_stubborn_change): Update comment. * mark_rts.c (GC_push_all_stack_partially_eager): Likewise. * include/gc.h (GC_set_manual_vdb_allowed, GC_get_manual_vdb_allowed): New public functions. * include/private/gc_priv.h (GC_grungy_pages, GC_dirty_pages): Define for all VDB modes. * include/private/gc_priv.h (GC_auto_incremental, GC_manual_vdb): Define. * include/private/gc_priv.h [!GC_DISABLE_INCREMENTAL] (GC_dirty): Use GC_manual_vdb instead of GC_incremental. * include/private/gcconfig.h (GWW_VDB, MPROTECT_VDB, PCR_VDB): Do not undefine if MANUAL_VDB. * mallocx.c (GC_generic_malloc_many): Always allocate a single object (and call GC_dirty_inner/REACHABLE_AFTER_DIRTY) if GC_manual_vdb. * misc.c [!CAN_HANDLE_FORK && DARWIN && MPROTECT_VDB && !THREADS && !SMALL_CONFIG] (GC_set_handle_fork): Do not ABORT if GC_manual_vdb. * misc.c [!SMALL_CONFIG] (manual_vdb_allowed): New static variable. 
* misc.c [!SMALL_CONFIG] (GC_set_manual_vdb_allowed, GC_get_manual_vdb_allowed): Implement. * misc.c [!CHECKSUMS && !SMALL_CONFIG] (GC_init, GC_enable_incremental): Set GC_manual_vdb and GC_incremental to true if manual_vdb_allowed; do not call GC_dirty_init if manual_vdb_allowed. * os_dep.c: Update comment about MANUAL_VDB. * os_dep.c [MANUAL_VDB] (GC_dirty_init, async_set_pht_entry_from_index): Remove. * os_dep.c [!GC_DISABLE_INCREMENTAL] (GC_manual_vdb): Define global variable. * os_dep.c [!GC_DISABLE_INCREMENTAL] (GC_dirty_inner): Define regardless of the VDB mode; add FIXME. * os_dep.c [!GC_DISABLE_INCREMENTAL] (GC_read_dirty, GC_page_was_dirty, GC_remove_protection): Implement for the case of GC_manual_vdb is true; do not depend on MANUAL_VDB. * tests/disclaim_test.c [TEST_MANUAL_VDB] (main): Call GC_set_manual_vdb_allowed(1) before GC_INIT. * tests/staticrootslib.c [TEST_MANUAL_VDB] (libsrl_init): Likewise. * tests/test_cpp.cc [TEST_MANUAL_VDB] (main): Likewise. * tests/test.c (INIT_MANUAL_VDB_ALLOWED): New macro. * tests/test.c (GC_COND_INIT): Invoke INIT_MANUAL_VDB_ALLOWED (before GC_OPT_INIT). * tests/test.c [!SMALL_CONFIG] (main): Call GC_get_manual_vdb_allowed. 
--- alloc.c | 9 +++- darwin_stop_world.c | 4 +- doc/gcdescr.md | 7 +-- include/gc.h | 16 ++++++- include/private/gc_priv.h | 47 +++++++++---------- include/private/gcconfig.h | 5 +- mallocx.c | 18 +++---- mark.c | 17 +++---- mark_rts.c | 2 +- misc.c | 52 ++++++++++++++++++--- os_dep.c | 96 +++++++++++++++++++------------------- pthread_support.c | 2 +- tests/disclaim_test.c | 3 ++ tests/staticrootslib.c | 3 ++ tests/test.c | 33 +++++++++++-- tests/test_cpp.cc | 3 ++ win32_threads.c | 5 +- 17 files changed, 202 insertions(+), 120 deletions(-) diff --git a/alloc.c b/alloc.c index fc01d772..2a53dfb3 100644 --- a/alloc.c +++ b/alloc.c @@ -1520,8 +1520,13 @@ GC_INNER ptr_t GC_allocobj(size_t gran, int kind) while (*flh == 0) { ENTER_GC(); - /* Do our share of marking work */ - if(TRUE_INCREMENTAL) GC_collect_a_little_inner(1); +# ifndef GC_DISABLE_INCREMENTAL + if (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED) { + /* True incremental mode, not just generational. */ + /* Do our share of marking work. */ + GC_collect_a_little_inner(1); + } +# endif /* Sweep blocks for objects of this size */ GC_ASSERT(!GC_is_full_gc || NULL == GC_obj_kinds[kind].ok_reclaim_list diff --git a/darwin_stop_world.c b/darwin_stop_world.c index 22208669..ff581a81 100644 --- a/darwin_stop_world.c +++ b/darwin_stop_world.c @@ -652,7 +652,7 @@ GC_INNER void GC_stop_world(void) } # ifdef MPROTECT_VDB - if(GC_incremental) { + if (GC_auto_incremental) { GC_mprotect_stop(); } # endif @@ -699,7 +699,7 @@ GC_INNER void GC_start_world(void) GC_log_printf("World starting\n"); # endif # ifdef MPROTECT_VDB - if(GC_incremental) { + if (GC_auto_incremental) { GC_mprotect_resume(); } # endif diff --git a/doc/gcdescr.md b/doc/gcdescr.md index 881a9615..fbc02085 100644 --- a/doc/gcdescr.md +++ b/doc/gcdescr.md @@ -397,9 +397,10 @@ We keep track of modified pages using one of several distinct mechanisms: performance may actually be better with `mprotect` and signals.) 
* (`PCR_VDB`) By relying on an external dirty bit implementation, in this case the one in Xerox PCR. - * (`MANUAL_VDB`) Through explicit mutator cooperation. This requires the - client code to call `GC_end_stubborn_change` (followed by a number of - `GC_reachable_here` calls), and is rarely used. + * Through explicit mutator cooperation. This enabled by + `GC_set_manual_vdb_allowed(1)` call, and requires the client code to call + `GC_ptr_store_and_dirty` or `GC_end_stubborn_change` (followed by a number + of `GC_reachable_here` calls), and is rarely used. * (`DEFAULT_VDB`) By treating all pages as dirty. This is the default if none of the other techniques is known to be usable. (Practical only for testing.) diff --git a/include/gc.h b/include/gc.h index d043942c..8db915e9 100644 --- a/include/gc.h +++ b/include/gc.h @@ -525,8 +525,8 @@ GC_API GC_ATTR_DEPRECATED void GC_CALL GC_change_stubborn(const void *); /* Inform the collector that the object has been changed. */ /* Only non-NULL pointer stores into the object are considered to be */ -/* changes. Matters only if the library has been compiled with */ -/* MANUAL_VDB defined (otherwise the function does nothing). */ +/* changes. Matters only if the incremental collection is enabled in */ +/* the manual VDB mode (otherwise the function does nothing). */ /* Should be followed typically by GC_reachable_here called for each */ /* of the stored pointers. */ GC_API void GC_CALL GC_end_stubborn_change(const void *) GC_ATTR_NONNULL(1); @@ -804,6 +804,18 @@ GC_API int GC_CALL GC_is_disabled(void); /* both functions is equal. */ GC_API void GC_CALL GC_enable(void); +/* Select whether to use the manual VDB mode for the incremental */ +/* collection. Has no effect if called after enabling the incremental */ +/* collection. The default value is off unless the collector is */ +/* compiled with MANUAL_VDB defined. 
The manual VDB mode should be */ +/* used only if the client has the appropriate GC_END_STUBBORN_CHANGE */ +/* and GC_reachable_here (or, alternatively, GC_PTR_STORE_AND_DIRTY) */ +/* calls (to ensure proper write barriers). Both the setter and getter */ +/* are not synchronized, and are defined only if the library has been */ +/* compiled without SMALL_CONFIG. */ +GC_API void GC_CALL GC_set_manual_vdb_allowed(int); +GC_API int GC_CALL GC_get_manual_vdb_allowed(void); + /* Enable incremental/generational collection. Not advisable unless */ /* dirty bits are available or most heap objects are pointer-free */ /* (atomic) or immutable. Don't use in leak finding mode. Ignored if */ diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h index 3fb435ce..571643b4 100644 --- a/include/private/gc_priv.h +++ b/include/private/gc_priv.h @@ -1372,13 +1372,10 @@ struct _GC_arrays { char _valid_offsets[VALID_OFFSET_SZ]; /* GC_valid_offsets[i] == TRUE ==> i */ /* is registered as a displacement. */ -# if defined(PROC_VDB) || defined(MPROTECT_VDB) \ - || defined(GWW_VDB) || defined(MANUAL_VDB) +# ifndef GC_DISABLE_INCREMENTAL # define GC_grungy_pages GC_arrays._grungy_pages page_hash_table _grungy_pages; /* Pages that were dirty at last */ /* GC_read_dirty. */ -# endif -# if defined(MPROTECT_VDB) || defined(MANUAL_VDB) # define GC_dirty_pages GC_arrays._dirty_pages volatile page_hash_table _dirty_pages; /* Pages dirtied since last GC_read_dirty. */ @@ -1563,19 +1560,6 @@ GC_EXTERN word GC_black_list_spacing; extern word GC_free_bytes[]; /* Both remain visible to GNU GCJ. */ #endif -#ifdef GC_DISABLE_INCREMENTAL -# define GC_incremental FALSE - /* Hopefully allow optimizer to remove some code. */ -# define TRUE_INCREMENTAL FALSE -#else - GC_EXTERN GC_bool GC_incremental; - /* Using incremental/generational collection. */ - /* Assumes dirty bits are being maintained. 
*/ -# define TRUE_INCREMENTAL \ - (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED) - /* True incremental, not just generational, mode */ -#endif /* !GC_DISABLE_INCREMENTAL */ - GC_EXTERN word GC_root_size; /* Total size of registered root sections. */ GC_EXTERN GC_bool GC_debugging_started; @@ -2170,7 +2154,18 @@ GC_EXTERN GC_bool GC_print_back_height; /* accompanying routines are no-op in such a case. */ #endif -#ifndef GC_DISABLE_INCREMENTAL +#ifdef GC_DISABLE_INCREMENTAL +# define GC_incremental FALSE +# define GC_auto_incremental FALSE +# define GC_manual_vdb FALSE +# define GC_dirty(p) (void)(p) +# define REACHABLE_AFTER_DIRTY(p) (void)(p) + +#else /* !GC_DISABLE_INCREMENTAL */ + GC_EXTERN GC_bool GC_incremental; + /* Using incremental/generational collection. */ + /* Assumes dirty bits are being maintained. */ + /* Virtual dirty bit implementation: */ /* Each implementation exports the following: */ GC_INNER void GC_read_dirty(GC_bool output_unneeded); @@ -2196,16 +2191,18 @@ GC_EXTERN GC_bool GC_print_back_height; /* Returns true if dirty bits are maintained (otherwise */ /* it is OK to be called again if the client invokes */ /* GC_enable_incremental once more). */ -#endif /* !GC_DISABLE_INCREMENTAL */ -#ifdef MANUAL_VDB + GC_EXTERN GC_bool GC_manual_vdb; + /* The incremental collection is in the manual VDB */ + /* mode. Assumes GC_incremental is true. Should not */ + /* be modified once GC_incremental is set to true. */ + +# define GC_auto_incremental (GC_incremental && !GC_manual_vdb) + GC_INNER void GC_dirty_inner(const void *p); /* does not require locking */ -# define GC_dirty(p) (GC_incremental ? GC_dirty_inner(p) : (void)0) +# define GC_dirty(p) (GC_manual_vdb ? GC_dirty_inner(p) : (void)0) # define REACHABLE_AFTER_DIRTY(p) GC_reachable_here(p) -#else -# define GC_dirty(p) (void)(p) -# define REACHABLE_AFTER_DIRTY(p) (void)(p) -#endif +#endif /* !GC_DISABLE_INCREMENTAL */ /* Same as GC_base but excepts and returns a pointer to const object. 
*/ #define GC_base_C(p) ((const void *)GC_base((/* no const */ void *)(p))) diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h index a167751a..40d2ff5b 100644 --- a/include/private/gcconfig.h +++ b/include/private/gcconfig.h @@ -3098,8 +3098,7 @@ EXTERN_C_BEGIN # define MUNMAP_THRESHOLD 2 #endif -#if defined(GC_DISABLE_INCREMENTAL) || defined(DEFAULT_VDB) \ - || defined(MANUAL_VDB) +#if defined(GC_DISABLE_INCREMENTAL) || defined(DEFAULT_VDB) # undef GWW_VDB # undef MPROTECT_VDB # undef PCR_VDB @@ -3150,7 +3149,7 @@ EXTERN_C_BEGIN #endif #if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB) \ - && !defined(GWW_VDB) && !defined(DEFAULT_VDB) && !defined(MANUAL_VDB) \ + && !defined(GWW_VDB) && !defined(DEFAULT_VDB) \ && !defined(GC_DISABLE_INCREMENTAL) # define DEFAULT_VDB #endif diff --git a/mallocx.c b/mallocx.c index a318d5b9..9e93c707 100644 --- a/mallocx.c +++ b/mallocx.c @@ -311,21 +311,17 @@ GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result) DCL_LOCK_STATE; GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0); - if (!SMALL_OBJ(lb) -# ifdef MANUAL_VDB - /* Currently a single object is allocated. */ - /* TODO: GC_dirty should be called for each linked object (but */ - /* the last one) to support multiple objects allocation. */ - || GC_incremental -# endif - ) { + /* Currently a single object is always allocated if manual VDB. */ + /* TODO: GC_dirty should be called for each linked object (but */ + /* the last one) to support multiple objects allocation. 
*/ + if (!SMALL_OBJ(lb) || GC_manual_vdb) { op = GC_generic_malloc(lb, k); if (EXPECT(0 != op, TRUE)) obj_link(op) = 0; *result = op; -# ifdef MANUAL_VDB - if (GC_is_heap_ptr(result)) { - GC_dirty(result); +# ifndef GC_DISABLE_INCREMENTAL + if (GC_manual_vdb && GC_is_heap_ptr(result)) { + GC_dirty_inner(result); REACHABLE_AFTER_DIRTY(op); } # endif diff --git a/mark.c b/mark.c index a2960dfd..ee6dc830 100644 --- a/mark.c +++ b/mark.c @@ -1287,9 +1287,10 @@ static void alloc_mark_stack(size_t n) /* Don't recycle a stack segment obtained with the wrong flags. */ /* Win32 GetWriteWatch requires the right kind of memory. */ static GC_bool GC_incremental_at_stack_alloc = FALSE; - GC_bool recycle_old = (!GC_incremental || GC_incremental_at_stack_alloc); + GC_bool recycle_old = !GC_auto_incremental + || GC_incremental_at_stack_alloc; - GC_incremental_at_stack_alloc = GC_incremental; + GC_incremental_at_stack_alloc = GC_auto_incremental; # else # define recycle_old TRUE # endif @@ -1415,7 +1416,7 @@ GC_API void GC_CALL GC_push_all(void *bottom, void *top) GC_push_selected((ptr_t)bottom, (ptr_t)top, GC_page_was_dirty); } else { # ifdef PROC_VDB - if (GC_incremental) { + if (GC_auto_incremental) { /* Pages that were never dirtied cannot contain pointers. 
*/ GC_push_selected((ptr_t)bottom, (ptr_t)top, GC_page_was_ever_dirty); } else @@ -1598,18 +1599,18 @@ GC_API void GC_CALL GC_push_all_eager(void *bottom, void *top) GC_INNER void GC_push_all_stack(ptr_t bottom, ptr_t top) { -# if defined(THREADS) && defined(MPROTECT_VDB) - GC_push_all_eager(bottom, top); -# else # ifndef NEED_FIXUP_POINTER - if (GC_all_interior_pointers) { + if (GC_all_interior_pointers +# if defined(THREADS) && defined(MPROTECT_VDB) + && !GC_auto_incremental +# endif + ) { GC_push_all(bottom, top); } else # endif /* else */ { GC_push_all_eager(bottom, top); } -# endif } #if defined(WRAP_MARK_SOME) && defined(PARALLEL_MARK) diff --git a/mark_rts.c b/mark_rts.c index 541a0050..03b287f5 100644 --- a/mark_rts.c +++ b/mark_rts.c @@ -704,7 +704,7 @@ GC_INNER void GC_push_all_stack_sections(ptr_t lo, ptr_t hi, * register values are not lost. * Cold_gc_frame delimits the stack section that must be scanned * eagerly. A zero value indicates that no eager scanning is needed. - * We don't need to worry about the MANUAL_VDB case here, since this + * We don't need to worry about the manual VDB case here, since this * is only called in the single-threaded case. We assume that we * cannot collect between an assignment and the corresponding * GC_dirty() call. 
diff --git a/misc.c b/misc.c index c64fe2c1..7f1fbd1c 100644 --- a/misc.c +++ b/misc.c @@ -220,7 +220,11 @@ GC_API void GC_CALL GC_set_handle_fork(int value GC_ATTR_UNUSED) # elif defined(THREADS) || (defined(DARWIN) && defined(MPROTECT_VDB)) if (!GC_is_initialized && value) { # ifndef SMALL_CONFIG - GC_init(); /* just to initialize GC_stderr */ + GC_init(); /* to initialize GC_manual_vdb and GC_stderr */ +# ifndef THREADS + if (GC_manual_vdb) + return; +# endif # endif ABORT("fork() handling unsupported"); } @@ -841,6 +845,24 @@ GC_API int GC_CALL GC_is_init_called(void) void * context GC_ATTR_UNUSED) {} #endif +#ifndef SMALL_CONFIG +# ifdef MANUAL_VDB + static GC_bool manual_vdb_allowed = TRUE; +# else + static GC_bool manual_vdb_allowed = FALSE; +# endif + + GC_API void GC_CALL GC_set_manual_vdb_allowed(int value) + { + manual_vdb_allowed = (GC_bool)value; + } + + GC_API int GC_CALL GC_get_manual_vdb_allowed(void) + { + return (int)manual_vdb_allowed; + } +#endif /* !SMALL_CONFIG */ + STATIC word GC_parse_mem_size_arg(const char *str) { word result = 0; /* bad value */ @@ -1236,10 +1258,20 @@ GC_API void GC_CALL GC_init(void) # endif # ifndef GC_DISABLE_INCREMENTAL if (GC_incremental || 0 != GETENV("GC_ENABLE_INCREMENTAL")) { - /* For GWW_VDB on Win32, this needs to happen before any */ - /* heap memory is allocated. */ - GC_incremental = GC_dirty_init(); - GC_ASSERT(GC_bytes_allocd == 0); +# if defined(CHECKSUMS) || defined(SMALL_CONFIG) + /* TODO: Implement CHECKSUMS for manual VDB. */ +# else + if (manual_vdb_allowed) { + GC_manual_vdb = TRUE; + GC_incremental = TRUE; + } else +# endif + /* else */ { + /* For GWW_VDB on Win32, this needs to happen before any */ + /* heap memory is allocated. 
*/ + GC_incremental = GC_dirty_init(); + GC_ASSERT(GC_bytes_allocd == 0); + } } # endif @@ -1371,7 +1403,15 @@ GC_API void GC_CALL GC_enable_incremental(void) GC_init(); LOCK(); } else { - GC_incremental = GC_dirty_init(); +# if !defined(CHECKSUMS) && !defined(SMALL_CONFIG) + if (manual_vdb_allowed) { + GC_manual_vdb = TRUE; + GC_incremental = TRUE; + } else +# endif + /* else */ { + GC_incremental = GC_dirty_init(); + } } if (GC_incremental && !GC_dont_gc) { /* Can't easily do it if GC_dont_gc. */ diff --git a/os_dep.c b/os_dep.c index 8f3db5bd..56e8a841 100644 --- a/os_dep.c +++ b/os_dep.c @@ -2797,12 +2797,12 @@ GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void) * DEFAULT_VDB: A simple dummy implementation that treats every page * as possibly dirty. This makes incremental collection * useless, but the implementation is still correct. - * MANUAL_VDB: Stacks and static data are always considered dirty. + * Manual VDB: Stacks and static data are always considered dirty. * Heap pages are considered dirty if GC_dirty(p) has been * called on some pointer p pointing to somewhere inside * an object on that page. A GC_dirty() call on a large - * object directly dirties only a single page, but for - * MANUAL_VDB we are careful to treat an object with a dirty + * object directly dirties only a single page, but for the + * manual VDB we are careful to treat an object with a dirty * page as completely dirty. * In order to avoid races, an object must be marked dirty * after it is written, and a reference to the object @@ -2947,30 +2947,10 @@ GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void) GC_INNER GC_bool GC_dirty_init(void) { GC_VERBOSE_LOG_PRINTF("Initializing DEFAULT_VDB...\n"); - return TRUE; - } -#endif /* DEFAULT_VDB */ - -#ifdef MANUAL_VDB - /* Initialize virtual dirty bit implementation. 
*/ - GC_INNER GC_bool GC_dirty_init(void) - { - GC_VERBOSE_LOG_PRINTF("Initializing MANUAL_VDB...\n"); /* GC_dirty_pages and GC_grungy_pages are already cleared. */ return TRUE; } - -# define async_set_pht_entry_from_index(db, index) \ - set_pht_entry_from_index(db, index) /* for now */ - - /* Mark the page containing p as dirty. Logically, this dirties the */ - /* entire object. */ - GC_INNER void GC_dirty_inner(const void *p) - { - word index = PHT_HASH(p); - async_set_pht_entry_from_index(GC_dirty_pages, index); - } -#endif /* MANUAL_VDB */ +#endif /* DEFAULT_VDB */ #ifdef MPROTECT_VDB /* @@ -3721,33 +3701,50 @@ GC_INNER GC_bool GC_dirty_init(void) #endif /* PCR_VDB */ #ifndef GC_DISABLE_INCREMENTAL + GC_INNER GC_bool GC_manual_vdb = FALSE; + + /* Manually mark the page containing p as dirty. Logically, this */ + /* dirties the entire object. */ + GC_INNER void GC_dirty_inner(const void *p) + { + word index = PHT_HASH(p); + +# if defined(MPROTECT_VDB) + /* Do not update GC_dirty_pages if it should be followed by the */ + /* page unprotection. */ + GC_ASSERT(GC_manual_vdb); +# endif + set_pht_entry_from_index(GC_dirty_pages, index); /* FIXME: concurrent */ + } + /* Retrieve system dirty bits for the heap to a local buffer (unless */ /* output_unneeded). Restore the systems notion of which pages are */ /* dirty. 
We assume that either the world is stopped or it is OK to */ /* lose dirty bits while it's happening (as in GC_enable_incremental).*/ GC_INNER void GC_read_dirty(GC_bool output_unneeded) { -# if defined(MANUAL_VDB) || defined(MPROTECT_VDB) - if (!GC_GWW_AVAILABLE()) - { - if (!output_unneeded) - BCOPY((/* no volatile */ void *)GC_dirty_pages, GC_grungy_pages, - sizeof(GC_dirty_pages)); - BZERO((/* no volatile */ void *)GC_dirty_pages, + if (GC_manual_vdb +# if defined(MPROTECT_VDB) + || !GC_GWW_AVAILABLE() +# endif + ) { + if (!output_unneeded) + BCOPY((/* no volatile */ void *)GC_dirty_pages, GC_grungy_pages, sizeof(GC_dirty_pages)); -# ifdef MPROTECT_VDB + BZERO((/* no volatile */ void *)GC_dirty_pages, + sizeof(GC_dirty_pages)); +# ifdef MPROTECT_VDB + if (!GC_manual_vdb) GC_protect_heap(); -# endif - return; - } -# endif +# endif + return; + } # ifdef GWW_VDB GC_gww_read_dirty(output_unneeded); # elif defined(PROC_VDB) GC_proc_read_dirty(output_unneeded); # elif defined(PCR_VDB) - (void)output_unneeded; /* lazily enable dirty bits on newly added heap sects */ { static int onhs = 0; @@ -3762,8 +3759,6 @@ GC_INNER GC_bool GC_dirty_init(void) != PCR_ERes_okay) { ABORT("Dirty bit read failed"); } -# elif !defined(MPROTECT_VDB) - (void)output_unneeded; # endif } @@ -3774,18 +3769,19 @@ GC_INNER GC_bool GC_dirty_init(void) GC_INNER GC_bool GC_page_was_dirty(struct hblk *h) { # ifdef PCR_VDB - if ((word)h < (word)GC_vd_base - || (word)h >= (word)(GC_vd_base + NPAGES*HBLKSIZE)) { - return TRUE; + if (!GC_manual_vdb) { + if ((word)h < (word)GC_vd_base + || (word)h >= (word)(GC_vd_base + NPAGES * HBLKSIZE)) { + return TRUE; + } + return GC_grungy_bits[h-(struct hblk*)GC_vd_base] & PCR_VD_DB_dirtyBit; } - return GC_grungy_bits[h - (struct hblk*)GC_vd_base] & PCR_VD_DB_dirtyBit; # elif defined(DEFAULT_VDB) - (void)h; - return TRUE; -# else - return NULL == HDR(h) - || get_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h)); + if (!GC_manual_vdb) + return TRUE; # endif + 
return NULL == HDR(h) + || get_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h)); } # if defined(CHECKSUMS) || defined(PROC_VDB) @@ -3817,6 +3813,8 @@ GC_INNER GC_bool GC_dirty_init(void) { # ifdef PCR_VDB (void)is_ptrfree; + if (!GC_auto_incremental) + return; PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE); PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE); # elif defined(MPROTECT_VDB) @@ -3824,7 +3822,7 @@ GC_INNER GC_bool GC_dirty_init(void) struct hblk * h_end; /* Page boundary following block end */ struct hblk * current; - if (!GC_incremental || GC_GWW_AVAILABLE()) + if (!GC_auto_incremental || GC_GWW_AVAILABLE()) return; h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1)); h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size - 1) diff --git a/pthread_support.c b/pthread_support.c index 071e27c0..fb8db26b 100644 --- a/pthread_support.c +++ b/pthread_support.c @@ -1149,7 +1149,7 @@ static void fork_child_proc(void) { if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); # if defined(GC_DARWIN_THREADS) && defined(MPROTECT_VDB) - if (GC_incremental) { + if (GC_auto_incremental) { GC_ASSERT(0 == GC_handle_fork); ABORT("Unable to fork while mprotect_thread is running"); } diff --git a/tests/disclaim_test.c b/tests/disclaim_test.c index 5176bee5..edb6d0f4 100644 --- a/tests/disclaim_test.c +++ b/tests/disclaim_test.c @@ -223,6 +223,9 @@ int main(void) # endif GC_set_all_interior_pointers(0); /* for a stricter test */ +# ifdef TEST_MANUAL_VDB + GC_set_manual_vdb_allowed(1); +# endif GC_INIT(); GC_init_finalized_malloc(); # ifndef NO_INCREMENTAL diff --git a/tests/staticrootslib.c b/tests/staticrootslib.c index c7c0725a..5275ec8b 100644 --- a/tests/staticrootslib.c +++ b/tests/staticrootslib.c @@ -52,6 +52,9 @@ static struct treenode *root_nz[10] = { (struct treenode *)(GC_word)2 }; GC_TEST_EXPORT_API void * libsrl_init(void) { +# ifdef TEST_MANUAL_VDB + GC_set_manual_vdb_allowed(1); +# endif # ifndef STATICROOTSLIB_INIT_IN_MAIN GC_INIT(); # endif diff --git 
a/tests/test.c b/tests/test.c index 17f61d3d..1023b842 100644 --- a/tests/test.c +++ b/tests/test.c @@ -148,6 +148,14 @@ #include +#ifdef TEST_MANUAL_VDB +# define INIT_MANUAL_VDB_ALLOWED GC_set_manual_vdb_allowed(1) +#elif !defined(SMALL_CONFIG) +# define INIT_MANUAL_VDB_ALLOWED GC_set_manual_vdb_allowed(0) +#else +# define INIT_MANUAL_VDB_ALLOWED /* empty */ +#endif + #define CHECK_GCLIB_VERSION \ if (GC_get_version() != ((GC_VERSION_MAJOR<<16) \ | (GC_VERSION_MINOR<<8) \ @@ -177,7 +185,8 @@ #endif #define GC_COND_INIT() \ - INIT_FORK_SUPPORT; GC_OPT_INIT; CHECK_GCLIB_VERSION; \ + INIT_FORK_SUPPORT; INIT_MANUAL_VDB_ALLOWED; \ + GC_OPT_INIT; CHECK_GCLIB_VERSION; \ INIT_PRINT_STATS; INIT_FIND_LEAK; INIT_PERF_MEASUREMENT #define CHECK_OUT_OF_MEMORY(p) \ @@ -1862,7 +1871,13 @@ void GC_CALLBACK warn_proc(char *msg, GC_word p) GC_enable_incremental(); # endif if (GC_is_incremental_mode()) { - GC_printf("Switched to incremental mode\n"); +# ifndef SMALL_CONFIG + if (GC_get_manual_vdb_allowed()) { + GC_printf("Switched to incremental mode (manual VDB)\n"); + } else +# endif + /* else */ { + GC_printf("Switched to incremental mode\n"); # ifdef PROC_VDB GC_printf("Reading dirty bits from /proc\n"); # elif defined(GWW_VDB) @@ -1873,6 +1888,7 @@ void GC_CALLBACK warn_proc(char *msg, GC_word p) # elif defined(MPROTECT_VDB) GC_printf("Emulating dirty bits with mprotect/signals\n"); # endif /* MPROTECT_VDB && !GWW_VDB */ + } } # endif set_print_procs(); @@ -2294,10 +2310,17 @@ int main(void) GC_enable_incremental(); # endif if (GC_is_incremental_mode()) { - GC_printf("Switched to incremental mode\n"); -# ifdef MPROTECT_VDB - GC_printf("Emulating dirty bits with mprotect/signals\n"); +# ifndef SMALL_CONFIG + if (GC_get_manual_vdb_allowed()) { + GC_printf("Switched to incremental mode (manual VDB)\n"); + } else # endif + /* else */ { + GC_printf("Switched to incremental mode\n"); +# ifdef MPROTECT_VDB + GC_printf("Emulating dirty bits with mprotect/signals\n"); +# endif + } } # 
endif GC_set_min_bytes_allocd(1); diff --git a/tests/test_cpp.cc b/tests/test_cpp.cc index f97be6f4..6f7efad5 100644 --- a/tests/test_cpp.cc +++ b/tests/test_cpp.cc @@ -298,6 +298,9 @@ void* Undisguise( GC_word i ) { GC_set_all_interior_pointers(1); /* needed due to C++ multiple inheritance used */ +# ifdef TEST_MANUAL_VDB + GC_set_manual_vdb_allowed(1); +# endif GC_INIT(); # ifndef NO_INCREMENTAL GC_enable_incremental(); diff --git a/win32_threads.c b/win32_threads.c index a16aa9e2..2701b11c 100644 --- a/win32_threads.c +++ b/win32_threads.c @@ -393,7 +393,7 @@ STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb, /* documentation. There is empirical evidence that it */ /* isn't. - HB */ # if defined(MPROTECT_VDB) && !defined(CYGWIN32) - if (GC_incremental + if (GC_auto_incremental # ifdef GWW_VDB && !GC_gww_dirty_init() # endif @@ -622,7 +622,8 @@ GC_API void GC_CALL GC_register_altstack(void *stack GC_ATTR_UNUSED, /* lock may be required for fault handling. */ #if defined(MPROTECT_VDB) # define UNPROTECT_THREAD(t) \ - if (!GC_win32_dll_threads && GC_incremental && t != &first_thread) { \ + if (!GC_win32_dll_threads && GC_auto_incremental \ + && t != &first_thread) { \ GC_ASSERT(SMALL_OBJ(GC_size(t))); \ GC_remove_protection(HBLKPTR(t), 1, FALSE); \ } else (void)0 -- 2.40.0