(a cherry-pick of commit
c664ef4f from 'release-7_6')
This change matters only in the case of the MANUAL_VDB mode.
Also, GC_reachable_here calls are inserted after the GC_dirty ones.
Also, this commit actually disables multiple-object allocation in
GC_generic_malloc_many if MANUAL_VDB and the incremental mode are both
on.  (A sketch of the resulting store-barrier pattern follows the file
entries below.)
* finalize.c (GC_grow_table, GC_register_disappearing_link,
GC_unregister_disappearing_link_inner,
GC_move_disappearing_link_inner,
GC_register_finalizer_inner, ITERATE_DL_HASHTBL_END,
DELETE_DL_HASHTBL_ENTRY, GC_finalize, GC_enqueue_all_finalizers): Call
GC_dirty where needed.
* gcj_mlc.c [GC_GCJ_SUPPORT] (GC_gcj_malloc, GC_debug_gcj_malloc,
GC_gcj_malloc_ignore_off_page): Likewise.
* gcj_mlc.c (GC_gcj_malloc, GC_debug_gcj_malloc,
GC_gcj_malloc_ignore_off_page): Call
REACHABLE_AFTER_DIRTY(ptr_to_struct_containing_descr) after GC_dirty(op).
* mallocx.c [MANUAL_VDB] (GC_generic_malloc_many): Always fall back to
GC_generic_malloc (unless GC_incremental is off).
* mallocx.c [MANUAL_VDB] (GC_generic_malloc_many): If
GC_is_heap_ptr(result) then call GC_dirty(result) and
REACHABLE_AFTER_DIRTY(op) after storing the op pointer.
* pthread_start.c [GC_PTHREADS && !GC_WIN32_THREADS]
(GC_inner_start_routine): Likewise.
* pthread_support.c [GC_PTHREADS && !GC_WIN32_THREADS] (GC_new_thread,
GC_delete_thread, GC_delete_gc_thread): Likewise.
* specific.c [USE_CUSTOM_SPECIFIC] (GC_setspecific,
GC_remove_specific_after_fork): Likewise.
* typd_mlc.c (GC_make_sequence_descriptor, GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page, GC_calloc_explicitly_typed):
Likewise.
* win32_threads.c (GC_new_thread, GC_delete_gc_thread_no_free,
GC_delete_thread, GC_CreateThread): Likewise.
* win32_threads.c [!CYGWIN32 && !MSWINCE && !MSWIN_XBOX1]
(GC_beginthreadex): Likewise.
* win32_threads.c [GC_PTHREADS] (GC_pthread_create,
GC_pthread_start_inner): Likewise.
* typd_mlc.c (GC_make_sequence_descriptor): Call REACHABLE_AFTER_DIRTY
for the stored pointers after GC_dirty(result).
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page, GC_calloc_explicitly_typed):
Call REACHABLE_AFTER_DIRTY(d) after GC_dirty(op).
* win32_threads.c (GC_CreateThread, GC_beginthreadex,
GC_pthread_create): Call REACHABLE_AFTER_DIRTY for the stored pointer
after GC_dirty.
* include/gc_inline.h (GC_FAST_MALLOC_GRANS): Call
GC_end_stubborn_change(my_fl) after GC_FAST_M_AO_STORE() call unless
kind is GC_I_PTRFREE.
* include/gc_inline.h (GC_FAST_MALLOC_GRANS): Call
GC_reachable_here(next) after GC_end_stubborn_change(my_fl).
* include/gc_inline.h (GC_CONS): Call GC_end_stubborn_change(result).
* include/gc_inline.h (GC_CONS): Call GC_reachable_here for the stored
pointers after GC_end_stubborn_change call.
* include/gc_inline.h (GC_CONS): Declare l and r local variables;
compute the first and second expressions even in case of
GC_FAST_MALLOC_GRANS failure; pass l and r to GC_reachable_here
(instead of first and second).
* include/private/gc_priv.h (REACHABLE_AFTER_DIRTY): New macro.
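
The entries above all instantiate one pattern: under MANUAL_VDB, every
store of a heap pointer into a heap object must be followed by GC_dirty
on the updated location (or on the containing object), and the stored
pointer must be kept visibly live across that call.  A minimal sketch,
with a hypothetical node type and push helper that are not part of the
patch:

  /* Illustrative only; GC_dirty and REACHABLE_AFTER_DIRTY are the    */
  /* private macros from include/private/gc_priv.h (hunk below).      */
  struct node { struct node *next; };

  static void push(struct node **head, struct node *n)
  {
    n -> next = *head;        /* heap pointer stored into heap object */
    GC_dirty(n);              /* record the page holding n as dirty   */
    *head = n;
    GC_dirty(head);           /* the slot holding the list head, too  */
    REACHABLE_AFTER_DIRTY(n); /* keep n live until after GC_dirty     */
  }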
size_t new_hash = HASH3(real_key, new_size, log_new_size);
p -> next = new_table[new_hash];
+ GC_dirty(p);
new_table[new_hash] = p;
p = next;
}
}
*log_size_ptr = log_new_size;
*table = new_table;
+ GC_dirty(new_table); /* entire object */
}
GC_API int GC_CALL GC_register_disappearing_link(void * * link)
new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
dl_set_next(new_dl, dl_hashtbl -> head[index]);
+ GC_dirty(new_dl);
dl_hashtbl -> head[index] = new_dl;
dl_hashtbl -> entries++;
+ GC_dirty(dl_hashtbl->head + index);
UNLOCK();
return GC_SUCCESS;
}
/* Remove found entry from the table. */
if (NULL == prev_dl) {
dl_hashtbl -> head[index] = dl_next(curr_dl);
+ GC_dirty(dl_hashtbl->head + index);
} else {
dl_set_next(prev_dl, dl_next(curr_dl));
+ GC_dirty(prev_dl);
}
dl_hashtbl -> entries--;
break;
dl_hashtbl -> head[curr_index] = dl_next(curr_dl);
} else {
dl_set_next(prev_dl, dl_next(curr_dl));
+ GC_dirty(prev_dl);
}
curr_dl -> dl_hidden_link = new_hidden_link;
dl_set_next(curr_dl, dl_hashtbl -> head[new_index]);
dl_hashtbl -> head[new_index] = curr_dl;
+ GC_dirty(curr_dl);
+ GC_dirty(dl_hashtbl->head); /* entire object */
return GC_SUCCESS;
}
GC_fo_head[index] = fo_next(curr_fo);
} else {
fo_set_next(prev_fo, fo_next(curr_fo));
+ GC_dirty(prev_fo);
}
if (fn == 0) {
GC_fo_entries--;
curr_fo -> fo_fn = fn;
curr_fo -> fo_client_data = (ptr_t)cd;
curr_fo -> fo_mark_proc = mp;
+ GC_dirty(curr_fo);
/* Reinsert it. We deleted it first to maintain */
/* consistency in the event of a signal. */
if (prev_fo == 0) {
GC_fo_head[index] = curr_fo;
} else {
fo_set_next(prev_fo, curr_fo);
+ GC_dirty(prev_fo);
}
}
+ if (NULL == prev_fo)
+ GC_dirty(GC_fo_head + index);
UNLOCK();
# ifndef DBG_HDRS_ALL
if (EXPECT(new_fo != 0, FALSE)) {
new_fo -> fo_object_size = hhdr -> hb_sz;
new_fo -> fo_mark_proc = mp;
fo_set_next(new_fo, GC_fo_head[index]);
+ GC_dirty(new_fo);
GC_fo_entries++;
GC_fo_head[index] = new_fo;
+ GC_dirty(GC_fo_head + index);
UNLOCK();
}
size_t i; \
size_t dl_size = dl_hashtbl->log_size == -1 ? 0 : \
(size_t)1 << dl_hashtbl->log_size; \
+ GC_bool needs_barrier = FALSE; \
for (i = 0; i < dl_size; i++) { \
curr_dl = dl_hashtbl -> head[i]; \
prev_dl = NULL; \
curr_dl = dl_next(curr_dl); \
} \
} \
+ if (needs_barrier) \
+ GC_dirty(dl_hashtbl -> head); /* entire object */ \
}
#define DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr_dl, prev_dl, next_dl) \
next_dl = dl_next(curr_dl); \
if (NULL == prev_dl) { \
dl_hashtbl -> head[i] = next_dl; \
+ needs_barrier = TRUE; \
} else { \
dl_set_next(prev_dl, next_dl); \
+ GC_dirty(prev_dl); \
} \
GC_clear_mark_bit(curr_dl); \
dl_hashtbl -> entries--; \
size_t i;
size_t fo_size = log_fo_table_size == -1 ? 0 :
(size_t)1 << log_fo_table_size;
+ GC_bool needs_barrier = FALSE;
# ifndef SMALL_CONFIG
/* Save current GC_[dl/ll]_entries value for stats printing */
next_fo = fo_next(curr_fo);
if (prev_fo == 0) {
GC_fo_head[i] = next_fo;
+ needs_barrier = TRUE;
} else {
fo_set_next(prev_fo, next_fo);
+ GC_dirty(prev_fo);
}
GC_fo_entries--;
/* Add to list of objects awaiting finalization. */
fo_set_next(curr_fo, GC_finalize_now);
+ GC_dirty(curr_fo);
GC_finalize_now = curr_fo;
/* unhide object pointer so any future collections will */
/* see it. */
if (!GC_is_marked(real_ptr)) {
GC_set_mark_bit(real_ptr);
} else {
- if (prev_fo == 0)
+ if (prev_fo == 0) {
GC_finalize_now = next_fo;
- else
+ } else {
fo_set_next(prev_fo, next_fo);
-
+ GC_dirty(prev_fo);
+ }
curr_fo -> fo_hidden_base =
GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
GC_bytes_finalized -=
curr_fo->fo_object_size + sizeof(struct finalizable_object);
i = HASH2(real_ptr, log_fo_table_size);
- fo_set_next (curr_fo, GC_fo_head[i]);
+ fo_set_next(curr_fo, GC_fo_head[i]);
+ GC_dirty(curr_fo);
GC_fo_entries++;
GC_fo_head[i] = curr_fo;
curr_fo = prev_fo;
+ needs_barrier = TRUE;
}
}
prev_fo = curr_fo;
}
}
}
+ if (needs_barrier)
+ GC_dirty(GC_fo_head); /* entire object */
GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
# ifndef GC_LONG_REFS_NOT_NEEDED
/* Add to list of objects awaiting finalization. */
fo_set_next(curr_fo, GC_finalize_now);
+ GC_dirty(curr_fo);
GC_finalize_now = curr_fo;
/* unhide object pointer so any future collections will */
}
*(void **)op = ptr_to_struct_containing_descr;
UNLOCK();
- return((void *) op);
+ GC_dirty(op);
+ REACHABLE_AFTER_DIRTY(ptr_to_struct_containing_descr);
+ return (void *)op;
}
/* Similar to GC_gcj_malloc, but add debug info. This is allocated */
ADD_CALL_CHAIN(result, ra);
result = GC_store_debug_info_inner(result, (word)lb, s, i);
UNLOCK();
+ GC_dirty(result);
+ REACHABLE_AFTER_DIRTY(ptr_to_struct_containing_descr);
return result;
}
}
*(void **)op = ptr_to_struct_containing_descr;
UNLOCK();
- return((void *) op);
+ GC_dirty(op);
+ REACHABLE_AFTER_DIRTY(ptr_to_struct_containing_descr);
+ return (void *)op;
}
#endif /* GC_GCJ_SUPPORT */
*my_fl = next; \
init; \
GC_PREFETCH_FOR_WRITE(next); \
+ if ((kind) != GC_I_PTRFREE) { \
+ GC_end_stubborn_change(my_fl); \
+ GC_reachable_here(next); \
+ } \
GC_ASSERT(GC_size(result) >= (granules)*GC_GRANULE_BYTES); \
GC_ASSERT((kind) == GC_I_PTRFREE \
|| ((GC_word *)result)[1] == 0); \
# define GC_CONS(result, first, second, tiny_fl) \
do { \
size_t grans = GC_WORDS_TO_WHOLE_GRANULES(2); \
+ void *l = (void *)(first); \
+ void *r = (void *)(second); \
GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, GC_I_NORMAL, \
GC_malloc(grans * GC_GRANULE_BYTES), \
(void)0); \
if ((result) != NULL) { \
- *(void **)(result) = (void *)(first); \
- ((void **)(result))[1] = (void *)(second); \
+ *(void **)(result) = l; \
+ ((void **)(result))[1] = r; \
+ GC_end_stubborn_change(result); \
+ GC_reachable_here(l); \
+ GC_reachable_here(r); \
} \
} while (0)
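
For reference, a hedged client-side sketch of GC_CONS as modified
above; the free-list array name is hypothetical, and per gc_inline.h
such an array must start out zeroed (real clients normally make it
thread-local):

  #include "gc.h"
  #include "gc_inline.h"    /* GC_CONS, GC_TINY_FREELISTS */

  static void *cons_fl[GC_TINY_FREELISTS]; /* hypothetical free list */

  static void *cons(void *car, void *cdr)
  {
    void *result;
    /* GC_CONS now performs the stores, the GC_end_stubborn_change    */
    /* call and the GC_reachable_here calls itself, so the caller     */
    /* needs no explicit barrier.                                     */
    GC_CONS(result, car, cdr, cons_fl);
    return result;            /* NULL on allocation failure */
  }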
#ifdef MANUAL_VDB
GC_INNER void GC_dirty_inner(const void *p); /* does not require locking */
# define GC_dirty(p) (GC_incremental ? GC_dirty_inner(p) : (void)0)
+# define REACHABLE_AFTER_DIRTY(p) GC_reachable_here(p)
#else
# define GC_dirty(p) (void)(p)
+# define REACHABLE_AFTER_DIRTY(p) (void)(p)
#endif
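
Why the second macro exists: between the pointer store and the GC_dirty
call an incremental cycle may scan the still-clean page and miss the
new pointer, so the caller must hold a live reference until the dirty
bit is set (without MANUAL_VDB both macros expand to no-ops).  A
hypothetical helper illustrating the ordering contract:

  static void publish(void **slot, void *q)
  {
    *slot = q;                /* 1. store the pointer                 */
    GC_dirty(slot);           /* 2. mark the page containing slot     */
    REACHABLE_AFTER_DIRTY(q); /* 3. q stays live through step 2, so a */
                              /* collection between 1 and 2 still     */
                              /* finds q via this stack frame         */
  }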
/* Same as GC_base but accepts and returns a pointer to const object. */
DCL_LOCK_STATE;
GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
- if (!SMALL_OBJ(lb)) {
+ if (!SMALL_OBJ(lb)
+# ifdef MANUAL_VDB
+ /* Currently a single object is allocated. */
+ /* TODO: GC_dirty should be called for each linked object */
+ /* (except the last one) to support multiple-object allocation. */
+ || GC_incremental
+# endif
+ ) {
op = GC_generic_malloc(lb, k);
if (EXPECT(0 != op, TRUE))
obj_link(op) = 0;
*result = op;
+# ifdef MANUAL_VDB
+ if (GC_is_heap_ptr(result)) {
+ GC_dirty(result);
+ REACHABLE_AFTER_DIRTY(op);
+ }
+# endif
return;
}
lw = BYTES_TO_WORDS(lb);
GC_log_printf("Finishing thread %p\n", (void *)pthread_self());
# endif
me -> status = result;
+ GC_dirty(me);
# ifndef NACL
pthread_cleanup_pop(1);
/* Cleanup acquires lock, ensuring that we can't exit while */
GC_nacl_initialize_gc_thread();
# endif
GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
+ if (EXPECT(result != &first_thread, TRUE))
+ GC_dirty(result);
return(result);
}
GC_threads[hv] = p -> next;
} else {
prev -> next = p -> next;
+ GC_dirty(prev);
}
if (p != &first_thread) {
# ifdef GC_DARWIN_THREADS
GC_threads[hv] = p -> next;
} else {
prev -> next = p -> next;
+ GC_dirty(prev);
}
# ifdef GC_DARWIN_THREADS
mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
/* There can only be one writer at a time, but this needs to be */
/* atomic with respect to concurrent readers. */
AO_store_release(&key->hash[hash_val].ao, (AO_t)entry);
+ GC_dirty((/* no volatile */ void *)entry);
+ GC_dirty(key->hash + hash_val);
pthread_mutex_unlock(&(key -> lock));
return 0;
}
entry -> qtid = INVALID_QTID;
if (NULL == prev) {
key->hash[hash_val].p = entry->next;
+ GC_dirty(key->hash + hash_val);
} else {
prev->next = entry->next;
+ GC_dirty(prev);
}
/* Atomic! concurrent accesses still work. */
/* They must, since readers don't lock. */
result -> sd_tag = SEQUENCE_TAG;
result -> sd_first = first;
result -> sd_second = second;
+ GC_dirty(result);
+ REACHABLE_AFTER_DIRTY(first);
+ REACHABLE_AFTER_DIRTY(second);
}
return((complex_descriptor *)result);
}
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
}
- ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
- } else {
- op = (ptr_t)GENERAL_MALLOC((word)lb, GC_explicit_kind);
- if (op != NULL) {
- lg = BYTES_TO_GRANULES(GC_size(op));
- ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
- }
- }
- return((void *) op);
+ } else {
+ op = (ptr_t)GENERAL_MALLOC((word)lb, GC_explicit_kind);
+ if (NULL == op)
+ return NULL;
+ lg = BYTES_TO_GRANULES(GC_size(op));
+ }
+ ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+ GC_dirty((word *)op + GRANULES_TO_WORDS(lg) - 1);
+ REACHABLE_AFTER_DIRTY(d);
+ return((void *) op);
}
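
For context, the descriptor word guarded above is the one that typed
allocation stores in the last word of each object.  A hedged
client-side usage sketch (the struct and function names are
hypothetical):

  #include "gc_typed.h"

  struct pair { void *link; GC_word tag; };

  static struct pair *alloc_pair(void)
  {
    GC_word bitmap = 0x1;   /* only the first word holds a pointer */
    GC_descr d = GC_make_descriptor(&bitmap, 2 /* words */);
    /* The trailing descriptor word is now published inside the      */
    /* allocator with GC_dirty and REACHABLE_AFTER_DIRTY.            */
    return (struct pair *)GC_malloc_explicitly_typed(
                              sizeof(struct pair), d);
  }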
GC_API GC_ATTR_MALLOC void * GC_CALL
lg = BYTES_TO_GRANULES(GC_size(op));
}
((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+ GC_dirty((word *)op + GRANULES_TO_WORDS(lg) - 1);
+ REACHABLE_AFTER_DIRTY(d);
return op;
}
size_t lw = GRANULES_TO_WORDS(lg);
((word *)op)[lw - 1] = (word)complex_descr;
+ GC_dirty((word *)op + lw - 1);
+ REACHABLE_AFTER_DIRTY(complex_descr);
/* Make sure the descriptor is cleared once there is any danger */
/* it may have been collected. */
if (GC_general_register_disappearing_link((void * *)((word *)op+lw-1),
GC_ASSERT(result -> flags == 0);
# endif
GC_ASSERT(result -> thread_blocked_sp == NULL);
+ if (EXPECT(result != &first_thread, TRUE))
+ GC_dirty(result);
return(result);
}
GC_threads[hv] = p -> tm.next;
} else {
prev -> tm.next = p -> tm.next;
+ GC_dirty(prev);
}
}
}
GC_threads[hv] = p -> tm.next;
} else {
prev -> tm.next = p -> tm.next;
+ GC_dirty(prev);
}
if (p != &first_thread) {
GC_INTERNAL_FREE(p);
/* set up thread arguments */
args -> start = lpStartAddress;
args -> param = lpParameter;
+ GC_dirty(args);
+ REACHABLE_AFTER_DIRTY(lpParameter);
GC_need_to_lock = TRUE;
thread_h = CreateThread(lpThreadAttributes, dwStackSize, GC_win32_start,
/* set up thread arguments */
args -> start = (LPTHREAD_START_ROUTINE)start_address;
args -> param = arglist;
+ GC_dirty(args);
+ REACHABLE_AFTER_DIRTY(arglist);
GC_need_to_lock = TRUE;
thread_h = _beginthreadex(security, stack_size,
si -> start_routine = start_routine;
si -> arg = arg;
+ GC_dirty(si);
+ REACHABLE_AFTER_DIRTY(arg);
if (attr != 0 &&
pthread_attr_getdetachstate(attr, &si->detached)
== PTHREAD_CREATE_DETACHED) {
pthread_cleanup_push(GC_thread_exit_proc, (void *)me);
result = (*start)(start_arg);
me -> status = result;
+ GC_dirty(me);
pthread_cleanup_pop(1);
# ifdef DEBUG_THREADS