static void push_in_progress(ptr_t p)
{
if (n_in_progress >= in_progress_size) {
- if (in_progress_size == 0) {
+ ptr_t * new_in_progress_space;
+
+ if (NULL == in_progress_space) {
in_progress_size = ROUNDUP_PAGESIZE_IF_MMAP(INITIAL_IN_PROGRESS
* sizeof(ptr_t))
/ sizeof(ptr_t);
- in_progress_space = (ptr_t *)GET_MEM(in_progress_size * sizeof(ptr_t));
- GC_add_to_our_memory((ptr_t)in_progress_space,
- in_progress_size * sizeof(ptr_t));
+ new_in_progress_space =
+ (ptr_t *)GET_MEM(in_progress_size * sizeof(ptr_t));
} else {
- ptr_t * new_in_progress_space;
in_progress_size *= 2;
new_in_progress_space = (ptr_t *)
GET_MEM(in_progress_size * sizeof(ptr_t));
- GC_add_to_our_memory((ptr_t)new_in_progress_space,
- in_progress_size * sizeof(ptr_t));
if (new_in_progress_space != NULL)
BCOPY(in_progress_space, new_in_progress_space,
n_in_progress * sizeof(ptr_t));
- in_progress_space = new_in_progress_space;
- /* FIXME: This just drops the old space. */
}
+ GC_add_to_our_memory((ptr_t)new_in_progress_space,
+ in_progress_size * sizeof(ptr_t));
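+   /* Give whole pages of the old buffer back to the heap.  Under      */
+   /* GWW_VDB this is not done yet (see the TODO below); the pointer   */
+   /* is only passed to GC_noop1 to keep LINT2 quiet.                  */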
+# ifndef GWW_VDB
+ GC_scratch_recycle_no_gww(in_progress_space,
+ n_in_progress * sizeof(ptr_t));
+# elif defined(LINT2)
+ /* TODO: implement GWW-aware recycling as in alloc_mark_stack */
+ GC_noop1((word)in_progress_space);
+# endif
+ in_progress_space = new_in_progress_space;
}
if (in_progress_space == 0)
ABORT("MAKE_BACK_GRAPH: Out of in-progress space: "
": fd = %d, errno = %d", fd, errno);
}
if (needed_sz >= current_sz) {
+ GC_scratch_recycle_no_gww(addr_map,
+ (size_t)current_sz * sizeof(prmap_t));
current_sz = needed_sz * 2 + 1;
/* Expansion, plus room for 0 record */
addr_map = (prmap_t *)GC_scratch_alloc(
/* small objects. Deallocation is not */
/* possible. May return NULL. */
+#ifdef GWW_VDB
+ /* GC_scratch_recycle_no_gww() not used. */
+#else
+# define GC_scratch_recycle_no_gww GC_scratch_recycle_inner
+#endif
+GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes);
+ /* Reuse the memory region by the heap. */
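+                                /* Outside GWW_VDB, callers may use the      */
+                                /* GC_scratch_recycle_no_gww alias defined   */
+                                /* above instead of calling this directly.   */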
+
/* Heap block layout maps: */
GC_INNER GC_bool GC_add_map_entry(size_t sz);
/* Add a heap block map for objects of */
#endif /* PARALLEL_MARK */
+GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes)
+{
+ if (ptr != NULL) {
+ size_t page_offset = (word)ptr & (GC_page_size - 1);
+ size_t displ = 0;
+ size_t recycled_bytes;
+
+ GC_ASSERT(bytes != 0);
+ GC_ASSERT(GC_page_size != 0);
+ /* TODO: Assert correct memory flags if GWW_VDB */
+ if (page_offset != 0)
+ displ = GC_page_size - page_offset;
+ recycled_bytes = (bytes - displ) & ~(GC_page_size - 1);
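+    /* Example (assuming 4 KiB pages): for ptr at page offset 0x300    */
+    /* and bytes = 10000, displ is 0xd00 (3328) and recycled_bytes     */
+    /* is (10000 - 3328) & ~0xfff = 4096, i.e. the one whole page      */
+    /* starting at ptr + displ is handed to GC_add_to_heap below.      */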
+ GC_COND_LOG_PRINTF("Recycle %lu/%lu scratch-allocated bytes at %p\n",
+ (unsigned long)recycled_bytes, (unsigned long)bytes,
+ ptr);
+ if (recycled_bytes > 0)
+ GC_add_to_heap((struct hblk *)((word)ptr + displ), recycled_bytes);
+ }
+}
+
/* Allocate or reallocate space for mark stack of size n entries. */
/* May silently fail. */
static void alloc_mark_stack(size_t n)
if (new_stack != 0) {
if (recycle_old) {
/* Recycle old space */
- size_t page_offset = (word)GC_mark_stack & (GC_page_size - 1);
- size_t size = GC_mark_stack_size * sizeof(struct GC_ms_entry);
- size_t displ = 0;
-
- if (0 != page_offset) displ = GC_page_size - page_offset;
- size = (size - displ) & ~(GC_page_size - 1);
- if (size > 0) {
- GC_add_to_heap((struct hblk *)
- ((word)GC_mark_stack + displ), (word)size);
- }
+ GC_scratch_recycle_inner(GC_mark_stack,
+ GC_mark_stack_size * sizeof(struct GC_ms_entry));
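+      /* The helper returns whole pages of the old stack to the heap,  */
+      /* replacing the inline page-rounding logic removed above.       */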
}
GC_mark_stack = new_stack;
GC_mark_stack_size = n;
int f;
while (maps_size >= maps_buf_sz) {
+ GC_scratch_recycle_no_gww(maps_buf, maps_buf_sz);
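+        /* Whole pages of the old (too small) buffer are now returned  */
+        /* to the heap instead of being dropped entirely.              */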
/* Grow only by powers of 2, since we leak "too small" buffers.*/
while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
maps_buf = GC_scratch_alloc(maps_buf_sz);
(signed_word)GC_proc_buf_size);
new_buf = GC_scratch_alloc(new_size);
if (new_buf != 0) {
+ GC_scratch_recycle_no_gww(bufp, GC_proc_buf_size);
GC_proc_buf = bufp = new_buf;
GC_proc_buf_size = new_size;
}