/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2008-2019 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_priv.h"

#if !defined(MACOS) && !defined(MSWINCE)
# if !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
#   include <sys/types.h>
# endif
#endif
/*
 * Separate free lists are maintained for different sized objects
 * and kinds.
 * The call GC_allocobj(i,k) ensures that the freelist for
 * kind k objects of size i points to a non-empty
 * free list.  It returns a pointer to the first entry on the free list.
 * In a single-threaded world, GC_allocobj may be called to allocate
 * an object of small size lb (and NORMAL kind) as follows
 * (GC_generic_malloc_inner is a wrapper over GC_allocobj which also
 * fills in GC_size_map if needed):
 *
 *   lg = GC_size_map[lb];
 *   op = GC_objfreelist[lg];
 *   if (NULL == op) {
 *     op = GC_generic_malloc_inner(lb, NORMAL);
 *   } else {
 *     GC_objfreelist[lg] = obj_link(op);
 *     GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
 *   }
 *
 * Note that this is very fast if the free list is non-empty; it should
 * only involve the execution of 4 or 5 simple instructions.
 * All composite objects on freelists are cleared, except for
 * their first word.
 */
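/*
 * Example (an illustrative sketch, not part of the collector): seen from
 * client code, the fast path above is what ultimately backs a small
 * allocation made through the public API in gc.h, e.g.:
 *
 *   struct node { struct node *next; int value; };
 *   struct node *p = GC_NEW(struct node);   // small, NORMAL-kind object
 *
 * GC_NEW expands to a GC_MALLOC of sizeof(struct node), which is served
 * from the per-size free list maintained here.
 */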
/*
 * The allocator uses GC_allochblk to allocate large chunks of objects.
 * These chunks all start on addresses which are multiples of
 * HBLKSZ.  Each allocated chunk has an associated header,
 * which can be located quickly based on the address of the chunk.
 * (See headers.c for details.)
 * This makes it possible to check quickly whether an
 * arbitrary address corresponds to an object administered by the
 * allocator.
 */
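/*
 * Illustrative sketch of the address-to-chunk step (the real, cached
 * lookup lives in headers.c): since chunks are HBLKSIZE-aligned, the
 * chunk containing an arbitrary pointer p is obtained by masking,
 * roughly:
 *
 *   struct hblk *h = HBLKPTR(p);  // ~ (word)p & ~(word)(HBLKSIZE-1)
 *   hdr *hhdr = HDR(h);           // header via the block index
 *
 * A null (or forwarding) header then means p is not a GC-managed
 * address.
 */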
word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */

word GC_gc_no = 0;

#ifndef NO_CLOCK
  static unsigned long full_gc_total_time = 0; /* in ms, may wrap */
  static GC_bool measure_performance = FALSE;
                /* Do performance measurements if set to true (e.g.,    */
                /* accumulation of the total time of full collections). */
  GC_API void GC_CALL GC_start_performance_measurement(void)
  {
    measure_performance = TRUE;
  }

  GC_API unsigned long GC_CALL GC_get_full_gc_total_time(void)
  {
    return full_gc_total_time;
  }
#endif /* !NO_CLOCK */
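/*
 * Illustrative client use of the two entry points above:
 *
 *   GC_start_performance_measurement();
 *   ... run the workload ...
 *   printf("full GCs: %lu ms\n", GC_get_full_gc_total_time());
 *
 * Since the counter may wrap, long-running clients should compare
 * successive samples rather than rely on the absolute value.
 */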
#ifndef GC_DISABLE_INCREMENTAL
  GC_INNER GC_bool GC_incremental = FALSE; /* By default, stop the world. */
#endif

GC_API int GC_CALL GC_is_incremental_mode(void)
{
  return (int)GC_incremental;
}

int GC_parallel = FALSE; /* By default, parallel GC is off. */
#if defined(GC_FULL_FREQ) && !defined(CPPCHECK)
  int GC_full_freq = GC_FULL_FREQ;
#else
  int GC_full_freq = 19;   /* Every 20th collection is a full        */
                           /* collection, whether we need it or not. */
#endif
STATIC GC_bool GC_need_full_gc = FALSE;
                           /* Need full GC due to heap growth. */

#ifdef THREAD_LOCAL_ALLOC
  GC_INNER GC_bool GC_world_stopped = FALSE;
#endif

STATIC word GC_used_heap_size_after_full = 0;
/* GC_copyright symbol is externally visible. */
extern const char * const GC_copyright[];
const char * const GC_copyright[] =
{"Copyright 1988, 1989 Hans-J. Boehm and Alan J. Demers ",
"Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. ",
"Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved. ",
"Copyright (c) 1999-2009 by Hewlett-Packard Company.  All rights reserved. ",
"Copyright (c) 2008-2019 Ivan Maidanski ",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.",
"See source code for details." };
/* Version macros are now defined in gc_version.h, which is included by */
/* gc.h, which is included by gc_priv.h.                                */
#ifndef GC_NO_VERSION_VAR
  extern const unsigned GC_version;
  const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
                               (GC_VERSION_MINOR << 8) | GC_VERSION_MICRO);
#endif

GC_API unsigned GC_CALL GC_get_version(void)
{
  return (GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8) |
          GC_VERSION_MICRO;
}
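/* For example, a hypothetical version 8.0.4 would be encoded as        */
/* (8 << 16) | (0 << 8) | 4 == 0x080004; a client can decode it with:   */
/*   unsigned v = GC_get_version();                                     */
/*   unsigned major = v >> 16, minor = (v >> 8) & 0xff,                 */
/*            micro = v & 0xff;                                         */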
/* some more variables */

#ifdef GC_DONT_EXPAND
  int GC_dont_expand = TRUE;
#else
  int GC_dont_expand = FALSE;
#endif

#if defined(GC_FREE_SPACE_DIVISOR) && !defined(CPPCHECK)
  word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR; /* must be > 0 */
#else
  word GC_free_space_divisor = 3;
#endif
GC_INNER int GC_CALLBACK GC_never_stop_func(void)
{
  return(0);
}
#if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
  unsigned long GC_time_limit = GC_TIME_LIMIT;
                           /* We try to keep pause times from exceeding */
                           /* this by much.  In milliseconds.           */
#elif defined(PARALLEL_MARK)
  unsigned long GC_time_limit = GC_TIME_UNLIMITED;
                        /* The parallel marker cannot be interrupted for */
                        /* now, so the time limit is absent by default.  */
#else
  unsigned long GC_time_limit = 50;
#endif
#ifndef NO_CLOCK
  STATIC unsigned long GC_time_lim_nsec = 0;
                        /* The nanoseconds add-on to GC_time_limit     */
                        /* value.  Not updated by GC_set_time_limit(). */
                        /* Ignored if the value of GC_time_limit is    */
                        /* GC_TIME_UNLIMITED.                          */

# define TV_NSEC_LIMIT (1000UL * 1000) /* number of nanoseconds in 1 ms */

  GC_API void GC_CALL GC_set_time_limit_tv(struct GC_timeval_s tv)
  {
    GC_ASSERT(tv.tv_ms <= GC_TIME_UNLIMITED);
    GC_ASSERT(tv.tv_nsec < TV_NSEC_LIMIT);
    GC_time_limit = tv.tv_ms;
    GC_time_lim_nsec = tv.tv_nsec;
  }

  GC_API struct GC_timeval_s GC_CALL GC_get_time_limit_tv(void)
  {
    struct GC_timeval_s tv;

    tv.tv_ms = GC_time_limit;
    tv.tv_nsec = GC_time_lim_nsec;
    return tv;
  }
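/*
 * Illustrative client use: request a pause target of 2.5 ms.  The
 * tv_nsec part is the sub-millisecond remainder and must stay below
 * TV_NSEC_LIMIT:
 *
 *   struct GC_timeval_s tv;
 *   tv.tv_ms = 2;
 *   tv.tv_nsec = 500000;        // + 0.5 ms
 *   GC_set_time_limit_tv(tv);
 */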
  STATIC CLOCK_TYPE GC_start_time = CLOCK_TYPE_INITIALIZER;
                        /* Time at which we stopped world.    */
                        /* Used only in GC_timeout_stop_func. */
#endif /* !NO_CLOCK */
STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing  */
                                /* collection within GC_time_limit. */

STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
                                /* Accessed holding the lock.       */
GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
{
  DCL_LOCK_STATE;

  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
  LOCK();
  GC_default_stop_func = stop_func;
  UNLOCK();
}

GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
{
  GC_stop_func stop_func;
  DCL_LOCK_STATE;

  LOCK();
  stop_func = GC_default_stop_func;
  UNLOCK();
  return stop_func;
}
#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
# define GC_timeout_stop_func GC_default_stop_func
#else
  STATIC int GC_CALLBACK GC_timeout_stop_func(void)
  {
    CLOCK_TYPE current_time;
    static unsigned count = 0;
    unsigned long time_diff, nsec_diff;

    if ((*GC_default_stop_func)())
      return(1);

    if ((count++ & 3) != 0) return(0);
    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time, GC_start_time);
    nsec_diff = NS_FRAC_TIME_DIFF(current_time, GC_start_time);
    if (time_diff >= GC_time_limit
        && (time_diff > GC_time_limit || nsec_diff >= GC_time_lim_nsec)) {
      GC_COND_LOG_PRINTF("Abandoning stopped marking after %lu ms %lu ns"
                         " (attempt %d)\n",
                         time_diff, nsec_diff, GC_n_attempts);
      return(1);
    }
    return(0);
  }
#endif /* !GC_DISABLE_INCREMENTAL */
GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */

static size_t min_bytes_allocd_minimum = 1;
                        /* The lowest value returned by min_bytes_allocd(). */

GC_API void GC_CALL GC_set_min_bytes_allocd(size_t value)
{
  GC_ASSERT(value > 0);
  min_bytes_allocd_minimum = value;
}

GC_API size_t GC_CALL GC_get_min_bytes_allocd(void)
{
  return min_bytes_allocd_minimum;
}
/* Return the minimum number of bytes that must be allocated between    */
/* collections to amortize the collection cost.  Should be non-zero.    */
static word min_bytes_allocd(void)
{
  word result;
  word stack_size;
  word total_root_size;     /* includes double stack size, */
                            /* since the stack is expensive */
                            /* to scan.                     */
  word scan_size;           /* Estimate of memory to be scanned */
                            /* during normal GC.                */

  if (GC_need_to_lock) {
    /* We are multi-threaded... */
    stack_size = GC_total_stacksize;
    /* For now, we just use the value computed during the latest GC. */
#   ifdef DEBUG_THREADS
      GC_log_printf("Total stacks size: %lu\n",
                    (unsigned long)stack_size);
#   endif
  } else {
#   ifdef STACK_NOT_SCANNED
      stack_size = 0;
#   elif defined(STACK_GROWS_UP)
      stack_size = GC_approx_sp() - GC_stackbottom;
#   else
      stack_size = GC_stackbottom - GC_approx_sp();
#   endif
  }

  total_root_size = 2 * stack_size + GC_root_size;
  scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
              + total_root_size;
  result = scan_size / GC_free_space_divisor;
  if (GC_incremental) {
    result /= 2;
  }
  return result > min_bytes_allocd_minimum
            ? result : min_bytes_allocd_minimum;
}
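/*
 * Worked example (numbers invented for illustration): with 8 MiB of
 * composite objects in use, 4 MiB of atomic objects, and 1 MiB of roots
 * plus doubled stacks, scan_size = 2*8M + 4M/4 + 1M = 18M.  With the
 * default GC_free_space_divisor of 3 that demands ~6 MiB of allocation
 * between collections, halved again in incremental mode.
 */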
STATIC word GC_non_gc_bytes_at_gc = 0;
                /* Number of explicitly managed bytes of storage        */
                /* at last collection.                                  */

/* Return the number of bytes allocated, adjusted for explicit storage  */
/* management, etc.  This number is used in deciding when to trigger    */
/* collections.                                                         */
STATIC word GC_adj_bytes_allocd(void)
{
  signed_word result;
  signed_word expl_managed = (signed_word)GC_non_gc_bytes
                             - (signed_word)GC_non_gc_bytes_at_gc;

  /* Don't count what was explicitly freed, or newly allocated for */
  /* explicit management.  Note that deallocating an explicitly    */
  /* managed object should not alter result, assuming the client   */
  /* is playing by the rules.                                      */
  result = (signed_word)GC_bytes_allocd
           + (signed_word)GC_bytes_dropped
           - (signed_word)GC_bytes_freed
           + (signed_word)GC_finalizer_bytes_freed
           - expl_managed;
  if (result > (signed_word)GC_bytes_allocd) {
    result = GC_bytes_allocd;
    /* probably client bug or unfortunate scheduling */
  }
  result += GC_bytes_finalized;
    /* We count objects enqueued for finalization as though they   */
    /* had been reallocated this round.  Finalization is user      */
    /* visible progress.  And if we don't count this, we have      */
    /* stability problems for programs that finalize all objects.  */
  if (result < (signed_word)(GC_bytes_allocd >> 3)) {
    /* Always count at least 1/8 of the allocations.  We don't want */
    /* to collect too infrequently, since that would inhibit        */
    /* coalescing of free storage blocks.                           */
    /* This also makes us partially robust against client bugs.     */
    return(GC_bytes_allocd >> 3);
  } else {
    return(result);
  }
}
/* Clear up a few frames worth of garbage left at the top of the stack. */
/* This is used to prevent us from accidentally treating garbage left   */
/* on the stack by other parts of the collector as roots.  This         */
/* differs from the code in misc.c, which actually tries to keep the    */
/* stack clear of long-lived, client-generated garbage.                 */
STATIC void GC_clear_a_few_frames(void)
{
# ifndef CLEAR_NWORDS
#   define CLEAR_NWORDS 64
# endif
  volatile word frames[CLEAR_NWORDS];

  BZERO((word *)frames, CLEAR_NWORDS * sizeof(word));
}
/* Heap size at which we need a collection to avoid expanding past      */
/* limits used by blacklisting.                                         */
STATIC word GC_collect_at_heapsize = GC_WORD_MAX;

/* Have we allocated enough to amortize a collection? */
GC_INNER GC_bool GC_should_collect(void)
{
  static word last_min_bytes_allocd;
  static word last_gc_no;

  if (last_gc_no != GC_gc_no) {
    last_min_bytes_allocd = min_bytes_allocd();
    last_gc_no = GC_gc_no;
  }
  return(GC_adj_bytes_allocd() >= last_min_bytes_allocd
         || GC_heapsize >= GC_collect_at_heapsize);
}
/* STATIC */ GC_start_callback_proc GC_start_call_back = 0;
                        /* Called at start of full collections.         */
                        /* Not called if 0.  Called with the allocation */
                        /* lock held.  Not used by GC itself.           */

GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc fn)
{
  DCL_LOCK_STATE;

  LOCK();
  GC_start_call_back = fn;
  UNLOCK();
}

GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void)
{
  GC_start_callback_proc fn;
  DCL_LOCK_STATE;

  LOCK();
  fn = GC_start_call_back;
  UNLOCK();
  return fn;
}

GC_INLINE void GC_notify_full_gc(void)
{
  if (GC_start_call_back != 0) {
    (*GC_start_call_back)();
  }
}
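/*
 * Illustrative client use (a sketch; counter_of_full_gcs is a
 * hypothetical client variable): the callback runs with the allocation
 * lock held, so it must not allocate or re-enter the collector.
 *
 *   static void GC_CALLBACK on_full_gc_start(void)
 *   {
 *     counter_of_full_gcs++;
 *   }
 *   ...
 *   GC_set_start_callback(on_full_gc_start);
 */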
STATIC GC_bool GC_is_full_gc = FALSE;

STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
STATIC void GC_finish_collection(void);

/*
 * Initiate a garbage collection if appropriate.  Choose judiciously
 * between partial, full, and stop-world collections.
 */
STATIC void GC_maybe_gc(void)
{
  GC_ASSERT(I_HOLD_LOCK());
  ASSERT_CANCEL_DISABLED();
  if (GC_should_collect()) {
    static int n_partial_gcs = 0;

    if (!GC_incremental) {
      /* TODO: If possible, GC_default_stop_func should be used here. */
      GC_try_to_collect_inner(GC_never_stop_func);
      n_partial_gcs = 0;
      return;
    } else {
#     ifdef PARALLEL_MARK
        if (GC_parallel)
          GC_wait_for_reclaim();
#     endif
      if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
        GC_COND_LOG_PRINTF(
            "***>Full mark for collection #%lu after %lu allocd bytes\n",
            (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
        GC_promote_black_lists();
        (void)GC_reclaim_all((GC_stop_func)0, TRUE);
        GC_notify_full_gc();
        GC_clear_marks();
        n_partial_gcs = 0;
        GC_is_full_gc = TRUE;
      } else {
        n_partial_gcs++;
      }
    }

    /* We try to mark with the world stopped. */
    /* If we run out of time, this turns into */
    /* incremental marking.                   */
#   ifndef NO_CLOCK
      if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
#   endif
    /* TODO: If possible, GC_default_stop_func should be */
    /* used instead of GC_never_stop_func here.          */
    if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
                        GC_never_stop_func : GC_timeout_stop_func)) {
#     ifdef SAVE_CALL_CHAIN
        GC_save_callers(GC_last_stack);
#     endif
      GC_finish_collection();
    } else {
      if (!GC_is_full_gc) {
        /* Count this as the first attempt. */
        GC_n_attempts++;
      }
    }
  }
}
STATIC GC_on_collection_event_proc GC_on_collection_event = 0;

GC_API void GC_CALL GC_set_on_collection_event(GC_on_collection_event_proc fn)
{
  /* fn may be 0 (means no event notifier). */
  DCL_LOCK_STATE;

  LOCK();
  GC_on_collection_event = fn;
  UNLOCK();
}

GC_API GC_on_collection_event_proc GC_CALL GC_get_on_collection_event(void)
{
  GC_on_collection_event_proc fn;
  DCL_LOCK_STATE;

  LOCK();
  fn = GC_on_collection_event;
  UNLOCK();
  return fn;
}
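/*
 * Illustrative listener (a sketch; my_phase_log is a hypothetical
 * client array): GC_EventType values such as GC_EVENT_START,
 * GC_EVENT_MARK_START and GC_EVENT_END are declared in gc.h, so a
 * client can trace collection phases with:
 *
 *   static void GC_CALLBACK on_gc_event(GC_EventType t)
 *   {
 *     my_phase_log[t]++;
 *   }
 *   ...
 *   GC_set_on_collection_event(on_gc_event);
 */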
/* Stop the world garbage collection.  If stop_func is not     */
/* GC_never_stop_func then abort if stop_func returns TRUE.    */
/* Return TRUE if we successfully completed the collection.    */
GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
{
# ifndef NO_CLOCK
    CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
    GC_bool start_time_valid;
# endif

  ASSERT_CANCEL_DISABLED();
  GC_ASSERT(I_HOLD_LOCK());
  if (GC_dont_gc || (*stop_func)()) return FALSE;
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_START);
  if (GC_incremental && GC_collection_in_progress()) {
    GC_COND_LOG_PRINTF(
        "GC_try_to_collect_inner: finishing collection in progress\n");
    /* Just finish collection already in progress. */
    while (GC_collection_in_progress()) {
      if ((*stop_func)()) {
        /* TODO: Notify GC_EVENT_ABANDON */
        return(FALSE);
      }
      GC_collect_a_little_inner(1);
    }
  }
  GC_notify_full_gc();
# ifndef NO_CLOCK
    start_time_valid = FALSE;
    if ((GC_print_stats | (int)measure_performance) != 0) {
      if (GC_print_stats)
        GC_log_printf("Initiating full world-stop collection!\n");
      start_time_valid = TRUE;
      GET_TIME(start_time);
    }
# endif
  GC_promote_black_lists();
  /* Make sure all blocks have been reclaimed, so sweep routines   */
  /* don't see cleared mark bits.                                  */
  /* If we're guaranteed to finish, then this is unnecessary.      */
  /* In the find_leak case, we have to finish to guarantee that    */
  /* previously unmarked objects are not reported as leaks.        */
# ifdef PARALLEL_MARK
    if (GC_parallel)
      GC_wait_for_reclaim();
# endif
  if ((GC_find_leak || stop_func != GC_never_stop_func)
      && !GC_reclaim_all(stop_func, FALSE)) {
    /* Aborted.  So far everything is still consistent. */
    /* TODO: Notify GC_EVENT_ABANDON */
    return(FALSE);
  }
  GC_invalidate_mark_state();   /* Flush mark stack. */
  GC_clear_marks();
# ifdef SAVE_CALL_CHAIN
    GC_save_callers(GC_last_stack);
# endif
  GC_is_full_gc = TRUE;
  if (!GC_stopped_mark(stop_func)) {
    if (!GC_incremental) {
      /* We're partially done and have no way to complete or use */
      /* current work.  Reestablish invariants as cheaply as     */
      /* possible.                                               */
      GC_invalidate_mark_state();
      GC_unpromote_black_lists();
    } /* else we claim the world is already still consistent.  We'll */
      /* finish incrementally.                                       */
    /* TODO: Notify GC_EVENT_ABANDON */
    return(FALSE);
  }
  GC_finish_collection();
# ifndef NO_CLOCK
    if (start_time_valid) {
      CLOCK_TYPE current_time;
      unsigned long time_diff;

      GET_TIME(current_time);
      time_diff = MS_TIME_DIFF(current_time, start_time);
      if (measure_performance)
        full_gc_total_time += time_diff; /* may wrap */
      if (GC_print_stats)
        GC_log_printf("Complete collection took %lu ms %lu ns\n", time_diff,
                      NS_FRAC_TIME_DIFF(current_time, start_time));
    }
# endif
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_END);
  return(TRUE);
}
/*
 * Perform n units of garbage collection work.  A unit is intended to touch
 * roughly GC_rate pages.  Every once in a while, we do more than that.
 * This needs to be a fairly large number with our current incremental
 * GC strategy, since otherwise we allocate too much during GC, and the
 * cleanup gets expensive.
 */
#ifndef GC_RATE
# define GC_RATE 10
#endif

#ifndef MAX_PRIOR_ATTEMPTS
# define MAX_PRIOR_ATTEMPTS 1
#endif
        /* Maximum number of prior attempts at world stop marking.      */
        /* A value of 1 means that we finish the second time, no matter */
        /* how long it takes.  Doesn't count the initial root scan      */
        /* for a full GC.                                               */
STATIC int GC_deficit = 0;  /* The number of extra calls to GC_mark_some */
                            /* that we have made.                        */

STATIC int GC_rate = GC_RATE;

GC_API void GC_CALL GC_set_rate(int value)
{
  GC_ASSERT(value > 0);
  GC_rate = value;
}

GC_API int GC_CALL GC_get_rate(void)
{
  return GC_rate;
}

static int max_prior_attempts = MAX_PRIOR_ATTEMPTS;

GC_API void GC_CALL GC_set_max_prior_attempts(int value)
{
  GC_ASSERT(value >= 0);
  max_prior_attempts = value;
}

GC_API int GC_CALL GC_get_max_prior_attempts(void)
{
  return max_prior_attempts;
}
GC_INNER void GC_collect_a_little_inner(int n)
{
  IF_CANCEL(int cancel_state;)

  GC_ASSERT(I_HOLD_LOCK());
  if (GC_dont_gc) return;

  DISABLE_CANCEL(cancel_state);
  if (GC_incremental && GC_collection_in_progress()) {
    int i;
    int max_deficit = GC_rate * n;

#   ifdef PARALLEL_MARK
      if (GC_time_limit != GC_TIME_UNLIMITED)
        GC_parallel_mark_disabled = TRUE;
#   endif
    for (i = GC_deficit; i < max_deficit; i++) {
      if (GC_mark_some(NULL))
        break;
    }
#   ifdef PARALLEL_MARK
      GC_parallel_mark_disabled = FALSE;
#   endif

    if (i < max_deficit) {
      /* Need to finish a collection. */
#     ifdef SAVE_CALL_CHAIN
        GC_save_callers(GC_last_stack);
#     endif
#     ifdef PARALLEL_MARK
        if (GC_parallel)
          GC_wait_for_reclaim();
#     endif
      if (GC_n_attempts < max_prior_attempts
          && GC_time_limit != GC_TIME_UNLIMITED) {
#       ifndef NO_CLOCK
          GET_TIME(GC_start_time);
#       endif
        if (GC_stopped_mark(GC_timeout_stop_func)) {
          GC_finish_collection();
        } else {
          GC_n_attempts++;
        }
      } else {
        /* TODO: If possible, GC_default_stop_func should be */
        /* used here.                                         */
        (void)GC_stopped_mark(GC_never_stop_func);
        GC_finish_collection();
      }
    }
    if (GC_deficit > 0) {
      GC_deficit -= max_deficit;
      if (GC_deficit < 0)
        GC_deficit = 0;
    }
  } else {
    GC_maybe_gc();
  }
  RESTORE_CANCEL(cancel_state);
}
GC_INNER void (*GC_check_heap)(void) = 0;
GC_INNER void (*GC_print_all_smashed)(void) = 0;

GC_API int GC_CALL GC_collect_a_little(void)
{
  int result;
  DCL_LOCK_STATE;

  LOCK();
  GC_collect_a_little_inner(1);
  result = (int)GC_collection_in_progress();
  UNLOCK();
  if (!result && GC_debugging_started) GC_print_all_smashed();
  return(result);
}
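/*
 * Illustrative client use: in incremental mode an application can
 * donate idle time to the collector; GC_collect_a_little() returns
 * nonzero while a collection is still in progress (app_is_idle is a
 * hypothetical application hook):
 *
 *   while (app_is_idle() && GC_collect_a_little()) {
 *     // a unit of marking work was done; more remains
 *   }
 */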
#ifndef NO_CLOCK
  /* Variables for world-stop average delay time statistic computation. */
  /* "divisor" is incremented every world-stop and halved when it       */
  /* reaches its maximum (or upon "total_time" overflow).               */
  static unsigned world_stopped_total_time = 0;
  static unsigned world_stopped_total_divisor = 0;
# ifndef MAX_TOTAL_TIME_DIVISOR
    /* We shall not use big values here (so "outdated" delay time       */
    /* values would have less impact on "average" delay time value than */
    /* newer ones).                                                     */
#   define MAX_TOTAL_TIME_DIVISOR 1000
# endif
#endif /* !NO_CLOCK */
#ifdef USE_MUNMAP
# define IF_USE_MUNMAP(x) x
# define COMMA_IF_USE_MUNMAP(x) /* comma */, x
#else
# define IF_USE_MUNMAP(x) /* empty */
# define COMMA_IF_USE_MUNMAP(x) /* empty */
#endif

/*
 * We stop the world and mark from all roots.
 * If stop_func() ever returns TRUE, we may fail and return FALSE.
 * Increment GC_gc_no if we succeed.
 */
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
{
  int i;
# ifndef NO_CLOCK
    CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
# endif

  GC_ASSERT(I_HOLD_LOCK());
# if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
    GC_add_current_malloc_heap();
# endif
# if defined(REGISTER_LIBRARIES_EARLY)
    GC_cond_register_dynamic_libraries();
# endif

# ifndef NO_CLOCK
    if (GC_PRINT_STATS_FLAG)
      GET_TIME(start_time);
# endif

# if !defined(GC_NO_FINALIZATION) && !defined(GC_TOGGLE_REFS_NOT_NEEDED)
    GC_process_togglerefs();
# endif

  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_PRE_STOP_WORLD);
  STOP_WORLD();
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_POST_STOP_WORLD);

# ifdef THREAD_LOCAL_ALLOC
    GC_world_stopped = TRUE;
# endif
  /* Output blank line for convenience here. */
  GC_COND_LOG_PRINTF(
        "\n--> Marking for collection #%lu after %lu allocated bytes\n",
        (unsigned long)GC_gc_no + 1, (unsigned long) GC_bytes_allocd);
# ifdef MAKE_BACK_GRAPH
    if (GC_print_back_height) {
      GC_build_back_graph();
    }
# endif

  /* Mark from all roots. */
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_MARK_START);

  /* Minimize junk left in my registers and on the stack. */
  GC_clear_a_few_frames();
  GC_noop6(0,0,0,0,0,0);

  GC_initiate_gc();
# ifdef PARALLEL_MARK
    if (stop_func != GC_never_stop_func)
      GC_parallel_mark_disabled = TRUE;
# endif
  for (i = 0; !(*stop_func)(); i++) {
    if (GC_mark_some(GC_approx_sp())) {
#     ifdef PARALLEL_MARK
        if (GC_parallel && GC_parallel_mark_disabled) {
          GC_COND_LOG_PRINTF("Stopped marking done after %d iterations"
                             " with disabled parallel marker\n", i);
        }
#     endif
      i = -1;
      break;
    }
  }
# ifdef PARALLEL_MARK
    GC_parallel_mark_disabled = FALSE;
# endif

  if (i != -1) {
    /* Stopped marking was abandoned by stop_func. */
    GC_COND_LOG_PRINTF("Abandoned stopped marking after"
                       " %d iterations\n", i);
    GC_deficit = i; /* Give the mutator a chance. */
#   ifdef THREAD_LOCAL_ALLOC
      GC_world_stopped = FALSE;
#   endif
    if (GC_on_collection_event)
      GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
    START_WORLD();
    if (GC_on_collection_event)
      GC_on_collection_event(GC_EVENT_POST_START_WORLD);
    /* TODO: Notify GC_EVENT_MARK_ABANDON */
    return(FALSE);
  }

  GC_gc_no++;
  GC_DBGLOG_PRINTF("GC #%lu freed %ld bytes, heap %lu KiB"
                   IF_USE_MUNMAP(" (+ %lu KiB unmapped)") "\n",
                   (unsigned long)GC_gc_no, (long)GC_bytes_found,
                   TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
                   COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)));

  /* Check all debugged objects for consistency. */
  if (GC_debugging_started) {
    (*GC_check_heap)();
  }
  if (GC_on_collection_event) {
    GC_on_collection_event(GC_EVENT_MARK_END);
    GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
  }

# ifdef THREAD_LOCAL_ALLOC
    GC_world_stopped = FALSE;
# endif
  START_WORLD();
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_POST_START_WORLD);

# ifndef NO_CLOCK
    if (GC_PRINT_STATS_FLAG) {
      unsigned long time_diff;
      unsigned total_time, divisor;
      CLOCK_TYPE current_time;

      GET_TIME(current_time);
      time_diff = MS_TIME_DIFF(current_time, start_time);

      /* Compute the new world-stop delay total time. */
      total_time = world_stopped_total_time;
      divisor = world_stopped_total_divisor;
      if ((int)total_time < 0 || divisor >= MAX_TOTAL_TIME_DIVISOR) {
        /* Halve values if overflow occurs. */
        total_time >>= 1;
        divisor >>= 1;
      }
      total_time += time_diff < (((unsigned)-1) >> 1) ?
                        (unsigned)time_diff : ((unsigned)-1) >> 1;
      /* Update old world_stopped_total_time and its divisor. */
      world_stopped_total_time = total_time;
      world_stopped_total_divisor = ++divisor;

      GC_ASSERT(divisor != 0);
      GC_log_printf("World-stopped marking took %lu ms %lu ns"
                    " (%u ms on average)\n",
                    time_diff, NS_FRAC_TIME_DIFF(current_time, start_time),
                    total_time / divisor);
    }
# endif
  return(TRUE);
}
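/*
 * Note on the averaging scheme above, with invented numbers: once the
 * divisor reaches 1000 (MAX_TOTAL_TIME_DIVISOR), both the accumulated
 * time and the divisor are halved, e.g. 12000 ms / 1000 becomes
 * 6000 ms / 500.  The reported average (12 ms) is preserved, while
 * older pauses decay in weight relative to newer ones.
 */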
/* Set all mark bits for the free list whose first entry is q. */
GC_INNER void GC_set_fl_marks(ptr_t q)
{
  if (q /* != NULL */) { /* CPPCHECK */
    struct hblk *h = HBLKPTR(q);
    struct hblk *last_h = h;
    hdr *hhdr = HDR(h);
    IF_PER_OBJ(word sz = hhdr->hb_sz;)

    for (;;) {
      word bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);

      if (!mark_bit_from_hdr(hhdr, bit_no)) {
        set_mark_bit_from_hdr(hhdr, bit_no);
        ++hhdr -> hb_n_marks;
      }

      q = (ptr_t)obj_link(q);
      if (q == NULL)
        break;

      h = HBLKPTR(q);
      if (h != last_h) {
        last_h = h;
        hhdr = HDR(h);
        IF_PER_OBJ(sz = hhdr->hb_sz;)
      }
    }
  }
}
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
  /* Check that all mark bits for the free list whose first entry is    */
  /* (*pfreelist) are set.  The check is skipped if the pointer is a    */
  /* special value.                                                     */
  void GC_check_fl_marks(void **pfreelist)
  {
    /* TODO: There is a data race with GC_FAST_MALLOC_GRANS (which does */
    /* not do atomic updates to the free-list).  The race seems to be   */
    /* harmless, and for now we just skip this check in case of TSan.   */
#   if defined(AO_HAVE_load_acquire_read) && !defined(THREAD_SANITIZER)
      AO_t *list = (AO_t *)AO_load_acquire_read((AO_t *)pfreelist);
              /* Atomic operations are used because the world is running. */
      AO_t *prev;
      AO_t *p;

      if ((word)list <= HBLKSIZE) return;

      prev = (AO_t *)pfreelist;
      for (p = list; p != NULL;) {
        AO_t *next;

        if (!GC_is_marked(p)) {
          ABORT_ARG2("Unmarked local free list entry",
                     ": object %p on list %p", (void *)p, (void *)list);
        }

        /* While traversing the free-list, it re-reads the pointer to   */
        /* the current node before accepting its next pointer and       */
        /* bails out if the latter has changed.  That way, it won't     */
        /* try to follow the pointer which might have been modified     */
        /* after the object was returned to the client.  It might       */
        /* perform the mark-check on the just allocated object but      */
        /* that should be harmless.                                     */
        next = (AO_t *)AO_load_acquire_read(p);
        if (AO_load(prev) != (AO_t)p)
          break;
        prev = p;
        p = next;
      }
#   else
      /* FIXME: Not implemented (just skipped). */
      (void)pfreelist;
#   endif
  }
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
/* Clear all mark bits for the free list whose first entry is q.  */
/* Decrement GC_bytes_found by number of bytes on free list.      */
STATIC void GC_clear_fl_marks(ptr_t q)
{
  struct hblk *h = HBLKPTR(q);
  struct hblk *last_h = h;
  hdr *hhdr = HDR(h);
  word sz = hhdr->hb_sz;    /* Normally set only once. */

  for (;;) {
    word bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);

    if (mark_bit_from_hdr(hhdr, bit_no)) {
      size_t n_marks = hhdr -> hb_n_marks;

      GC_ASSERT(n_marks != 0);
      clear_mark_bit_from_hdr(hhdr, bit_no);
      n_marks--;
#     ifdef PARALLEL_MARK
        /* Appr. count, don't decrement to zero! */
        if (0 != n_marks || !GC_parallel) {
          hhdr -> hb_n_marks = n_marks;
        }
#     else
        hhdr -> hb_n_marks = n_marks;
#     endif
    }
    GC_bytes_found -= sz;

    q = (ptr_t)obj_link(q);
    if (q == NULL)
      break;

    h = HBLKPTR(q);
    if (h != last_h) {
      last_h = h;
      hhdr = HDR(h);
      sz = hhdr->hb_sz;
    }
  }
}
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
  void GC_check_tls(void);
#endif

GC_on_heap_resize_proc GC_on_heap_resize = 0;

/* Used for logging only. */
GC_INLINE int GC_compute_heap_usage_percent(void)
{
  word used = GC_composite_in_use + GC_atomic_in_use;
  word heap_sz = GC_heapsize - GC_unmapped_bytes;
# if defined(CPPCHECK)
    word limit = (GC_WORD_MAX >> 1) / 50; /* to avoid a false positive */
# else
    const word limit = GC_WORD_MAX / 100;
# endif

  return used >= heap_sz ? 0 : used < limit ?
            (int)((used * 100) / heap_sz) : (int)(used / (heap_sz / 100));
}
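/*
 * Worked example (invented numbers): with used = 300 MiB and
 * heap_sz = 1 GiB, used is far below limit, so the precise form
 * (used * 100) / heap_sz yields 29%; only a "used" value above
 * GC_WORD_MAX / 100 falls back to used / (heap_sz / 100), avoiding
 * overflow in the multiplication.
 */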
/* Finish up a collection.  Assumes mark bits are consistent, lock is   */
/* held, but the world is otherwise running.                            */
STATIC void GC_finish_collection(void)
{
# ifndef NO_CLOCK
    CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
    CLOCK_TYPE finalize_time = CLOCK_TYPE_INITIALIZER;
# endif

  GC_ASSERT(I_HOLD_LOCK());
# if defined(GC_ASSERTIONS) \
     && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
    /* Check that we marked some of our own data. */
    /* TODO: Add more checks. */
    GC_check_tls();
# endif

# ifndef NO_CLOCK
    if (GC_print_stats)
      GET_TIME(start_time);
# endif
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_RECLAIM_START);

# ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
    if (GC_bytes_found > 0)
      GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
# endif
  GC_bytes_found = 0;
# if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
    if (GETENV("GC_PRINT_ADDRESS_MAP") != 0) {
      GC_print_address_map();
    }
# endif

  if (GC_find_leak) {
    /* Mark all objects on the free list.  All objects should be */
    /* marked when we're done.                                   */
    word size;          /* current object size */
    unsigned kind;
    ptr_t q;

    for (kind = 0; kind < GC_n_kinds; kind++) {
      for (size = 1; size <= MAXOBJGRANULES; size++) {
        q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
        if (q != NULL)
          GC_set_fl_marks(q);
      }
    }
    GC_start_reclaim(TRUE);
      /* The above just checks; it doesn't really reclaim anything. */
  }
# ifndef GC_NO_FINALIZATION
    GC_finalize();
# endif
# ifndef NO_CLOCK
    if (GC_print_stats)
      GET_TIME(finalize_time);
# endif

  if (GC_print_back_height) {
#   ifdef MAKE_BACK_GRAPH
      GC_traverse_back_graph();
#   elif !defined(SMALL_CONFIG)
      GC_err_printf("Back height not available: "
                    "Rebuild collector with -DMAKE_BACK_GRAPH\n");
#   endif
  }

  /* Clear free list mark bits, in case they got accidentally marked    */
  /* (or GC_find_leak is set and they were intentionally marked).       */
  /* Also subtract memory remaining from GC_bytes_found count.          */
  /* Note that composite objects on free list are cleared.              */
  /* Thus accidentally marking a free list is not a problem; only       */
  /* objects on the list itself will be marked, and that's fixed here.  */
  {
    word size;          /* current object size       */
    ptr_t q;            /* pointer to current object */
    unsigned kind;

    for (kind = 0; kind < GC_n_kinds; kind++) {
      for (size = 1; size <= MAXOBJGRANULES; size++) {
        q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
        if (q != NULL)
          GC_clear_fl_marks(q);
      }
    }
  }
  GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
                        (long)GC_bytes_found);

  /* Reconstruct free lists to contain everything not marked. */
  GC_start_reclaim(FALSE);
  GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n",
                   GC_compute_heap_usage_percent(),
                   TO_KiB_UL(GC_composite_in_use),
                   TO_KiB_UL(GC_atomic_in_use));
  if (GC_is_full_gc) {
    GC_used_heap_size_after_full = USED_HEAP_SIZE;
    GC_need_full_gc = FALSE;
  } else {
    GC_need_full_gc = USED_HEAP_SIZE - GC_used_heap_size_after_full
                        > min_bytes_allocd();
  }

  GC_VERBOSE_LOG_PRINTF("Immediately reclaimed %ld bytes, heapsize:"
                        " %lu bytes" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
                        (long)GC_bytes_found,
                        (unsigned long)GC_heapsize /*, */
                        COMMA_IF_USE_MUNMAP((unsigned long)
                                            GC_unmapped_bytes));

  /* Reset or increment counters for next cycle. */
  GC_n_attempts = 0;
  GC_is_full_gc = FALSE;
  GC_bytes_allocd_before_gc += GC_bytes_allocd;
  GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
  GC_bytes_allocd = 0;
  GC_bytes_dropped = 0;
  GC_bytes_freed = 0;
  GC_finalizer_bytes_freed = 0;

  IF_USE_MUNMAP(GC_unmap_old());

  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_RECLAIM_END);
# ifndef NO_CLOCK
    if (GC_print_stats) {
      CLOCK_TYPE done_time;

      GET_TIME(done_time);
#     if !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
        /* A convenient place to output finalization statistics. */
        GC_print_finalization_stats();
#     endif
      GC_log_printf("Finalize and initiate sweep took %lu ms %lu ns"
                    " + %lu ms %lu ns\n",
                    MS_TIME_DIFF(finalize_time, start_time),
                    NS_FRAC_TIME_DIFF(finalize_time, start_time),
                    MS_TIME_DIFF(done_time, finalize_time),
                    NS_FRAC_TIME_DIFF(done_time, finalize_time));
    }
# elif !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
    if (GC_print_stats)
      GC_print_finalization_stats();
# endif
}
/* If stop_func == 0 then GC_default_stop_func is used instead. */
STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func,
                                         GC_bool force_unmap GC_ATTR_UNUSED)
{
  GC_bool result;
  IF_USE_MUNMAP(int old_unmap_threshold;)
  IF_CANCEL(int cancel_state;)
  DCL_LOCK_STATE;

  if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
  if (GC_debugging_started) GC_print_all_smashed();
  GC_INVOKE_FINALIZERS();
  LOCK();
  DISABLE_CANCEL(cancel_state);
# ifdef USE_MUNMAP
    old_unmap_threshold = GC_unmap_threshold;
    if (force_unmap ||
        (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
      GC_unmap_threshold = 1; /* unmap as much as possible */
# endif
  ENTER_GC();
  /* Minimize junk left in my registers. */
  GC_noop6(0,0,0,0,0,0);
  result = GC_try_to_collect_inner(stop_func != 0 ? stop_func :
                                   GC_default_stop_func);
  EXIT_GC();
  IF_USE_MUNMAP(GC_unmap_threshold = old_unmap_threshold); /* restore */
  RESTORE_CANCEL(cancel_state);
  UNLOCK();
  if (result) {
    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
  }
  return(result);
}
/* Externally callable routines to invoke full, stop-the-world collection. */
GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
{
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
  return (int)GC_try_to_collect_general(stop_func, FALSE);
}

GC_API void GC_CALL GC_gcollect(void)
{
  /* 0 is passed as stop_func to get GC_default_stop_func value */
  /* while holding the allocation lock (to prevent data races). */
  (void)GC_try_to_collect_general(0, FALSE);
  if (GC_have_errors) GC_print_all_errors();
}

STATIC word GC_heapsize_at_forced_unmap = 0;

GC_API void GC_CALL GC_gcollect_and_unmap(void)
{
  /* Record current heap size to make heap growth more conservative */
  /* afterwards (as if the heap is growing from zero size again).   */
  GC_heapsize_at_forced_unmap = GC_heapsize;
  /* Collect and force memory unmapping to OS. */
  (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
}
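/*
 * Illustrative client use of GC_try_to_collect (a sketch;
 * deadline_passed is a hypothetical predicate): returning nonzero from
 * the stop function abandons the collection, and GC_try_to_collect
 * then returns 0:
 *
 *   static int GC_CALLBACK give_up(void)
 *   {
 *     return deadline_passed();
 *   }
 *   ...
 *   if (!GC_try_to_collect(give_up))
 *     fprintf(stderr, "collection abandoned\n");
 */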
GC_INNER word GC_n_heap_sects = 0;
                        /* Number of sections currently in heap. */

#ifdef USE_PROC_FOR_LIBRARIES
  GC_INNER word GC_n_memory = 0;
                        /* Number of GET_MEM allocated memory sections. */
#endif

#ifdef USE_PROC_FOR_LIBRARIES
  /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory. */
  /* Defined to do nothing if USE_PROC_FOR_LIBRARIES not set.        */
  GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
  {
    if (0 == p) return;
    if (GC_n_memory >= MAX_HEAP_SECTS)
      ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
    GC_our_memory[GC_n_memory].hs_start = p;
    GC_our_memory[GC_n_memory].hs_bytes = bytes;
    GC_n_memory++;
  }
#endif
/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes)
{
  hdr * phdr;
  word endp;

  if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
    ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
  }
  while ((word)p <= HBLKSIZE) {
    /* Can't handle memory near address zero. */
    ++p;
    bytes -= HBLKSIZE;
    if (0 == bytes) return;
  }
  endp = (word)p + bytes;
  if (endp <= (word)p) {
    /* Address wrapped. */
    bytes -= HBLKSIZE;
    if (0 == bytes) return;
    endp -= HBLKSIZE;
  }
  phdr = GC_install_header(p);
  if (0 == phdr) {
    /* This is extremely unlikely.  Can't add it.  This will         */
    /* almost certainly result in a 0 return from the allocator,     */
    /* which is entirely appropriate.                                */
    return;
  }
  GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
  GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
  GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
  GC_n_heap_sects++;
  phdr -> hb_sz = bytes;
  phdr -> hb_flags = 0;
  GC_freehblk(p);
  GC_heapsize += bytes;

  /* Normally the caller calculates a new GC_collect_at_heapsize,
   * but this is also called directly from alloc_mark_stack, so
   * adjust here.  It will be recalculated when called from
   * GC_expand_hp_inner.
   */
  GC_collect_at_heapsize += bytes;
  if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
    GC_collect_at_heapsize = GC_WORD_MAX;

  if ((word)p <= (word)GC_least_plausible_heap_addr
      || GC_least_plausible_heap_addr == 0) {
    GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
        /* Making it a little smaller than necessary prevents */
        /* us from getting a false hit from the variable      */
        /* itself.  There's some unintentional reflection     */
        /* here.                                              */
  }
  if ((word)p + bytes >= (word)GC_greatest_plausible_heap_addr) {
    GC_greatest_plausible_heap_addr = (void *)endp;
  }
}
#if !defined(NO_DEBUGGING)
  void GC_print_heap_sects(void)
  {
    unsigned i;

    GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
              (unsigned long)GC_heapsize /*, */
              COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));

    for (i = 0; i < GC_n_heap_sects; i++) {
      ptr_t start = GC_heap_sects[i].hs_start;
      size_t len = GC_heap_sects[i].hs_bytes;
      struct hblk *h;
      unsigned nbl = 0;

      for (h = (struct hblk *)start; (word)h < (word)(start + len); h++) {
        if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
      }
      GC_printf("Section %d from %p to %p %u/%lu blacklisted\n",
                i, (void *)start, (void *)&start[len],
                nbl, (unsigned long)divHBLKSZ(len));
    }
  }
#endif
void * GC_least_plausible_heap_addr = (void *)GC_WORD_MAX;
void * GC_greatest_plausible_heap_addr = 0;

GC_INLINE word GC_max(word x, word y)
{
  return(x > y? x : y);
}

GC_INLINE word GC_min(word x, word y)
{
  return(x < y? x : y);
}

STATIC word GC_max_heapsize = 0;

GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
{
  GC_max_heapsize = n;
}

GC_word GC_max_retries = 0;
/* This explicitly increases the size of the heap.  It is used          */
/* internally, but may also be invoked from GC_expand_hp by the user.   */
/* The argument is in units of HBLKSIZE (tiny values are rounded up).   */
/* Returns FALSE on failure.                                            */
GC_INNER GC_bool GC_expand_hp_inner(word n)
{
  size_t bytes;
  struct hblk * space;
  word expansion_slop;  /* Number of bytes by which we expect the */
                        /* heap to expand soon.                   */

  GC_ASSERT(I_HOLD_LOCK());
  if (n < MINHINCR) n = MINHINCR;
  bytes = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
  if (GC_max_heapsize != 0
      && (GC_max_heapsize < (word)bytes
          || GC_heapsize > GC_max_heapsize - (word)bytes)) {
    /* Exceeded self-imposed limit */
    return(FALSE);
  }
  space = GET_MEM(bytes);
  GC_add_to_our_memory((ptr_t)space, bytes);
  if (space == 0) {
    WARN("Failed to expand heap by %" WARN_PRIdPTR " bytes\n",
         (word)bytes);
    return(FALSE);
  }
  GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
                    TO_KiB_UL(GC_heapsize + (word)bytes),
                    (unsigned long)GC_bytes_allocd);
  /* Adjust heap limits generously for blacklisting to work better. */
  /* GC_add_to_heap performs minimal adjustment needed for          */
  /* correctness.                                                   */
  expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;
  if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
      || (GC_last_heap_addr != 0
          && (word)GC_last_heap_addr < (word)space)) {
    /* Assume the heap is growing up. */
    word new_limit = (word)space + (word)bytes + expansion_slop;
    if (new_limit > (word)space) {
      GC_greatest_plausible_heap_addr =
          (void *)GC_max((word)GC_greatest_plausible_heap_addr,
                         (word)new_limit);
    }
  } else {
    /* Heap is growing down. */
    word new_limit = (word)space - expansion_slop;
    if (new_limit < (word)space) {
      GC_least_plausible_heap_addr =
          (void *)GC_min((word)GC_least_plausible_heap_addr,
                         (word)space - expansion_slop);
    }
  }
  GC_prev_heap_addr = GC_last_heap_addr;
  GC_last_heap_addr = (ptr_t)space;
  GC_add_to_heap(space, bytes);
  /* Force GC before we are likely to allocate past expansion_slop. */
  GC_collect_at_heapsize =
      GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
  if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
    GC_collect_at_heapsize = GC_WORD_MAX;
  if (GC_on_heap_resize)
    (*GC_on_heap_resize)(GC_heapsize);

  return(TRUE);
}
/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* Argument is in bytes.  Includes GC_init() call.                       */
GC_API int GC_CALL GC_expand_hp(size_t bytes)
{
  int result;
  DCL_LOCK_STATE;

  if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
  LOCK();
  result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
  if (result) GC_requested_heapsize += bytes;
  UNLOCK();
  return(result);
}
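/*
 * Illustrative client use: pre-expanding the heap at startup avoids a
 * burst of early collections while the heap grows to its working size:
 *
 *   GC_INIT();
 *   if (!GC_expand_hp(64 * 1024 * 1024))    // ~64 MiB up front
 *     fprintf(stderr, "heap pre-expansion failed\n");
 */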
word GC_fo_entries = 0; /* used also in extra/MacOS.c */

GC_INNER unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures? */
                        /* Reset by GC_allochblk.                      */

static word last_fo_entries = 0;
static word last_bytes_finalized = 0;

/* Collect or expand heap in an attempt to make the indicated number of */
/* free blocks available.  Should be called until the blocks are        */
/* available (setting retry value to TRUE unless this is the first call */
/* in a loop) or until it fails by returning FALSE.                     */
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
                                      GC_bool ignore_off_page,
                                      GC_bool retry)
{
  GC_bool gc_not_stopped = TRUE;
  word blocks_to_get;
  IF_CANCEL(int cancel_state;)

  GC_ASSERT(I_HOLD_LOCK());
  DISABLE_CANCEL(cancel_state);
  if (!GC_incremental && !GC_dont_gc &&
      ((GC_dont_expand && GC_bytes_allocd > 0)
       || (GC_fo_entries > (last_fo_entries + 500)
           && (last_bytes_finalized | GC_bytes_finalized) != 0)
       || GC_should_collect())) {
    /* Try to do a full collection using 'default' stop_func (unless    */
    /* nothing has been allocated since the latest collection or heap   */
    /* expansion is disabled).                                          */
    gc_not_stopped = GC_try_to_collect_inner(
                        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry) ?
                        GC_default_stop_func : GC_never_stop_func);
    if (gc_not_stopped == TRUE || !retry) {
      /* Either the collection hasn't been aborted or this is the */
      /* first attempt (in a loop).                               */
      last_fo_entries = GC_fo_entries;
      last_bytes_finalized = GC_bytes_finalized;
      RESTORE_CANCEL(cancel_state);
      return(TRUE);
    }
  }

  blocks_to_get = (GC_heapsize - GC_heapsize_at_forced_unmap)
                      / (HBLKSIZE * GC_free_space_divisor)
                  + needed_blocks;
  if (blocks_to_get > MAXHINCR) {
    word slop;

    /* Get the minimum required to make it likely that we can satisfy */
    /* the current request in the presence of black-listing.          */
    /* This will probably be more than MAXHINCR.                      */
    if (ignore_off_page) {
      slop = 4;
    } else {
      slop = 2 * divHBLKSZ(BL_LIMIT);
      if (slop > needed_blocks) slop = needed_blocks;
    }
    if (needed_blocks + slop > MAXHINCR) {
      blocks_to_get = needed_blocks + slop;
    } else {
      blocks_to_get = MAXHINCR;
    }
    if (blocks_to_get > divHBLKSZ(GC_WORD_MAX))
      blocks_to_get = divHBLKSZ(GC_WORD_MAX);
  }

  if (!GC_expand_hp_inner(blocks_to_get)
      && (blocks_to_get == needed_blocks
          || !GC_expand_hp_inner(needed_blocks))) {
    if (gc_not_stopped == FALSE) {
      /* Don't increment GC_fail_count here (and no warning). */
      GC_gcollect_inner();
      GC_ASSERT(GC_bytes_allocd == 0);
    } else if (GC_fail_count++ < GC_max_retries) {
      WARN("Out of Memory!  Trying to continue...\n", 0);
      GC_gcollect_inner();
    } else {
#     if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
        WARN("Out of Memory! Heap size: %" WARN_PRIdPTR " MiB."
             " Returning NULL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20);
#     endif
      RESTORE_CANCEL(cancel_state);
      return(FALSE);
    }
  } else if (GC_fail_count) {
    GC_COND_LOG_PRINTF("Memory available again...\n");
  }
  RESTORE_CANCEL(cancel_state);
  return(TRUE);
}
/*
 * Make sure the object free list for size gran (in granules) is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 */
GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
{
  void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
  GC_bool tried_minor = FALSE;
  GC_bool retry = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
  if (gran == 0) return(0);

  while (*flh == 0) {
    ENTER_GC();
#   ifndef GC_DISABLE_INCREMENTAL
      if (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED) {
        /* True incremental mode, not just generational. */
        /* Do our share of marking work.                 */
        GC_collect_a_little_inner(1);
      }
#   endif
    /* Sweep blocks for objects of this size. */
    GC_ASSERT(!GC_is_full_gc
              || NULL == GC_obj_kinds[kind].ok_reclaim_list
              || NULL == GC_obj_kinds[kind].ok_reclaim_list[gran]);
    GC_continue_reclaim(gran, kind);
    EXIT_GC();
    if (*flh == 0) {
      GC_new_hblk(gran, kind);
      if (*flh == 0) {
        ENTER_GC();
        if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
            && !tried_minor) {
          GC_collect_a_little_inner(1);
          tried_minor = TRUE;
        } else {
          if (!GC_collect_or_expand(1, FALSE, retry)) {
            EXIT_GC();
            return(0);
          }
          retry = TRUE;
        }
        EXIT_GC();
      }
    }
  }
  /* Successful allocation; reset failure count. */
  GC_fail_count = 0;

  return (ptr_t)(*flh);
}