int kind, unsigned flags)
{
word descr;
# ifdef MARK_BIT_PER_GRANULE
- size_t granules;
if (byte_sz > MAXOBJBYTES)
flags |= LARGE_BLOCK;
# endif
hhdr -> hb_inv_sz = inv_sz;
}
# else /* MARK_BIT_PER_GRANULE */
- granules = BYTES_TO_GRANULES(byte_sz);
+ {
+ size_t granules = BYTES_TO_GRANULES(byte_sz);
+
if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
/* Make it look like a valid block. */
hhdr -> hb_sz = HBLKSIZE;
}
hhdr -> hb_map = GC_obj_map[(hhdr -> hb_flags & LARGE_BLOCK) != 0 ?
0 : granules];
+ }
# endif /* MARK_BIT_PER_GRANULE */
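/* The recurring change in this patch, shown in isolation: a variable */
/* used on only one branch moves from function scope into that        */
/* branch, so its declaration, initialization, and single use sit     */
/* together. A minimal sketch with illustrative names (not bdwgc      */
/* code):                                                             */
#include <stdio.h>

static void report(int verbose, int value)
{
    /* Before the refactoring, "int doubled;" would sit here at       */
    /* function scope even though only one branch needs it.           */
    if (verbose) {
        int doubled = 2 * value;    /* declared where it is used      */

        printf("value = %d, doubled = %d\n", value, doubled);
    }
}

int main(void)
{
    report(1, 21);
    return 0;
}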
/* Clear mark bits */
{
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
- CLOCK_TYPE current_time;
# endif
+
ASSERT_CANCEL_DISABLED();
GC_ASSERT(I_HOLD_LOCK());
if (GC_dont_gc || (*stop_func)()) return FALSE;
GC_finish_collection();
# ifndef SMALL_CONFIG
if (GC_print_stats) {
+ CLOCK_TYPE current_time;
+
GET_TIME(current_time);
GC_log_printf("Complete collection took %lu msecs\n",
MS_TIME_DIFF(current_time,start_time));
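/* GET_TIME and MS_TIME_DIFF above are platform wrappers; a sketch of */
/* the same measure-only-when-logging pattern using standard C        */
/* clock() (an approximation, not the macros' actual definitions):    */
#include <stdio.h>
#include <time.h>

int main(void)
{
    clock_t start_time = clock();
    int print_stats = 1;

    /* ... the work being timed would run here ... */
    if (print_stats) {
        clock_t current_time = clock();   /* fetched only when needed */

        printf("Complete collection took %lu msecs\n",
               (unsigned long)((current_time - start_time)
                               * 1000 / CLOCKS_PER_SEC));
    }
    return 0;
}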
unsigned i;
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
- CLOCK_TYPE current_time;
# endif
# if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
if (GC_PRINT_STATS_FLAG) {
unsigned long time_diff;
unsigned total_time, divisor;
+ CLOCK_TYPE current_time;
+
GET_TIME(current_time);
time_diff = MS_TIME_DIFF(current_time,start_time);
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
CLOCK_TYPE finalize_time = 0;
- CLOCK_TYPE done_time;
# endif
# if defined(GC_ASSERTIONS) && defined(THREADS) \
GC_on_collection_event(GC_EVENT_RECLAIM_END);
# ifndef SMALL_CONFIG
if (GC_print_stats) {
+ CLOCK_TYPE done_time;
+
GET_TIME(done_time);
# ifndef GC_NO_FINALIZATION
/* A convenient place to output finalization statistics. */
ptr_t old_back_ptr = GET_OH_BG_PTR(q);
back_edges * be, *be_cont;
word i;
- static unsigned random_number = 13;
-# define GOT_LUCKY_NUMBER (((++random_number) & 0x7f) == 0)
- /* A not very random number we use to occasionally allocate a */
- /* back_edges structure even for a single backward edge. This */
- /* prevents us from repeatedly tracing back through very long */
- /* chains, since we will have some place to store height and */
- /* in_progress flags along the way. */
GC_ASSERT(p == GC_base(p) && q == GC_base(q));
if (!GC_HAS_DEBUG_INFO(q) || !GC_HAS_DEBUG_INFO(p)) {
return;
}
if (0 == old_back_ptr) {
+ static unsigned random_number = 13;
+# define GOT_LUCKY_NUMBER (((++random_number) & 0x7f) == 0)
+ /* A not very random number we use to occasionally allocate a */
+ /* back_edges structure even for a single backward edge. This */
+ /* prevents us from repeatedly tracing back through very long */
+ /* chains, since we will have some place to store height and */
+ /* in_progress flags along the way. */
+
SET_OH_BG_PTR(q, p);
if (GOT_LUCKY_NUMBER) ensure_struct(q);
return;
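/* Sketch of the GOT_LUCKY_NUMBER idiom above: a cheap deterministic  */
/* counter that fires once every 128 calls, so the expensive          */
/* back_edges allocation happens only occasionally (the allocation    */
/* is replaced by a counter here):                                    */
#include <stdio.h>

static unsigned random_number = 13;
#define GOT_LUCKY_NUMBER (((++random_number) & 0x7f) == 0)

int main(void)
{
    int i, hits = 0;

    for (i = 0; i < 1000; i++)
        if (GOT_LUCKY_NUMBER)
            ++hits;                /* stand-in for ensure_struct(q)   */
    printf("fired %d times in 1000 calls\n", hits);   /* 7 here       */
    return 0;
}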
GC_INNER void GC_push_all_stacks(void)
{
- int i;
- ptr_t lo, hi, altstack_lo, altstack_hi;
+ ptr_t hi, altstack_lo, altstack_hi;
task_t my_task = current_task();
mach_port_t my_thread = mach_thread_self();
GC_bool found_me = FALSE;
# ifndef DARWIN_DONT_PARSE_STACK
if (GC_query_task_threads) {
+ int i;
kern_return_t kern_result;
thread_act_array_t act_list = 0;
for (i = 0; i < (int)listcount; i++) {
thread_act_t thread = act_list[i];
- lo = GC_stack_range_for(&hi, thread, NULL, FALSE, my_thread,
- &altstack_lo, &altstack_hi);
+ ptr_t lo = GC_stack_range_for(&hi, thread, NULL, FALSE, my_thread,
+ &altstack_lo, &altstack_hi);
+
if (lo) {
GC_ASSERT((word)lo <= (word)hi);
total_size += hi - lo;
} else
# endif /* !DARWIN_DONT_PARSE_STACK */
/* else */ {
+ int i;
+
for (i = 0; i < (int)listcount; i++) {
GC_thread p;
+
for (p = GC_threads[i]; p != NULL; p = p->next)
if ((p->flags & FINISHED) == 0) {
thread_act_t thread = (thread_act_t)p->stop_info.mach_thread;
- lo = GC_stack_range_for(&hi, thread, p, (GC_bool)p->thread_blocked,
- my_thread, &altstack_lo, &altstack_hi);
+ ptr_t lo = GC_stack_range_for(&hi, thread, p,
+ (GC_bool)p->thread_blocked,
+ my_thread, &altstack_lo,
+ &altstack_hi);
+
if (lo) {
GC_ASSERT((word)lo <= (word)hi);
total_size += hi - lo;
/* Caller holds allocation lock. */
GC_INNER void GC_stop_world(void)
{
- unsigned i;
task_t my_task = current_task();
mach_port_t my_thread = mach_thread_self();
kern_return_t kern_result;
if (GC_query_task_threads) {
# ifndef GC_NO_THREADS_DISCOVERY
+ unsigned i;
GC_bool changed;
thread_act_array_t act_list, prev_list;
mach_msg_type_number_t listcount, prevcount;
# endif /* !GC_NO_THREADS_DISCOVERY */
} else {
+ unsigned i;
+
for (i = 0; i < THREAD_TABLE_SZ; i++) {
GC_thread p;
GC_INNER void GC_start_world(void)
{
task_t my_task = current_task();
- int i;
# ifdef DEBUG_THREADS
GC_log_printf("World starting\n");
# endif
if (GC_query_task_threads) {
# ifndef GC_NO_THREADS_DISCOVERY
+ int i;
int j = GC_mach_threads_count;
kern_return_t kern_result;
thread_act_array_t act_list;
# endif /* !GC_NO_THREADS_DISCOVERY */
} else {
+ int i;
mach_port_t my_thread = mach_thread_self();
for (i = 0; i < THREAD_TABLE_SZ; i++) {
{
signed_word n = (signed_word)number_of_elements;
signed_word nsorted = 1;
- signed_word i;
while (nsorted < n) {
+ signed_word i;
+
while (nsorted < n &&
(word)base[nsorted-1].hs_start < (word)base[nsorted].hs_start)
++nsorted;
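/* The hunk above belongs to an insertion sort keyed on hs_start: the */
/* outer loop first extends the already-sorted prefix, then sinks the */
/* next out-of-order element. The same algorithm on a plain array (a  */
/* sketch; hs_start is replaced by an unsigned long):                 */
#include <stddef.h>
#include <stdio.h>

static void sort_starts(unsigned long *base, size_t n)
{
    size_t nsorted = 1;

    while (nsorted < n) {
        size_t i;

        while (nsorted < n && base[nsorted - 1] < base[nsorted])
            ++nsorted;                    /* extend the sorted prefix */
        if (nsorted == n) break;
        for (i = nsorted; i > 0 && base[i - 1] > base[i]; i--) {
            unsigned long tmp = base[i];  /* sink base[nsorted]       */

            base[i] = base[i - 1];
            base[i - 1] = tmp;
        }
        ++nsorted;
    }
}

int main(void)
{
    unsigned long a[] = { 30, 10, 20 };
    size_t i;

    sort_starts(a, 3);
    for (i = 0; i < 3; i++)
        printf("%lu\n", a[i]);            /* 10 20 30                 */
    return 0;
}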
GC_INNER void GC_register_dynamic_libraries(void)
{
- int status;
- ldr_process_t mypid;
-
- /* module */
- ldr_module_t moduleid = LDR_NULL_MODULE;
- ldr_module_info_t moduleinfo;
- size_t moduleinfosize = sizeof(moduleinfo);
- size_t modulereturnsize;
-
- /* region */
- ldr_region_t region;
- ldr_region_info_t regioninfo;
- size_t regioninfosize = sizeof(regioninfo);
- size_t regionreturnsize;
-
- /* Obtain id of this process */
- mypid = ldr_my_process();
+ ldr_module_t moduleid = LDR_NULL_MODULE;
+ ldr_process_t mypid = ldr_my_process(); /* obtain id of this process */
/* For each module */
while (TRUE) {
-
- /* Get the next (first) module */
- status = ldr_next_module(mypid, &moduleid);
+ ldr_module_info_t moduleinfo;
+ size_t modulereturnsize;
+ ldr_region_t region;
+ ldr_region_info_t regioninfo;
+ size_t regionreturnsize;
+ int status = ldr_next_module(mypid, &moduleid);
+ /* Get the next (first) module */
/* Any more modules? */
if (moduleid == LDR_NULL_MODULE)
/* Get the module information */
status = ldr_inq_module(mypid, moduleid, &moduleinfo,
- moduleinfosize, &modulereturnsize);
+ sizeof(moduleinfo), &modulereturnsize);
if (status != 0 )
ABORT("ldr_inq_module failed");
for (region = 0; region < moduleinfo.lmi_nregion; region++) {
/* Get the region information */
status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
- regioninfosize, &regionreturnsize);
+ sizeof(regioninfo), &regionreturnsize);
if (status != 0 )
ABORT("ldr_inq_region failed");
GC_INNER void GC_register_dynamic_libraries(void)
{
- int status;
int index = 1; /* Ordinal position in shared library search list */
- struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */
/* For each dynamic library loaded */
while (TRUE) {
-
- /* Get info about next shared library */
- status = shl_get(index, &shl_desc);
+ struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */
+ int status = shl_get(index, &shl_desc);
+ /* Get info about next shared library */
/* Check if this is the end of the list or if some error occurred */
if (status != 0) {
const struct GC_MACH_SECTION *sec;
const char *name;
GC_has_static_roots_func callback = GC_has_static_roots;
- char secnam[16];
- const char *fmt;
DCL_LOCK_STATE;
if (GC_no_dls) return;
/* Sections constructed on demand. */
for (j = 0; j < sizeof(GC_dyld_add_sect_fmts) / sizeof(char *); j++) {
- fmt = GC_dyld_add_sect_fmts[j];
+ const char *fmt = GC_dyld_add_sect_fmts[j];
+
/* Add our manufactured aligned BSS sections. */
for (i = 0; i <= L2_MAX_OFILE_ALIGNMENT; i++) {
+ char secnam[16];
+
(void)snprintf(secnam, sizeof(secnam), fmt, (unsigned)i);
secnam[sizeof(secnam) - 1] = '\0';
sec = GC_GETSECTBYNAME(hdr, SEG_DATA, secnam);
unsigned long start, end;
unsigned i, j;
const struct GC_MACH_SECTION *sec;
- char secnam[16];
- const char *fmt;
for (i = 0; i < sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]); i++) {
sec = GC_GETSECTBYNAME(hdr, GC_dyld_sections[i].seg,
/* Remove our on-demand sections. */
for (j = 0; j < sizeof(GC_dyld_add_sect_fmts) / sizeof(char *); j++) {
- fmt = GC_dyld_add_sect_fmts[j];
+ const char *fmt = GC_dyld_add_sect_fmts[j];
+
for (i = 0; i <= L2_MAX_OFILE_ALIGNMENT; i++) {
+ char secnam[16];
+
(void)snprintf(secnam, sizeof(secnam), fmt, (unsigned)i);
secnam[sizeof(secnam) - 1] = '\0';
sec = GC_GETSECTBYNAME(hdr, SEG_DATA, secnam);
void GC_amiga_free_all_mem(void){
struct GC_Amiga_AllocedMemoryHeader *gc_am=(struct GC_Amiga_AllocedMemoryHeader *)(~(int)(GC_AMIGAMEM));
- struct GC_Amiga_AllocedMemoryHeader *temp;
#ifdef GC_AMIGA_PRINTSTATS
printf("\n\n"
#endif
while(gc_am!=NULL){
- temp=gc_am->next;
+ struct GC_Amiga_AllocedMemoryHeader *temp = gc_am->next;
FreeMem(gc_am,gc_am->size);
gc_am=(struct GC_Amiga_AllocedMemoryHeader *)(~(int)(temp));
}
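/* The ~(int)(...) casts above hide pointers from the collector by    */
/* storing their bitwise complement. A sketch of the idiom using a    */
/* properly sized integer type (uintptr_t rather than int):           */
#include <stdint.h>
#include <stdio.h>

static uintptr_t hidden;                 /* complement, not a pointer */

static void hide(void *p) { hidden = ~(uintptr_t)p; }
static void *reveal(void) { return (void *)~hidden; }

int main(void)
{
    int x;

    hide(&x);
    printf("%s\n", reveal() == &x ? "round-trips" : "broken");
    return 0;
}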
void *GC_amiga_allocwrapper_any(size_t size,void *(*AllocFunction)(size_t size2)){
- void *ret,*ret2;
+ void *ret;
GC_amiga_dontalloc=TRUE; // Pretty tough thing to do, but it's indeed necessary.
latestsize=size;
}
#ifdef GC_AMIGA_RETRY
else{
+ void *ret2;
/* We got chip-mem. Better try again and again and again etc., we might get fast-mem sooner or later... */
/* Using gctest to check the effectiveness of doing this seldom gives a very good result. */
/* However, real programs don't normally allocate and deallocate rapidly. */
char*const begin = buffer;
char*const end = buffer + size;
size_t line_number = 0;
- char str[128];
if (size) {
*buffer = 0;
size = (GC_ULONG_PTR)end < (GC_ULONG_PTR)buffer ? 0 : end - buffer;
if (line_number) {
+ char str[128];
+
wsprintf(str, "(%d) : ", (int)line_number);
if (size) {
strncpy(buffer, str, size)[size - 1] = 0;
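/* strncpy() does not NUL-terminate on truncation, hence the forced   */
/* zero byte above. The idiom in isolation (plain strncpy stands in   */
/* for the wsprintf-based original):                                  */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char buffer[8];
    const char *str = "(123) : some long suffix";

    strncpy(buffer, str, sizeof(buffer))[sizeof(buffer) - 1] = 0;
    printf("%s\n", buffer);   /* "(123) :" -- truncated, terminated   */
    return 0;
}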
#endif
{
ptr_t op;
- word lg;
DCL_LOCK_STATE;
GC_DBG_COLLECT_AT_MALLOC(lb);
if(SMALL_OBJ(lb)) {
- lg = GC_size_map[lb];
+ word lg = GC_size_map[lb];
+
LOCK();
op = GC_gcjobjfreelist[lg];
if(EXPECT(0 == op, FALSE)) {
void * ptr_to_struct_containing_descr)
{
ptr_t op;
- word lg;
DCL_LOCK_STATE;
GC_DBG_COLLECT_AT_MALLOC(lb);
if(SMALL_OBJ(lb)) {
- lg = GC_size_map[lb];
+ word lg = GC_size_map[lb];
+
LOCK();
op = GC_gcjobjfreelist[lg];
if (EXPECT(0 == op, FALSE)) {
}
static void deallocate(void *p, size_t n)
{
- size_t nwords = GC_round_up(n);
- void ** flh;
-
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
- flh = GC_objfreelist_ptr + nwords;
+ size_t nwords = GC_round_up(n);
+ void ** flh = GC_objfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
memset(reinterpret_cast<char *>(p) + GC_bytes_per_word, 0,
GC_bytes_per_word * (nwords - 1));
}
static void ptr_free_deallocate(void *p, size_t n)
{
- size_t nwords = GC_round_up(n);
- void ** flh;
-
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
- flh = GC_aobjfreelist_ptr + nwords;
+ size_t nwords = GC_round_up(n);
+ void ** flh = GC_aobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_bytes_recently_freed += nwords * GC_bytes_per_word;
}
static void deallocate(void *p, size_t n)
{
- size_t nwords = GC_round_up_uncollectable(n);
- void ** flh;
-
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
- flh = GC_uobjfreelist_ptr + nwords;
+ size_t nwords = GC_round_up_uncollectable(n);
+ void ** flh = GC_uobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_uncollectable_bytes_recently_freed +=
}
static void ptr_free_deallocate(void *p, size_t n)
{
- size_t nwords = GC_round_up_uncollectable(n);
- void ** flh;
-
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
- flh = GC_auobjfreelist_ptr + nwords;
+ size_t nwords = GC_round_up_uncollectable(n);
+ void ** flh = GC_auobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_uncollectable_bytes_recently_freed +=
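/* All four deallocate() bodies above share one pattern: a small      */
/* block is pushed onto a size-indexed free list whose head lives in  */
/* an array, with the link stored in the object's first word. A       */
/* sketch with invented names (not the real bdwgc internals):         */
#include <stddef.h>
#include <stdlib.h>

#define MAX_FAST_WORDS 64
#define obj_link(p) (*(void **)(p))

static void *freelists[MAX_FAST_WORDS + 1];

static void fast_deallocate(void *p, size_t nwords)
{
    if (nwords > MAX_FAST_WORDS) {
        free(p);                          /* too big: general free    */
    } else {
        void **flh = &freelists[nwords];  /* head for this size class */

        obj_link(p) = *flh;               /* chain old head behind p  */
        *flh = p;                         /* p becomes the new head   */
    }
}

int main(void)
{
    void *p = malloc(8 * sizeof(void *));

    if (p != NULL)
        fast_deallocate(p, 8);
    return 0;
}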
hdr *hhdr;
size_t sz; /* bytes */
size_t ngranules; /* sz in granules */
- void ** flh;
int knd;
struct obj_kind * ok;
ngranules = BYTES_TO_GRANULES(sz);
ok = &GC_obj_kinds[knd];
if (ngranules <= MAXOBJGRANULES) {
+ void ** flh;
+
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (ok -> ok_init) {
/* re-registering dynamic libraries. */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
- struct roots * old;
-
GC_ASSERT((word)b <= (word)e);
b = (ptr_t)(((word)b + (sizeof(word) - 1)) & ~(sizeof(word) - 1));
/* round b up to word boundary */
/* takes to scan the roots. */
{
register int i;
- old = 0; /* initialized to prevent warning. */
+ struct roots * old = NULL; /* initialized to prevent warning. */
+
for (i = 0; i < n_root_sets; i++) {
old = GC_static_roots + i;
if ((word)b <= (word)old->r_end
}
}
# else
- old = (struct roots *)GC_roots_present(b);
- if (old != 0) {
- if ((word)e <= (word)old->r_end) /* already there */ return;
- /* else extend */
- GC_root_size += e - old -> r_end;
- old -> r_end = e;
- return;
+ {
+ struct roots * old = (struct roots *)GC_roots_present(b);
+
+ if (old != 0) {
+ if ((word)e <= (word)old->r_end)
+ return; /* already there */
+ /* else extend */
+ GC_root_size += e - old -> r_end;
+ old -> r_end = e;
+ return;
+ }
}
# endif
if (n_root_sets == MAX_ROOT_SETS) {
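/* The #else branch above grows an existing root segment instead of   */
/* registering a new one. The interval logic in isolation (a sketch;  */
/* addresses are plain integers here):                                */
#include <stdio.h>

struct seg { unsigned long start, end; };

/* Returns 1 if [b, e) was absorbed into segment s. */
static int extend_if_overlapping(struct seg *s, unsigned long b,
                                 unsigned long e)
{
    if (b < s->start || b > s->end)
        return 0;                  /* b does not touch s              */
    if (e > s->end)
        s->end = e;                /* extend; otherwise already there */
    return 1;
}

int main(void)
{
    struct seg s = { 100, 200 };

    extend_if_overlapping(&s, 150, 250);
    printf("[%lu, %lu)\n", s.start, s.end);   /* [100, 250)           */
    return 0;
}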
/* Used to occasionally clear a bigger */
/* chunk. */
# endif
- ptr_t limit;
# define SLOP 400
/* Extra bytes we clear every time. This clears our own */
/* larger ... */
# ifdef THREADS
if (++random_no % 13 == 0) {
- limit = sp;
+ ptr_t limit = sp;
+
MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
limit = (ptr_t)((word)limit & ~0xf);
/* Make it sufficiently aligned for assembly */
/* implementations of GC_clear_stack_inner. */
return GC_clear_stack_inner(arg, limit);
- } else {
- BZERO((void *)dummy, SMALL_CLEAR_SIZE*sizeof(word));
}
+ BZERO((void *)dummy, SMALL_CLEAR_SIZE*sizeof(word));
# else
if (GC_gc_no > GC_stack_last_cleared) {
/* Start things over, so we clear the entire stack again */
GC_high_water = sp;
}
MAKE_HOTTER(GC_high_water, GC_SLOP);
- limit = GC_min_sp;
- MAKE_HOTTER(limit, SLOP);
- if ((word)sp COOLER_THAN (word)limit) {
- limit = (ptr_t)((word)limit & ~0xf);
- /* Make it sufficiently aligned for assembly */
- /* implementations of GC_clear_stack_inner. */
- GC_min_sp = sp;
- return GC_clear_stack_inner(arg, limit);
- } else if (GC_bytes_allocd - GC_bytes_allocd_at_reset
- > CLEAR_THRESHOLD) {
+ {
+ ptr_t limit = GC_min_sp;
+
+ MAKE_HOTTER(limit, SLOP);
+ if ((word)sp COOLER_THAN (word)limit) {
+ limit = (ptr_t)((word)limit & ~0xf);
+ /* Make it sufficiently aligned for assembly */
+ /* implementations of GC_clear_stack_inner. */
+ GC_min_sp = sp;
+ return GC_clear_stack_inner(arg, limit);
+ }
+ }
+ if (GC_bytes_allocd - GC_bytes_allocd_at_reset > CLEAR_THRESHOLD) {
/* Restart clearing process, but limit how much clearing we do. */
GC_min_sp = sp;
MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
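/* The (word)limit & ~0xf cast above rounds an address down to a      */
/* 16-byte boundary for the assembly stack-clearing routine. The      */
/* idiom in isolation:                                                */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    char buf[64];
    char *p = buf + 13;                       /* arbitrary address    */
    char *aligned = (char *)((uintptr_t)p & ~(uintptr_t)0xf);

    printf("%p rounds down to %p\n", (void *)p, (void *)aligned);
    return 0;
}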
GC_ULONG_PTR count;
do {
- PVOID * pages, * pages_end;
+ PVOID * pages = gww_buf;
DWORD page_size;
- pages = gww_buf;
count = GC_GWW_BUF_LEN;
/* GetWriteWatch is documented as returning non-zero when it */
/* fails, but the documentation doesn't explicitly say why it */
}
count = 1; /* Done with this section. */
} else /* succeeded */ {
- pages_end = pages + count;
+ PVOID * pages_end = pages + count;
+
while (pages != pages_end) {
struct hblk * h = (struct hblk *) *pages++;
struct hblk * h_end = (struct hblk *) ((char *) h + page_size);
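/* Standalone sketch of the API driven by the loop above: a region    */
/* allocated with MEM_WRITE_WATCH is polled for pages dirtied since   */
/* the last reset (Win32 only; error handling kept minimal):          */
#include <windows.h>
#include <stdio.h>

int main(void)
{
    SIZE_T region_size = 16 * 4096;
    char *base = VirtualAlloc(NULL, region_size,
                              MEM_RESERVE | MEM_COMMIT | MEM_WRITE_WATCH,
                              PAGE_READWRITE);
    PVOID pages[64];
    ULONG_PTR count = 64;          /* in: capacity; out: pages found  */
    DWORD page_size;

    if (base == NULL) return 1;
    base[0] = 1;                   /* dirty two of the pages          */
    base[5 * 4096] = 1;
    /* Returns zero on success; WRITE_WATCH_FLAG_RESET also clears    */
    /* the watch state, matching the GC's read-and-reset cycle.       */
    if (GetWriteWatch(WRITE_WATCH_FLAG_RESET, base, region_size,
                      pages, &count, &page_size) == 0) {
        ULONG_PTR i;

        for (i = 0; i < count; i++)
            printf("dirty page at %p\n", pages[i]);
    }
    VirtualFree(base, 0, MEM_RELEASE);
    return 0;
}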
STATIC void GC_protect_heap(void)
{
- ptr_t start;
- size_t len;
- struct hblk * current;
- struct hblk * current_start; /* Start of block to be protected. */
- struct hblk * limit;
unsigned i;
GC_bool protect_all =
- (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
+ (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
+
for (i = 0; i < GC_n_heap_sects; i++) {
- start = GC_heap_sects[i].hs_start;
- len = GC_heap_sects[i].hs_bytes;
+ ptr_t start = GC_heap_sects[i].hs_start;
+ size_t len = GC_heap_sects[i].hs_bytes;
+
if (protect_all) {
PROTECT(start, len);
} else {
+ struct hblk * current;
+ struct hblk * current_start; /* Start of block to be protected. */
+ struct hblk * limit;
+
GC_ASSERT(PAGE_ALIGNED(len));
GC_ASSERT(PAGE_ALIGNED(start));
current_start = current = (struct hblk *)start;
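/* GC_protect_heap relies on a PROTECT() wrapper; on POSIX systems    */
/* the underlying call is mprotect(). A sketch of write-protecting a  */
/* page and re-enabling writes (assumes POSIX with MAP_ANONYMOUS; no  */
/* SIGSEGV handler is installed, so the protected page is not         */
/* actually written):                                                 */
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, (size_t)pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED) return 1;
    p[0] = 1;                                  /* writable            */
    mprotect(p, (size_t)pagesz, PROT_READ);    /* writes now fault    */
    mprotect(p, (size_t)pagesz, PROT_READ | PROT_WRITE);  /* undo     */
    munmap(p, (size_t)pagesz);
    return 0;
}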
GC_INNER void GC_read_dirty(void)
{
int nmaps;
- unsigned long npages;
- unsigned pagesize;
- ptr_t vaddr, limit;
- struct prasmap * map;
- char * bufp;
+ char * bufp = GC_proc_buf;
int i;
BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
- bufp = GC_proc_buf;
if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
/* Retry with larger buffer. */
word new_size = 2 * GC_proc_buf_size;
# endif
bufp += sizeof(struct prpageheader);
for (i = 0; i < nmaps; i++) {
- map = (struct prasmap *)bufp;
- vaddr = (ptr_t)(map -> pr_vaddr);
- npages = map -> pr_npage;
- pagesize = map -> pr_pagesize;
+ struct prasmap * map = (struct prasmap *)bufp;
+ ptr_t vaddr = (ptr_t)(map -> pr_vaddr);
+ unsigned long npages = map -> pr_npage;
+ unsigned pagesize = map -> pr_pagesize;
+ ptr_t limit;
+
# ifdef DEBUG_DIRTY_BITS
GC_log_printf(
"pr_vaddr= %p, npage= %lu, mflags= 0x%x, pagesize= 0x%x\n",
{
kern_return_t r;
char *addr;
- struct hblk *h;
- size_t i;
thread_state_flavor_t flavor = DARWIN_EXC_STATE;
mach_msg_type_number_t exc_state_count = DARWIN_EXC_STATE_COUNT;
DARWIN_EXC_STATE_T exc_state;
# endif
if (GC_mprotect_state == GC_MP_NORMAL) { /* common case */
- h = (struct hblk*)((word)addr & ~(GC_page_size-1));
+ struct hblk * h = (struct hblk*)((word)addr & ~(GC_page_size-1));
+ size_t i;
+
UNPROTECT(h, GC_page_size);
for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
register int index = PHT_HASH(h+i);
register int n_live_threads = 0;
register int result;
# endif
-# ifdef GC_NETBSD_THREADS_WORKAROUND
- int code;
-# endif
# ifdef DEBUG_THREADS
GC_log_printf("World starting\n");
}
# ifdef GC_NETBSD_THREADS_WORKAROUND
for (i = 0; i < n_live_threads; i++) {
+ int code;
+
while (0 != (code = sem_wait(&GC_restart_ack_sem))) {
if (errno != EINTR) {
ABORT_ARG1("sem_wait() for restart handler failed",
GC_API int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set,
sigset_t *oset)
{
- sigset_t fudged_set;
- int sig_suspend;
-
INIT_REAL_SYMS();
if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
- fudged_set = *set;
- sig_suspend = GC_get_suspend_signal();
+ sigset_t fudged_set = *set;
+ int sig_suspend = GC_get_suspend_signal();
+
GC_ASSERT(sig_suspend >= 0);
if (sigdelset(&fudged_set, sig_suspend) != 0)
ABORT("sigdelset failed");
struct hblk ** rlh;
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
- CLOCK_TYPE done_time;
if (GC_print_stats == VERBOSE)
GET_TIME(start_time);
}
# ifndef SMALL_CONFIG
if (GC_print_stats == VERBOSE) {
+ CLOCK_TYPE done_time;
+
GET_TIME(done_time);
GC_verbose_log_printf("Disposing of reclaim lists took %lu msecs\n",
MS_TIME_DIFF(done_time,start_time));
# else
char *y = (char *)(GC_word)fail_proc1;
# endif
- CLOCK_TYPE typed_time;
# endif
CLOCK_TYPE start_time;
CLOCK_TYPE reverse_time;
- CLOCK_TYPE tree_time;
unsigned long time_diff;
# ifndef NO_TEST_HANDLE_FORK
pid_t pid;
# ifndef DBG_HDRS_ALL
typed_test();
if (print_stats) {
+ CLOCK_TYPE typed_time;
+
GET_TIME(typed_time);
time_diff = MS_TIME_DIFF(typed_time, start_time);
GC_log_printf("-------------Finished typed_test at time %u (%p)\n",
# endif /* DBG_HDRS_ALL */
tree_test();
if (print_stats) {
+ CLOCK_TYPE tree_time;
+
GET_TIME(tree_time);
time_diff = MS_TIME_DIFF(tree_time, start_time);
GC_log_printf("-------------Finished tree_test at time %u (%p)\n",
#if defined(MACOS)
void SetMinimumStack(long minSize)
{
- long newApplLimit;
-
if (minSize > LMGetDefltStack())
{
- newApplLimit = (long) GetApplLimit()
- - (minSize - LMGetDefltStack());
+ long newApplLimit = (long) GetApplLimit()
+ - (minSize - LMGetDefltStack());
SetApplLimit((Ptr) newApplLimit);
MaxApplZone();
}
static void return_single_freelist(void *fl, void **gfl)
{
- void *q, **qptr;
-
if (*gfl == 0) {
*gfl = fl;
} else {
+ void *q, **qptr;
+
GC_ASSERT(GC_size(fl) == GC_size(*gfl));
/* Concatenate: */
qptr = &(obj_link(fl));
{
signed_word last_set_bit = len - 1;
GC_descr result;
- signed_word i;
# define HIGH_BIT (((word)1) << (WORDSZ - 1))
DCL_LOCK_STATE;
if (last_set_bit < 0) return(0 /* no pointers */);
# if ALIGNMENT == CPP_WORDSZ/8
{
+ signed_word i;
+
for (i = 0; i < last_set_bit; i++) {
if (!GC_get_bit(bm, i)) {
break;
}
# endif
if ((word)last_set_bit < BITMAP_BITS) {
+ signed_word i;
+
/* Hopefully the common case. */
/* Build bitmap descriptor (with bits reversed) */
result = HIGH_BIT;
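/* The branch above builds a bitmap descriptor with the bit order     */
/* reversed: bit i of the input lands at position WORDSZ-1-i, so the  */
/* first word's mark is the descriptor's high bit. A sketch:          */
#include <stdio.h>

#define WORDSZ (8 * sizeof(unsigned long))
#define HIGH_BIT (1UL << (WORDSZ - 1))

static unsigned long make_bitmap_descr(const unsigned char *bm,
                                       unsigned len)
{
    unsigned long result = 0;
    unsigned i;

    for (i = 0; i < len && i < WORDSZ; i++)
        if (bm[i])
            result |= HIGH_BIT >> i;   /* reversed bit order          */
    return result;
}

int main(void)
{
    unsigned char bm[3] = { 1, 0, 1 };  /* pointers in words 0 and 2  */

    printf("%#lx\n", make_bitmap_descr(bm, 3));
    return 0;
}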
# ifdef GC_ASSERTIONS
DWORD thread_id = GetCurrentThreadId();
# endif
- int i;
GC_ASSERT(I_HOLD_LOCK());
if (GC_win32_dll_threads) {
LONG my_max = GC_get_max_thread_index();
+ int i;
+
for (i = 0; i <= my_max; i++) {
GC_thread t = (GC_thread)(dll_thread_table + i);
if (t -> suspended) {
GC_INNER void GC_start_mark_threads_inner(void)
{
int i;
-# ifdef MSWINCE
- HANDLE handle;
- DWORD thread_id;
-# else
- GC_uintptr_t handle;
- unsigned thread_id;
-# endif
GC_ASSERT(I_DONT_HOLD_LOCK());
if (available_markers_m1 <= 0) return;
}
for (i = 0; i < GC_markers_m1; ++i) {
- marker_last_stack_min[i] = ADDR_LIMIT;
# ifdef MSWINCE
+ HANDLE handle;
+ DWORD thread_id;
+
+ marker_last_stack_min[i] = ADDR_LIMIT;
/* There is no _beginthreadex() in WinCE. */
handle = CreateThread(NULL /* lpsa */,
MARK_THREAD_STACK_SIZE /* ignored */,
CloseHandle(handle);
}
# else
+ GC_uintptr_t handle;
+ unsigned thread_id;
+
+ marker_last_stack_min[i] = ADDR_LIMIT;
handle = _beginthreadex(NULL /* security_attr */,
MARK_THREAD_STACK_SIZE, GC_mark_thread,
(void *)(word)i, 0 /* flags */, &thread_id);
LPDWORD lpThreadId)
{
HANDLE thread_h;
- thread_args *args;
if (!EXPECT(parallel_initialized, TRUE))
GC_init_parallel();
return CreateThread(lpThreadAttributes, dwStackSize, lpStartAddress,
lpParameter, dwCreationFlags, lpThreadId);
} else {
- args = GC_malloc_uncollectable(sizeof(thread_args));
+ thread_args *args = GC_malloc_uncollectable(sizeof(thread_args));
/* Handed off to and deallocated by child thread. */
+
if (0 == args) {
SetLastError(ERROR_NOT_ENOUGH_MEMORY);
return NULL;
void *arglist, unsigned initflag,
unsigned *thrdaddr)
{
- GC_uintptr_t thread_h;
- thread_args *args;
-
if (!EXPECT(parallel_initialized, TRUE))
GC_init_parallel();
/* make sure GC is initialized (i.e. main thread is */
return _beginthreadex(security, stack_size, start_address,
arglist, initflag, thrdaddr);
} else {
- args = GC_malloc_uncollectable(sizeof(thread_args));
+ GC_uintptr_t thread_h;
+ thread_args *args = GC_malloc_uncollectable(sizeof(thread_args));
/* Handed off to and deallocated by child thread. */
+
if (0 == args) {
/* MSDN docs say _beginthreadex() returns 0 on error and sets */
/* errno to either EAGAIN (too many threads) or EINVAL (the */
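/* Both wrappers above allocate a thread_args block that is handed    */
/* off to, and freed by, the child thread. The handoff pattern in     */
/* isolation, with plain malloc in place of GC_malloc_uncollectable   */
/* (Windows; error handling kept minimal):                            */
#include <windows.h>
#include <process.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct thread_args { int value; };

static unsigned __stdcall child(void *arg)
{
    struct thread_args *args = arg;

    printf("child got %d\n", args->value);
    free(args);                    /* child owns and frees the block  */
    return 0;
}

int main(void)
{
    struct thread_args *args = malloc(sizeof(*args));
    uintptr_t h;
    unsigned tid;

    if (args == NULL) return 1;
    args->value = 42;
    h = _beginthreadex(NULL, 0, child, args, 0, &tid);
    if (h == 0) {                  /* creation failed: we still own args */
        free(args);
        return 1;
    }
    WaitForSingleObject((HANDLE)h, INFINITE);
    CloseHandle((HANDLE)h);
    return 0;
}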