/* Should return the same value as GC_large_free_bytes. */
GC_INNER word GC_compute_large_free_bytes(void)
{
- struct hblk * h;
- hdr * hhdr;
word total_free = 0;
unsigned i;
for (i = 0; i <= N_HBLK_FLS; ++i) {
+ struct hblk * h;
+ hdr * hhdr;
+
for (h = GC_hblkfreelist[i]; h != 0; h = hhdr->hb_next) {
hhdr = HDR(h);
total_free += hhdr->hb_sz;
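/* The hunks in this patch repeat one pattern: locals move from
 * function scope into the innermost block that uses them. A minimal
 * stand-alone sketch of the before/after shape (the list type and
 * field names are illustrative, not the collector's own):
 */
#include <stddef.h>

struct node { size_t size; struct node *next; };

static size_t total_size(struct node *lists[], size_t n_lists)
{
    size_t total = 0;
    size_t i;

    for (i = 0; i < n_lists; ++i) {
        struct node *p;   /* narrowed scope; still C89-legal since it */
                          /* sits at the start of the loop body block */

        for (p = lists[i]; p != NULL; p = p->next)
            total += p->size;
    }
    return total;
}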
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist(void)
{
- struct hblk * h;
- hdr * hhdr;
unsigned i;
word total;
for (i = 0; i <= N_HBLK_FLS; ++i) {
- h = GC_hblkfreelist[i];
+ struct hblk * h = GC_hblkfreelist[i];
+
if (0 != h) GC_printf("Free list %u (total size %lu):\n",
i, (unsigned long)GC_free_bytes[i]);
while (h != 0) {
- hhdr = HDR(h);
+ hdr * hhdr = HDR(h);
+
GC_printf("\t%p size %lu %s black listed\n",
(void *)h, (unsigned long) hhdr -> hb_sz,
GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
/* appears, or -1 if it appears nowhere. */
static int free_list_index_of(hdr *wanted)
{
- struct hblk * h;
- hdr * hhdr;
int i;
for (i = 0; i <= N_HBLK_FLS; ++i) {
- h = GC_hblkfreelist[i];
- while (h != 0) {
+ struct hblk * h;
+ hdr * hhdr;
+
+ for (h = GC_hblkfreelist[i]; h != 0; h = hhdr -> hb_next) {
hhdr = HDR(h);
if (hhdr == wanted) return i;
- h = hhdr -> hb_next;
}
}
return -1;
GC_API void GC_CALL GC_dump_regions(void)
{
unsigned i;
- ptr_t start, end;
- ptr_t p;
- size_t bytes;
- hdr *hhdr;
+
for (i = 0; i < GC_n_heap_sects; ++i) {
- start = GC_heap_sects[i].hs_start;
- bytes = GC_heap_sects[i].hs_bytes;
- end = start + bytes;
+ ptr_t start = GC_heap_sects[i].hs_start;
+ size_t bytes = GC_heap_sects[i].hs_bytes;
+ ptr_t end = start + bytes;
+ ptr_t p;
+
/* Merge in contiguous sections. */
while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
++i;
}
GC_printf("***Section from %p to %p\n", start, end);
for (p = start; (word)p < (word)end; ) {
- hhdr = HDR(p);
+ hdr *hhdr = HDR(p);
+
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
GC_printf("\t%p Missing header!!(%p)\n", p, (void *)hhdr);
p += HBLKSIZE;
int kind, unsigned flags)
{
word descr;
-# ifdef MARK_BIT_PER_GRANULE
- size_t granules;
+# ifdef MARK_BIT_PER_GRANULE
if (byte_sz > MAXOBJBYTES)
flags |= LARGE_BLOCK;
# endif
hhdr -> hb_inv_sz = inv_sz;
}
# else /* MARK_BIT_PER_GRANULE */
- granules = BYTES_TO_GRANULES(byte_sz);
+ {
+ size_t granules = BYTES_TO_GRANULES(byte_sz);
+
if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
/* Make it look like a valid block. */
hhdr -> hb_sz = HBLKSIZE;
}
hhdr -> hb_map = GC_obj_map[(hhdr -> hb_flags & LARGE_BLOCK) != 0 ?
0 : granules];
+ }
# endif /* MARK_BIT_PER_GRANULE */
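/* The braces added above exist only to narrow the scope of "granules":
 * C89 forbids mixing declarations and statements, so a fresh compound
 * block is opened mid-function. The trick in isolation (a sketch):
 */
static void narrow_scope_example(int byte_sz)
{
    /* ... earlier statements ... */
    {
        int granules = byte_sz / 16;  /* illustrative conversion only */

        (void)granules;               /* visible in this block alone */
    }
    /* "granules" is out of scope here. */
}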
/* Clear mark bits */
{
int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
struct hblk *second = GC_hblkfreelist[index];
- hdr * second_hdr;
# if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP)
struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
hdr * nexthdr = HDR(next);
struct hblk *prev = GC_free_block_ending_at(h);
hdr * prevhdr = HDR(prev);
+
GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
|| (GC_heapsize & SIGNB) != 0);
/* In the last case, blocks may be too large to merge. */
GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
|| (GC_heapsize & SIGNB) != 0);
# endif
-
GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
GC_hblkfreelist[index] = h;
GC_free_bytes[index] += hhdr -> hb_sz;
hhdr -> hb_next = second;
hhdr -> hb_prev = 0;
if (0 != second) {
+ hdr * second_hdr;
+
GET_HDR(second, second_hdr);
second_hdr -> hb_prev = h;
}
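/* The code above is a plain head-push into a doubly linked free list.
 * The same shape with illustrative names (hb_next/hb_prev become
 * next/prev; GET_HDR is elided, so the sketch links blocks directly):
 */
#include <stddef.h>

struct blk { struct blk *next, *prev; };

static void push_head(struct blk **head, struct blk *h)
{
    struct blk *second = *head;   /* old head, possibly NULL */

    *head = h;
    h->next = second;
    h->prev = NULL;
    if (second != NULL)
        second->prev = h;         /* fix the old head's back link */
}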
/* way blocks are ever unmapped. */
GC_INNER void GC_unmap_old(void)
{
- struct hblk * h;
- hdr * hhdr;
int i;
if (GC_unmap_threshold == 0)
return; /* unmapping disabled */
for (i = 0; i <= N_HBLK_FLS; ++i) {
+ struct hblk * h;
+ hdr * hhdr;
+
for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
hhdr = HDR(h);
if (!IS_MAPPED(hhdr)) continue;
/* fully mapped or fully unmapped. */
GC_INNER void GC_merge_unmapped(void)
{
- struct hblk * h, *next;
- hdr * hhdr, *nexthdr;
- word size, nextsize;
int i;
for (i = 0; i <= N_HBLK_FLS; ++i) {
- h = GC_hblkfreelist[i];
+ struct hblk *h = GC_hblkfreelist[i];
+
while (h != 0) {
+ struct hblk *next;
+ hdr *hhdr, *nexthdr;
+ word size, nextsize;
+
GET_HDR(h, hhdr);
size = hhdr->hb_sz;
next = (struct hblk *)((word)h + size);
hdr * hhdr; /* Header corr. to hbp */
struct hblk *thishbp;
hdr * thishdr; /* Header corr. to thishbp */
- signed_word size_needed; /* number of bytes in requested objects */
- signed_word size_avail; /* bytes available in this block */
-
- size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS_CHECKED(sz);
+ signed_word size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS_CHECKED(sz);
+ /* number of bytes in requested objects */
/* search for a big enough block in free list */
for (hbp = GC_hblkfreelist[n];; hbp = hhdr -> hb_next) {
+ signed_word size_avail; /* bytes available in this block */
+
if (NULL == hbp) return NULL;
GET_HDR(hbp, hhdr); /* set hhdr value */
size_avail = hhdr->hb_sz;
if (size_avail < size_needed) continue;
if (size_avail != size_needed) {
- signed_word next_size;
-
if (!may_split) continue;
/* If the next heap block is obviously better, go on. */
/* This prevents us from disassembling a single large */
/* block to get tiny blocks. */
thishbp = hhdr -> hb_next;
if (thishbp != 0) {
+ signed_word next_size;
+
GET_HDR(thishbp, thishdr);
next_size = (signed_word)(thishdr -> hb_sz);
if (next_size < size_avail
*/
STATIC void GC_maybe_gc(void)
{
- static int n_partial_gcs = 0;
-
GC_ASSERT(I_HOLD_LOCK());
ASSERT_CANCEL_DISABLED();
if (GC_should_collect()) {
+ static int n_partial_gcs = 0;
+
if (!GC_incremental) {
/* FIXME: If possible, GC_default_stop_func should be used here */
GC_try_to_collect_inner(GC_never_stop_func);
{
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
- CLOCK_TYPE current_time;
# endif
+
ASSERT_CANCEL_DISABLED();
GC_ASSERT(I_HOLD_LOCK());
if (GC_dont_gc || (*stop_func)()) return FALSE;
GC_finish_collection();
# ifndef SMALL_CONFIG
if (GC_print_stats) {
+ CLOCK_TYPE current_time;
+
GET_TIME(current_time);
GC_log_printf("Complete collection took %lu msecs\n",
MS_TIME_DIFF(current_time,start_time));
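/* The timing locals follow one shape throughout: start_time is taken
 * up front, while current_time is declared and read only inside the
 * stats branch. Equivalent sketch with standard C clocks (GET_TIME and
 * MS_TIME_DIFF are GC macros; clock() merely stands in for them):
 */
#include <stdio.h>
#include <time.h>

static void timed_phase(int print_stats)
{
    clock_t start_time = clock();

    /* ... perform the collection work ... */
    if (print_stats) {
        clock_t current_time = clock(); /* needed only in this branch */

        printf("phase took %ld ticks\n",
               (long)(current_time - start_time));
    }
}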
GC_INNER void GC_collect_a_little_inner(int n)
{
- int i;
IF_CANCEL(int cancel_state;)
if (GC_dont_gc) return;
DISABLE_CANCEL(cancel_state);
if (GC_incremental && GC_collection_in_progress()) {
+ int i;
+
for (i = GC_deficit; i < GC_RATE*n; i++) {
if (GC_mark_some((ptr_t)0)) {
/* Need to finish a collection */
unsigned i;
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
- CLOCK_TYPE current_time;
# endif
# if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
if (GC_PRINT_STATS_FLAG) {
unsigned long time_diff;
unsigned total_time, divisor;
+ CLOCK_TYPE current_time;
+
GET_TIME(current_time);
time_diff = MS_TIME_DIFF(current_time,start_time);
/* Set all mark bits for the free list whose first entry is q */
GC_INNER void GC_set_fl_marks(ptr_t q)
{
- struct hblk *h, *last_h;
- hdr *hhdr;
- IF_PER_OBJ(size_t sz;)
- unsigned bit_no;
-
- if (q != NULL) {
- h = HBLKPTR(q);
- last_h = h;
- hhdr = HDR(h);
- IF_PER_OBJ(sz = hhdr->hb_sz;)
-
- for (;;) {
- bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
+ if (q != NULL) {
+ struct hblk *h = HBLKPTR(q);
+ struct hblk *last_h = h;
+ hdr *hhdr = HDR(h);
+ IF_PER_OBJ(size_t sz = hhdr->hb_sz;)
+
+ for (;;) {
+ unsigned bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
+
if (!mark_bit_from_hdr(hhdr, bit_no)) {
set_mark_bit_from_hdr(hhdr, bit_no);
++hhdr -> hb_n_marks;
hhdr = HDR(h);
IF_PER_OBJ(sz = hhdr->hb_sz;)
}
- }
- }
+ }
+ }
}
#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
/* Decrement GC_bytes_found by number of bytes on free list. */
STATIC void GC_clear_fl_marks(ptr_t q)
{
- struct hblk *h, *last_h;
- hdr *hhdr;
- size_t sz;
- unsigned bit_no;
-
- if (q != NULL) {
- h = HBLKPTR(q);
- last_h = h;
- hhdr = HDR(h);
- sz = hhdr->hb_sz; /* Normally set only once. */
-
- for (;;) {
- bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
+ if (q != NULL) {
+ struct hblk *h = HBLKPTR(q);
+ struct hblk *last_h = h;
+ hdr *hhdr = HDR(h);
+ size_t sz = hhdr->hb_sz; /* Normally set only once. */
+
+ for (;;) {
+ unsigned bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
+
if (mark_bit_from_hdr(hhdr, bit_no)) {
size_t n_marks = hhdr -> hb_n_marks - 1;
clear_mark_bit_from_hdr(hhdr, bit_no);
hhdr = HDR(h);
sz = hhdr->hb_sz;
}
- }
- }
+ }
+ }
}
#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
CLOCK_TYPE finalize_time = 0;
- CLOCK_TYPE done_time;
# endif
# if defined(GC_ASSERTIONS) && defined(THREADS) \
GC_on_collection_event(GC_EVENT_RECLAIM_END);
# ifndef SMALL_CONFIG
if (GC_print_stats) {
+ CLOCK_TYPE done_time;
+
GET_TIME(done_time);
# ifndef GC_NO_FINALIZATION
/* A convenient place to output finalization statistics. */
ptr_t old_back_ptr = GET_OH_BG_PTR(q);
back_edges * be, *be_cont;
word i;
- static unsigned random_number = 13;
-# define GOT_LUCKY_NUMBER (((++random_number) & 0x7f) == 0)
- /* A not very random number we use to occasionally allocate a */
- /* back_edges structure even for a single backward edge. This */
- /* prevents us from repeatedly tracing back through very long */
- /* chains, since we will have some place to store height and */
- /* in_progress flags along the way. */
GC_ASSERT(p == GC_base(p) && q == GC_base(q));
if (!GC_HAS_DEBUG_INFO(q) || !GC_HAS_DEBUG_INFO(p)) {
return;
}
if (0 == old_back_ptr) {
+ static unsigned random_number = 13;
+# define GOT_LUCKY_NUMBER (((++random_number) & 0x7f) == 0)
+ /* A not very random number we use to occasionally allocate a */
+ /* back_edges structure even for a single backward edge. This */
+ /* prevents us from repeatedly tracing back through very long */
+ /* chains, since we will have some place to store height and */
+ /* in_progress flags along the way. */
+
SET_OH_BG_PTR(q, p);
if (GOT_LUCKY_NUMBER) ensure_struct(q);
return;
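/* GOT_LUCKY_NUMBER is a counter, not a real RNG: it fires once every
 * 128 calls, which is all the randomness the heuristic needs. The same
 * arithmetic extracted as a function (a sketch; like the macro, it is
 * not thread-safe):
 */
static int got_lucky_number(void)
{
    static unsigned random_number = 13;

    return ((++random_number) & 0x7f) == 0;  /* true 1 time in 128 */
}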
GC_INNER void GC_push_all_stacks(void)
{
- int i;
- ptr_t lo, hi, altstack_lo, altstack_hi;
+ ptr_t hi, altstack_lo, altstack_hi;
task_t my_task = current_task();
mach_port_t my_thread = mach_thread_self();
GC_bool found_me = FALSE;
# ifndef DARWIN_DONT_PARSE_STACK
if (GC_query_task_threads) {
+ int i;
kern_return_t kern_result;
thread_act_array_t act_list = 0;
for (i = 0; i < (int)listcount; i++) {
thread_act_t thread = act_list[i];
- lo = GC_stack_range_for(&hi, thread, NULL, FALSE, my_thread,
- &altstack_lo, &altstack_hi);
+ ptr_t lo = GC_stack_range_for(&hi, thread, NULL, FALSE, my_thread,
+ &altstack_lo, &altstack_hi);
+
if (lo) {
GC_ASSERT((word)lo <= (word)hi);
total_size += hi - lo;
} else
# endif /* !DARWIN_DONT_PARSE_STACK */
/* else */ {
+ int i;
+
for (i = 0; i < (int)listcount; i++) {
GC_thread p;
+
for (p = GC_threads[i]; p != NULL; p = p->next)
if ((p->flags & FINISHED) == 0) {
thread_act_t thread = (thread_act_t)p->stop_info.mach_thread;
- lo = GC_stack_range_for(&hi, thread, p, (GC_bool)p->thread_blocked,
- my_thread, &altstack_lo, &altstack_hi);
+ ptr_t lo = GC_stack_range_for(&hi, thread, p,
+ (GC_bool)p->thread_blocked,
+ my_thread, &altstack_lo,
+ &altstack_hi);
+
if (lo) {
GC_ASSERT((word)lo <= (word)hi);
total_size += hi - lo;
/* Caller holds allocation lock. */
GC_INNER void GC_stop_world(void)
{
- unsigned i;
task_t my_task = current_task();
mach_port_t my_thread = mach_thread_self();
kern_return_t kern_result;
if (GC_query_task_threads) {
# ifndef GC_NO_THREADS_DISCOVERY
+ unsigned i;
GC_bool changed;
thread_act_array_t act_list, prev_list;
mach_msg_type_number_t listcount, prevcount;
# endif /* !GC_NO_THREADS_DISCOVERY */
} else {
+ unsigned i;
+
for (i = 0; i < THREAD_TABLE_SZ; i++) {
GC_thread p;
GC_INNER void GC_start_world(void)
{
task_t my_task = current_task();
- int i;
# ifdef DEBUG_THREADS
GC_log_printf("World starting\n");
# endif
if (GC_query_task_threads) {
# ifndef GC_NO_THREADS_DISCOVERY
+ int i;
int j = GC_mach_threads_count;
kern_return_t kern_result;
thread_act_array_t act_list;
# endif /* !GC_NO_THREADS_DISCOVERY */
} else {
+ int i;
mach_port_t my_thread = mach_thread_self();
for (i = 0; i < THREAD_TABLE_SZ; i++) {
{
signed_word n = (signed_word)number_of_elements;
signed_word nsorted = 1;
- signed_word i;
while (nsorted < n) {
+ signed_word i;
+
while (nsorted < n &&
(word)base[nsorted-1].hs_start < (word)base[nsorted].hs_start)
++nsorted;
GC_INNER void GC_register_dynamic_libraries(void)
{
MEMORY_BASIC_INFORMATION buf;
- size_t result;
DWORD protect;
LPVOID p;
char * base;
# endif
base = limit = p = GC_sysinfo.lpMinimumApplicationAddress;
while ((word)p < (word)GC_sysinfo.lpMaximumApplicationAddress) {
- result = VirtualQuery(p, &buf, sizeof(buf));
+ size_t result = VirtualQuery(p, &buf, sizeof(buf));
+
# ifdef MSWINCE
if (result == 0) {
/* Page is free; advance to the next possible allocation base */
GC_INNER void GC_register_dynamic_libraries(void)
{
- int status;
- ldr_process_t mypid;
-
- /* module */
- ldr_module_t moduleid = LDR_NULL_MODULE;
- ldr_module_info_t moduleinfo;
- size_t moduleinfosize = sizeof(moduleinfo);
- size_t modulereturnsize;
-
- /* region */
- ldr_region_t region;
- ldr_region_info_t regioninfo;
- size_t regioninfosize = sizeof(regioninfo);
- size_t regionreturnsize;
-
- /* Obtain id of this process */
- mypid = ldr_my_process();
+ ldr_module_t moduleid = LDR_NULL_MODULE;
+ ldr_process_t mypid = ldr_my_process(); /* obtain id of this process */
/* For each module */
while (TRUE) {
-
- /* Get the next (first) module */
- status = ldr_next_module(mypid, &moduleid);
+ ldr_module_info_t moduleinfo;
+ size_t modulereturnsize;
+ ldr_region_t region;
+ ldr_region_info_t regioninfo;
+ size_t regionreturnsize;
+ int status = ldr_next_module(mypid, &moduleid);
+ /* Get the next (first) module */
/* Any more modules? */
if (moduleid == LDR_NULL_MODULE)
/* Get the module information */
status = ldr_inq_module(mypid, moduleid, &moduleinfo,
- moduleinfosize, &modulereturnsize);
+ sizeof(moduleinfo), &modulereturnsize);
if (status != 0 )
ABORT("ldr_inq_module failed");
for (region = 0; region < moduleinfo.lmi_nregion; region++) {
/* Get the region information */
status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
- regioninfosize, &regionreturnsize);
+ sizeof(regioninfo), &regionreturnsize);
if (status != 0 )
ABORT("ldr_inq_region failed");
GC_INNER void GC_register_dynamic_libraries(void)
{
- int status;
int index = 1; /* Ordinal position in shared library search list */
- struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */
/* For each dynamic library loaded */
while (TRUE) {
-
- /* Get info about next shared library */
- status = shl_get(index, &shl_desc);
+ struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */
+ int status = shl_get(index, &shl_desc);
+ /* Get info about next shared library */
/* Check if this is the end of the list or if some error occurred */
if (status != 0) {
const struct GC_MACH_SECTION *sec;
const char *name;
GC_has_static_roots_func callback = GC_has_static_roots;
- char secnam[16];
- const char *fmt;
DCL_LOCK_STATE;
if (GC_no_dls) return;
/* Sections constructed on demand. */
for (j = 0; j < sizeof(GC_dyld_add_sect_fmts) / sizeof(char *); j++) {
- fmt = GC_dyld_add_sect_fmts[j];
+ const char *fmt = GC_dyld_add_sect_fmts[j];
+
/* Add our manufactured aligned BSS sections. */
for (i = 0; i <= L2_MAX_OFILE_ALIGNMENT; i++) {
+ char secnam[16];
+
(void)snprintf(secnam, sizeof(secnam), fmt, (unsigned)i);
secnam[sizeof(secnam) - 1] = '\0';
sec = GC_GETSECTBYNAME(hdr, SEG_DATA, secnam);
unsigned long start, end;
unsigned i, j;
const struct GC_MACH_SECTION *sec;
- char secnam[16];
- const char *fmt;
for (i = 0; i < sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]); i++) {
sec = GC_GETSECTBYNAME(hdr, GC_dyld_sections[i].seg,
/* Remove our on-demand sections. */
for (j = 0; j < sizeof(GC_dyld_add_sect_fmts) / sizeof(char *); j++) {
- fmt = GC_dyld_add_sect_fmts[j];
+ const char *fmt = GC_dyld_add_sect_fmts[j];
+
for (i = 0; i <= L2_MAX_OFILE_ALIGNMENT; i++) {
+ char secnam[16];
+
(void)snprintf(secnam, sizeof(secnam), fmt, (unsigned)i);
secnam[sizeof(secnam) - 1] = '\0';
sec = GC_GETSECTBYNAME(hdr, SEG_DATA, secnam);
void GC_amiga_free_all_mem(void){
struct GC_Amiga_AllocedMemoryHeader *gc_am=(struct GC_Amiga_AllocedMemoryHeader *)(~(int)(GC_AMIGAMEM));
- struct GC_Amiga_AllocedMemoryHeader *temp;
#ifdef GC_AMIGA_PRINTSTATS
printf("\n\n"
#endif
while(gc_am!=NULL){
- temp=gc_am->next;
+ struct GC_Amiga_AllocedMemoryHeader *temp = gc_am->next;
FreeMem(gc_am,gc_am->size);
gc_am=(struct GC_Amiga_AllocedMemoryHeader *)(~(int)(temp));
}
void *GC_amiga_allocwrapper_any(size_t size,void *(*AllocFunction)(size_t size2)){
- void *ret,*ret2;
+ void *ret;
GC_amiga_dontalloc=TRUE; // Pretty tough thing to do, but it's indeed necessary.
latestsize=size;
}
#ifdef GC_AMIGA_RETRY
else{
+ void *ret2;
/* We got chip-mem. Better try again and again and again etc., we might get fast-mem sooner or later... */
/* Using gctest to check the effectiveness of doing this seldom gives a very good result. */
/* However, real programs don't normally allocate and deallocate rapidly. */
char*const begin = buffer;
char*const end = buffer + size;
size_t line_number = 0;
- char str[128];
if (size) {
*buffer = 0;
size = (GC_ULONG_PTR)end < (GC_ULONG_PTR)buffer ? 0 : end - buffer;
if (line_number) {
+ char str[128];
+
wsprintf(str, "(%d) : ", (int)line_number);
if (size) {
strncpy(buffer, str, size)[size - 1] = 0;
hdr * hhdr = HDR(p);
word descr = hhdr -> hb_descr;
ptr_t q;
- word r;
ptr_t scan_limit;
ptr_t target_limit = p + hhdr -> hb_sz - 1;
scan_limit = target_limit + 1 - sizeof(word);
}
for (q = p; (word)q <= (word)scan_limit; q += ALIGNMENT) {
- r = *(word *)q;
+ word r = *(word *)q;
+
if (r < (word)p || r > (word)target_limit) {
GC_PUSH_ONE_HEAP(r, q, GC_mark_stack_top);
}
finalization_mark_proc mp)
{
ptr_t base;
- struct finalizable_object * curr_fo, * prev_fo;
+ struct finalizable_object * curr_fo;
size_t index;
struct finalizable_object *new_fo = 0;
hdr *hhdr = NULL; /* initialized to prevent warning. */
- GC_oom_func oom_fn;
DCL_LOCK_STATE;
LOCK();
/* in the THREADS case we hold allocation lock. */
base = (ptr_t)obj;
for (;;) {
+ struct finalizable_object *prev_fo = NULL;
+ GC_oom_func oom_fn;
+
index = HASH2(base, log_fo_table_size);
- prev_fo = 0;
curr_fo = GC_fnlz_roots.fo_head[index];
while (curr_fo != 0) {
GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
STATIC void GC_dump_finalization_links(
const struct dl_hashtbl_s *dl_hashtbl)
{
- struct disappearing_link *curr_dl;
- ptr_t real_ptr, real_link;
size_t dl_size = dl_hashtbl->log_size == -1 ? 0 :
(size_t)1 << dl_hashtbl->log_size;
size_t i;
for (i = 0; i < dl_size; i++) {
+ struct disappearing_link *curr_dl;
+
for (curr_dl = dl_hashtbl -> head[i]; curr_dl != 0;
curr_dl = dl_next(curr_dl)) {
- real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
- real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);
+ ptr_t real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
+ ptr_t real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);
+
GC_printf("Object: %p, link: %p\n", real_ptr, real_link);
}
}
struct finalizable_object * curr_fo;
size_t fo_size = log_fo_table_size == -1 ? 0 :
(size_t)1 << log_fo_table_size;
- ptr_t real_ptr;
size_t i;
GC_printf("Disappearing (short) links:\n");
for (i = 0; i < fo_size; i++) {
for (curr_fo = GC_fnlz_roots.fo_head[i];
curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
- real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
+ ptr_t real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
+
GC_printf("Finalizable object: %p\n", real_ptr);
}
}
size_t dl_size = dl_hashtbl->log_size == -1 ? 0 : \
(size_t)1 << dl_hashtbl->log_size; \
for (i = 0; i < dl_size; i++) { \
+ struct disappearing_link *prev_dl = NULL; \
curr_dl = dl_hashtbl -> head[i]; \
- prev_dl = NULL; \
while (curr_dl) {
#define ITERATE_DL_HASHTBL_END(curr_dl, prev_dl) \
GC_INLINE void GC_make_disappearing_links_disappear(
struct dl_hashtbl_s* dl_hashtbl)
{
- struct disappearing_link *curr, *prev, *next;
- ptr_t real_ptr, real_link;
+ struct disappearing_link *curr, *next;
ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
- real_ptr = GC_REVEAL_POINTER(curr -> dl_hidden_obj);
- real_link = GC_REVEAL_POINTER(curr -> dl_hidden_link);
+ ptr_t real_ptr = GC_REVEAL_POINTER(curr -> dl_hidden_obj);
+ ptr_t real_link = GC_REVEAL_POINTER(curr -> dl_hidden_link);
+
if (!GC_is_marked(real_ptr)) {
*(word *)real_link = 0;
GC_clear_mark_bit(curr);
GC_INLINE void GC_remove_dangling_disappearing_links(
struct dl_hashtbl_s* dl_hashtbl)
{
- struct disappearing_link *curr, *prev, *next;
- ptr_t real_link;
+ struct disappearing_link *curr, *next;
ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
- real_link = GC_base(GC_REVEAL_POINTER(curr -> dl_hidden_link));
+ ptr_t real_link = GC_base(GC_REVEAL_POINTER(curr -> dl_hidden_link));
+
if (NULL != real_link && !GC_is_marked(real_link)) {
GC_clear_mark_bit(curr);
DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
/* Should be called without allocation lock. */
GC_API int GC_CALL GC_invoke_finalizers(void)
{
- struct finalizable_object * curr_fo;
int count = 0;
word bytes_freed_before = 0; /* initialized to prevent warning. */
DCL_LOCK_STATE;
while (GC_fnlz_roots.finalize_now != NULL) {
+ struct finalizable_object * curr_fo;
+
# ifdef THREADS
LOCK();
# endif
#endif
{
ptr_t op;
- word lg;
DCL_LOCK_STATE;
GC_DBG_COLLECT_AT_MALLOC(lb);
if(SMALL_OBJ(lb)) {
- lg = GC_size_map[lb];
+ word lg = GC_size_map[lb];
+
LOCK();
op = GC_gcjobjfreelist[lg];
if(EXPECT(0 == op, FALSE)) {
void * ptr_to_struct_containing_descr)
{
ptr_t op;
- word lg;
DCL_LOCK_STATE;
GC_DBG_COLLECT_AT_MALLOC(lb);
if(SMALL_OBJ(lb)) {
- lg = GC_size_map[lb];
+ word lg = GC_size_map[lb];
+
LOCK();
op = GC_gcjobjfreelist[lg];
if (EXPECT(0 == op, FALSE)) {
GC_INNER GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
{
struct hblk * hbp;
- word i;
for (hbp = h; (word)hbp < (word)h + sz; hbp += BOTTOM_SZ) {
if (!get_index((word) hbp)) return(FALSE);
}
if (!get_index((word)h + sz - 1)) return(FALSE);
for (hbp = h + 1; (word)hbp < (word)h + sz; hbp += 1) {
- i = HBLK_PTR_DIFF(hbp, h);
+ word i = HBLK_PTR_DIFF(hbp, h);
+
SET_HDR(hbp, (hdr *)(i > MAX_JUMP? MAX_JUMP : i));
}
return(TRUE);
}
static void deallocate(void *p, size_t n)
{
- size_t nwords = GC_round_up(n);
- void ** flh;
-
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
- flh = GC_objfreelist_ptr + nwords;
+ size_t nwords = GC_round_up(n);
+ void ** flh = GC_objfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
memset(reinterpret_cast<char *>(p) + GC_bytes_per_word, 0,
GC_bytes_per_word * (nwords - 1));
}
static void ptr_free_deallocate(void *p, size_t n)
{
- size_t nwords = GC_round_up(n);
- void ** flh;
-
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
- flh = GC_aobjfreelist_ptr + nwords;
+ size_t nwords = GC_round_up(n);
+ void ** flh = GC_aobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_bytes_recently_freed += nwords * GC_bytes_per_word;
}
static void deallocate(void *p, size_t n)
{
- size_t nwords = GC_round_up_uncollectable(n);
- void ** flh;
-
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
- flh = GC_uobjfreelist_ptr + nwords;
+ size_t nwords = GC_round_up_uncollectable(n);
+ void ** flh = GC_uobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_uncollectable_bytes_recently_freed +=
}
static void ptr_free_deallocate(void *p, size_t n)
{
- size_t nwords = GC_round_up_uncollectable(n);
- void ** flh;
-
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
- flh = GC_auobjfreelist_ptr + nwords;
+ size_t nwords = GC_round_up_uncollectable(n);
+ void ** flh = GC_auobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_uncollectable_bytes_recently_freed +=
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
{
- void *op;
- void **opp;
- size_t lg;
- DCL_LOCK_STATE;
-
GC_ASSERT(k < MAXOBJKINDS);
if (SMALL_OBJ(lb)) {
+ void *op;
+ void **opp;
+ size_t lg = GC_size_map[lb];
+ DCL_LOCK_STATE;
+
GC_DBG_COLLECT_AT_MALLOC(lb);
- lg = GC_size_map[lb];
LOCK();
opp = &GC_obj_kinds[k].ok_freelist[lg];
op = *opp;
size_t lb, int k)
{
void *op;
- void **opp;
- size_t lg;
DCL_LOCK_STATE;
GC_ASSERT(k < MAXOBJKINDS);
if (SMALL_OBJ(lb)) {
+ void **opp;
+ size_t lg;
+
GC_DBG_COLLECT_AT_MALLOC(lb);
if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
hdr *hhdr;
size_t sz; /* In bytes */
size_t ngranules; /* sz in granules */
- void **flh;
int knd;
struct obj_kind * ok;
DCL_LOCK_STATE;
knd = hhdr -> hb_obj_kind;
ok = &GC_obj_kinds[knd];
if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
+ void **flh;
+
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
UNLOCK();
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
+
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
hdr *hhdr;
size_t sz; /* bytes */
size_t ngranules; /* sz in granules */
- void ** flh;
int knd;
struct obj_kind * ok;
ngranules = BYTES_TO_GRANULES(sz);
ok = &GC_obj_kinds[knd];
if (ngranules <= MAXOBJGRANULES) {
+ void ** flh;
+
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (ok -> ok_init) {
word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
register word *p;
- register word q;
register word *lim;
register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
register ptr_t least_ha = GC_least_plausible_heap_addr;
lim = t - 1 /* longword */;
for (p = b; (word)p <= (word)lim;
p = (word *)(((ptr_t)p) + ALIGNMENT)) {
- q = *p;
+ register word q = *p;
GC_PUSH_ONE_STACK(q, p);
}
# undef GC_greatest_plausible_heap_addr
word * mark_word_addr = &(hhdr->hb_marks[0]);
word *p;
word *plim;
- word *q;
- word mark_word;
/* Allow registers to be used for some frequently accessed */
/* global variables. Otherwise aliasing issues are likely */
/* go through all words in block */
while ((word)p < (word)plim) {
- mark_word = *mark_word_addr++;
- q = p;
+ word mark_word = *mark_word_addr++;
+ word *q = p;
+
while(mark_word != 0) {
if (mark_word & 1) {
PUSH_GRANULE(q);
word * mark_word_addr = &(hhdr->hb_marks[0]);
word *p;
word *plim;
- word *q;
- word mark_word;
ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
ptr_t least_ha = GC_least_plausible_heap_addr;
/* go through all words in block */
while ((word)p < (word)plim) {
- mark_word = *mark_word_addr++;
- q = p;
+ word mark_word = *mark_word_addr++;
+ word *q = p;
+
while(mark_word != 0) {
if (mark_word & 1) {
PUSH_GRANULE(q);
word * mark_word_addr = &(hhdr->hb_marks[0]);
word *p;
word *plim;
- word *q;
- word mark_word;
ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
ptr_t least_ha = GC_least_plausible_heap_addr;
/* go through all words in block */
while ((word)p < (word)plim) {
- mark_word = *mark_word_addr++;
- q = p;
+ word mark_word = *mark_word_addr++;
+ word *q = p;
+
while(mark_word != 0) {
if (mark_word & 1) {
PUSH_GRANULE(q);
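/* The inner loop above visits each set bit of mark_word by testing the
 * low bit and shifting right. The pattern in isolation (PUSH_GRANULE
 * becomes an illustrative callback; the real loop also advances q by
 * one granule per bit position):
 */
static void for_each_set_bit(unsigned long mark_word,
                             void (*visit)(unsigned bit_no))
{
    unsigned bit_no = 0;

    for (; mark_word != 0; mark_word >>= 1, ++bit_no) {
        if (mark_word & 1)
            visit(bit_no);
    }
}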
/* re-registering dynamic libraries. */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
- struct roots * old;
-
GC_ASSERT((word)b <= (word)e);
b = (ptr_t)(((word)b + (sizeof(word) - 1)) & ~(sizeof(word) - 1));
/* round b up to word boundary */
/* takes to scan the roots. */
{
register int i;
- old = 0; /* initialized to prevent warning. */
+ struct roots * old = NULL; /* initialized to prevent warning. */
+
for (i = 0; i < n_root_sets; i++) {
old = GC_static_roots + i;
if ((word)b <= (word)old->r_end
}
}
# else
- old = (struct roots *)GC_roots_present(b);
- if (old != 0) {
- if ((word)e <= (word)old->r_end) /* already there */ return;
- /* else extend */
- GC_root_size += e - old -> r_end;
- old -> r_end = e;
- return;
+ {
+ struct roots * old = (struct roots *)GC_roots_present(b);
+
+ if (old != 0) {
+ if ((word)e <= (word)old->r_end)
+ return; /* already there */
+ /* else extend */
+ GC_root_size += e - old -> r_end;
+ old -> r_end = e;
+ return;
+ }
}
# endif
if (n_root_sets == MAX_ROOT_SETS) {
{
size_t low = 0;
size_t high = GC_excl_table_entries - 1;
- size_t mid;
while (high > low) {
- mid = (low + high) >> 1;
+ size_t mid = (low + high) >> 1;
+
/* low <= mid < high */
if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
low = mid + 1;
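/* GC_next_exclusion's loop is a lower-bound binary search with the
 * midpoint scoped to each iteration. A sketch of the same shape over a
 * sorted array (arr/key are illustrative; the branch not shown above
 * is assumed to narrow high, as a lower-bound search requires):
 */
#include <stddef.h>

static size_t lower_bound(const unsigned long *arr, size_t n,
                          unsigned long key)
{
    size_t low = 0;
    size_t high = n - 1;              /* assumes n >= 1, as above */

    while (high > low) {
        size_t mid = (low + high) >> 1;   /* low <= mid < high */

        if (arr[mid] <= key)
            low = mid + 1;
        else
            high = mid;
    }
    return low;
}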
GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish)
{
struct exclusion * next;
- size_t next_index, i;
+ size_t next_index;
GC_ASSERT((word)start % sizeof(word) == 0);
GC_ASSERT((word)start < (word)finish);
next = GC_next_exclusion(start);
}
if (0 != next) {
+ size_t i;
+
if ((word)(next -> e_start) < (word) finish) {
/* incomplete error check. */
ABORT("Exclusion ranges overlap");
STATIC void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top,
GC_bool all GC_ATTR_UNUSED)
{
- struct exclusion * next;
- ptr_t excl_start;
-
while ((word)bottom < (word)top) {
- next = GC_next_exclusion(bottom);
- if (0 == next || (word)(excl_start = next -> e_start) >= (word)top) {
- GC_PUSH_CONDITIONAL(bottom, top, all);
- return;
+ struct exclusion *next = GC_next_exclusion(bottom);
+ ptr_t excl_start;
+
+ if (0 == next
+ || (word)(excl_start = next -> e_start) >= (word)top) {
+ GC_PUSH_CONDITIONAL(bottom, top, all);
+ break;
}
if ((word)excl_start > (word)bottom)
GC_PUSH_CONDITIONAL(bottom, excl_start, all);
/* Used to occasionally clear a bigger */
/* chunk. */
# endif
- ptr_t limit;
# define SLOP 400
/* Extra bytes we clear every time. This clears our own */
/* larger ... */
# ifdef THREADS
if (++random_no % 13 == 0) {
- limit = sp;
+ ptr_t limit = sp;
+
MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
limit = (ptr_t)((word)limit & ~0xf);
/* Make it sufficiently aligned for assembly */
/* implementations of GC_clear_stack_inner. */
return GC_clear_stack_inner(arg, limit);
- } else {
- BZERO((void *)dummy, SMALL_CLEAR_SIZE*sizeof(word));
}
+ BZERO((void *)dummy, SMALL_CLEAR_SIZE*sizeof(word));
# else
if (GC_gc_no > GC_stack_last_cleared) {
/* Start things over, so we clear the entire stack again */
GC_high_water = sp;
}
MAKE_HOTTER(GC_high_water, GC_SLOP);
- limit = GC_min_sp;
- MAKE_HOTTER(limit, SLOP);
- if ((word)sp COOLER_THAN (word)limit) {
- limit = (ptr_t)((word)limit & ~0xf);
- /* Make it sufficiently aligned for assembly */
- /* implementations of GC_clear_stack_inner. */
- GC_min_sp = sp;
- return GC_clear_stack_inner(arg, limit);
- } else if (GC_bytes_allocd - GC_bytes_allocd_at_reset
- > CLEAR_THRESHOLD) {
+ {
+ ptr_t limit = GC_min_sp;
+
+ MAKE_HOTTER(limit, SLOP);
+ if ((word)sp COOLER_THAN (word)limit) {
+ limit = (ptr_t)((word)limit & ~0xf);
+ /* Make it sufficiently aligned for assembly */
+ /* implementations of GC_clear_stack_inner. */
+ GC_min_sp = sp;
+ return GC_clear_stack_inner(arg, limit);
+ }
+ }
+ if (GC_bytes_allocd - GC_bytes_allocd_at_reset > CLEAR_THRESHOLD) {
/* Restart clearing process, but limit how much clearing we do. */
GC_min_sp = sp;
MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
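/* The "(word)limit & ~0xf" steps above clear the low four bits so the
 * pointer is 16-byte aligned for the assembly implementations of
 * GC_clear_stack_inner. The masking in isolation (a sketch):
 */
#include <stdint.h>

static void *align_down_16(void *p)
{
    return (void *)((uintptr_t)p & ~(uintptr_t)0xf);
}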
struct hblk *h;
bottom_index *bi;
hdr *candidate_hdr;
- ptr_t limit;
r = p;
if (!EXPECT(GC_is_initialized, TRUE)) return 0;
size_t offset = HBLKDISPL(r);
word sz = candidate_hdr -> hb_sz;
size_t obj_displ = offset % sz;
+ ptr_t limit;
r -= obj_displ;
limit = r + sz;
STATIC word GC_parse_mem_size_arg(const char *str)
{
- char *endptr;
word result = 0; /* bad value */
- char ch;
if (*str != '\0') {
+ char *endptr;
+ char ch;
+
result = (word)STRTOULL(str, &endptr, 10);
ch = *endptr;
if (ch != '\0') {
return len;
# else
int bytes_written = 0;
- int result;
IF_CANCEL(int cancel_state;)
DISABLE_CANCEL(cancel_state);
while ((size_t)bytes_written < len) {
# ifdef GC_SOLARIS_THREADS
- result = syscall(SYS_write, fd, buf + bytes_written,
+ int result = syscall(SYS_write, fd, buf + bytes_written,
len - bytes_written);
# else
- result = write(fd, buf + bytes_written, len - bytes_written);
+ int result = write(fd, buf + bytes_written, len - bytes_written);
# endif
+
if (-1 == result) {
RESTORE_CANCEL(cancel_state);
return(result);
STATIC ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
size_t num_read = 0;
- ssize_t result;
ASSERT_CANCEL_DISABLED();
while (num_read < count) {
- result = READ(fd, buf + num_read, count - num_read);
+ ssize_t result = READ(fd, buf + num_read, count - num_read);
+
if (result < 0) return result;
if (result == 0) break;
num_read += result;
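/* GC_repeat_read is the classic partial-read retry loop: keep issuing
 * reads until the request is filled, EOF, or an error. The same shape
 * with the plain POSIX call (READ is a GC wrapper macro):
 */
#include <unistd.h>

static ssize_t read_all(int fd, char *buf, size_t count)
{
    size_t num_read = 0;

    while (num_read < count) {
        ssize_t result = read(fd, buf + num_read, count - num_read);

        if (result < 0) return result;   /* error */
        if (result == 0) break;          /* EOF */
        num_read += (size_t)result;
    }
    return (ssize_t)num_read;
}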
/* of time. */
GC_INNER char * GC_get_maps(void)
{
- int f;
ssize_t result;
static char *maps_buf = NULL;
static size_t maps_buf_sz = 1;
/* Note that we may not allocate conventionally, and */
/* thus can't use stdio. */
do {
+ int f;
+
while (maps_size >= maps_buf_sz) {
/* Grow only by powers of 2, since we leak "too small" buffers.*/
while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
STATIC ptr_t GC_least_described_address(ptr_t start)
{
MEMORY_BASIC_INFORMATION buf;
- size_t result;
LPVOID limit;
ptr_t p;
- LPVOID q;
limit = GC_sysinfo.lpMinimumApplicationAddress;
p = (ptr_t)((word)start & ~(GC_page_size - 1));
for (;;) {
- q = (LPVOID)(p - GC_page_size);
+ size_t result;
+ LPVOID q = (LPVOID)(p - GC_page_size);
+
if ((word)q > (word)p /* underflow */ || (word)q < (word)limit) break;
result = VirtualQuery(q, &buf, sizeof(buf));
if (result != sizeof(buf) || buf.AllocationBase == 0) break;
STATIC void GC_register_root_section(ptr_t static_root)
{
MEMORY_BASIC_INFORMATION buf;
- size_t result;
DWORD protect;
LPVOID p;
char * base;
- char * limit, * new_limit;
+ char * limit;
if (!GC_no_win32_dlls) return;
p = base = limit = GC_least_described_address(static_root);
while ((word)p < (word)GC_sysinfo.lpMaximumApplicationAddress) {
- result = VirtualQuery(p, &buf, sizeof(buf));
+ size_t result = VirtualQuery(p, &buf, sizeof(buf));
+ char * new_limit;
+
if (result != sizeof(buf) || buf.AllocationBase == 0
|| GC_is_heap_base(buf.AllocationBase)) break;
new_limit = (char *)p + buf.RegionSize;
void GC_register_data_segments(void)
{
ptr_t region_start = DATASTART;
- ptr_t region_end;
if ((word)region_start - 1U >= (word)DATAEND)
ABORT_ARG2("Wrong DATASTART/END pair",
": %p .. %p", region_start, DATAEND);
for (;;) {
- region_end = GC_find_limit_openbsd(region_start, DATAEND);
+ ptr_t region_end = GC_find_limit_openbsd(region_start, DATAEND);
+
GC_add_roots_inner(region_start, region_end, FALSE);
if ((word)region_end >= (word)DATAEND)
break;
GC_ULONG_PTR count;
do {
- PVOID * pages, * pages_end;
+ PVOID * pages = gww_buf;
DWORD page_size;
- pages = gww_buf;
count = GC_GWW_BUF_LEN;
/* GetWriteWatch is documented as returning non-zero when it */
/* fails, but the documentation doesn't explicitly say why it */
}
count = 1; /* Done with this section. */
} else /* succeeded */ {
- pages_end = pages + count;
+ PVOID * pages_end = pages + count;
+
while (pages != pages_end) {
struct hblk * h = (struct hblk *) *pages++;
struct hblk * h_end = (struct hblk *) ((char *) h + page_size);
char * addr = (char *) (exc_info -> ExceptionRecord
-> ExceptionInformation[1]);
# endif
- size_t i;
if (SIG_OK && CODE_OK) {
register struct hblk * h =
(struct hblk *)((word)addr & ~(GC_page_size-1));
GC_bool in_allocd_block;
+ size_t i;
+
# ifdef CHECKSUMS
GC_record_fault(h);
# endif
-
# ifdef SUNOS5SIGS
/* Address is only within the correct physical page. */
in_allocd_block = FALSE;
STATIC void GC_protect_heap(void)
{
- ptr_t start;
- size_t len;
- struct hblk * current;
- struct hblk * current_start; /* Start of block to be protected. */
- struct hblk * limit;
unsigned i;
GC_bool protect_all =
- (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
+ (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
+
for (i = 0; i < GC_n_heap_sects; i++) {
- start = GC_heap_sects[i].hs_start;
- len = GC_heap_sects[i].hs_bytes;
+ ptr_t start = GC_heap_sects[i].hs_start;
+ size_t len = GC_heap_sects[i].hs_bytes;
+
if (protect_all) {
PROTECT(start, len);
} else {
+ struct hblk * current;
+ struct hblk * current_start; /* Start of block to be protected. */
+ struct hblk * limit;
+
GC_ASSERT(PAGE_ALIGNED(len));
GC_ASSERT(PAGE_ALIGNED(start));
current_start = current = (struct hblk *)start;
GC_INNER void GC_read_dirty(void)
{
int nmaps;
- unsigned long npages;
- unsigned pagesize;
- ptr_t vaddr, limit;
- struct prasmap * map;
- char * bufp;
+ char * bufp = GC_proc_buf;
int i;
BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
- bufp = GC_proc_buf;
if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
/* Retry with larger buffer. */
word new_size = 2 * GC_proc_buf_size;
# endif
bufp += sizeof(struct prpageheader);
for (i = 0; i < nmaps; i++) {
- map = (struct prasmap *)bufp;
- vaddr = (ptr_t)(map -> pr_vaddr);
- npages = map -> pr_npage;
- pagesize = map -> pr_pagesize;
+ struct prasmap * map = (struct prasmap *)bufp;
+ ptr_t vaddr = (ptr_t)(map -> pr_vaddr);
+ unsigned long npages = map -> pr_npage;
+ unsigned pagesize = map -> pr_pagesize;
+ ptr_t limit;
+
# ifdef DEBUG_DIRTY_BITS
GC_log_printf(
"pr_vaddr= %p, npage= %lu, mflags= 0x%x, pagesize= 0x%x\n",
{
kern_return_t r;
char *addr;
- struct hblk *h;
- size_t i;
thread_state_flavor_t flavor = DARWIN_EXC_STATE;
mach_msg_type_number_t exc_state_count = DARWIN_EXC_STATE_COUNT;
DARWIN_EXC_STATE_T exc_state;
# endif
if (GC_mprotect_state == GC_MP_NORMAL) { /* common case */
- h = (struct hblk*)((word)addr & ~(GC_page_size-1));
+ struct hblk * h = (struct hblk*)((word)addr & ~(GC_page_size-1));
+ size_t i;
+
UNPROTECT(h, GC_page_size);
for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
register int index = PHT_HASH(h+i);
register int n_live_threads = 0;
register int result;
# endif
-# ifdef GC_NETBSD_THREADS_WORKAROUND
- int code;
-# endif
# ifdef DEBUG_THREADS
GC_log_printf("World starting\n");
}
# ifdef GC_NETBSD_THREADS_WORKAROUND
for (i = 0; i < n_live_threads; i++) {
+ int code;
+
while (0 != (code = sem_wait(&GC_restart_ack_sem))) {
if (errno != EINTR) {
ABORT_ARG1("sem_wait() for restart handler failed",
sigset_t *oset)
{
sigset_t fudged_set;
- int sig_suspend;
INIT_REAL_SYMS();
if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
+ int sig_suspend = GC_get_suspend_signal();
+
fudged_set = *set;
- sig_suspend = GC_get_suspend_signal();
GC_ASSERT(sig_suspend >= 0);
if (sigdelset(&fudged_set, sig_suspend) != 0)
ABORT("sigdelset failed");
hdr * hhdr = HDR(hbp);
size_t sz = hhdr -> hb_sz; /* size of objects in current block */
struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
- struct hblk ** rlh;
if( sz > MAXOBJBYTES ) { /* 1 big object */
if( !mark_bit_from_hdr(hhdr, 0) ) {
}
} else if (GC_find_leak || !GC_block_nearly_full(hhdr)) {
/* group of smaller objects, enqueue the real work */
- rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
+ struct hblk **rlh = ok -> ok_reclaim_list + BYTES_TO_GRANULES(sz);
+
hhdr -> hb_next = *rlh;
*rlh = hbp;
} /* else not worth salvaging. */
GC_atomic_in_use = 0;
/* Clear reclaim- and free-lists */
for (kind = 0; kind < GC_n_kinds; kind++) {
- void **fop;
- void **lim;
struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);
if (rlist == 0) continue; /* This kind not used. */
if (!report_if_found) {
- lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
+ void **fop;
+ void **lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
+
for (fop = GC_obj_kinds[kind].ok_freelist;
(word)fop < (word)lim; fop++) {
if (*fop != 0) {
struct hblk ** rlh;
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
- CLOCK_TYPE done_time;
if (GC_print_stats == VERBOSE)
GET_TIME(start_time);
}
# ifndef SMALL_CONFIG
if (GC_print_stats == VERBOSE) {
+ CLOCK_TYPE done_time;
+
GET_TIME(done_time);
GC_verbose_log_printf("Disposing of reclaim lists took %lu msecs\n",
MS_TIME_DIFF(done_time,start_time));
# else
char *y = (char *)(GC_word)fail_proc1;
# endif
- CLOCK_TYPE typed_time;
# endif
CLOCK_TYPE start_time;
CLOCK_TYPE reverse_time;
- CLOCK_TYPE tree_time;
unsigned long time_diff;
# ifndef NO_TEST_HANDLE_FORK
pid_t pid;
# ifndef DBG_HDRS_ALL
typed_test();
if (print_stats) {
+ CLOCK_TYPE typed_time;
+
GET_TIME(typed_time);
time_diff = MS_TIME_DIFF(typed_time, start_time);
GC_log_printf("-------------Finished typed_test at time %u (%p)\n",
# endif /* DBG_HDRS_ALL */
tree_test();
if (print_stats) {
+ CLOCK_TYPE tree_time;
+
GET_TIME(tree_time);
time_diff = MS_TIME_DIFF(tree_time, start_time);
GC_log_printf("-------------Finished tree_test at time %u (%p)\n",
#if defined(MACOS)
void SetMinimumStack(long minSize)
{
- long newApplLimit;
-
if (minSize > LMGetDefltStack())
{
- newApplLimit = (long) GetApplLimit()
- - (minSize - LMGetDefltStack());
+ long newApplLimit = (long) GetApplLimit()
+ - (minSize - LMGetDefltStack());
SetApplLimit((Ptr) newApplLimit);
MaxApplZone();
}
static void return_single_freelist(void *fl, void **gfl)
{
- void *q, **qptr;
-
if (*gfl == 0) {
*gfl = fl;
} else {
+ void *q, **qptr;
+
GC_ASSERT(GC_size(fl) == GC_size(*gfl));
/* Concatenate: */
qptr = &(obj_link(fl));
{
signed_word last_set_bit = len - 1;
GC_descr result;
- signed_word i;
# define HIGH_BIT (((word)1) << (WORDSZ - 1))
DCL_LOCK_STATE;
if (last_set_bit < 0) return(0 /* no pointers */);
# if ALIGNMENT == CPP_WORDSZ/8
{
+ signed_word i;
+
for (i = 0; i < last_set_bit; i++) {
if (!GC_get_bit(bm, i)) {
break;
}
# endif
if ((word)last_set_bit < BITMAP_BITS) {
+ signed_word i;
+
/* Hopefully the common case. */
/* Build bitmap descriptor (with bits reversed) */
result = HIGH_BIT;
# ifdef GC_ASSERTIONS
DWORD thread_id = GetCurrentThreadId();
# endif
- int i;
GC_ASSERT(I_HOLD_LOCK());
if (GC_win32_dll_threads) {
LONG my_max = GC_get_max_thread_index();
+ int i;
+
for (i = 0; i <= my_max; i++) {
GC_thread t = (GC_thread)(dll_thread_table + i);
if (t -> suspended) {
GC_INNER void GC_start_mark_threads_inner(void)
{
int i;
-# ifdef MSWINCE
- HANDLE handle;
- DWORD thread_id;
-# else
- GC_uintptr_t handle;
- unsigned thread_id;
-# endif
GC_ASSERT(I_DONT_HOLD_LOCK());
if (available_markers_m1 <= 0) return;
}
for (i = 0; i < GC_markers_m1; ++i) {
- marker_last_stack_min[i] = ADDR_LIMIT;
# ifdef MSWINCE
+ HANDLE handle;
+ DWORD thread_id;
+
+ marker_last_stack_min[i] = ADDR_LIMIT;
/* There is no _beginthreadex() in WinCE. */
handle = CreateThread(NULL /* lpsa */,
MARK_THREAD_STACK_SIZE /* ignored */,
CloseHandle(handle);
}
# else
+ GC_uintptr_t handle;
+ unsigned thread_id;
+
+ marker_last_stack_min[i] = ADDR_LIMIT;
handle = _beginthreadex(NULL /* security_attr */,
MARK_THREAD_STACK_SIZE, GC_mark_thread,
(void *)(word)i, 0 /* flags */, &thread_id);
LPDWORD lpThreadId)
{
HANDLE thread_h;
- thread_args *args;
if (!EXPECT(parallel_initialized, TRUE))
GC_init_parallel();
return CreateThread(lpThreadAttributes, dwStackSize, lpStartAddress,
lpParameter, dwCreationFlags, lpThreadId);
} else {
- args = GC_malloc_uncollectable(sizeof(thread_args));
+ thread_args *args = GC_malloc_uncollectable(sizeof(thread_args));
/* Handed off to and deallocated by child thread. */
+
if (0 == args) {
SetLastError(ERROR_NOT_ENOUGH_MEMORY);
return NULL;
void *arglist, unsigned initflag,
unsigned *thrdaddr)
{
- GC_uintptr_t thread_h;
- thread_args *args;
-
if (!EXPECT(parallel_initialized, TRUE))
GC_init_parallel();
/* make sure GC is initialized (i.e. main thread is */
return _beginthreadex(security, stack_size, start_address,
arglist, initflag, thrdaddr);
} else {
- args = GC_malloc_uncollectable(sizeof(thread_args));
+ GC_uintptr_t thread_h;
+ thread_args *args = GC_malloc_uncollectable(sizeof(thread_args));
/* Handed off to and deallocated by child thread. */
+
if (0 == args) {
/* MSDN docs say _beginthreadex() returns 0 on error and sets */
/* errno to either EAGAIN (too many threads) or EINVAL (the */