* Makefile.am, Makefile.direct: Add NT_X64_STATIC_THREADS_MAKEFILE.
* Makefile.in: Regenerate.
* NT_X64_STATIC_THREADS_MAKEFILE: Fix warning flags.
* allochblk.c, alloc.c, blacklst.c, dbg_malc.c, dyn_load.c,
finalize.c, headers.c, mach_dep.c, malloc.c, mark.c, misc.c,
obj_map.c, os_dep.c, ptr_chck.c, reclaim.c, typd_mlc.c,
win32_threads.c, cord/de_win.c, include/gc_mark.h,
include/private/gc_hdrs.h, include/private/gc_pmark.h,
include/private/gc_priv.h, tests/test_cpp.cc:
Replace old style function declarations. Clean up integral types.
Remove register declarations. The change in malloc.c and the
"int descr" declaration in mark.c are the most likely to have
been real bugs outside of win64.
* msvc_dbg.c: Disable on win64.
* win32_threads.c: Add AMD64 support.
* include/gc.h: no backtrace on AMD64 for now.
+2007-06-06 Hans Boehm <Hans.Boehm@hp.com>
+
+ * Makefile.am, Makefile.direct: Add NT_X64_STATIC_THREADS_MAKEFILE.
+ * Makefile.in: Regenerate.
+ * NT_X64_STATIC_THREADS_MAKEFILE: Fix warning flags.
+ * allochblk.c, alloc.c, blacklst.c, dbg_malc.c, dyn_load.c,
+ finalize.c, headers.c, mach_dep.c, malloc.c, mark.c, misc.c,
+ obj_map.c, os_dep.c, ptr_chck.c, reclaim.c, typd_mlc.c,
+ win32_threads.c, cord/de_win.c, include/gc_mark.h,
+ include/private/gc_hdrs.h, include/private/gc_pmark.h,
+ include/private/gc_priv.h, tests/test_cpp.cc:
+ Replace old style function declarations. Clean up integral types.
+ Remove register declarations. The change in malloc.c and the
+ "int descr" declaration in mark.c are the most likely to have
+ been real bugs outside of win64.
+ * msvc_dbg.c: Disable on win64.
+ * win32_threads.c: Add AMD64 support.
+ * include/gc.h: no backtrace on AMD64 for now.
+
2007-06-06 Hans Boehm <Hans.Boehm@hp.com>
* msvc_dbg.c(GetModuleBase): Replace strcat with strcat_s.
OS2_MAKEFILE PCR-Makefile digimars.mak EMX_MAKEFILE \
Makefile.direct Makefile.dj Makefile.DLLs SMakefile.amiga \
WCC_MAKEFILE configure_atomic_ops.sh \
- NT_STATIC_THREADS_MAKEFILE
+ NT_STATIC_THREADS_MAKEFILE NT_X64_STATIC_THREADS_MAKEFILE
# files used by makefiles other than Makefile.am
#
BCC_MAKEFILE EMX_MAKEFILE WCC_MAKEFILE Makefile.dj \
PCR-Makefile SMakefile.amiga Makefile.DLLs \
digimars.mak Makefile.direct NT_STATIC_THREADS_MAKEFILE \
- configure_atomic_ops.sh
+ NT_X64_STATIC_THREADS_MAKEFILE configure_atomic_ops.sh
# Makefile and Makefile.direct are copies of each other.
OTHER_FILES= Makefile setjmp_t.c callprocs \
NT_THREADS_MAKEFILE OS2_MAKEFILE PCR-Makefile digimars.mak \
EMX_MAKEFILE Makefile.direct Makefile.dj Makefile.DLLs \
SMakefile.amiga WCC_MAKEFILE configure_atomic_ops.sh \
- NT_STATIC_THREADS_MAKEFILE add_gc_prefix.c gcname.c if_mach.c \
- if_not_there.c hpux_test_and_clear.s gc.mak MacOS.c \
- MacProjects.sit.hqx mach_dep.c setjmp_t.c threadlibs.c \
- AmigaOS.c Mac_files/datastart.c Mac_files/dataend.c \
+ NT_STATIC_THREADS_MAKEFILE NT_X64_STATIC_THREADS_MAKEFILE \
+ add_gc_prefix.c gcname.c if_mach.c if_not_there.c \
+ hpux_test_and_clear.s gc.mak MacOS.c MacProjects.sit.hqx \
+ mach_dep.c setjmp_t.c threadlibs.c AmigaOS.c \
+ Mac_files/datastart.c Mac_files/dataend.c \
Mac_files/MacOS_config.h Mac_files/MacOS_Test_config.h \
include/private/msvc_dbg.h msvc_dbg.c libatomic_ops-1.2 \
libtool.m4 cord/cordbscs.c cord/cordtest.c cord/de.c \
all: gctest.exe cord\de.exe test_cpp.exe
.c.obj:
- $(cc) $(cdebug) $(cflags) $(cvarsmt) -Iinclude -I$(AO_INCLUDE_DIR) -DALL_INTERIOR_POINTERS -D__STDC__ -DGC_NOT_DLL -DGC_WIN32_THREADS -DTHREAD_LOCAL_ALLOC $*.c /Fo$*.obj /wd4107 -D_CRT_SECURE_NO_DEPRECATE
+ $(cc) $(cdebug) $(cflags) $(cvarsmt) -Iinclude -I$(AO_INCLUDE_DIR) -DALL_INTERIOR_POINTERS -D__STDC__ -DGC_NOT_DLL -DGC_WIN32_THREADS -DTHREAD_LOCAL_ALLOC $*.c /Fo$*.obj /wd4701 -D_CRT_SECURE_NO_DEPRECATE
# Disable "may not be initialized" warnings. They're too approximate.
# Disable crt security warnings, since unfortunately they warn about all sorts
# of safe uses of strncpy. It would be nice to leave the rest enabled.
.cpp.obj:
- $(cc) $(cdebug) $(cflags) $(cvarsmt) -Iinclude -I$(AO_INCLUDE_DIR) -DALL_INTERIOR_POINTERS -DGC_NOT_DLL $*.CPP -DGC_WIN32_THREADS -DTHREAD_LOCAL_ALLOC /Fo$*.obj
+ $(cc) $(cdebug) $(cflags) $(cvarsmt) -Iinclude -I$(AO_INCLUDE_DIR) -DALL_INTERIOR_POINTERS -DGC_NOT_DLL $*.CPP -DGC_WIN32_THREADS -DTHREAD_LOCAL_ALLOC /Fo$*.obj -D_CRT_SECURE_NO_DEPRECATE
$(OBJS) tests\test.obj: include\private\gc_priv.h include\private\gc_hdrs.h include\gc.h include\private\gcconfig.h include\private\gc_locks.h include\private\gc_pmark.h include\gc_mark.h include\private\msvc_dbg.h
# ifdef __GNUC__
__inline__
# endif
- static GC_bool GC_enough_large_bytes_left(bytes,n)
- word bytes;
- int n;
+ static GC_bool GC_enough_large_bytes_left(word bytes, int n)
{
int i;
for (i = N_HBLK_FLS; i >= n; --i) {
#endif /* USE_MUNMAP */
/* Map a number of blocks to the appropriate large block free list index. */
-int GC_hblk_fl_from_blocks(blocks_needed)
-word blocks_needed;
+int GC_hblk_fl_from_blocks(word blocks_needed)
{
- if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
+ if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
- return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
+ return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
+ UNIQUE_THRESHOLD;
}
while (h != 0) {
hhdr = HDR(h);
sz = hhdr -> hb_sz;
- GC_printf("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
+ GC_printf("\t%p size %lu ", h, (unsigned long)sz);
total_free += sz;
if (GC_is_black_listed(h, HBLKSIZE) != 0) {
GC_printf("start black listed\n");
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere. */
-int free_list_index_of(wanted)
-hdr * wanted;
+int free_list_index_of(hdr *wanted)
{
struct hblk * h;
hdr * hhdr;
/* kind of objects. */
/* Return FALSE on failure. */
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
- int kind, unsigned char flags)
+ int kind, unsigned flags)
{
word descr;
size_t granules;
/* Set size, kind and mark proc fields */
hhdr -> hb_sz = byte_sz;
- hhdr -> hb_obj_kind = kind;
- hhdr -> hb_flags = flags;
+ hhdr -> hb_obj_kind = (unsigned char)kind;
+ hhdr -> hb_flags = (unsigned char)flags;
hhdr -> hb_block = block;
descr = GC_obj_kinds[kind].ok_descriptor;
if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
hhdr -> hb_inv_sz = inv_sz;
}
# else /* MARK_BIT_PER_GRANULE */
- hhdr -> hb_large_block = (byte_sz > MAXOBJBYTES);
+ hhdr -> hb_large_block = (unsigned char)(byte_sz > MAXOBJBYTES);
granules = BYTES_TO_GRANULES(byte_sz);
if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
/* Make it look like a valid block. */
hhdr -> hb_map = 0;
return FALSE;
} else {
- int index = (hhdr -> hb_large_block? 0 : granules);
+ size_t index = (hhdr -> hb_large_block? 0 : granules);
hhdr -> hb_map = GC_obj_map[index];
}
# endif /* MARK_BIT_PER_GRANULE */
* We assume it is on the nth free list, or on the size
* appropriate free list if n is FL_UNKNOWN.
*/
-void GC_remove_from_fl(hhdr, n)
-hdr * hhdr;
-int n;
+void GC_remove_from_fl(hdr *hhdr, int n)
{
int index;
/*
* Return a pointer to the free block ending just before h, if any.
*/
-struct hblk * GC_free_block_ending_at(h)
-struct hblk *h;
+struct hblk * GC_free_block_ending_at(struct hblk *h)
{
struct hblk * p = h - 1;
hdr * phdr;
* Add hhdr to the appropriate free list.
* We maintain individual free lists sorted by address.
*/
-void GC_add_to_fl(h, hhdr)
-struct hblk *h;
-hdr * hhdr;
+void GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
struct hblk *second = GC_hblkfreelist[index];
* The header for the returned block must be set up by the caller.
* If the return value is not 0, then hhdr is the header for it.
*/
-struct hblk * GC_get_first_part(h, hhdr, bytes, index)
-struct hblk *h;
-hdr * hhdr;
-word bytes;
-int index;
+struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
+ size_t bytes, int index)
{
word total_size = hhdr -> hb_sz;
struct hblk * rest;
* (Hence adding it to a free list is silly. But this path is hopefully
* rare enough that it doesn't matter. The code is cleaner this way.)
*/
-void GC_split_block(h, hhdr, n, nhdr, index)
-struct hblk *h;
-hdr * hhdr;
-struct hblk *n;
-hdr * nhdr;
-int index; /* Index of free list */
+void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
+ hdr *nhdr, int index /* Index of free list */)
{
word total_size = hhdr -> hb_sz;
word h_size = (word)n - (word)h;
}
struct hblk *
-GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned char flags, int n);
+GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n);
/*
* Allocate (and return pointer to) a heap block
* The client is responsible for clearing the block, if necessary.
*/
struct hblk *
-GC_allochblk(size_t sz, int kind, unsigned char flags/* IGNORE_OFF_PAGE or 0 */)
+GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
{
word blocks;
int start_list;
* Unlike the above, sz is in bytes.
*/
struct hblk *
-GC_allochblk_nth(size_t sz, int kind, unsigned char flags, int n)
+GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n)
{
struct hblk *hbp;
hdr * hhdr; /* Header corr. to hbp */
{
# define NWORDS 64
word frames[NWORDS];
- register int i;
+ int i;
for (i = 0; i < NWORDS; i++) frames[i] = 0;
}
struct hblk * h, * last_h = 0;
hdr *hhdr; /* gcc "might be uninitialized" warning is bogus. */
IF_PER_OBJ(size_t sz;)
- int bit_no;
+ unsigned bit_no;
for (p = q; p != 0; p = obj_link(p)){
h = HBLKPTR(p);
struct hblk * h, * last_h = 0;
hdr *hhdr;
size_t sz;
- int bit_no;
+ unsigned bit_no;
for (p = q; p != 0; p = obj_link(p)){
h = HBLKPTR(p);
}
bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, sz);
if (mark_bit_from_hdr(hhdr, bit_no)) {
- int n_marks = hhdr -> hb_n_marks - 1;
+ size_t n_marks = hhdr -> hb_n_marks - 1;
clear_mark_bit_from_hdr(hhdr, bit_no);
# ifdef PARALLEL_MARK
/* Appr. count, don't decrement to zero! */
/* Mark all objects on the free list. All objects should be */
/* marked when we're done. */
{
- register word size; /* current object size */
- int kind;
+ word size; /* current object size */
+ unsigned kind;
ptr_t q;
for (kind = 0; kind < GC_n_kinds; kind++) {
/* Thus accidentally marking a free list is not a problem; only */
/* objects on the list itself will be marked, and that's fixed here. */
{
- register word size; /* current object size */
- register ptr_t q; /* pointer to current object */
- int kind;
+ word size; /* current object size */
+ ptr_t q; /* pointer to current object */
+ unsigned kind;
for (kind = 0; kind < GC_n_kinds; kind++) {
for (size = 1; size <= MAXOBJGRANULES; size++) {
# if !defined(NO_DEBUGGING)
void GC_print_heap_sects(void)
{
- register unsigned i;
+ unsigned i;
GC_printf("Total heap size: %lu\n", (unsigned long) GC_heapsize);
for (i = 0; i < GC_n_heap_sects; i++) {
unsigned nbl = 0;
GC_printf("Section %d from %p to %p ", i,
- start, (unsigned long)(start + len));
+ start, start + len);
for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
}
{
if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
{
- register int index = PHT_HASH(p);
+ word index = PHT_HASH((word)p);
if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_normal_bl, index)) {
# ifdef PRINT_BLACK_LIST
void GC_add_to_black_list_stack(word p)
#endif
{
- register int index = PHT_HASH(p);
+ word index = PHT_HASH((word)p);
if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
# ifdef PRINT_BLACK_LIST
*/
struct hblk * GC_is_black_listed(struct hblk *h, word len)
{
- register int index = PHT_HASH((word)h);
- register word i;
+ word index = PHT_HASH((word)h);
+ word i;
word nblocks = divHBLKSZ(len);
if (!GC_all_interior_pointers) {
word result = 0;
for (h = start; h < endp1; h++) {
- register int index = PHT_HASH((word)h);
+ word index = PHT_HASH((word)h);
if (get_pht_entry_from_index(GC_old_stack_bl, index)) result++;
}
if (wParam == QUIT) {
SendMessage( hwnd, WM_CLOSE, 0, 0L );
} else {
- do_command(wParam);
+ do_command((int)wParam);
}
return(0);
SetTextColor(dc, GetSysColor(COLOR_WINDOWTEXT));
TextOut(dc, this_line.left, this_line.top,
- plain, len);
- TextOut(dc, this_line.left + len * char_width, this_line.top,
- blanks, COLS - len);
+ plain, (int)len);
+ TextOut(dc, this_line.left + (int)len * char_width,
+ this_line.top,
+ blanks, (int)(COLS - len));
SetBkMode(dc, TRANSPARENT);
SetTextColor(dc, RED);
TextOut(dc, this_line.left, this_line.top,
- control, strlen(control));
+ control, (int)strlen(control));
}
}
EndPaint(hwnd, &ps);
/* Check the object with debugging info at ohdr */
/* return NIL if it's OK. Else return clobbered */
/* address. */
-ptr_t GC_check_annotated_obj(ohdr)
-register oh * ohdr;
+ptr_t GC_check_annotated_obj(oh *ohdr)
{
register ptr_t body = (ptr_t)(ohdr + 1);
register word gc_sz = GC_size((ptr_t)ohdr);
static GC_describe_type_fn GC_describe_type_fns[MAXOBJKINDS] = {0};
-void GC_register_describe_type_fn(kind, fn)
-int kind;
-GC_describe_type_fn fn;
+void GC_register_describe_type_fn(int kind, GC_describe_type_fn fn)
{
GC_describe_type_fns[kind] = fn;
}
/* Print a type description for the object whose client-visible address */
/* is p. */
-void GC_print_type(p)
-ptr_t p;
+void GC_print_type(ptr_t p)
{
hdr * hhdr = GC_find_header(p);
char buffer[GC_TYPE_DESCR_LEN + 1];
-void GC_print_obj(p)
-ptr_t p;
+void GC_print_obj(ptr_t p)
{
register oh * ohdr = (oh *)GC_base(p);
return GC_debug_malloc(lb, OPT_RA s, i);
}
-void GC_debug_change_stubborn(p)
-void * p;
+void GC_debug_change_stubborn(void *p)
{
}
-void GC_debug_end_stubborn_change(p)
-void * p;
+void GC_debug_end_stubborn_change(void *p)
{
}
{
struct hblkhdr * hhdr = HDR(hbp);
size_t sz = hhdr -> hb_sz;
- int bit_no;
+ size_t bit_no;
char *p, *plim;
p = hbp->hb_body;
void GC_register_dynamic_libraries()
{
MEMORY_BASIC_INFORMATION buf;
- DWORD result;
+ size_t result;
DWORD protect;
LPVOID p;
char * base;
# endif
base = limit = p = GC_sysinfo.lpMinimumApplicationAddress;
# if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
- /* Only the first 32 MB of address space belongs to the current process */
- while (p < (LPVOID)0x02000000) {
+ /* Only the first 32 MB of address space belongs to the current process */
+ while (p < (LPVOID)0x02000000) {
result = VirtualQuery(p, &buf, sizeof(buf));
if (result == 0) {
/* Page is free; advance to the next possible allocation base */
& ~(GC_sysinfo.dwAllocationGranularity-1));
} else
# else
- while (p < GC_sysinfo.lpMaximumApplicationAddress) {
+ while (p < GC_sysinfo.lpMaximumApplicationAddress) {
result = VirtualQuery(p, &buf, sizeof(buf));
# endif
{
{
register word i;
register struct hash_chain_entry *p;
- int log_old_size = *log_size_ptr;
- register int log_new_size = log_old_size + 1;
+ signed_word log_old_size = *log_size_ptr;
+ signed_word log_new_size = log_old_size + 1;
word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
- register word new_size = 1 << log_new_size;
+ word new_size = (word)1 << log_new_size;
/* FIXME: Power of 2 size often gets rounded up to one more page. */
struct hash_chain_entry **new_table = (struct hash_chain_entry **)
GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
for (i = 0; i < old_size; i++) {
p = (*table)[i];
while (p != 0) {
- register ptr_t real_key = (ptr_t)REVEAL_POINTER(p -> hidden_key);
- register struct hash_chain_entry *next = p -> next;
- register int new_hash = HASH3(real_key, new_size, log_new_size);
+ ptr_t real_key = (ptr_t)REVEAL_POINTER(p -> hidden_key);
+ struct hash_chain_entry *next = p -> next;
+ size_t new_hash = HASH3(real_key, new_size, log_new_size);
p -> next = new_table[new_hash];
new_table[new_hash] = p;
int GC_general_register_disappearing_link(void * * link, void * obj)
{
struct disappearing_link *curr_dl;
- int index;
+ size_t index;
struct disappearing_link * new_dl;
DCL_LOCK_STATE;
int GC_unregister_disappearing_link(void * * link)
{
struct disappearing_link *curr_dl, *prev_dl;
- int index;
+ size_t index;
DCL_LOCK_STATE;
LOCK();
index = HASH2(link, log_dl_table_size);
- if (((unsigned long)link & (ALIGNMENT-1))) goto out;
+ if (((word)link & (ALIGNMENT-1))) goto out;
prev_dl = 0; curr_dl = dl_head[index];
while (curr_dl != 0) {
if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
/* behavior. Objects registered in this way are not finalized */
/* if they are reachable by other finalizable objects, even if those */
/* other objects specify no ordering. */
-GC_API void GC_unreachable_finalize_mark_proc(p)
-ptr_t p;
+GC_API void GC_unreachable_finalize_mark_proc(ptr_t p)
{
GC_normal_finalize_mark_proc(p);
}
{
ptr_t base;
struct finalizable_object * curr_fo, * prev_fo;
- int index;
+ size_t index;
struct finalizable_object *new_fo;
hdr *hhdr;
DCL_LOCK_STATE;
struct disappearing_link * curr_dl, * prev_dl, * next_dl;
struct finalizable_object * curr_fo, * prev_fo, * next_fo;
ptr_t real_ptr, real_link;
- register int i;
+ size_t i;
int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
# endif
if (count == 0) {
bytes_freed_before = GC_bytes_freed;
+ /* Don't do this outside, since we need the lock. */
}
curr_fo = GC_finalize_now;
# ifdef THREADS
GC_free((void *)curr_fo);
# endif
}
+ /* bytes_freed_before is initialized whenever count != 0 */
if (count != 0 && bytes_freed_before != GC_bytes_freed) {
LOCK();
GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
bottom_index *pi;
# ifdef HASH_TL
- unsigned i = TL_HASH(hi);
+ word i = TL_HASH(hi);
bottom_index * old;
old = p = GC_top_index[i];
/* Set up forwarding counts for block h of size sz */
GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
{
- register struct hblk * hbp;
- register int i;
+ struct hblk * hbp;
+ word i;
for (hbp = h; (char *)hbp < (char *)h + sz; hbp += BOTTOM_SZ) {
if (!get_index((word) hbp)) return(FALSE);
void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
word client_data)
{
- int j;
+ signed_word j;
bottom_index * index_p;
for (index_p = GC_all_bottom_indices; index_p != 0;
} else if (index_p->index[j] == 0) {
j--;
} else {
- j -= (word)(index_p->index[j]);
+ j -= (signed_word)(index_p->index[j]);
}
}
}
# endif
#endif
-#if defined(_MSC_VER) && _MSC_VER >= 1200 /* version 12.0+ (MSVC 6.0+) */
+#if defined(_MSC_VER) && _MSC_VER >= 1200 /* version 12.0+ (MSVC 6.0+) */ \
+ && !defined(_AMD64_)
# ifndef GC_HAVE_NO_BUILTIN_BACKTRACE
# define GC_HAVE_BUILTIN_BACKTRACE
# endif
void ** GC_new_free_list_inner(void);
/* Return a new kind, as specified. */
-int GC_new_kind(void **free_list, GC_word mark_descriptor_template,
+unsigned GC_new_kind(void **free_list, GC_word mark_descriptor_template,
int add_size_to_descriptor, int clear_new_objects);
/* The last two parameters must be zero or one. */
-int GC_new_kind_inner(void **free_list,
+unsigned GC_new_kind_inner(void **free_list,
GC_word mark_descriptor_template,
int add_size_to_descriptor,
int clear_new_objects);
/* Return a new mark procedure identifier, suitable for use as */
/* the first argument in GC_MAKE_PROC. */
-int GC_new_proc(GC_mark_proc);
-int GC_new_proc_inner(GC_mark_proc);
+unsigned GC_new_proc(GC_mark_proc);
+unsigned GC_new_proc_inner(GC_mark_proc);
/* Allocate an object of a given kind. Note that in multithreaded */
/* contexts, this is usually unsafe for kinds that have the descriptor */
/* Is the result a forwarding address to someplace closer to the */
/* beginning of the block or NIL? */
-# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((unsigned long) (hhdr) <= MAX_JUMP)
+# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((size_t) (hhdr) <= MAX_JUMP)
/* Get an HBLKSIZE aligned address closer to the beginning of the block */
/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
-# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (unsigned long)(hhdr))
+# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (size_t)(hhdr))
# endif /* GC_HEADERS_H */
(((word)1 << (WORDSZ - GC_DS_TAG_BITS - GC_LOG_MAX_MARK_PROCS)) - 1)
-extern word GC_n_mark_procs;
+extern unsigned GC_n_mark_procs;
/* Number of mark stack entries to discard on overflow. */
#define GC_MARK_STACK_DISCARDS (INITIAL_MARK_STACK_SIZE/8)
# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))
-# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1))
+# define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
/* Round up byte allocation requests to integral number of words, etc. */
# define ROUNDED_UP_WORDS(n) \
word _unmapped_bytes;
# endif
- unsigned _size_map[MAXOBJBYTES+1];
+ size_t _size_map[MAXOBJBYTES+1];
/* Number of words to allocate for a given allocation request in */
/* bytes. */
# define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
# endif
-extern int GC_n_kinds;
+extern unsigned GC_n_kinds;
GC_API word GC_fo_entries;
#endif /* !USE_MARK_BYTES */
#ifdef MARK_BIT_PER_OBJ
-# define MARK_BIT_NO(offset, sz) ((offset)/(sz))
+# define MARK_BIT_NO(offset, sz) (((unsigned)(offset))/(sz))
/* Get the mark bit index corresponding to the given byte */
/* offset and size (in bytes). */
# define MARK_BIT_OFFSET(sz) 1
# define FINAL_MARK_BIT(sz) ((sz) > MAXOBJBYTES? 1 : HBLK_OBJS(sz))
/* Position of final, always set, mark bit. */
#else /* MARK_BIT_PER_GRANULE */
-# define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES(offset)
+# define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES((unsigned)(offset))
# define MARK_BIT_OFFSET(sz) BYTES_TO_GRANULES(sz)
# define IF_PER_OBJ(x)
# define FINAL_MARK_BIT(sz) \
/* called explicitly without GC lock. */
struct hblk * GC_allochblk (size_t size_in_bytes, int kind,
- unsigned char flags);
+ unsigned flags);
/* Allocate a heap block, inform */
/* the marker that block is valid */
/* for objects of indicated size. */
GC_noop1((word)(&dummy));
}
-void GC_push_regs_and_stack(cold_gc_frame)
-ptr_t cold_gc_frame;
+void GC_push_regs_and_stack(ptr_t cold_gc_frame)
{
GC_with_callee_saves_pushed(GC_push_current_stack, cold_gc_frame);
}
if (h == 0) {
result = 0;
} else {
- int total_bytes = n_blocks * HBLKSIZE;
+ size_t total_bytes = n_blocks * HBLKSIZE;
if (n_blocks > 1) {
GC_large_allocd_bytes += total_bytes;
if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */
-word GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
+unsigned GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
/* Initialize GC_obj_kinds properly and standard free lists properly. */
/* This must be done statically since they may be accessed before */
# ifdef ATOMIC_UNCOLLECTABLE
# ifdef STUBBORN_ALLOC
- int GC_n_kinds = 5;
+ unsigned GC_n_kinds = 5;
# else
- int GC_n_kinds = 4;
+ unsigned GC_n_kinds = 4;
# endif
# else
# ifdef STUBBORN_ALLOC
- int GC_n_kinds = 4;
+ unsigned GC_n_kinds = 4;
# else
- int GC_n_kinds = 3;
+ unsigned GC_n_kinds = 3;
# endif
# endif
/* clear all mark bits in the header */
void GC_clear_hdr_marks(hdr *hhdr)
{
- int last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);
+ size_t last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);
# ifdef USE_MARK_BYTES
BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
{
unsigned i;
size_t sz = hhdr -> hb_sz;
- int n_marks = FINAL_MARK_BIT(sz);
+ size_t n_marks = FINAL_MARK_BIT(sz);
# ifdef USE_MARK_BYTES
for (i = 0; i <= n_marks; i += MARK_BIT_OFFSET(sz)) {
{
struct hblk *h = HBLKPTR(p);
hdr * hhdr = HDR(h);
- int bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
+ word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
if (!mark_bit_from_hdr(hhdr, bit_no)) {
set_mark_bit_from_hdr(hhdr, bit_no);
{
struct hblk *h = HBLKPTR(p);
hdr * hhdr = HDR(h);
- int bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
+ word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
if (mark_bit_from_hdr(hhdr, bit_no)) {
- int n_marks;
+ size_t n_marks;
clear_mark_bit_from_hdr(hhdr, bit_no);
n_marks = hhdr -> hb_n_marks - 1;
# ifdef PARALLEL_MARK
{
struct hblk *h = HBLKPTR(p);
hdr * hhdr = HDR(h);
- int bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
+ word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
- return(mark_bit_from_hdr(hhdr, bit_no));
+ return((GC_bool)mark_bit_from_hdr(hhdr, bit_no));
}
*/
mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack, mse *mark_stack_limit)
{
- int credit = HBLKSIZE; /* Remaining credit for marking work */
+ signed_word credit = HBLKSIZE; /* Remaining credit for marking work */
ptr_t current_p; /* Pointer to current candidate ptr. */
word current; /* Candidate pointer. */
ptr_t limit; /* (Incl) limit of current candidate */
# ifdef GWW_VDB
/* Don't recycle a stack segment obtained with the wrong flags. */
/* Win32 GetWriteWatch requires the right kind of memory. */
- static GC_incremental_at_stack_alloc = 0;
+ static GC_bool GC_incremental_at_stack_alloc = 0;
GC_bool recycle_old = (!GC_incremental || GC_incremental_at_stack_alloc);
GC_incremental_at_stack_alloc = GC_incremental;
* Should only be used if there is no possibility of mark stack
* overflow.
*/
-void GC_push_all(bottom, top)
-ptr_t bottom;
-ptr_t top;
+void GC_push_all(ptr_t bottom, ptr_t top)
{
register word length;
{
struct hblk * h;
- bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
- top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));
+ bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
+ top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
if (top == 0 || bottom == top) return;
h = HBLKPTR(bottom + HBLKSIZE);
/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(struct hblk *h, hdr *hhdr)
{
- int sz = hhdr -> hb_sz;
- int descr = hhdr -> hb_descr;
+ size_t sz = hhdr -> hb_sz;
+ word descr = hhdr -> hb_descr;
ptr_t p;
- int bit_no;
+ word bit_no;
ptr_t lim;
mse * GC_mark_stack_top_reg;
mse * mark_stack_limit = GC_mark_stack_limit;
/* Test whether any page in the given block is dirty */
GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
{
- int sz = hhdr -> hb_sz;
+ size_t sz = hhdr -> hb_sz;
if (sz <= MAXOBJBYTES) {
return(GC_page_was_dirty(h));
/* Make sure r points to the beginning of the object */
r = (ptr_t)((word)r & ~(WORDS_TO_BYTES(1) - 1));
{
- int offset = HBLKDISPL(r);
+ size_t offset = HBLKDISPL(r);
signed_word sz = candidate_hdr -> hb_sz;
- int obj_displ = offset % sz;
+ size_t obj_displ = offset % sz;
r -= obj_displ;
limit = r + sz;
GC_bool GC_is_initialized = FALSE;
+# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
+ extern void GC_init_parallel(void);
+# endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
+
void GC_init(void)
{
DCL_LOCK_STATE;
/* allocation is initialized, in case we didn't get */
/* called from GC_init_parallel(); */
{
- extern void GC_init_parallel(void);
GC_init_parallel();
}
# endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
# ifndef THREADS
# define GC_need_to_lock 0 /* Not defined without threads */
# endif
- int GC_write(buf, len)
- const char * buf;
- size_t len;
+ int GC_write(const char *buf, size_t len)
{
BOOL tmp;
DWORD written;
if (GC_stdout == INVALID_HANDLE_VALUE)
ABORT("Open of log file failed");
}
- tmp = WriteFile(GC_stdout, buf, len, &written, NULL);
+ tmp = WriteFile(GC_stdout, buf, (DWORD)len, &written, NULL);
if (!tmp)
DebugBreak();
# if defined(_MSC_VER) && defined(_DEBUG)
if (WRITE(GC_log, buf, strlen(buf)) < 0) ABORT("write to log failed");
}
-void GC_err_puts(s)
-const char *s;
+void GC_err_puts(const char *s)
{
if (WRITE(GC_stderr, s, strlen(s)) < 0) ABORT("write to stderr failed");
}
}
#ifndef PCR
-void GC_abort(msg)
-const char * msg;
+void GC_abort(const char *msg)
{
# if defined(MSWIN32)
(void) MessageBoxA(NULL, msg, "Fatal error in gc", MB_ICONERROR|MB_OK);
return result;
}
-int GC_new_kind_inner(void **fl, GC_word descr, int adjust, int clear)
+unsigned GC_new_kind_inner(void **fl, GC_word descr, int adjust, int clear)
{
- int result = GC_n_kinds++;
+ unsigned result = GC_n_kinds++;
if (GC_n_kinds > MAXOBJKINDS) ABORT("Too many kinds");
GC_obj_kinds[result].ok_freelist = fl;
return result;
}
-int GC_new_kind(void **fl, GC_word descr, int adjust, int clear)
+unsigned GC_new_kind(void **fl, GC_word descr, int adjust, int clear)
{
- int result;
+ unsigned result;
LOCK();
result = GC_new_kind_inner(fl, descr, adjust, clear);
UNLOCK();
return result;
}
-int GC_new_proc_inner(GC_mark_proc proc)
+unsigned GC_new_proc_inner(GC_mark_proc proc)
{
- int result = GC_n_mark_procs++;
+ unsigned result = GC_n_mark_procs++;
if (GC_n_mark_procs > MAX_MARK_PROCS) ABORT("Too many mark procedures");
GC_mark_procs[result] = proc;
return result;
}
-int GC_new_proc(GC_mark_proc proc)
+unsigned GC_new_proc(GC_mark_proc proc)
{
- int result;
+ unsigned result;
LOCK();
result = GC_new_proc_inner(proc);
UNLOCK();
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
+#ifndef _M_AMD64
+
+/* X86_64 is currently missing some machine-dependent code below. */
+
#include "private/msvc_dbg.h"
#define WIN32_LEAN_AND_MEAN
GetDescriptionFromStack(addresses, count, NULL, symbols, size);
return symbols;
}
+
+#endif /* !_M_AMD64 */
}
} else {
for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
- new_map[displ] = displ % granules;
+ new_map[displ] = (short)(displ % granules);
}
}
GC_obj_map[granules] = new_map;
PAGE_READWRITE);
if (page != NULL) {
PVOID pages[16];
- DWORD count = 16;
+ ULONG_PTR count = 16;
DWORD page_size;
/* Check that it actually works. In spite of some */
/* documentation it actually seems to exist on W2K. */
ptr_t GC_least_described_address(ptr_t start)
{
MEMORY_BASIC_INFORMATION buf;
- DWORD result;
+ size_t result;
LPVOID limit;
ptr_t p;
LPVOID q;
if (result != sizeof(buf) || buf.AllocationBase == 0) break;
p = (ptr_t)(buf.AllocationBase);
}
- return(p);
+ return p;
}
# endif
void *GC_get_allocation_base(void *p)
{
MEMORY_BASIC_INFORMATION buf;
- DWORD result = VirtualQuery(p, &buf, sizeof(buf));
+ size_t result = VirtualQuery(p, &buf, sizeof(buf));
if (result != sizeof(buf)) {
ABORT("Weird VirtualQuery result");
}
unsigned i;
# ifndef REDIRECT_MALLOC
- static word last_gc_no = -1;
+ static word last_gc_no = (word)(-1);
if (last_gc_no != GC_gc_no) {
GC_add_current_malloc_heap();
void GC_register_root_section(ptr_t static_root)
{
MEMORY_BASIC_INFORMATION buf;
- DWORD result;
+ size_t result;
DWORD protect;
LPVOID p;
char * base;
BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
for (i = 0; i != GC_n_heap_sects; ++i) {
- DWORD count;
+ ULONG_PTR count;
do {
PVOID * pages, * pages_end;
unsigned j;
struct hblk * start = (struct hblk *)GC_heap_sects[i].hs_start;
static struct hblk *last_warned = 0;
- unsigned nblocks = divHBLKSZ(GC_heap_sects[i].hs_bytes);
+ size_t nblocks = divHBLKSZ(GC_heap_sects[i].hs_bytes);
if ( i != 0 && last_warned != start && warn_count++ < 5) {
last_warned = start;
/* entire object. */
void GC_dirty(ptr_t p)
{
+ word index = PHT_HASH(p);
async_set_pht_entry_from_index(GC_dirty_pages, index);
}
/* correctly. */
#ifdef AO_HAVE_test_and_set_acquire
static volatile AO_TS_t fault_handler_lock = 0;
- void async_set_pht_entry_from_index(volatile page_hash_table db, int index) {
+ void async_set_pht_entry_from_index(volatile page_hash_table db, size_t index) {
while (AO_test_and_set_acquire(&fault_handler_lock) == AO_TS_SET) {}
/* Could also revert to set_pht_entry_from_index_safe if initial */
/* GC_test_and_set fails. */
AO_CLEAR(&fault_handler_lock);
}
#else /* !AO_have_test_and_set_acquire */
-# error No test-and_set operation: Introduces a race.
- /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
+# error No test_and_set operation: Introduces a race.
+ /* THIS WOULD BE INCORRECT! */
+ /* The dirty bit vector may be temporarily wrong, */
/* just before we notice the conflict and correct it. We may end up */
/* looking at it while it's wrong. But this requires contention */
/* exactly when a GC is triggered, which seems far less likely to */
/* leave it this way while we think of something better, or support */
/* GC_test_and_set on the remaining platforms. */
static volatile word currently_updating = 0;
- void async_set_pht_entry_from_index(volatile page_hash_table db, int index) {
+ void async_set_pht_entry_from_index(volatile page_hash_table db, size_t index) {
unsigned int update_dummy;
currently_updating = (word)(&update_dummy);
set_pht_entry_from_index(db, index);
/* and then to have the thread stopping code set the dirty */
/* flag, if necessary. */
for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
- register int index = PHT_HASH(h+i);
+ size_t index = PHT_HASH(h+i);
async_set_pht_entry_from_index(GC_dirty_pages, index);
}
& ~(GC_page_size-1));
found_clean = FALSE;
for (current = h_trunc; current < h_end; ++current) {
- int index = PHT_HASH(current);
+ size_t index = PHT_HASH(current);
if (!is_ptrfree || current < h || current >= h + nblocks) {
async_set_pht_entry_from_index(GC_dirty_pages, index);
goto fail;
}
} else {
- int offset;
- int pdispl = HBLKDISPL(p);
+ size_t offset;
+ size_t pdispl = HBLKDISPL(p);
offset = pdispl % sz;
if (HBLKPTR(p) != HBLKPTR(q)) goto fail;
ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
ptr_t list, signed_word *count)
{
- int bit_no = 0;
+ word bit_no = 0;
word *p, *q, *plim;
signed_word n_bytes_found = 0;
ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, size_t sz,
ptr_t list, signed_word *count)
{
- int bit_no = 0;
+ word bit_no = 0;
word *p, *plim;
signed_word n_bytes_found = 0;
/*ARGSUSED*/
void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
{
- int bit_no = 0;
+ word bit_no = 0;
ptr_t p, plim;
GC_ASSERT(sz == hhdr -> hb_sz);
void GC_print_block_descr(struct hblk *h, word /* struct PrintStats */ raw_ps)
{
hdr * hhdr = HDR(h);
- unsigned bytes = hhdr -> hb_sz;
+ size_t bytes = hhdr -> hb_sz;
struct Print_stats *ps;
unsigned n_marks = GC_n_set_marks(hhdr);
*/
void GC_start_reclaim(GC_bool report_if_found)
{
- int kind;
+ unsigned kind;
# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
GC_ASSERT(0 == GC_fl_builder_count);
GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
word sz;
- int kind;
+ unsigned kind;
hdr * hhdr;
struct hblk * hbp;
struct obj_kind * ok;
static void CleanUp( void* obj, void* data ) {
D* self = (D*) obj;
nFreed++;
- my_assert( self->i == (int) (long) data );}
+ my_assert( self->i == (int) (GC_word) data );}
static void Test() {
my_assert( nFreed >= .8 * nAllocated );}
int F::nAllocated = 0;
-long Disguise( void* p ) {
- return ~ (long) p;}
+GC_word Disguise( void* p ) {
+ return ~ (GC_word) p;}
-void* Undisguise( long i ) {
+void* Undisguise( GC_word i ) {
return (void*) ~ i;}
/* Allocate some uncollectable As and disguise their pointers.
Later we'll check to see if the objects are still there. We're
checking to make sure these objects really are uncollectable. */
- long as[ 1000 ];
- long bs[ 1000 ];
+ GC_word as[ 1000 ];
+ GC_word bs[ 1000 ];
for (i = 0; i < 1000; i++) {
as[ i ] = Disguise( new (NoGC) A( i ) );
bs[ i ] = Disguise( new (NoGC) B( i ) );}
for (i = 0; i < 1000; i++) {
C* c = new C( 2 );
C c1( 2 ); /* stack allocation should work too */
- D* d = ::new (USE_GC, D::CleanUp, (void*)(long)i) D( i );
+ D* d = ::new (USE_GC, D::CleanUp, (void*)(GC_word)i) D( i );
F* f = new F;
if (0 == i % 10) delete c;}
signed_word result;
size_t i;
word last_part;
- int extra_bits;
+ size_t extra_bits;
DCL_LOCK_STATE;
LOCK();
{
signed_word last_set_bit = len - 1;
GC_descr result;
- int i;
+ signed_word i;
# define HIGH_BIT (((word)1) << (WORDSZ - 1))
if (!GC_explicit_typing_initialized) GC_init_explicit_typing();
} else {
op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
if (op != NULL)
- lg = BYTES_TO_WORDS(GC_size(op));
+ lg = BYTES_TO_WORDS(GC_size(op));
}
if (op != NULL)
((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
* If we notice this in the middle of marking.
*/
-AO_t GC_attached_thread = 0;
+AO_t GC_attached_thread = FALSE;
/* Return TRUE if an thread was attached since we last asked or */
/* since GC_attached_thread was explicitly reset. */
GC_bool GC_started_thread_while_stopped(void)
if (result) {
AO_store(&GC_attached_thread, FALSE);
}
- return (result);
+ return ((GC_bool)result);
} else {
return FALSE;
}
/* Unlike the pthreads version, the id field is set by the caller. */
GC_thread GC_new_thread(DWORD id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ word hv = ((word)id) % THREAD_TABLE_SZ;
GC_thread result;
/* It may not be safe to allocate when we register the first thread. */
static struct GC_Thread_Rep first_thread;
return (GC_thread)(dll_thread_table + i);
}
} else {
- int hv = ((word)thread_id) % THREAD_TABLE_SZ;
+ word hv = ((word)thread_id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
GC_ASSERT(I_HOLD_LOCK());
/* Cast away volatile qualifier, since we have lock. */
GC_thread gc_nvid = (GC_thread)gc_id;
DWORD id = gc_nvid -> id;
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ word hv = ((word)id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
register GC_thread prev = 0;
GC_delete_gc_thread(t);
}
} else {
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ word hv = ((word)id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
register GC_thread prev = 0;
# if defined(I386)
PUSH4(Edi,Esi,Ebx,Edx), PUSH2(Ecx,Eax), PUSH1(Ebp);
sp = (ptr_t)context.Esp;
+# elif defined(X86_64)
+ PUSH4(Rax,Rcx,Rdx,Rbx); PUSH2(Rbp, Rsi); PUSH1(Rdi);
+ PUSH4(R8, R9, R10, R11); PUSH4(R12, R13, R14, R15);
+ sp = (ptr_t)context.Rsp;
# elif defined(ARM32)
PUSH4(R0,R1,R2,R3),PUSH4(R4,R5,R6,R7),PUSH4(R8,R9,R10,R11),PUSH1(R12);
sp = (ptr_t)context.Sp;
GC_push_all_stack(sp, thread->stack_base);
} else {
WARN("Thread stack pointer 0x%lx out of range, pushing everything\n",
- (unsigned long)sp);
+ (unsigned long)(size_t)sp);
GC_push_all_stack(stack_min, thread->stack_base);
}
} /* thread looks live */
#ifndef __GNUC__
__try {
#endif /* __GNUC__ */
- ret = (void *)args->start (args->param);
+ ret = (void *)(size_t)args->start (args->param);
#ifndef __GNUC__
} __finally {
#endif /* __GNUC__ */
DWORD WINAPI GC_win32_start(LPVOID arg)
{
- return (DWORD)GC_call_with_stack_base(GC_win32_start_inner, arg);
+ return (DWORD)(size_t)GC_call_with_stack_base(GC_win32_start_inner, arg);
}
GC_API HANDLE WINAPI GC_CreateThread(
/* Handed off to and deallocated by child thread. */
if (0 == args) {
SetLastError(ERROR_NOT_ENOUGH_MEMORY);
- return -1L;
+ return (uintptr_t)(-1);
}
/* set up thread arguments */