From ab125444763381f2cab78e20780297473f271ede Mon Sep 17 00:00:00 2001 From: hboehm Date: Wed, 12 Aug 2009 00:42:01 +0000 Subject: [PATCH] 2009-08-11 Hans Boehm (Replacement for Ivan Maidanski's diff99_cvs. Hopefully fixes the same bugs, and then some.) * allchblk.c (GC_merge_unmapped): Don't assume that adjacent free blocks have different mapping status. Correctly handle gap between blocks. (GC_split_block): Remove dead code setting hb_flags. Add comment. (GC_allochblk): Split blocks also in generational-only mode. * os_dep.c (GC_unmap_gap): Don't really use munmap. --- ChangeLog | 11 +++++++++++ allchblk.c | 26 ++++++++++++++------------ os_dep.c | 9 ++++++++- 3 files changed, 33 insertions(+), 13 deletions(-) diff --git a/ChangeLog b/ChangeLog index ccb5e97f..ca210d1c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,14 @@ +2009-08-11 Hans Boehm + (Replacement for Ivan Maidanski's diff99_cvs. Hopefully fixes + the same bugs, and then some.) + + * allchblk.c (GC_merge_unmapped): Don't assume that adjacent + free blocks have different mapping status. Correctly handle gap + between blocks. + (GC_split_block): Remove dead code setting hb_flags. Add comment. + (GC_allochblk): Split blocks also in generational-only mode. + * os_dep.c (GC_unmap_gap): Don't really use munmap. + 2009-08-08 Hans Boehm (Really Ivan Maidanski) (Mistakenly omitted from last check-in) * include/private/gc_priv.h (GC_unmapped_bytes): Define as 0 for diff --git a/allchblk.c b/allchblk.c index b723a7f6..627ef904 100644 --- a/allchblk.c +++ b/allchblk.c @@ -431,27 +431,32 @@ void GC_merge_unmapped(void) if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0 /* no pot. overflow */) { - if (IS_MAPPED(hhdr)) { - GC_ASSERT(!IS_MAPPED(nexthdr)); + /* Note that we usually try to avoid adjacent free blocks */ + /* that are either both mapped or both unmapped. 
But that */ + /* isn't guaranteed to hold since we remap blocks when we */ + /* split them, and don't merge at that point. It may also */ + /* not hold if the merged block would be too big. */ + if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) { /* make both consistent, so that we can merge */ if (size > nextsize) { GC_remap((ptr_t)next, nextsize); } else { GC_unmap((ptr_t)h, size); + GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize); hhdr -> hb_flags |= WAS_UNMAPPED; } - } else if (IS_MAPPED(nexthdr)) { - GC_ASSERT(!IS_MAPPED(hhdr)); + } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) { if (size > nextsize) { GC_unmap((ptr_t)next, nextsize); + GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize); } else { GC_remap((ptr_t)h, size); hhdr -> hb_flags &= ~WAS_UNMAPPED; hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed; } - } else { + } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) { /* Unmap any gap in the middle */ - GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz); + GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize); } /* If they are both unmapped, we merge, but leave unmapped. */ GC_remove_from_fl(hhdr, i); @@ -515,6 +520,8 @@ STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr, * * Nhdr is not completely filled in, since it is about to allocated. * It may in fact end up on the wrong free list for its size. + * That's not a disaster, since n is about to be allocated + * by our caller. * (Hence adding it to a free list is silly. But this path is hopefully * rare enough that it doesn't matter. The code is cleaner this way.) */ @@ -541,11 +548,6 @@ STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n, } INCR_FREE_BYTES(index, -(signed_word)h_size); FREE_ASSERT(GC_free_bytes[index] > 0); -# ifdef GC_ASSERTIONS - nhdr -> hb_flags &= ~FREE_BLK; - /* Don't fail test for consecutive */ - /* free blocks in GC_add_to_fl. 
*/ -# endif # ifdef USE_MUNMAP hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no; # endif @@ -588,7 +590,7 @@ GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */) if (0 != result) return result; if (GC_use_entire_heap || GC_dont_gc || USED_HEAP_SIZE < GC_requested_heapsize - || TRUE_INCREMENTAL || !GC_should_collect()) { + || GC_incremental || !GC_should_collect()) { /* Should use more of the heap, even if it requires splitting. */ split_limit = N_HBLK_FLS; } else { diff --git a/os_dep.c b/os_dep.c index 7fb1f59e..9b80603a 100644 --- a/os_dep.c +++ b/os_dep.c @@ -2136,7 +2136,14 @@ void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2) len -= free_len; } # else - if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed"); + if (len != 0) { + /* Immediately remap as above. */ + void * result; + result = mmap(start_addr, len, PROT_NONE, + MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON, + zero_fd, 0/* offset */); + if (result != (void *)start_addr) ABORT("mmap(...PROT_NONE...) failed"); + } GC_unmapped_bytes += len; # endif } -- 2.40.0