-/* $Id$ $Revision$ */
-/* vim:set shiftwidth=4 ts=8: */
-
/*************************************************************************
* Copyright (c) 2011 AT&T Intellectual Property
* All rights reserved. This program and the accompanying materials
* Contributors: See CVS logs. Details at http://www.graphviz.org/
*************************************************************************/
-#include "vmhdr.h"
-
-/* for VirtualAlloc and friends */
-#if defined(_WIN32)
-#include <windows.h>
-#endif
-
-/* Best-fit allocation method. This is based on a best-fit strategy
-** using a splay tree for storage of lists of free blocks of the same
-** size. Recent free blocks may be cached for fast reuse.
-**
-** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
-*/
-
-#ifdef DEBUG
-static int N_free; /* # of free calls */
-static int N_alloc; /* # of alloc calls */
-static int N_resize; /* # of resize calls */
-static int N_wild; /* # allocated from the wild block */
-static int N_cache; /* # allocated from cache */
-static int N_last; /* # allocated from last free block */
-static int P_junk; /* # of semi-free pieces */
-static int P_free; /* # of free pieces */
-static int P_busy; /* # of busy pieces */
-static size_t M_junk; /* max size of a junk piece */
-static size_t M_free; /* max size of a free piece */
-static size_t M_busy; /* max size of a busy piece */
-static size_t S_free; /* total free space */
-static size_t S_junk; /* total junk space */
-static int Vmcheck = 0; /* 1 if checking */
-
-/* Check to see if a block is in the free tree */
-static int vmintree(Block_t * node, Block_t * b)
-{
- Block_t *t;
-
- for (t = node; t; t = LINK(t))
- if (t == b)
- return 1;
- if (LEFT(node) && vmintree(LEFT(node), b))
- return 1;
- if (RIGHT(node) && vmintree(RIGHT(node), b))
- return 1;
- return 0;
-}
-
-/* Check to see if a block is known to be free */
-static int vmisfree(Vmdata_t * vd, Block_t * b)
-{
- Block_t *t;
- size_t s;
-
- if (b == vd->wild)
- return 1;
- else if ((s = SIZE(b)) < MAXTINY) {
- for (t = TINY(vd)[INDEX(s)]; t; t = LINK(t))
- if (b == t)
- return 1;
- } else if (vd->root && vmintree(vd->root, b))
- return 1;
-
- return 0;
-}
-
-/* check to see if the tree is in good shape */
-static int vmchktree(Block_t * node)
-{
- Block_t *t;
-
- /**/ ASSERT(!ISBUSY(SIZE(node)) && !ISJUNK(SIZE(node)));
-
- for (t = LINK(node); t; t = LINK(t)) {
- /**/ ASSERT(SIZE(t) == SIZE(node));
- /**/ ASSERT(!ISBUSY(SIZE(t)) && !ISJUNK(SIZE(t)));
- }
- if ((t = LEFT(node))) {
- /**/ ASSERT(SIZE(t) < SIZE(node));
- vmchktree(t);
- }
- if ((t = RIGHT(node))) {
- /**/ ASSERT(SIZE(t) > SIZE(node));
- vmchktree(t);
- }
- return 1;
-}
-
-static int vmonlist(Block_t * list, Block_t * b)
-{
- for (; list; list = LINK(list))
- if (list == b)
- return 1;
- return 0;
-}
+#include "vmhdr.h"
+#include "vmalloc.h"
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
-/**
- * @param vd
- * @param size if > 0, checking that no large free block >size
- * @param wild if != 0, do above but allow wild to be >size
+/** make room to store a new pointer we are about to allocate
+ *
+ * @param vd Vmdata to operate on
+ * @returns true on success
*/
-static int vmcheck(Vmdata_t * vd, size_t size, int wild)
-{
- reg Seg_t *seg;
- reg Block_t *b, *endb, *t, *np;
- reg size_t s;
-
- if (!Vmcheck)
- return 1;
-
- /**/ ASSERT(size <= 0 || !vd->free);
- /**/ ASSERT(!vd->root || vmchktree(vd->root));
-
- P_junk = P_free = P_busy = 0;
- M_junk = M_free = M_busy = S_free = 0;
- for (seg = vd->seg; seg; seg = seg->next) {
- b = SEGBLOCK(seg);
- endb = (Block_t *) (seg->baddr - sizeof(Head_t));
- while (b < endb) {
- s = SIZE(b) & ~BITS;
- np = (Block_t *) ((Vmuchar_t *) DATA(b) + s);
-
- if (!ISBUSY(SIZE(b))) {
- /**/ ASSERT(!ISJUNK(SIZE(b)));
- /**/ ASSERT(!ISPFREE(SIZE(b)));
- /**/ ASSERT(TINIEST(b) || SEG(b) == seg);
- /**/ ASSERT(ISBUSY(SIZE(np)));
- /**/ ASSERT(ISPFREE(SIZE(np)));
- /**/ ASSERT(*SELF(b) == b);
- /**/ ASSERT(size <= 0 || SIZE(b) < size ||
- SIZE(b) < MAXTINY || (wild && b == vd->wild));
- P_free += 1;
- S_free += s;
- if (s > M_free)
- M_free = s;
-
- if (s < MAXTINY) {
- for (t = TINY(vd)[INDEX(s)]; t; t = LINK(t))
- if (b == t)
- goto fine;
- }
- if (b == vd->wild) {
- /**/ ASSERT(VMWILD(vd, b));
- goto fine;
- }
- if (vd->root && vmintree(vd->root, b))
- goto fine;
-
- /**/ ASSERT(0);
- } else if (ISJUNK(SIZE(b))) {
- /**/ ASSERT(ISBUSY(SIZE(b)));
- /**/ ASSERT(!ISPFREE(SIZE(np)));
- P_junk += 1;
- S_junk += s;
- if (s > M_junk)
- M_junk = s;
-
- if (b == vd->free)
- goto fine;
- if (s < MAXCACHE) {
- for (t = CACHE(vd)[INDEX(s)]; t; t = LINK(t))
- if (b == t)
- goto fine;
- }
- for (t = CACHE(vd)[S_CACHE]; t; t = LINK(t))
- if (b == t)
- goto fine;
- /**/ ASSERT(0);
- } else {
- /**/ ASSERT(!ISPFREE(SIZE(b)) || !ISBUSY(SIZE(LAST(b))));
- /**/ ASSERT(SEG(b) == seg);
- /**/ ASSERT(!ISPFREE(SIZE(np)));
- P_busy += 1;
- if (s > M_busy)
- M_busy = s;
- goto fine;
- }
- fine:
- b = np;
- }
- }
-
- return 1;
-}
-
-#endif /*DEBUG*/
-/* Tree rotation functions */
-#define RROTATE(x,y) (LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y))
-#define LROTATE(x,y) (RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y))
-#define RLINK(s,x) ((s) = LEFT(s) = (x))
-#define LLINK(s,x) ((s) = RIGHT(s) = (x))
-/* Find and delete a suitable element in the free tree. */
-static Block_t *bestsearch(Vmdata_t * vd, reg size_t size,
- Block_t * wanted)
-{
- reg size_t s;
- reg Block_t *t, *root, *l, *r;
- Block_t link;
-
- /* extracting a tiniest block from its list */
- if ((root = wanted) && size == TINYSIZE) {
- reg Seg_t *seg;
-
- l = TLEFT(root);
- if ((r = LINK(root)))
- TLEFT(r) = l;
- if (l)
- LINK(l) = r;
- else
- TINY(vd)[0] = r;
-
- seg = vd->seg;
- if (!seg->next)
- SEG(root) = seg;
- else
- for (;; seg = seg->next) {
- if ((Vmuchar_t *) root > (Vmuchar_t *) seg->addr &&
- (Vmuchar_t *) root < seg->baddr) {
- SEG(root) = seg;
- break;
- }
- }
-
- return root;
- }
-
- /**/ ASSERT(!vd->root || vmchktree(vd->root));
-
- /* find the right one to delete */
- l = r = &link;
- if ((root = vd->root))
- do {
- /**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root)));
- if (size == (s = SIZE(root)))
- break;
- if (size < s) {
- if ((t = LEFT(root))) {
- if (size <= (s = SIZE(t))) {
- RROTATE(root, t);
- if (size == s)
- break;
- t = LEFT(root);
- } else {
- LLINK(l, t);
- t = RIGHT(t);
- }
- }
- RLINK(r, root);
- } else {
- if ((t = RIGHT(root))) {
- if (size >= (s = SIZE(t))) {
- LROTATE(root, t);
- if (size == s)
- break;
- t = RIGHT(root);
- } else {
- RLINK(r, t);
- t = LEFT(t);
- }
- }
- LLINK(l, root);
- }
- /**/ ASSERT(root != t);
- } while ((root = t));
-
- if (root) { /* found it, now isolate it */
- RIGHT(l) = LEFT(root);
- LEFT(r) = RIGHT(root);
- } else { /* nothing exactly fit */
- LEFT(r) = NIL(Block_t *);
- RIGHT(l) = NIL(Block_t *);
-
- /* grab the least one from the right tree */
- if ((root = LEFT(&link))) {
- while ((t = LEFT(root)))
- RROTATE(root, t);
- LEFT(&link) = RIGHT(root);
- }
- }
-
- if (root && (r = LINK(root))) { /* head of a link list, use next one for root */
- LEFT(r) = RIGHT(&link);
- RIGHT(r) = LEFT(&link);
- } else if (!(r = LEFT(&link)))
- r = RIGHT(&link);
- else { /* graft left tree to right tree */
- while ((t = LEFT(r)))
- RROTATE(r, t);
- LEFT(r) = RIGHT(&link);
- }
- vd->root = r;
- /**/ ASSERT(!r || !ISBITS(SIZE(r)));
+static bool make_space(Vmdata_t *vd) {
- /**/ ASSERT(!wanted || wanted == root);
- return root;
-}
+ if (vd->size == vd->capacity) {
+ void **p;
-/* Reclaim all delayed free blocks into the free tree */
-static int bestreclaim(reg Vmdata_t * vd, Block_t * wanted, int c)
-{
- reg size_t size, s;
- reg Block_t *fp, *np, *t, *list, **cache;
- reg int n, count;
- reg Seg_t *seg;
- Block_t tree;
-
- /**/ ASSERT(!vd->root || vmchktree(vd->root));
-
- if ((fp = vd->free)) {
- LINK(fp) = *(cache = CACHE(vd) + S_CACHE);
- *cache = fp;
- vd->free = NIL(Block_t *);
+ /* expand our allocation storage */
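+ /* (double the capacity so we do not need to realloc on every allocation) */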
+ size_t c = vd->capacity == 0 ? 1 : vd->capacity * 2;
+ p = realloc(vd->allocated, sizeof(vd->allocated[0]) * c);
+ if (p == NULL) {
+ return false;
}
- LINK(&tree) = NIL(Block_t *);
- count = 0;
- for (n = S_CACHE; n >= c; --n) {
- list = *(cache = CACHE(vd) + n);
- *cache = NIL(Block_t *);
- while ((fp = list)) { /* Note that below here we allow ISJUNK blocks to be
- ** forward-merged even though they are not removed from
- ** the list immediately. In this way, the list is
- ** scanned only once. It works because the LINK and SIZE
- ** fields are not destroyed during the merging. This can
- ** be seen by observing that a tiniest block has a 2-word
- ** header and a 2-word body. Merging a tiniest block
- ** (1seg) and the next block (2seg) looks like this:
- ** 1seg size link left 2seg size link left ....
- ** 1seg size link left rite xxxx xxxx .... self
- ** After the merge, the 2seg word is replaced by the RIGHT
- ** pointer of the new block and somewhere beyond the
- ** two xxxx fields, the SELF pointer will replace some
- ** other word. The important part is that the two xxxx
- ** fields are kept intact.
- */
- count += 1;
- list = LINK(list);
- /**/ ASSERT(!vmonlist(list, fp));
-
- size = SIZE(fp);
- if (!ISJUNK(size)) /* already done */
- continue;
-
- /* see if this address is from region */
- for (seg = vd->seg; seg; seg = seg->next)
- if (fp >= SEGBLOCK(seg) && fp < (Block_t *) seg->baddr)
- break;
- if (!seg) { /* must be a bug in application code! */
- /**/ ASSERT(seg != NIL(Seg_t *));
- continue;
- }
-
- if (ISPFREE(size)) { /* backward merge */
- fp = LAST(fp);
- s = SIZE(fp);
- REMOVE(vd, fp, INDEX(s), t, bestsearch);
- size = (size & ~BITS) + s + sizeof(Head_t);
- } else
- size &= ~BITS;
-
- for (;;) { /* forward merge */
- np = (Block_t *) ((Vmuchar_t *) fp + size +
- sizeof(Head_t));
- s = SIZE(np);
- /**/ ASSERT(s > 0);
- if (!ISBUSY(s)) {
- if (np == vd->wild)
- vd->wild = NIL(Block_t *);
- else
- REMOVE(vd, np, INDEX(s), t, bestsearch);
- } else if (ISJUNK(s)) {
- if ((int) C_INDEX(s) < c)
- c = C_INDEX(s);
- SIZE(np) = 0;
- CLRBITS(s);
- } else
- break;
- size += s + sizeof(Head_t);
- }
- SIZE(fp) = size;
-
- if (fp == wanted) /* about to be consumed by bestresize */
- continue;
-
- /* tell next block that this one is free */
- SETPFREE(SIZE(np));
- /**/ ASSERT(ISBUSY(SIZE(np)));
- *(SELF(fp)) = fp;
-
- if (np->body.data >= vd->seg->baddr) {
- vd->wild = fp;
- continue;
- }
-
- /* tiny block goes to tiny list */
- if (size < MAXTINY) {
- s = INDEX(size);
- np = LINK(fp) = TINY(vd)[s];
- if (s == 0) { /* TINIEST block */
- if (np)
- TLEFT(np) = fp;
- TLEFT(fp) = NIL(Block_t *);
- } else {
- if (np)
- LEFT(np) = fp;
- LEFT(fp) = NIL(Block_t *);
- SETLINK(fp);
- }
- TINY(vd)[s] = fp;
- continue;
- }
-
- /* don't put in free tree yet because they may be merged soon */
- np = &tree;
- if ((LINK(fp) = LINK(np)))
- LEFT(LINK(fp)) = fp;
- LINK(np) = fp;
- LEFT(fp) = np;
- SETLINK(fp);
- }
- }
-
- /* insert all free blocks into the free tree */
- for (list = LINK(&tree); list;) {
- fp = list;
- list = LINK(list);
-
- /**/ ASSERT(!ISBITS(SIZE(fp)));
- /**/ ASSERT(ISBUSY(SIZE(NEXT(fp))));
- /**/ ASSERT(ISPFREE(SIZE(NEXT(fp))));
- LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t *);
- if (!(np = vd->root)) { /* inserting into an empty tree */
- vd->root = fp;
- continue;
- }
-
- size = SIZE(fp);
- while (1) { /* leaf insertion */
- /**/ ASSERT(np != fp);
- if ((s = SIZE(np)) > size) {
- if ((t = LEFT(np))) {
- /**/ ASSERT(np != t);
- np = t;
- } else {
- LEFT(np) = fp;
- break;
- }
- } else if (s < size) {
- if ((t = RIGHT(np))) {
- /**/ ASSERT(np != t);
- np = t;
- } else {
- RIGHT(np) = fp;
- break;
- }
- } else { /* s == size */
- if ((t = LINK(np))) {
- LINK(fp) = t;
- LEFT(t) = fp;
- }
- LINK(np) = fp;
- LEFT(fp) = np;
- SETLINK(fp);
- break;
- }
- }
- }
+ /* save the new array */
+ vd->allocated = p;
+ vd->capacity = c;
+ }
- /**/ ASSERT(!vd->root || vmchktree(vd->root));
- return count;
+ return true;
}
/**
* @param vm region allocating from
* @param size desired block size
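+ * @returns the newly allocated block, or NULL on failure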
*/
-static void *bestalloc(Vmalloc_t * vm, reg size_t size)
-{
- reg Vmdata_t *vd = vm->data;
- reg size_t s;
- reg Block_t *tp, *np, **cache;
- reg int local;
- size_t orgsize = 0;
-
- /**/ COUNT(N_alloc);
-
- if (!(local = vd->mode & VM_TRUST)) {
- GETLOCAL(vd, local);
- if (ISLOCK(vd, local))
- return NIL(void *);
- SETLOCK(vd, local);
- orgsize = size;
- }
+static void *bestalloc(Vmalloc_t *vm, size_t size) {
+ Vmdata_t *vd = vm->data;
+ void *p;
- /**/ ASSERT(HEADSIZE == sizeof(Head_t));
- /**/ ASSERT(BODYSIZE == sizeof(Body_t));
- /**/ ASSERT((ALIGN % (BITS + 1)) == 0);
- /**/ ASSERT((sizeof(Head_t) % ALIGN) == 0);
- /**/ ASSERT((sizeof(Body_t) % ALIGN) == 0);
- /**/ ASSERT((TINYSIZE % ALIGN) == 0);
- /**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t) + sizeof(Head_t)));
-
- /* for ANSI requirement that malloc(0) returns non-NULL pointer */
- size = size <= TINYSIZE ? TINYSIZE : ROUND(size, ALIGN);
-
- if (size < MAXCACHE && (tp = *(cache = CACHE(vd) + INDEX(size)))) {
- *cache = LINK(tp);
- CLRJUNK(SIZE(tp));
- /**/ COUNT(N_cache);
- goto done;
- }
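+ /* make sure there is a slot available to record this allocation */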
+ if (!make_space(vd)) {
+ return NULL;
+ }
- if ((tp = vd->free)) { /* allocate from last free piece */
- /**/ ASSERT(ISBUSY(SIZE(tp)));
- /**/ ASSERT(ISJUNK(SIZE(tp)));
- /**/ COUNT(N_last);
-
- vd->free = NIL(Block_t *);
- if ((s = SIZE(tp)) < size) {
- LINK(tp) = *(cache = CACHE(vd) + S_CACHE);
- *cache = tp;
- } else {
- if (s >= size + (sizeof(Head_t) + TINYSIZE)) {
- SIZE(tp) = size;
- np = NEXT(tp);
- SEG(np) = SEG(tp);
- SIZE(np) =
- ((s & ~BITS) - (size + sizeof(Head_t))) | JUNK | BUSY;
- vd->free = np;
- SIZE(tp) |= s & BITS;
- }
- CLRJUNK(SIZE(tp));
- goto done;
- }
- }
+ p = malloc(size);
+ if (p == NULL) {
+ return NULL;
+ }
- for (;;) {
- for (;;) { /* best-fit - more or less */
- for (s = INDEX(size); s < S_TINY; ++s) {
- if ((tp = TINY(vd)[s])) {
- REMOVE(vd, tp, s, np, bestsearch);
- CLRPFREE(SIZE(NEXT(tp)));
- goto got_block;
- }
- }
-
- if (CACHE(vd)[S_CACHE]) /* reclaim big pieces */
- bestreclaim(vd, NIL(Block_t *), S_CACHE);
- if (vd->root && (tp = bestsearch(vd, size, NIL(Block_t *))))
- goto got_block;
- if (bestreclaim(vd, NIL(Block_t *), 0) == 0)
- break;
- }
-
- /**/ ASSERT(!vd->free);
- if ((tp = vd->wild) && SIZE(tp) >= size) {
- /**/ ASSERT(vmcheck(vd, size, 1));
- /**/ COUNT(N_wild);
- vd->wild = NIL(Block_t *);
- goto got_block;
- }
-
- /**/ ASSERT(vmcheck(vd, size, 0));
- if ((tp = (*_Vmextend) (vm, size, bestsearch)))
- goto got_block;
- else if (vd->mode & VM_AGAIN)
- vd->mode &= ~VM_AGAIN;
- else {
- CLRLOCK(vd, local);
- return NIL(void *);
- }
- }
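+ /* record the pointer so bestfree and bestresize can later recognize it */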
+ vd->allocated[vd->size] = p;
+ ++vd->size;
- got_block:
- /**/ ASSERT(!ISBITS(SIZE(tp)));
- /**/ ASSERT(SIZE(tp) >= size);
- /**/ ASSERT((SIZE(tp) % ALIGN) == 0);
- /**/ ASSERT(!vd->free);
-
- /* tell next block that we are no longer a free block */
- CLRPFREE(SIZE(NEXT(tp)));
- /**/ ASSERT(ISBUSY(SIZE(NEXT(tp))));
-
- if ((s = SIZE(tp) - size) >= (sizeof(Head_t) + TINYSIZE)) {
- SIZE(tp) = size;
-
- np = NEXT(tp);
- SEG(np) = SEG(tp);
- SIZE(np) = (s - sizeof(Head_t)) | BUSY | JUNK;
-
- if (!vd->root || !VMWILD(vd, np))
- vd->free = np;
- else {
- SIZE(np) &= ~BITS;
- *SELF(np) = np;
- SETPFREE(SIZE(NEXT(np)));
- vd->wild = np;
- }
- }
-
- SETBUSY(SIZE(tp));
-
- done:
- if (!local && (vd->mode & VM_TRACE) && _Vmtrace
- && VMETHOD(vd) == VM_MTBEST)
- (*_Vmtrace) (vm, NIL(Vmuchar_t *), (Vmuchar_t *) DATA(tp), orgsize,
- 0);
-
- /**/ ASSERT(!vd->root || vmchktree(vd->root));
-
- CLRLOCK(vd, local);
- return DATA(tp);
+ return p;
}
-/**
- * @param vm region allocating from
- * @param addr address to check
- */
-static long bestaddr(Vmalloc_t * vm, void * addr)
-{
- reg Seg_t *seg;
- reg Block_t *b, *endb;
- reg long offset;
- reg Vmdata_t *vd = vm->data;
- reg int local;
- b = 0;
- endb = 0;
-
- if (!(local = vd->mode & VM_TRUST)) {
- GETLOCAL(vd, local);
- if (ISLOCK(vd, local))
- return -1L;
- SETLOCK(vd, local);
- }
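+/** release a pointer previously returned by this region
+ *
+ * @param vm region the allocation came from
+ * @param data address to free
+ * @returns 0 on success, -1 if data was not allocated by this region
+ */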
+static int bestfree(Vmalloc_t *vm, void *data) {
+ Vmdata_t *vd = vm->data;
+ size_t i;
- offset = -1L;
- for (seg = vd->seg; seg; seg = seg->next) {
- b = SEGBLOCK(seg);
- endb = (Block_t *) (seg->baddr - sizeof(Head_t));
- if ((Vmuchar_t *) addr > (Vmuchar_t *) b &&
- (Vmuchar_t *) addr < (Vmuchar_t *) endb)
- break;
- }
+ if (!data) { /* ANSI-ism */
+ return 0;
+ }
- if (local && !(vd->mode & VM_TRUST)) { /* from bestfree or bestresize */
- b = BLOCK(addr);
- if (seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)))
- offset = 0;
- if (offset != 0 && vm->disc->exceptf)
- (void) (*vm->disc->exceptf) (vm, VM_BADADDR, addr, vm->disc);
- } else if (seg) {
- while (b < endb) {
- reg Vmuchar_t *data = (Vmuchar_t *) DATA(b);
- reg size_t size = SIZE(b) & ~BITS;
-
- if ((Vmuchar_t *) addr >= data
- && (Vmuchar_t *) addr < data + size) {
- if (ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b)))
- offset = -1L;
- else
- offset = (Vmuchar_t *) addr - data;
- goto done;
- }
-
- b = (Block_t *) ((Vmuchar_t *) DATA(b) + size);
- }
- }
+ /* find the pointer we previously allocated */
+ for (i = 0; i < vd->size; ++i) {
+ if (vd->allocated[i] == data) {
- done:
- CLRLOCK(vd, local);
- return offset;
-}
+ /* remove this entry by shifting the remaining pointers down */
+ size_t extent = sizeof(vd->allocated[0]) * (vd->size - i - 1);
+ memmove(vd->allocated + i, vd->allocated + i + 1, extent);
+ --vd->size;
-static int bestfree(Vmalloc_t * vm, void * data)
-{
- reg Vmdata_t *vd = vm->data;
- reg Block_t *bp, **cache;
- reg size_t s;
- reg int local;
-
- /**/ COUNT(N_free);
-
- if (!data) /* ANSI-ism */
- return 0;
-
- if (!(local = vd->mode & VM_TRUST)) {
- if (ISLOCK(vd, 0))
- return -1;
- if (KPVADDR(vm, data, bestaddr) != 0)
- return -1;
- SETLOCK(vd, 0);
- }
+ /* give this back to the underlying allocator */
+ free(data);
- bp = BLOCK(data);
- /**/ ASSERT(ISBUSY(SIZE(bp)) && !ISJUNK(SIZE(bp)));
- SETJUNK(SIZE(bp));
- if ((s = SIZE(bp)) < MAXCACHE) {
- /**/ ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp));
- LINK(bp) = *(cache = CACHE(vd) + INDEX(s));
- *cache = bp;
- } else if (!vd->free)
- vd->free = bp;
- else {
- /**/ ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp));
- LINK(bp) = *(cache = CACHE(vd) + S_CACHE);
- *cache = bp;
+ return 0;
}
+ }
- /* coalesce large free blocks to avoid fragmentation */
- if (s >= _Vmpagesize && ISPFREE(s))
- bestreclaim(vd, NIL(Block_t *), 0);
-
- if (!local && _Vmtrace && (vd->mode & VM_TRACE)
- && VMETHOD(vd) == VM_MTBEST)
- (*_Vmtrace) (vm, (Vmuchar_t *) data, NIL(Vmuchar_t *), (s & ~BITS),
- 0);
-
- /**/ ASSERT(!vd->root || vmchktree(vd->root));
-
- CLRLOCK(vd, 0);
- return 0;
+ /* we did not find this pointer; free() of something we did not allocate */
+ return -1;
}
/**
* @param vm region allocating from
* @param data old block of data
* @param size new size
- * @param type !=0 to move, <0 for not copy
+ * @param type ignored
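+ * @returns pointer to the resized block, or NULL on failure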
*/
-static void *bestresize(Vmalloc_t * vm, void * data, reg size_t size,
- int type)
-{
- reg Vmdata_t *vd = vm->data;
- reg Block_t *rp, *np, *t, **cache;
- reg size_t s, bs;
- reg int local, *d, *ed;
- size_t oldsize = 0, orgsize = 0;
- void *orgdata;
- orgdata = 0;
-
- /**/ COUNT(N_resize);
-
- if (!data) {
- if ((data = bestalloc(vm, size))) {
- oldsize = 0;
- size = size <= TINYSIZE ? TINYSIZE : ROUND(size, ALIGN);
- }
- goto done;
- }
- if (size == 0) {
- (void) bestfree(vm, data);
- return NIL(void *);
- }
+static void *bestresize(Vmalloc_t *vm, void *data, size_t size, int type) {
+ Vmdata_t *vd = vm->data;
+ size_t i;
- if (!(local = vd->mode & VM_TRUST)) {
- GETLOCAL(vd, local);
- if (ISLOCK(vd, local))
- return NIL(void *);
- if (!local && KPVADDR(vm, data, bestaddr) != 0)
- return NIL(void *);
- SETLOCK(vd, local);
+ /* ignore type */
+ (void)type;
- orgdata = data; /* for tracing */
- orgsize = size;
- }
+ if (!data) {
+ return bestalloc(vm, size);
+ }
- /**/ ASSERT(!vd->root || vmchktree(vd->root));
-
- size = size <= TINYSIZE ? TINYSIZE : ROUND(size, ALIGN);
- rp = BLOCK(data);
- /**/ ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp)));
- if ((bs = oldsize = SIZE(rp)) < size) {
- CLRBITS(SIZE(rp));
- np = NEXT(rp);
- do { /* forward merge as much as possible */
- s = SIZE(np);
- if (np == vd->free) {
- vd->free = NIL(Block_t *);
- CLRBITS(s);
- } else if (ISJUNK(s)) {
- CPYBITS(SIZE(rp), bs);
- bestreclaim(vd, np, C_INDEX(s));
- s = SIZE(np);
- bs = SIZE(rp);
- CLRBITS(SIZE(rp));
- } else if (!ISBUSY(s)) {
- if (np == vd->wild)
- vd->wild = NIL(Block_t *);
- else
- REMOVE(vd, np, INDEX(s), t, bestsearch);
- } else
- break;
-
- SIZE(rp) += (s += sizeof(Head_t));
- np = (Block_t *) ((Vmuchar_t *) np + s);
- CLRPFREE(SIZE(np));
- } while (SIZE(rp) < size);
-
- if (SIZE(rp) < size && size > vd->incr && SEGWILD(rp)) {
- reg Seg_t *seg;
-
- s = (size - SIZE(rp)) + sizeof(Head_t);
- s = ROUND(s, vd->incr);
- seg = SEG(rp);
- if ((*vm->disc->memoryf) (vm, seg->addr, seg->extent,
- seg->extent + s,
- vm->disc) == seg->addr) {
- SIZE(rp) += s;
- seg->extent += s;
- seg->size += s;
- seg->baddr += s;
- SEG(NEXT(rp)) = seg;
- SIZE(NEXT(rp)) = BUSY;
- }
- }
-
- CPYBITS(SIZE(rp), bs);
- }
+ /* find the pointer we previously allocated */
+ for (i = 0; i < vd->size; ++i) {
+ if (vd->allocated[i] == data) {
- /* If a buffer is resized, it is likely to be resized again.
- So we increase a bit more to reduce future work */
- bs = size < (BODYSIZE << 1) ? size : size < 1024 ? (size >> 1) : 1024;
- if ((s = SIZE(rp)) >= (size + bs + (TINYSIZE + sizeof(Head_t)))) {
- SIZE(rp) = size;
- np = NEXT(rp);
- SEG(np) = SEG(rp);
- SIZE(np) = (((s & ~BITS) - size) - sizeof(Head_t)) | BUSY | JUNK;
- CPYBITS(SIZE(rp), s);
- rp = np;
- goto do_free;
- } else if (s < size) {
- if (!(type & (VM_RSMOVE | VM_RSCOPY))) /* see if old data is moveable */
- data = NIL(void *);
- else {
- ed = (int *) data;
- if (size < ((s & ~BITS) + bs))
- size = (s & ~BITS) + bs;
- if ((data = KPVALLOC(vm, size, bestalloc))) {
- if (type & VM_RSCOPY) { /* old data must be copied */
- d = (int *) data;
- INTCOPY(d, ed, s);
- }
- do_free: /* delay reusing these blocks as long as possible */
- SETJUNK(SIZE(rp));
- LINK(rp) = *(cache = CACHE(vd) + S_CACHE);
- *cache = rp;
- if ((rp = vd->free)) {
- vd->free = NIL(Block_t *);
- LINK(rp) = *cache;
- *cache = rp;
- }
- }
- }
- }
+ /* resize the allocation */
+ void *p = realloc(data, size);
+ if (p == NULL) {
+ return p;
+ }
- if (!local && _Vmtrace && data && (vd->mode & VM_TRACE)
- && VMETHOD(vd) == VM_MTBEST)
- (*_Vmtrace) (vm, (Vmuchar_t *) orgdata, (Vmuchar_t *) data,
- orgsize, 0);
- CLRLOCK(vd, local);
+ /* save the updated pointer */
+ vd->allocated[i] = p;
- done:if (data && (type & VM_RSZERO) && size > CLRBITS(oldsize)) {
- d = (int *) ((char *) data + oldsize);
- size -= oldsize;
- INTZERO(d, size);
+ return p;
}
+ }
- /**/ ASSERT(!vd->root || vmchktree(vd->root));
-
- return data;
+ /* the pointer the caller gave us was not allocated by us */
+ return NULL;
}
/* A discipline to get memory using sbrk() or VirtualAlloc on win32 */
bestalloc,
bestresize,
bestfree,
- bestaddr,
+ 0,
VM_MTBEST
};
NIL(Block_t *), /* free */
NIL(Block_t *), /* wild */
NIL(Block_t *), /* root */
+ { NULL }, /* tiny */
+ { NULL }, /* cache */
+ NULL, /* allocated */
+ 0, /* size */
+ 0, /* capacity */
};
static Vmalloc_t _Vmheap = {
{bestalloc,
bestresize,
bestfree,
- bestaddr,
+ 0,
VM_MTBEST},
NIL(char *), /* file */
0, /* line */
-/* $Id$ $Revision$ */
-/* vim:set shiftwidth=4 ts=8: */
-
/*************************************************************************
* Copyright (c) 2011 AT&T Intellectual Property
* All rights reserved. This program and the accompanying materials
* Contributors: See CVS logs. Details at http://www.graphviz.org/
*************************************************************************/
-#include "vmhdr.h"
+#include "vmhdr.h"
+#include "vmalloc.h"
+#include <stdlib.h>
/* Opening a new region of allocation.
-** Note that because of possible exotic memory types,
-** all region data must be stored within the space given
-** by the discipline.
**
** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
-typedef struct _vminit_ {
- Vmdata_t vd; /* space for the region itself */
- Seg_t seg; /* space for segment */
- Block_t block; /* space for a block */
- Head_t head; /* space for the fake header */
- char a[3 * ALIGN]; /* extra to fuss with alignment */
-} Vminit_t;
-
/**
- * @param disc discipline to get segments
+ * @param disc ignored
* @param meth method to manage space
- * @param mode type of region
+ * @param mode ignored
*/
-Vmalloc_t *vmopen(Vmdisc_t * disc, Vmethod_t * meth, int mode)
-{
- reg Vmalloc_t *vm;
- reg Vmdata_t *vd;
- reg size_t s, a, incr;
- reg Block_t *b;
- reg Seg_t *seg;
- Vmuchar_t *addr;
- reg Vmemory_f memoryf;
- reg int e;
-
- if (!meth || !disc || !(memoryf = disc->memoryf))
- return NIL(Vmalloc_t *);
-
- GETPAGESIZE(_Vmpagesize);
-
- /* note that Vmalloc_t space must be local to process since that's
- where the meth&disc function addresses are going to be stored */
- if (!(vm = (Vmalloc_t *) vmalloc(Vmheap, sizeof(Vmalloc_t))))
- return NIL(Vmalloc_t *);
- vm->meth = *meth;
- vm->disc = disc;
- vm->file = NIL(char *);
- vm->line = 0;
-
- if (disc->exceptf) {
- addr = NIL(Vmuchar_t *);
- if ((e =
- (*disc->exceptf) (vm, VM_OPEN, (void *) (&addr),
- disc)) != 0) {
- if (e < 0 || !addr)
- goto open_error;
-
- /* align this address */
- if ((a = (size_t) (VLONG(addr) % ALIGN)) != 0)
- addr += ALIGN - a;
-
- /* see if it's a valid region */
- vd = (Vmdata_t *) addr;
- if ((vd->mode & meth->meth) != 0) {
- vm->data = vd;
- return vm;
- } else {
- open_error:
- vmfree(Vmheap, vm);
- return NIL(Vmalloc_t *);
- }
- }
- }
-
- /* make sure vd->incr is properly rounded */
- incr = disc->round <= 0 ? _Vmpagesize : disc->round;
- incr = MULTIPLE(incr, ALIGN);
-
- /* get space for region data */
- s = ROUND(sizeof(Vminit_t), incr);
- if (!(addr = (Vmuchar_t *) (*memoryf) (vm, NIL(void *), 0, s, disc))) {
- vmfree(Vmheap, vm);
- return NIL(Vmalloc_t *);
- }
-
- /* make sure that addr is aligned */
- if ((a = (size_t) (VLONG(addr) % ALIGN)) != 0)
- addr += ALIGN - a;
-
- /* initialize region */
- vd = (Vmdata_t *) addr;
- vd->mode = (mode & VM_FLAGS) | meth->meth;
- vd->incr = incr;
- vd->pool = 0;
- vd->free = vd->wild = NIL(Block_t *);
-
- if (vd->mode & (VM_TRACE | VM_MTDEBUG))
- vd->mode &= ~VM_TRUST;
-
- if (vd->mode & (VM_MTBEST | VM_MTDEBUG | VM_MTPROFILE)) {
- vd->root = NIL(Block_t *);
- for (e = S_TINY - 1; e >= 0; --e)
- TINY(vd)[e] = NIL(Block_t *);
- for (e = S_CACHE; e >= 0; --e)
- CACHE(vd)[e] = NIL(Block_t *);
- incr = sizeof(Vmdata_t);
- } else
- incr = OFFSET(Vmdata_t, root);
-
- vd->seg = (Seg_t *) (addr + ROUND(incr, ALIGN));
- /**/ ASSERT(VLONG(vd->seg) % ALIGN == 0);
-
- seg = vd->seg;
- seg->next = NIL(Seg_t *);
- seg->vm = vm;
- seg->addr = (void *) (addr - (a ? ALIGN - a : 0));
- seg->extent = s;
- seg->baddr = addr + s - (a ? ALIGN : 0);
- seg->size = s; /* this size is larger than usual so that the segment
- will not be freed until the region is closed. */
- seg->free = NIL(Block_t *);
-
- /* make a data block out of the remainder */
- b = SEGBLOCK(seg);
- SEG(b) = seg;
- SIZE(b) = seg->baddr - (Vmuchar_t *) b - 2 * sizeof(Head_t);
- *SELF(b) = b;
- /**/ ASSERT(SIZE(b) % ALIGN == 0);
- /**/ ASSERT(VLONG(b) % ALIGN == 0);
+Vmalloc_t *vmopen(Vmdisc_t *disc, Vmethod_t *meth, int mode) {
+ Vmalloc_t *vm;
- /* make a fake header for next block in case of noncontiguous segments */
- SEG(NEXT(b)) = seg;
- SIZE(NEXT(b)) = BUSY | PFREE;
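+ /* the discipline and mode are not used by this allocator */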
+ (void)disc;
+ (void)mode;
- if (vd->mode & (VM_MTLAST | VM_MTPOOL))
- seg->free = b;
- else
- vd->wild = b;
+ vm = malloc(sizeof(*vm));
+ if (vm == NULL) {
+ return NULL;
+ }
- vm->data = vd;
+ vm->meth = *meth;
- /* put into linked list of regions */
- vm->next = Vmheap->next;
- Vmheap->next = vm;
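+ /* calloc gives zeroed region data: no tracked pointers, size and capacity both 0 */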
+ vm->data = calloc(1, sizeof(*vm->data));
+ if (vm->data == NULL) {
+ free(vm);
+ return NULL;
+ }
- return vm;
+ return vm;
}
+++ /dev/null
-/* $Id$ $Revision$ */
-/* vim:set shiftwidth=4 ts=8: */
-
-/*************************************************************************
- * Copyright (c) 2011 AT&T Intellectual Property
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * which accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- * Contributors: See CVS logs. Details at http://www.graphviz.org/
- *************************************************************************/
-
-#include "vmhdr.h"
-
-#if 0 /* not used */
-static char *Version = "\n@(#)Vmalloc (AT&T Labs - kpv) 1999-08-05\0\n";
-#endif
-
-
-/* Private code used in the vmalloc library
-**
-** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
-*/
-
-/**
- * Get more memory for a region
- *
- * @param vm region to increase in size
- * @param size desired amount of space
- * @param searchf tree search function
- */
-static Block_t *vmextend(reg Vmalloc_t * vm, size_t size,
- Vmsearch_f searchf)
-{
- reg size_t s;
- reg Seg_t *seg;
- reg Block_t *bp, *t;
- reg Vmuchar_t *addr;
- reg Vmdata_t *vd = vm->data;
- reg Vmemory_f memoryf = vm->disc->memoryf;
- reg Vmexcept_f exceptf = vm->disc->exceptf;
-
- GETPAGESIZE(_Vmpagesize);
-
- if (vd->incr <= 0) /* this is just _Vmheap on the first call */
- vd->incr = 4 * _Vmpagesize;
-
- /* Get slightly more for administrative data */
- s = size + sizeof(Seg_t) + sizeof(Block_t) + sizeof(Head_t) +
- 2 * ALIGN;
- if (s <= size) /* size was too large and we have wrapped around */
- return NIL(Block_t *);
- if ((size = ROUND(s, vd->incr)) < s)
- return NIL(Block_t *);
-
- /* see if we can extend the current segment */
- if (!(seg = vd->seg))
- addr = NIL(Vmuchar_t *);
- else {
- if (!vd->wild || SEG(vd->wild) != seg)
- s = 0;
- else {
- s = SIZE(vd->wild) + sizeof(Head_t);
- if ((s = (s / vd->incr) * vd->incr) == size)
- size += vd->incr;
- }
- addr = (Vmuchar_t *) (*memoryf) (vm, seg->addr, seg->extent,
- seg->extent + size - s, vm->disc);
- if (!addr)
- seg = NIL(Seg_t *);
- else {
- /**/ ASSERT(addr == (Vmuchar_t *) seg->addr);
- addr += seg->extent;
- size -= s;
- }
- }
-
- while (!addr) { /* try to get space */
- if ((addr =
- (Vmuchar_t *) (*memoryf) (vm, NIL(void *), 0, size,
- vm->disc)))
- break;
-
- /* check with exception handler to see if we should continue */
- if (!exceptf)
- return NIL(Block_t *);
- else {
- int rv, lock;
- lock = vd->mode & VM_LOCK;
- vd->mode &= ~VM_LOCK;
- rv = (*exceptf) (vm, VM_NOMEM, (void *) size, vm->disc);
- vd->mode |= lock;
- if (rv <= 0) {
- if (rv == 0)
- vd->mode |= VM_AGAIN;
- return NIL(Block_t *);
- }
- }
- }
-
- if (seg) { /* extending current segment */
- bp = BLOCK(seg->baddr);
- /**/ ASSERT((SIZE(bp) & ~BITS) == 0);
- /**/ ASSERT(SEG(bp) == seg);
-
- if (vd->mode & (VM_MTBEST | VM_MTDEBUG | VM_MTPROFILE)) {
- if (!ISPFREE(SIZE(bp)))
- SIZE(bp) = size - sizeof(Head_t);
- else {
- /**/ ASSERT(searchf);
- bp = LAST(bp);
- if (bp == vd->wild)
- vd->wild = NIL(Block_t *);
- else
- REMOVE(vd, bp, INDEX(SIZE(bp)), t, (*searchf));
- SIZE(bp) += size;
- }
- } else {
- if (seg->free) {
- bp = seg->free;
- seg->free = NIL(Block_t *);
- SIZE(bp) += size;
- } else
- SIZE(bp) = size - sizeof(Head_t);
- }
-
- seg->size += size;
- seg->extent += size;
- seg->baddr += size;
- } else { /* creating a new segment */
- reg Seg_t *sp, *lastsp;
-
- if ((s = (size_t) (VLONG(addr) % ALIGN)) != 0)
- addr += ALIGN - s;
-
- seg = (Seg_t *) addr;
- seg->vm = vm;
- seg->addr = (void *) (addr - (s ? ALIGN - s : 0));
- seg->extent = size;
- seg->baddr = addr + size - (s ? 2 * ALIGN : 0);
- seg->free = NIL(Block_t *);
- bp = SEGBLOCK(seg);
- SEG(bp) = seg;
- SIZE(bp) = seg->baddr - (Vmuchar_t *) bp - 2 * sizeof(Head_t);
-
- /* NOTE: for Vmbest, Vmdebug and Vmprofile the region's segment list
- is reversely ordered by addresses. This is so that we can easily
- check for the wild block.
- */
- lastsp = NIL(Seg_t *);
- sp = vd->seg;
- if (vd->mode & (VM_MTBEST | VM_MTDEBUG | VM_MTPROFILE))
- for (; sp; lastsp = sp, sp = sp->next)
- if (seg->addr > sp->addr)
- break;
- seg->next = sp;
- if (lastsp)
- lastsp->next = seg;
- else
- vd->seg = seg;
-
- seg->size = SIZE(bp);
- }
-
- /* make a fake header for possible segmented memory */
- t = NEXT(bp);
- SEG(t) = seg;
- SIZE(t) = BUSY;
-
- /* see if the wild block is still wild */
- if ((t = vd->wild) && (seg = SEG(t)) != vd->seg) {
- CLRPFREE(SIZE(NEXT(t)));
- if (vd->mode & (VM_MTBEST | VM_MTDEBUG | VM_MTPROFILE)) {
- SIZE(t) |= BUSY | JUNK;
- LINK(t) = CACHE(vd)[C_INDEX(SIZE(t))];
- CACHE(vd)[C_INDEX(SIZE(t))] = t;
- } else
- seg->free = t;
-
- vd->wild = NIL(Block_t *);
- }
-
- return bp;
-}
-
-/**
- * Truncate a segment if possible
- *
- * @param vm containing region
- * @param seg the one to be truncated
- * @param size amount of free space
- * @param exact amount given was exact
- */
-static int vmtruncate(Vmalloc_t * vm, Seg_t * seg, size_t size, int exact)
-{
- reg void *caddr;
- reg Seg_t *last;
- reg Vmdata_t *vd = vm->data;
- reg Vmemory_f memoryf = vm->disc->memoryf;
-
- caddr = seg->addr;
-
- if (size < seg->size) {
- reg size_t less;
-
- /* the truncated amount must satisfy the discipline requirement */
- if ((less = vm->disc->round) <= 0)
- less = _Vmpagesize;
- less = (size / less) * less;
- less = (less / ALIGN) * ALIGN;
-
- if (!exact) /* only truncate multiples of incr */
- less = (less / vd->incr) * vd->incr;
-
- if (less > 0 && size > less && (size - less) < sizeof(Block_t))
- less -= vd->incr;
-
- if (less <= 0 ||
- (*memoryf) (vm, caddr, seg->extent, seg->extent - less,
- vm->disc) != caddr)
- return -1;
-
- seg->extent -= less;
- seg->size -= less;
- seg->baddr -= less;
- SIZE(BLOCK(seg->baddr)) = BUSY;
- return 0;
- }
-
- /* unlink segment from region */
- if (seg == vd->seg) {
- vd->seg = seg->next;
- last = NIL(Seg_t *);
- } else {
- for (last = vd->seg; last->next != seg; last = last->next);
- last->next = seg->next;
- }
-
- /* now delete it */
- if ((*memoryf) (vm, caddr, seg->extent, 0, vm->disc) == caddr)
- return 0;
-
- /* space reduction failed, reinsert segment */
- if (last) {
- seg->next = last->next;
- last->next = seg;
- } else {
- seg->next = vd->seg;
- vd->seg = seg;
- }
- return -1;
-}
-
-/* Externally visible names but local to library */
-Vmextern_t _Vmextern = { vmextend, /* _Vmextend */
- vmtruncate, /* _Vmtruncate */
- 0, /* _Vmpagesize */
- NIL(char *(*)(char *, char *, int)), /* _Vmstrcpy */
- NIL(char *(*)(Vmulong_t, int)), /* _Vmitoa */
- NIL(void (*)(Vmalloc_t *,
- Vmuchar_t *, Vmuchar_t *, size_t, size_t)), /* _Vmtrace */
- NIL(void (*)(Vmalloc_t *)) /* _Vmpfclose */
-};