src/util/lookup.h \
src/util/range.h \
src/util/s_to_n32_unsafe.h \
+ src/util/slab_allocator.h \
src/util/smart_ptr.h \
src/util/static_assert.h \
src/util/string_utils.h \
, const std::vector<size_t> &fill
, Skeleton *skel
, const charset_t &charset
- , tcpool_t *ptcpool
, const std::string &n
, const std::string &c
, uint32_t l
, head(NULL)
, rules(dfa.rules)
, tags(dfa.tags)
- , tcpool(*ptcpool)
+ , tcpool(dfa.tcpool)
, max_fill (0)
, need_backup (false)
, need_accept (false)
, const std::vector<size_t> &fill
, Skeleton *skel
, const charset_t &charset
- , tcpool_t *ptcpool
, const std::string &n
, const std::string &c
, uint32_t l
insert_fallback_tags(dfa);
// try to minimize the number of tag variables
- tcpool_t *tcpool = optimize_tags(dfa);
+ optimize_tags(dfa);
minimization(dfa);
fillpoints(dfa, fill);
// ADFA stands for 'DFA with actions'
- DFA *adfa = new DFA(dfa, fill, skeleton, cs, tcpool, name, cond, line);
+ DFA *adfa = new DFA(dfa, fill, skeleton, cs, name, cond, line);
// see note [reordering DFA states]
adfa->reorder();
static void check_tags(const Tagpool &tagpool, size_t oldidx, size_t newidx, bool *badtags);
static bool compare_by_rule(const clos_t &c1, const clos_t &c2);
static void prune_final_items(closure_t &clos, std::valarray<Rule> &rules);
-static tagsave_t *merge_and_check_tags(const closure_t &clos, Tagpool &tagpool, const std::valarray<Rule> &rules, bool *badtags);
+static tagsave_t *merge_and_check_tags(const closure_t &clos, Tagpool &tagpool, tcpool_t &tcpool, const std::valarray<Rule> &rules, bool *badtags);
tagsave_t *closure(const closure_t &clos1, closure_t &clos2,
- Tagpool &tagpool, std::valarray<Rule> &rules, bool *badtags)
+ Tagpool &tagpool, tcpool_t &tcpool, std::valarray<Rule> &rules,
+ bool *badtags)
{
// build tagged epsilon-closure of the given set of NFA states
clos2.clear();
std::sort(clos2.begin(), clos2.end(), compare_by_rule);
// merge tags from different rules, find nondeterministic tags
- return merge_and_check_tags(clos1, tagpool, rules, badtags);
+ return merge_and_check_tags(clos1, tagpool, tcpool, rules, badtags);
}
/* note [epsilon-closures in tagged NFA]
// WARNING: this function assumes that closure items are grouped bu rule
tagsave_t *merge_and_check_tags(const closure_t &clos, Tagpool &tagpool,
- const std::valarray<Rule> &rules, bool *badtags)
+ tcpool_t &tcpool, const std::valarray<Rule> &rules, bool *badtags)
{
const size_t ntag = tagpool.ntags;
tagver_t *tags = tagpool.buffer1;
}
}
- return tagsave_t::convert(tags, ntag);
+ return tcpool.conv_to_save(tags, ntag);
}
} // namespace re2c
typedef closure_t::const_iterator cclositer_t;
tagsave_t *closure(const closure_t &clos1, closure_t &clos2,
- Tagpool &tagpool, std::valarray<Rule> &rules, bool *badtags);
+ Tagpool &tagpool, tcpool_t &tcpool, std::valarray<Rule> &rules,
+ bool *badtags);
clos_t::clos_t()
: state(NULL)
, nchars(charset.size() - 1) // (n + 1) bounds for n ranges
, rules(nfa.rules)
, tags(*nfa.tags)
+ , tcpool(*new tcpool_t)
, maxtagver(0)
{
const size_t ntag = tags.size();
maxtagver = vartag_maxver(tags);
clos1.push_back(clos_t(nfa.root, ZERO_TAGS));
- closure(clos1, clos2, tagpool, rules, badtags);
+ closure(clos1, clos2, tagpool, tcpool, rules, badtags);
clospool.insert(clos2);
// closures are in sync with DFA states
f = std::find_if(clos0.begin(), e, clos_t::final);
if (f != e) {
s->rule = f->state->rule;
- s->tcmd[nchars].save = tagsave_t::convert(tagpool[f->tagidx], ntag);
+ s->tcmd[nchars].save = tcpool.conv_to_save(tagpool[f->tagidx], ntag);
}
// for each alphabet symbol, build tagged epsilon-closure
// find identical closure or add the new one
for (size_t c = 0; c < nchars; ++c) {
reach(clos0, clos1, charset[c]);
- s->tcmd[c].save = closure(clos1, clos2, tagpool, rules, badtags);
+ s->tcmd[c].save = closure(clos1, clos2, tagpool, tcpool, rules, badtags);
s->arcs[c] = clospool.insert(clos2);
}
}
const size_t nchars;
std::valarray<Rule> &rules;
std::valarray<Tag> &tags;
+ tcpool_t &tcpool;
tagver_t maxtagver;
dfa_t(const nfa_t &nfa, const charset_t &charset,
void fillpoints(const dfa_t &dfa, std::vector<size_t> &fill);
void cutoff_dead_rules(dfa_t &dfa, size_t defrule, const std::string &cond);
void insert_fallback_tags(dfa_t &dfa);
-tcpool_t *optimize_tags(dfa_t &dfa);
+void optimize_tags(dfa_t &dfa);
} // namespace re2c
void insert_fallback_tags(dfa_t &dfa)
{
tagver_t maxver = dfa.maxtagver;
+ tcpool_t &pool = dfa.tcpool;
const size_t
nstates = dfa.states.size(),
nsym = dfa.nchars,
// patch commands (backups must go first)
tagcopy_t **p = &s->tcmd[nsym].copy;
- *p = new tagcopy_t(*p, f, b);
+ *p = pool.make_copy(*p, f, b);
for (size_t c = 0; c < nsym; ++c) {
size_t j = s->arcs[c];
if (j != dfa_t::NIL && dfa.states[j]->fallthru) {
p = &s->tcmd[c].copy;
- *p = new tagcopy_t(*p, b, f);
+ *p = pool.make_copy(*p, b, f);
}
}
maxver = std::max(maxver, b);
namespace re2c
{
-static tcpool_t *freeze_tags(dfa_t &dfa);
+static void freeze_tags(dfa_t &dfa);
static cfg_ix_t map_arcs_to_bblocks(const dfa_t &dfa, cfg_ix_t *arc2bb);
static cfg_bb_t *create_bblocks(const dfa_t &dfa, const cfg_ix_t *arc2bb, cfg_ix_t nbblock);
static void basic_block(cfg_bb_t *bb, const cfg_ix_t *succb, const cfg_ix_t *succe, tcmd_t *cmd, tagver_t *use);
static void successors(const dfa_t &dfa, const cfg_ix_t *arc2bb, bool *been, cfg_ix_t *&succ, size_t x);
static void fallback(const dfa_t &dfa, const cfg_ix_t *arc2bb, bool *been, cfg_ix_t *&succ, size_t x);
-tcpool_t *optimize_tags(dfa_t &dfa)
+void optimize_tags(dfa_t &dfa)
{
if (dfa.maxtagver > 0) {
cfg_t cfg(dfa);
delete[] ver2new;
}
- return freeze_tags(dfa);
+ freeze_tags(dfa);
}
/* note [tag freezing]
* They also become immutable, because different commands may
* share representation in memory.
*/
-tcpool_t *freeze_tags(dfa_t &dfa)
+void freeze_tags(dfa_t &dfa)
{
- tcpool_t *tcpool = new tcpool_t;
+ tcpool_t &pool = dfa.tcpool;
const size_t
nstate = dfa.states.size(),
nsym = dfa.nchars;
// transition commands
for(; cmd < fin; ++cmd) {
- *id++ = tcpool->insert(cmd->save, cmd->copy);
+ *id++ = pool.insert(cmd->save, cmd->copy);
}
// final epsilon-transition command
- *id++ = tcpool->insert(fin->save, fin->copy);
+ *id++ = pool.insert(fin->save, fin->copy);
delete[] s->tcmd;
s->tcmd = NULL;
}
-
- return tcpool;
}
cfg_t::cfg_t(dfa_t &a)
static uint32_t hash_tcmd(const tagsave_t *save, const tagcopy_t *copy);
-free_list<tagsave_t*> tagsave_t::freelist;
-
-tagsave_t::tagsave_t(tagsave_t *n, tagver_t v)
- : next(n)
- , ver(v)
-{
- freelist.insert(this);
-}
-
-tagsave_t::~tagsave_t()
-{
- freelist.erase(this);
-}
-
-tagsave_t *tagsave_t::convert(const tagver_t *vers, size_t ntag)
-{
- tagsave_t *s = NULL;
- for (size_t t = ntag; t-- > 0;) {
- const tagver_t v = vers[t];
- if (v != TAGVER_ZERO) {
- s = new tagsave_t(s, v);
- }
- }
- return s;
-}
-
void tagsave_t::swap(tagsave_t &x, tagsave_t &y)
{
std::swap(x.ver, y.ver);
return x.ver == y.ver;
}
-free_list<tagcopy_t*> tagcopy_t::freelist;
-
-tagcopy_t::tagcopy_t(tagcopy_t *n, tagver_t l, tagver_t r)
- : next(n)
- , lhs(l)
- , rhs(r)
-{
- freelist.insert(this);
-}
-
-tagcopy_t::~tagcopy_t()
-{
- freelist.erase(this);
-}
-
void tagcopy_t::swap(tagcopy_t &x, tagcopy_t &y)
{
std::swap(x.lhs, y.lhs);
tccmd_t::tccmd_t(const tagsave_t *s, const tagcopy_t *c): save(s), copy(c) {}
tcpool_t::tcpool_t()
- : index()
+ : alc()
+ , index()
{
// empty command must have static number zero
assert(TCID0 == insert(NULL, NULL));
}
+// Allocate a "save tag version" node from the pool's slab arena and
+// prepend it to the given list. Nodes are POD (no ctor/dtor runs) and
+// live until the pool itself is destroyed; they are never freed alone.
+tagsave_t *tcpool_t::make_save(tagsave_t *next, tagver_t ver)
+{
+	tagsave_t *p = alc.alloct<tagsave_t>(1);
+	p->next = next;
+	p->ver = ver;
+	return p;
+}
+
+// Allocate a "copy tag version" node (lhs := rhs) from the pool's slab
+// arena and prepend it to the given list. Same lifetime contract as
+// make_save: freed wholesale when the pool dies.
+tagcopy_t *tcpool_t::make_copy(tagcopy_t *next, tagver_t lhs, tagver_t rhs)
+{
+	tagcopy_t *p = alc.alloct<tagcopy_t>(1);
+	p->next = next;
+	p->lhs = lhs;
+	p->rhs = rhs;
+	return p;
+}
+
+// Convert a dense array of tag versions into a linked list of save
+// commands, skipping TAGVER_ZERO (unused) slots. The resulting list is
+// ordered by ascending tag index, same as before.
+tagsave_t *tcpool_t::conv_to_save(const tagver_t *vers, size_t ntag)
+{
+	tagsave_t *head = NULL;
+	tagsave_t **tail = &head;
+	for (size_t t = 0; t < ntag; ++t) {
+		const tagver_t v = vers[t];
+		if (v == TAGVER_ZERO) continue;
+		*tail = make_save(NULL, v);
+		tail = &(*tail)->next;
+	}
+	return head;
+}
+
uint32_t hash_tcmd(const tagsave_t *save, const tagcopy_t *copy)
{
uint32_t h = 0;
#include "src/ir/tag.h"
#include "src/util/c99_stdint.h"
#include "src/util/forbid_copy.h"
-#include "src/util/free_list.h"
#include "src/util/lookup.h"
+#include "src/util/slab_allocator.h"
namespace re2c
{
struct tagsave_t
{
-	static free_list<tagsave_t*> freelist;
-
+	// POD node of a linked "save tag version" command list; instances
+	// are arena-allocated and owned by tcpool_t (see tcpool_t::make_save),
+	// hence no constructor/destructor here.
	tagsave_t *next;
	tagver_t ver;
-	static tagsave_t *convert(const tagver_t *vers, size_t ntag);
	static void swap(tagsave_t &x, tagsave_t &y);
	static bool less(const tagsave_t &x, const tagsave_t &y);
	static bool equal(const tagsave_t &x, const tagsave_t &y);
-	tagsave_t(tagsave_t *n, tagver_t v);
-	~tagsave_t();
	FORBID_COPY(tagsave_t);
};
struct tagcopy_t
{
-	static free_list<tagcopy_t*> freelist;
-
+	// POD node of a linked "copy tag version" command list (lhs := rhs);
+	// instances are arena-allocated and owned by tcpool_t (see
+	// tcpool_t::make_copy), hence no constructor/destructor here.
	tagcopy_t *next;
	tagver_t lhs; // left hand side
	tagver_t rhs; // right hand side
	static bool less(const tagcopy_t &x, const tagcopy_t &y);
	static void swap(tagcopy_t &x, tagcopy_t &y);
	static bool equal(const tagcopy_t &x, const tagcopy_t &y);
-	tagcopy_t(tagcopy_t *n, tagver_t l, tagver_t r);
-	~tagcopy_t();
	FORBID_COPY(tagcopy_t);
};
static const tcid_t TCID0 = 0;
-struct tcpool_t
+// Pool of tag commands: owns the slab arena that backs tagsave_t and
+// tagcopy_t nodes, plus a lookup index that deduplicates whole commands
+// and maps them to tcid_t identifiers.
+class tcpool_t
{
-private:
+	typedef slab_allocator_t<~0u, 4096> alc_t;
	typedef lookup_t<tccmd_t> index_t;
+
+	// arena for save/copy nodes; freed wholesale with the pool
+	alc_t alc;
	index_t index;
public:
	tcpool_t();
+
+	tagsave_t *make_save(tagsave_t *next, tagver_t ver);
+	tagcopy_t *make_copy(tagcopy_t *next, tagver_t lhs, tagver_t rhs);
+	tagsave_t *conv_to_save(const tagver_t *vers, size_t ntag);
+
	tcid_t insert(const tagsave_t *save, const tagcopy_t *copy);
	const tccmd_t &operator[](tcid_t id) const;
};
--- /dev/null
+#ifndef _RE2C_UTIL_SLAB_ALLOCATOR_
+#define _RE2C_UTIL_SLAB_ALLOCATOR_
+
+#include "src/util/c99_stdint.h"
+#include <algorithm> // std::for_each
+#include <stdlib.h> // malloc, free
+#include <vector> // slab queue
+
+#include "src/util/forbid_copy.h"
+
+/*
+ * A bump-pointer arena allocator. Works nicely for tiny POD objects
+ * (~30 bytes and smaller).
+ * WARNING: memory for individual objects is never reclaimed; everything
+ * is released at once when the allocator is destroyed.
+ *
+ * Roughly 20 times faster than Linux's glibc allocator :]
+ */
+template<uint32_t MAXIMUM_INLINE = 4 * 1024,
+	uint32_t SLAB_SIZE = 1024 * 1024>
+class slab_allocator_t
+{
+	typedef std::vector<char*> slabs_t;
+
+	slabs_t slabs_; /* quasilist of allocated slabs of 'SLAB_SIZE' bytes */
+	char *current_slab_;
+	char *current_slab_end_;
+
+public:
+	slab_allocator_t(): slabs_(), current_slab_(0), current_slab_end_(0) {}
+
+	~slab_allocator_t() { std::for_each(slabs_.rbegin(), slabs_.rend(), free); }
+
+	void *alloc(size_t size)
+	{
+		char *result;
+
+		/* round the request up to pointer size, so that successive
+		 * allocations of different sizes stay suitably aligned for
+		 * pointer-containing PODs (slabs come from malloc, which is
+		 * maximally aligned, so offsets decide alignment) */
+		size = (size + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
+
+		/* very large objects get a standalone malloc'ed chunk; it is
+		 * still tracked in 'slabs_' so the destructor frees it; the
+		 * SLAB_SIZE check also protects against requests that would
+		 * not fit into a fresh slab and overrun it */
+		if (size > MAXIMUM_INLINE || size > SLAB_SIZE) {
+			result = static_cast<char*>(malloc(size));
+			slabs_.push_back(result);
+			return result;
+		}
+
+		/* no space in the current slab: start a new one
+		 * (the unused tail of the old slab is wasted) */
+		const size_t yet_in_slab = static_cast<size_t>(current_slab_end_ - current_slab_);
+		if (yet_in_slab < size) {
+			current_slab_ = static_cast<char*>(malloc(SLAB_SIZE));
+			current_slab_end_ = current_slab_ + SLAB_SIZE;
+			slabs_.push_back(current_slab_);
+		}
+
+		/* bump-pointer allocation within the current slab */
+		result = current_slab_;
+		current_slab_ += size;
+
+		return result;
+	}
+
+	/* typed convenience wrapper: allocate 'n' objects of 'data_t' */
+	template<typename data_t>
+	inline data_t *alloct(size_t n)
+	{
+		return static_cast<data_t*>(alloc(n * sizeof(data_t)));
+	}
+
+	FORBID_COPY(slab_allocator_t);
+};
+
+#endif // _RE2C_UTIL_SLAB_ALLOCATOR_