namespace re2c
{
+/* note [epsilon-closures in tagged NFA]
+ *
+ * The closure includes all NFA states that are reachable by epsilon-paths
+ * from the given set of states and either are final or have non-epsilon
+ * transitions. Note that by construction NFA states cannot have both
+ * epsilon and non-epsilon transitions.
+ *
+ * Each closure state might be reachable by multiple epsilon-paths with
+ * different tags: this means that the regular expression is ambiguous
+ * and can be parsed in different ways. Which parse to choose depends on the
+ * disambiguation policy. RE2C supports two policies: leftmost greedy and
+ * POSIX.
+ *
+ * We use the Goldberg-Radzik algorithm to find the "shortest path".
+ * Both disambiguation policies forbid epsilon-cycles with negative weight.
+ */
+
+
+/* note [at most one final item per closure]
+ *
+ * By construction NFA has exactly one final state per rule. Thus closure
+ * has at most one final item per rule (in other words, all final items
+ * in closure belong to different rules). The rule with the highest priority
+ * shadows all other rules. Final items that correspond to shadowed rules
+ * are useless and should be removed as early as possible.
+ *
+ * If we let such items remain in closure, they may prevent the new DFA
+ * state from being merged with other states. This won't affect the final
+ * program: meaningless finalizing tags will be removed by dead code
+ * elimination and subsequent minimization will merge equivalent final
+ * states. However, it's better not to add useless final items at all.
+ *
+ * Note that the first final item reached by the epsilon-closure is the one
+ * with the highest priority (see note [closure items are sorted by rule]).
+ */
+
+
+/* note [the difference between TDFA(0) and TDFA(1)]
+ *
+ * TDFA(0) performs epsilon-closure after transition on symbol,
+ * while TDFA(1) performs it before the transition and uses the lookahead
+ * symbol to filter the closure.
+ *
+ * TDFA(0) is one step ahead of TDFA(1): it consumes a symbol, then builds
+ * epsilon-closure, eagerly applies all tags reachable by it and goes to
+ * the next state.
+ *
+ * TDFA(1) is more lazy: it builds epsilon-closure, then filters it with
+ * respect to the current symbol (uses only those states which have outgoing
+ * transitions on this symbol), then applies corresponding tags (probably
+ * not all tags applied by TDFA(0)) and then consumes the symbol and goes
+ * to the next state.
+ *
+ * Thus in general TDFA(1) raises fewer conflicts than TDFA(0).
+ */
+
+
static void closure_posix(const closure_t &init, closure_t &done, closure_t *shadow, Tagpool &tagpool, const std::vector<Tag> &tags, const prectable_t *prectbl, size_t noldclos);
static void closure_leftmost(const closure_t &init, closure_t &done, closure_t *shadow, Tagpool &tagpool);
static void prune(closure_t &clos, std::valarray<Rule> &rules);
const prectable_t *prectbl_old, prectable_t *&prectbl_new, size_t noldclos);
static bool cmpby_rule_state(const clos_t &x, const clos_t &y);
+
tcmd_t *closure(dfa_t &dfa, closure_t &clos1, closure_t &clos2,
Tagpool &tagpool, newvers_t &newvers, closure_t *shadow,
const prectable_t *prectbl_old, prectable_t *&prectbl_new, size_t noldclos)
return cmd;
}
+
bool cmpby_rule_state(const clos_t &x, const clos_t &y)
{
const nfa_state_t *sx = x.state, *sy = y.state;
}
-/* note [epsilon-closures in tagged NFA]
- *
- * The closure includes all NFA states that are reachable by epsilon-paths
- * from the given set of states and either are final or have non-epsilon
- * transitions. Note that by construction NFA states cannot have both
- * epsilon and non-epsilon transitions.
- *
- * Each closure state might be reachable by multiple epsilon-paths with
- * different tags: this means that the regular expression is ambiguous
- * and can be parsed in different ways. Which parse to choose depends on the
- * disambiguation policy. RE2C supports two policies: leftmost greedy and
- * POSIX.
- *
- * We use Goldber-Radzik algorithm to find the "shortest path".
- * Both disambiguation policies forbid epsilon-cycles with negative weight.
- */
-
static nfa_state_t *relax(clos_t x, closure_t &done,
closure_t *shadow, Tagpool &tagpool, const std::vector<Tag> &tags,
const prectable_t *prectbl, size_t noldclos)
return q;
}
+
static nfa_state_t *explore(nfa_state_t *q, closure_t &done,
closure_t *shadow, Tagpool &tagpool, const std::vector<Tag> &tags,
const prectable_t *prectbl, size_t noldclos)
return p;
}
+
void closure_posix(const closure_t &init, closure_t &done,
closure_t *shadow, Tagpool &tagpool, const std::vector<Tag> &tags,
const prectable_t *prectbl, size_t noldclos)
}
}
-/* note [at most one final item per closure]
- *
- * By construction NFA has exactly one final state per rule. Thus closure
- * has at most one final item per rule (in other words, all final items
- * in closure belong to different rules). The rule with the highest priority
- * shadowes all other rules. Final items that correspond to shadowed rules
- * are useless and should be removed as early as possible.
- *
- * If we let such items remain in closure, they may prevent the new DFA
- * state from being merged with other states. This won't affect the final
- * program: meaningless finalizing tags will be removed by dead code
- * elimination and subsequent minimization will merge equivalent final
- * states. However, it's better not to add useless final items at all.
- *
- * Note that the first final item reached by the epsilon-closure it the one
- * with the highest priority (see note [closure items are sorted by rule]).
- */
void closure_leftmost(const closure_t &init, closure_t &done,
closure_t *shadow, Tagpool &tagpool)
}
}
+
void prune(closure_t &clos, std::valarray<Rule> &rules)
{
clositer_t b = clos.begin(), e = clos.end(), i, j;
clos.resize(n);
}
-/* note [the difference between TDFA(0) and TDFA(1)]
- *
- * TDFA(0) performs epsilon-closure after transition on symbol,
- * while TDFA(1) performs it before the transition and uses the lookahead
- * symbol to filter the closure.
- *
- * TDFA(0) is one step ahead of TDFA(1): it consumes a symol, then builds
- * epsilon-closure, eagerly applies all tags reachable by it and goes to
- * the next state.
- *
- * TDFA(1) is more lazy: it builds epsilon-closure, then filters it with
- * respect to the current symbol (uses only those states which have outgoing
- * transitions on this symbol), then applies corresponding tags (probably
- * not all tags applied by TDFA(0)) and then consumes the symbol and goes
- * to the next state.
- *
- * Thus in general TDFA(1) raises less conflicts than TDFA(0).
- */
void lower_lookahead_to_transition(closure_t &clos)
{
}
}
+
tcmd_t *generate_versions(dfa_t &dfa, closure_t &clos, Tagpool &tagpool, newvers_t &newvers)
{
tcmd_t *cmd = NULL;
return cmd;
}
+
static inline int32_t pack(int32_t longest, int32_t leftmost)
{
// leftmost: higher 2 bits, longest: lower 30 bits
return longest | (leftmost << 30);
}
+
void orders(closure_t &clos, Tagpool &tagpool, const std::vector<Tag> &tags,
const prectable_t *prectbl_old, prectable_t *&prectbl_new, size_t noldclos)
{
* which is further propagated back to the start state of DFA.
*/
+
+/* note [fallback states]
+ *
+ * Find states that are accepting, but may be shadowed
+ * by other accepting states: when the short rule matches,
+ * lexer must try to match longer rules; if this attempt is
+ * unsuccessful it must fallback to the short match.
+ *
+ * In order to find fallback states we need to know if
+ * "none-rule" is reachable from the given state, the information
+ * which is available after rule liveness analysis. Fallback states are
+ * needed at different points in time (both before and after
+ * certain transformations on DFA). Fortunately, fallback states
+ * are not affected by these transformations, so we can calculate
+ * them here and save for future use.
+ */
+
+
// reversed DFA
struct rdfa_t
{
FORBID_COPY(rdfa_t);
};
+
static void backprop(const rdfa_t &rdfa, bool *live,
size_t rule, size_t state)
{
}
}
+
static void liveness_analyses(const rdfa_t &rdfa, bool *live)
{
for (size_t i = 0; i < rdfa.nstates; ++i) {
}
}
+
static void warn_dead_rules(const dfa_t &dfa, size_t defrule,
const std::string &cond, const bool *live, Warn &warn)
{
}
}
+
static void remove_dead_final_states(dfa_t &dfa, const bool *fallthru)
{
const size_t
}
}
-/* note [fallback states]
- *
- * Find states that are accepting, but may be shadowed
- * by other accepting states: when the short rule matches,
- * lexer must try to match longer rules; if this attempt is
- * unsuccessful it must fallback to the short match.
- *
- * In order to find fallback states we need to know if
- * "none-rule" is reachable from the given state, the information
- * we have after rule liveness analyses. Fallback states are
- * needed at different points in time (both before and after
- * certain transformations on DFA). Fortunately, fallback states
- * are not affected by these transformations, so we can calculate
- * them here and save for future use.
- */
+
static void find_fallback_states(dfa_t &dfa, const bool *fallthru)
{
const size_t
}
}
+
void cutoff_dead_rules(dfa_t &dfa, size_t defrule, const std::string &cond, Warn &warn)
{
const rdfa_t rdfa(dfa);
const Tagpool &tagpool, const std::vector<Tag> &tags,
const std::valarray<Rule> &rules, const std::string &cond, Warn &warn);
+
const size_t dfa_t::NIL = std::numeric_limits<size_t>::max();
+
nfa_state_t *transition(nfa_state_t *state, uint32_t symbol)
{
if (state->type != nfa_state_t::RAN) {
return NULL;
}
+
void reach(const kernel_t *kernel, closure_t &clos, uint32_t symbol)
{
clos.clear();
}
}
+
dfa_t::dfa_t(const nfa_t &nfa, const opt_t *opts,
const std::string &cond, Warn &warn)
: states()
}
}
-/*
- * For each tag, find maximal number of parallel versions of this tag
- * used in each kernel (degree of non-determinism) and warn about tags with
- * maximum degree two or more.
- *
- * WARNING: this function assumes that kernel items are grouped by rule
- */
+
+// For each tag, find maximal number of parallel versions of this tag
+// used in each kernel (degree of non-determinism) and warn about tags with
+// maximum degree two or more.
+// WARNING: this function assumes that kernel items are grouped by rule
void warn_nondeterministic_tags(const kernels_t &kernels,
const Tagpool &tagpool, const std::vector<Tag> &tags,
const std::valarray<Rule> &rules, const std::string &cond, Warn &warn)
}
}
+
dfa_t::~dfa_t()
{
std::vector<dfa_state_t*>::iterator
static const char *tagname(const Tag &t);
static void dump_tags(const Tagpool &tagpool, hidx_t ttran, size_t tvers);
+
dump_dfa_t::dump_dfa_t(const dfa_t &d, const Tagpool &pool, const nfa_t &n)
: debug(pool.opts->dump_dfa_raw)
, dfa(d)
" edge[arrowhead=vee fontname=fixed]\n\n");
}
+
dump_dfa_t::~dump_dfa_t()
{
if (!debug) return;
fprintf(stderr, "}\n");
}
+
uint32_t dump_dfa_t::index(const nfa_state_t *s) const
{
return static_cast<uint32_t>(s - base);
}
+
static void dump_history(const dfa_t &dfa, const tagtree_t &h, hidx_t i)
{
if (i == HROOT) {
fprintf(stderr, " ");
}
+
void dump_dfa_t::closure_tags(cclositer_t c)
{
if (!debug) return;
}
}
+
void dump_dfa_t::closure(const closure_t &clos, uint32_t state, bool isnew)
{
if (!debug) return;
fprintf(stderr, "</TABLE>>]\n");
}
+
void dump_dfa_t::state0(const closure_t &clos)
{
if (!debug) return;
}
}
+
void dump_dfa_t::state(const closure_t &clos, size_t state, size_t symbol, bool isnew)
{
if (!debug) return;
}
}
+
void dump_dfa_t::final(size_t state, const nfa_state_t *port)
{
if (!debug) return;
fprintf(stderr, "\"]\n");
}
+
void dump_dfa(const dfa_t &dfa)
{
const size_t
fprintf(stderr, "}\n");
}
+
void dump_tcmd_or_tcid(tcmd_t *const *tcmd, const tcid_t *tcid,
size_t sym, const tcpool_t &tcpool)
{
dump_tcmd(cmd);
}
+
void dump_tcmd(const tcmd_t *p)
{
if (!p) return;
}
}
+
const char *tagname(const Tag &t)
{
return t.name ? t.name->c_str() : "";
}
+
void dump_tags(const Tagpool &tagpool, hidx_t ttran, size_t tvers)
{
if (ttran == HROOT) return;
namespace re2c
{
-static void find_overwritten_tags(const dfa_t &dfa, size_t state, bool *been, bool *owrt);
-
/* note [fallback tags]
*
* We need to backup tags in fallback states, because they may be
* only create backup if the origin is overwritten on some path.
*/
+
+static void find_overwritten_tags(const dfa_t &dfa, size_t state, bool *been, bool *owrt);
+
+
void find_overwritten_tags(const dfa_t &dfa, size_t state,
bool *been, bool *owrt)
{
}
}
+
// overwritten tags need 'copy' on all outgoing non-accepting paths
// ('copy' commands must go first, before potential overwrites)
static void backup(dfa_t &dfa, dfa_state_t *s, tagver_t l, tagver_t r)
}
}
+
// WARNING: this function assumes that falthrough and fallback
// attributes of DFA states have already been calculated, see
// note [fallback states]
namespace re2c
{
-static const size_t SCC_INF = std::numeric_limits<size_t>::max();
-static const size_t SCC_UND = SCC_INF - 1;
-
-static bool loopback(size_t node, size_t narcs, const size_t *arcs)
-{
- for (size_t i = 0; i < narcs; ++i)
- {
- if (arcs[i] == node)
- {
- return true;
- }
- }
- return false;
-}
-
/*
* node [finding strongly connected components of DFA]
*
* stack (SCC_INF).
*
*/
+
+
+static const size_t SCC_INF = std::numeric_limits<size_t>::max();
+static const size_t SCC_UND = SCC_INF - 1;
+
+
+static bool loopback(size_t node, size_t narcs, const size_t *arcs)
+{
+ for (size_t i = 0; i < narcs; ++i)
+ {
+ if (arcs[i] == node)
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+
static void scc(
const dfa_t &dfa,
std::stack<size_t> &stack,
}
}
+
static void calc_fill(
const dfa_t &dfa,
const std::vector<bool> &trivial,
}
}
+
void fillpoints(const dfa_t &dfa, std::vector<size_t> &fill)
{
const size_t size = dfa.states.size();
namespace re2c
{
+/* note [mapping ignores items with lookahead tags]
+ *
+ * Consider two items X and Y being mapped.
+ *
+ * If tag T belongs to lookahead tags of item X, then all
+ * outgoing transitions from item X update T. Which means
+ * that it doesn't matter what particular version T has in X:
+ * whatever version it has, it will be overwritten by any
+ * outgoing transition.
+ *
+ * Note that lookahead tags are identical for both items
+ * X and Y, because we only try to map DFA states with
+ * identical lookahead tags.
+ */
+
+
+/* note [save(X), copy(Y,X) optimization]
+ *
+ * save(X) command followed by a copy(Y,X) command can be optimized to
+ * save(Y). This helps reduce the number commands and versions (new version
+ * X is gone), but what is more important, it allows to put copy commands
+ * in front of save commands. This order is necessary when it comes to
+ * fallback commands.
+ *
+ * Note that in case of injective mapping there may be more than one copy
+ * command matching the same save command: save(X), copy(Y,X), copy(Z,X).
+ * In this case save command must be replicated for each copy command:
+ * save(Y), save(Z).
+ *
+ * For each save(X) command there must be at least one copy(Y,X) command
+ * (exactly one in the case of bijective mapping). This is because X version in
+ * save(X) command must be a new version which cannot occur in the older
+ * DFA state. Thus all save commands are transformed (maybe replicated) by
+ * copy commands, and some copy commands are erased by save commands.
+ *
+ * This optimization is applied after checking priority violation, so it
+ * cannot affect the check.
+*/
+
+
+/* note [bijective mappings]
+ *
+ * Suppose we have just constructed a new DFA state Y and want to map it
+ * to an existing DFA state X. States must have identical sets of NFA
+ * substates and identical sets of lookahead tags for each substate.
+ * Furthermore, there must be bijective mapping between versions of X and Y
+ * and this mapping must preserve version order (respect priorities).
+ *
+ * Bijective mappings have a nice property: there is only one possible state
+ * X to which Y can be mapped. Indeed, if there was another state Z that
+ * can be bijectively mapped to Y preserving priorities, then Z itself can
+ * be mapped to X: both (1) and (2) are symmetrical in case of bijection
+ * and the relation is transitive. So the existence of Z is a contradiction.
+ *
+ * In principle, non-bijective mappings are also possible if the new state
+ * is less versatile than the old one (surjection from X to Y). However,
+ * non-bijective mappings lack the 'unique counterpart' property and need
+ * more complex analysis (and are not so useful after all), so we drop them.
+ */
+
+
struct kernel_eq_t
{
Tagpool &tagpool;
&& equal_lookahead_tags(x, y, tagpool, tags);
}
-/* note [mapping ignores items with lookahead tags]
- *
- * Consider two items X and Y being mapped.
- *
- * If tag T belongs to lookahead tags of item X, then all
- * outgoing transitions from item X update T. Which means
- * that it doesn't matter what particular version T has in X:
- * whatever version it has, it will be overwritten by any
- * outgoing transition.
- *
- * Note that lookahead tags are identical for both items
- * X and Y, because we only try to map DFA states with
- * identical lookahead tags.
- */
-
-/* note [save(X), copy(Y,X) optimization]
- *
- * save(X) command followed by a copy(Y,X) command can be optimized to
- * save(Y). This helps reduce the number commands and versions (new version
- * X is gone), but what is more important, it allows to put copy commands
- * in front of save commands. This order is necessary when it comes to
- * fallback commands.
- *
- * Note that in case of injective mapping there may be more than one copy
- * command matching the same save command: save(X), copy(Y,X), copy(Z,X).
- * In this case save command must be replicated for each copy command:
- * save(Y), save(Z).
- *
- * For each save(X) command there must be at least one copy(Y,X) command
- * (exactly one case of bijective mapping). This is because X version in
- * save(X) command must be a new version which cannot occur in the older
- * DFA state. Thus all save commands are transformed (maybe replicated) by
- * copy commands, and some copy commands are erased by save commands.
- *
- * This optimization is applied after checking priority violation, so it
- * cannot affect the check.
-*/
bool kernels_t::operator()(const kernel_t *x, const kernel_t *y)
{
return !nontrivial_cycles;
}
-/* note [bijective mappings]
- *
- * Suppose we have just constructed a new DFA state Y and want to map it
- * to an existing DFA state X. States must have identical sets of NFA
- * substates and identical sets of lookahead tags for each substate.
- * Furtermore, there must be bijective mapping between versions of X and Y
- * and this mapping must preserve version order (respect priorities).
- *
- * Bijective mappings have a nice property: there is only one possible state
- * X to which Y can be mapped. Indeed, if there was another state Z that
- * can be bijectively mapped to Y preserving priorities, then Z itself can
- * be mapped to X: both (1) and (2) are symmetrical in case of bijection
- * and the relation is transitive. So the existence of Z is a contradiction.
- *
- * In principle, non-bijective mappings are also possible if the new state
- * is less versatile than the old one (surjection from X to Y). However,
- * non-bijective mappings lack the 'unique counterpart' property and need
- * more complex analysis (and are not so useful after all), so we drop them.
- */
size_t kernels_t::insert(const closure_t &closure, tagver_t maxver,
const prectable_t *prectbl, tcmd_t *&acts, bool &is_new)
return x;
}
+
static tcmd_t *finalizer(const clos_t &clos, size_t ridx,
dfa_t &dfa, const Tagpool &tagpool, const std::vector<Tag> &tags)
{
return copy;
}
+
void find_state(dfa_t &dfa, size_t origin, size_t symbol, kernels_t &kernels,
const closure_t &closure, tcmd_t *acts, dump_dfa_t &dump, const prectable_t *prectbl)
{
* the same symbol that go to distinguishable states. The algorithm
* loops until the matrix stops changing.
*/
+
+
+/*
+ * note [DFA minimization: Moore algorithm]
+ *
+ * The algorithm maintains partition of DFA states.
+ * Initial partition is coarse: states are distinguished according
+ * to their rule and tag set. Partition is gradually refined: each
+ * set of states is split into minimal number of subsets such that
+ * for all states in a subset transitions on the same symbol go to
+ * the same set of states.
+ * The algorithm loops until partition stops changing.
+ */
+
+
+/* note [distinguish states by tags]
+ *
+ * Final states may have 'rule' tags: tags that must be set when lexer
+ * takes an epsilon-transition to the bound action. Final states with
+ * the same rule but different sets on 'rule' tags cannot be merged.
+ *
+ * Compare the following two cases:
+ * "ac" | "bc"
+ * "ac" @p | "bc"
+ * Tail "c" can be deduplicated in the 1st case, but not in the 2nd.
+ */
+
+
static void minimization_table(
size_t *part,
const std::vector<dfa_state_t*> &states,
delete[] tbl;
}
-/*
- * note [DFA minimization: Moore algorithm]
- *
- * The algorithm maintains partition of DFA states.
- * Initial partition is coarse: states are distinguished according
- * to their rule and tag set. Partition is gradually refined: each
- * set of states is split into minimal number of subsets such that
- * for all states in a subset transitions on the same symbol go to
- * the same set of states.
- * The algorithm loops until partition stops changing.
- */
+
static void minimization_moore(
size_t *part,
const std::vector<dfa_state_t*> &states,
delete[] next;
}
-/* note [distinguish states by tags]
- *
- * Final states may have 'rule' tags: tags that must be set when lexer
- * takes epsilon-transition to the binded action. Final states with
- * the same rule but different sets on 'rule' tags cannot be merged.
- *
- * Compare the following two cases:
- * "ac" | "bc"
- * "ac" @p | "bc"
- * Tail "c" can be deduplicated in the 1st case, but not in the 2nd.
- */
void minimization(dfa_t &dfa, dfa_minimization_t type)
{
}
};
+
Tagpool::Tagpool(const opt_t *o, size_t n)
: lookup()
, opts(o)
, cstack()
{}
+
Tagpool::~Tagpool()
{
delete[] buffer;
}
}
+
size_t Tagpool::insert_const(tagver_t ver)
{
std::fill(buffer, buffer + ntags, ver);
return insert(buffer);
}
+
size_t Tagpool::insert_succ(tagver_t fst)
{
for (size_t i = 0; i < ntags; ++i) {
return insert(buffer);
}
+
size_t Tagpool::insert(const tagver_t *tags)
{
const size_t size = ntags * sizeof(tagver_t);
return lookup.push(hash, copy);
}
+
const tagver_t *Tagpool::operator[](size_t idx) const
{
return lookup[idx];
static const tagver_t DELIM = TAGVER_CURSOR - 1;
+
tagtree_t::tagtree_t(): nodes(), path1(), path2() {}
+
hidx_t tagtree_t::pred(hidx_t i) const { return nodes[i].pred; }
+
tag_info_t tagtree_t::info(hidx_t i) const { return nodes[i].info; }
+
tagver_t tagtree_t::elem(hidx_t i) const { return nodes[i].info.neg ? TAGVER_BOTTOM : TAGVER_CURSOR; }
+
size_t tagtree_t::tag(hidx_t i) const { return nodes[i].info.idx; }
+
hidx_t tagtree_t::push(hidx_t idx, tag_info_t info)
{
node_t x = {idx, info};
return static_cast<hidx_t>(nodes.size() - 1);
}
+
tagver_t tagtree_t::last(hidx_t i, size_t t) const
{
for (; i != HROOT; i = pred(i)) {
return TAGVER_ZERO;
}
+
int32_t tagtree_t::compare_reversed(hidx_t x, hidx_t y, size_t t) const
{
// compare in reverse, from tail to head: direction makes
}
}
+
static void reconstruct_history(const tagtree_t &history,
std::vector<tag_info_t> &path, hidx_t idx)
{
}
}
+
static inline int32_t unpack_longest(int32_t value)
{
// lower 30 bits
return value & 0x3fffFFFF;
}
+
static inline int32_t unpack_leftmost(int32_t value)
{
// higher 2 bits
return value >> 30u;
}
+
int32_t tagtree_t::precedence(const clos_t &x, const clos_t &y,
int32_t &rhox, int32_t &rhoy, const prectable_t *prectbl,
const std::vector<Tag> &tags, size_t nclos)
namespace re2c
{
-static uint32_t hash_tcmd(const tcmd_t *tcmd);
-
-bool tcmd_t::equal(const tcmd_t &x, const tcmd_t &y)
-{
- return x.lhs == y.lhs
- && x.rhs == y.rhs
- && equal_history(x.history, y.history);
-}
-
-bool tcmd_t::equal_history(const tagver_t *h, const tagver_t *g)
-{
- for (;;) {
- if (*h != *g) return false;
- if (*h == TAGVER_ZERO) return true;
- ++h; ++g;
- }
-}
-
/* note [topological ordering of copy commands]
*
* The order in which copy commands are executed is important:
* The algorithm starts and ends with all-zero in-degree buffer.
*/
+
+static uint32_t hash_tcmd(const tcmd_t *tcmd);
+
+
+bool tcmd_t::equal(const tcmd_t &x, const tcmd_t &y)
+{
+ return x.lhs == y.lhs
+ && x.rhs == y.rhs
+ && equal_history(x.history, y.history);
+}
+
+
+bool tcmd_t::equal_history(const tagver_t *h, const tagver_t *g)
+{
+ for (;;) {
+ if (*h != *g) return false;
+ if (*h == TAGVER_ZERO) return true;
+ ++h; ++g;
+ }
+}
+
+
bool tcmd_t::iscopy(const tcmd_t *x)
{
return x->rhs != TAGVER_ZERO && x->history[0] == TAGVER_ZERO;
}
+
bool tcmd_t::isset(const tcmd_t *x)
{
if (x->rhs == TAGVER_ZERO) {
return false;
}
+
bool tcmd_t::isadd(const tcmd_t *x)
{
return x->rhs != TAGVER_ZERO && x->history[0] != TAGVER_ZERO;
}
+
bool tcmd_t::topsort(tcmd_t **phead, uint32_t *indeg)
{
tcmd_t *x0 = *phead, *x, *y0 = NULL, **py;
return nontrivial_cycles;
}
+
tcpool_t::tcpool_t()
: alc()
, index()
assert(TCID0 == insert(NULL));
}
+
tcmd_t *tcpool_t::make_copy(tcmd_t *next, tagver_t lhs, tagver_t rhs)
{
tcmd_t *p = alc.alloct<tcmd_t>(1);
return p;
}
+
tcmd_t *tcpool_t::make_set(tcmd_t *next, tagver_t lhs, tagver_t set)
{
const size_t size = sizeof(tcmd_t) + sizeof(tagver_t);
return p;
}
+
tcmd_t *tcpool_t::make_add(tcmd_t *next, tagver_t lhs, tagver_t rhs,
const tagtree_t &history, hidx_t hidx, size_t tag)
{
return p;
}
+
tcmd_t *tcpool_t::copy_add(tcmd_t *next, tagver_t lhs, tagver_t rhs,
const tagver_t *history)
{
return p;
}
+
uint32_t hash_tcmd(const tcmd_t *tcmd)
{
uint32_t h = 0;
return h;
}
+
struct tcmd_eq_t
{
bool operator()(const tcmd_t *x, const tcmd_t *y) const
}
};
+
tcid_t tcpool_t::insert(const tcmd_t *tcmd)
{
const uint32_t h = hash_tcmd(tcmd);
return static_cast<tcid_t>(id);
}
+
const tcmd_t *tcpool_t::operator[](tcid_t id) const
{
return index[id];