diff --git a/NEWS b/NEWS index a3d332185..d266713c2 100644 --- a/NEWS +++ b/NEWS @@ -8,6 +8,19 @@ New in spot 2.7.4.dev (not yet released) - ltldo, ltlcross, and autcross are now preferring posix_spawn() over fork()+exec() when available. + - ltlcross has new options --determinize-max-states=N and + --determinize-max-edges=M to restrict the use of + determinization-based complementation to cases where it produces + automata with at most N states and M edges. By default + determinization is now attempted up to 500 states and 5000 + edges. This is an improvement over the previous default where + determinization-based complementation was not performed at all, + unless -D was specified. + + - ltlcross will now skip unnecessary cross-checks and + consistency-checks (they are unnecessary when all automata + could be complemented and statistics were not required). + Library: - Add generic_accepting_run() as a variant of generic_emptiness_check() that @@ -32,7 +45,7 @@ New in spot 2.7.4.dev (not yet released) allows "autfilt [-D] --small" to minimize very-weak automata whenever they are found to represent obligation properties.) - - There is a new spot::scc_and_mark_filter objet that simplify the + - There is a new spot::scc_and_mark_filter object that simplifies the creation of filters to restrict spot::scc_info to some particular SCC while cutting new SCCs on given acceptance sets. This is used by spot::generic_emptiness_check() when processing SCCs @@ -51,6 +64,20 @@ New in spot 2.7.4.dev (not yet released) acceptance condition. The output can be alternating only if the input was alternating. + - There is a new class output_aborter that is used to specify + upper bounds on the size of automata produced by some algorithms. + Several functions have been changed to accept an output_aborter. + This includes: + * tgba_determinize() + * tgba_powerset() + * minimize_obligation() + * minimize_wdba() + * remove_alternation() + * product() + * the new complement() + * the postprocessor class, via the "det-max-states" and + "det-max-edges" options. + - SVA's first_match operator can now be used in SERE formulas and that is supported by the ltl_to_tgba_fm() translation. See doc/tl/tl.pdf for the semantics. *WARNING* Because this adds a @@ -66,7 +93,7 @@ New in spot 2.7.4.dev (not yet released) terms of the existing PSL operators. ##[+] and ##[*] are sugar for ##[1:$] and ##[0:$]. - - spot::relabel_apply() make it easier to reverse the effect + - spot::relabel_apply() makes it easier to reverse the effect of spot::relabel() or spot::relabel_bse() on formula. - The LTL simplifier learned the following optional rules: diff --git a/bin/ltlcross.cc b/bin/ltlcross.cc index d119e4e93..514c7db2d 100644 --- a/bin/ltlcross.cc +++ b/bin/ltlcross.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2018 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library.
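The NEWS entries above describe the new output_aborter mechanism and the 500-state/5000-edge defaults that ltlcross now uses for determinization-based complementation. The following minimal sketch shows the corresponding library-level calls; the formula, the bounds, and the surrounding main() are illustrative assumptions, not part of this patch (output_aborter is declared in spot/twaalgos/powerset.hh, as the hunks below show).

    // Sketch: bound determinization-based complementation the way the
    // new ltlcross defaults do (at most 500 states and 5000 edges).
    #include <iostream>
    #include <spot/tl/parse.hh>
    #include <spot/twaalgos/translate.hh>
    #include <spot/twaalgos/powerset.hh>     // declares spot::output_aborter
    #include <spot/twaalgos/complement.hh>

    int main()
    {
      spot::translator trans;
      spot::twa_graph_ptr aut = trans.run(spot::parse_formula("F(G(a))"));
      spot::output_aborter aborter(500, 5000);
      // complement() returns nullptr when the intermediate determinization
      // would exceed the bounds carried by the aborter.
      if (spot::twa_graph_ptr comp = spot::complement(aut, &aborter))
        std::cout << "complement has " << comp->num_states() << " states\n";
      else
        aborter.print_reason(std::cerr << "complementation aborted: ") << '\n';
    }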
@@ -55,7 +55,7 @@ #include #include #include -#include +#include #include #include #include @@ -86,6 +86,8 @@ enum { OPT_BOGUS, OPT_CSV, OPT_DENSITY, + OPT_DET_MAX_STATES, + OPT_DET_MAX_EDGES, OPT_DUPS, OPT_FAIL_ON_TIMEOUT, OPT_GRIND, @@ -118,8 +120,18 @@ static const argp_option options[] = { "no-complement", OPT_NOCOMP, nullptr, 0, "do not complement deterministic automata to perform extra checks", 0 }, { "determinize", 'D', nullptr, 0, - "determinize non-deterministic automata so that they" + "always determinize non-deterministic automata so that they" "can be complemented; also implicitly sets --products=0", 0 }, + { "determinize-max-states", OPT_DET_MAX_STATES, "N", 0, + "attempt to determinize non-deterministic automata so they can be " + "complemented, unless the deterministic automaton would have more " + "than N states. Without this option or -D, determinizations " + "are attempted up to 500 states.", 0 }, + { "determinize-max-edges", OPT_DET_MAX_EDGES, "N", 0, + "attempt to determinize non-deterministic automata so they can be " + "complemented, unless the deterministic automaton would have more " + "than N edges. Without this option or -D, determinizations " + "are attempted up to 5000 edges.", 0 }, { "stop-on-error", OPT_STOP_ERR, nullptr, 0, "stop on first execution error or failure to pass" " sanity checks (timeouts are OK)", 0 }, @@ -195,6 +207,10 @@ static bool allow_dups = false; static bool no_checks = false; static bool no_complement = false; static bool determinize = false; +static bool max_det_states_given = false; +static bool max_det_edges_given = false; +static unsigned max_det_states = 500U; +static unsigned max_det_edges = 5000U; static bool stop_on_error = false; static int seed = 0; static unsigned products = 1; @@ -427,6 +443,28 @@ parse_opt(int key, char* arg, struct argp_state*) case 'D': determinize = true; products = 0; + max_det_states = -1U; + max_det_edges = -1U; + if (max_det_states_given) + error(2, 0, "Options --determinize-max-states and " + "--determinize are incompatible."); + if (max_det_edges_given) + error(2, 0, "Options --determinize-max-edges and " + "--determinize are incompatible."); + break; + case OPT_DET_MAX_EDGES: + max_det_edges_given = true; + max_det_edges = to_pos_int(arg, "--determinize-max-edges"); + if (determinize) + error(2, 0, "Options --determinize-max-edges and " + "--determinize are incompatible."); + break; + case OPT_DET_MAX_STATES: + max_det_states_given = true; + max_det_states = to_pos_int(arg, "--determinize-max-states"); + if (determinize) + error(2, 0, "Options --determinize-max-states and " + "--determinize are incompatible."); break; case ARGP_KEY_ARG: if (arg[0] == '-' && !arg[1]) @@ -434,6 +472,9 @@ parse_opt(int key, char* arg, struct argp_state*) else tools_push_trans(arg); break; + case OPT_AMBIGUOUS: + opt_ambiguous = true; + break; case OPT_AUTOMATA: opt_automata = true; break; @@ -466,16 +507,6 @@ parse_opt(int key, char* arg, struct argp_state*) want_stats = true; json_output = arg ?
arg : "-"; break; - case OPT_PRODUCTS: - if (*arg == '+') - { - products_avg = false; - ++arg; - } - products = to_pos_int(arg, "--products"); - if (products == 0) - products_avg = false; - break; case OPT_NOCHECKS: no_checks = true; no_complement = true; @@ -486,6 +517,16 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_OMIT: opt_omit = true; break; + case OPT_PRODUCTS: + if (*arg == '+') + { + products_avg = false; + ++arg; + } + products = to_pos_int(arg, "--products"); + if (products == 0) + products_avg = false; + break; case OPT_REFERENCE: tools_push_trans(arg, true); break; @@ -501,9 +542,6 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_STRENGTH: opt_strength = true; break; - case OPT_AMBIGUOUS: - opt_ambiguous = true; - break; case OPT_VERBOSE: verbose = true; break; @@ -1094,6 +1132,8 @@ namespace } } + bool missing_complement = true; + if (!no_checks) { std::cerr << "Performing sanity checks and gathering statistics..." @@ -1122,7 +1162,7 @@ namespace return smallest_ref; }; - // This are not our definitive choice for reference + // These are not our definitive choice for reference // automata, because the sizes might change after we remove // alternation and Fin acceptance. But we need to know now // if we will have a pair of reference automata in order to @@ -1158,32 +1198,27 @@ namespace std::cerr << ")\n"; } }; - auto complement = [&](const std::vector& x, - std::vector& comp, - unsigned i) - { - if (!no_complement && x[i] && is_universal(x[i])) - comp[i] = dualize(x[i]); - }; + // Remove alternation for (size_t i = 0; i < m; ++i) { unalt(pos, i, 'P'); unalt(neg, i, 'N'); - // Do not complement reference automata if we have a - // reference pair. - if (smallest_pos_ref >= 0 && tools[i].reference) - continue; - complement(pos, comp_pos, i); - complement(neg, comp_neg, i); } - if (determinize && !no_complement) + // Complement + if (!no_complement) { + spot::output_aborter aborter_(max_det_states, + max_det_edges); + spot::output_aborter* aborter = nullptr; + if (max_det_states != -1U || max_det_edges != -1U) + aborter = &aborter_; + print_first = verbose; - auto tmp = [&](std::vector& from, - std::vector& to, int i, - char prefix) + auto comp = [&](std::vector& from, + std::vector& to, int i, + char prefix) { if (from[i] && !to[i]) { @@ -1193,29 +1228,41 @@ namespace return; if (print_first) { - std::cerr << "info: complementing non-deterministic " - "automata via determinization...\n"; + std::cerr << "info: complementing automata...\n"; print_first = false; } - spot::postprocessor p; - p.set_type(spot::postprocessor::Generic); - p.set_pref(spot::postprocessor::Deterministic); - p.set_level(spot::postprocessor::Low); - to[i] = dualize(p.run(from[i])); + if (verbose) + std::cerr << "info: " << prefix << i; + if (aborter && aborter->too_large(from[i]) + && !spot::is_universal(from[i])) + missing_complement = true; + else + to[i] = spot::complement(from[i], aborter); if (verbose) { - std::cerr << "info: " << prefix << i << "\t("; - printsize(from[i]); - std::cerr << ") -> ("; - printsize(to[i]); - std::cerr << ")\tComp(" << prefix << i << ")\n"; + if (to[i]) + { + std::cerr << "\t("; + printsize(from[i]); + std::cerr << ") -> ("; + printsize(to[i]); + std::cerr << ")\tComp(" << prefix << i << ")\n"; + } + else + { + std::cerr << "\tnot complemented"; + if (aborter) + aborter->print_reason(std::cerr << " (") << ')'; + std::cerr << '\n'; + } } } }; + missing_complement = false; for (unsigned i = 0; i < m; ++i) { - tmp(pos, comp_pos, i, 'P'); - tmp(neg, 
comp_neg, i, 'N'); + comp(pos, comp_pos, i, 'P'); + comp(neg, comp_neg, i, 'N'); } } @@ -1362,15 +1409,41 @@ namespace (*nstats)[i].product_scc.reserve(products); } } - for (unsigned p = 0; p < products; ++p) + // Decide if we need products with state-space. + unsigned actual_products = products; + if (actual_products) + { + if (missing_complement) + { + if (verbose) + std::cerr + << ("info: complements not computed for some automata\ninfo: " + "continuing with cross_checks and consistency_checks\n"); + } + else if (want_stats) + { + if (verbose) + std::cerr + << ("info: running cross_checks and consistency_checks" + "just for statistics\n"); + } + else + { + if (verbose) + std::cerr + << "info: cross_checks and consistency_checks unnecessary\n"; + actual_products = 0; + } + } + for (unsigned p = 0; p < actual_products; ++p) { // build a random state-space. spot::srand(seed); if (verbose) - std::cerr << "info: building state-space #" << p << '/' << products - << " of " << states << " states with seed " << seed - << '\n'; + std::cerr << "info: building state-space #" << (p+1) << '/' + << products << " of " << states + << " states with seed " << seed << '\n'; auto statespace = spot::random_graph(states, density, ap, dict); @@ -1408,7 +1481,7 @@ namespace std::cerr << ("warning: not enough memory to build " "product of P") << i << " with state-space"; if (products > 1) - std::cerr << " #" << p << '/' << products << '\n'; + std::cerr << " #" << (p+1) << '/' << products << '\n'; std::cerr << '\n'; ++oom_count; } diff --git a/bin/spot-x.cc b/bin/spot-x.cc index 8fc759f54..8b2439252 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -103,6 +103,14 @@ for the presence of an accepting self-loop.") }, { DOC("degen-remscc", "If non-zero (the default), make sure the output \ of the degenalization has as many SCCs as the input, by removing superfluous \ ones.") }, + { DOC("det-max-states", "When defined to a positive integer N, \ +determinizations will be aborted whenever the number of generated \ +states would exceed N. In this case a non-deterministic automaton \ +will be returned.")}, + { DOC("det-max-edges", "When defined to a positive integer N, \ +determinizations will be aborted whenever the number of generated \ +edges would exceed N. 
In this case a non-deterministic automaton \ +will be returned.")}, { DOC("det-scc", "Set to 0 to disable scc-based optimizations in \ the determinization algorithm.") }, { DOC("det-simul", "Set to 0 to disable simulation-based optimizations in \ diff --git a/doc/org/ltlcross.org b/doc/org/ltlcross.org index 12593f0fb..352fb3f7d 100644 --- a/doc/org/ltlcross.org +++ b/doc/org/ltlcross.org @@ -15,34 +15,37 @@ The main differences with LBTT are: - *support for PSL formulas in addition to LTL* - support for (non-alternating) automata with *any type of acceptance condition*, - support for *weak alternating automata*, - - additional intersection *checks with the complement* (option =-D=), allowing - to check equivalence of automata more precisely, + - additional intersection *checks with the complement*, allowing + equivalence of automata to be checked more precisely, - *more statistics*, especially: - the number of logical transitions represented by each physical edge, - the number of deterministic states and automata - the number of SCCs with their various strengths (nonaccepting, terminal, weak, strong) - the number of terminal, weak, and strong automata - - an option to *reduce counterexample* by attempting to mutate and + - an option to *reduce counterexamples* by attempting to mutate and shorten troublesome formulas (option =--grind=), - statistics output in *CSV* for easier post-processing, - *more precise time measurement* (LBTT was only precise to 1/100 of a second, reporting most times as "0.00s"). -Although =ltlcross= performs the same sanity checks as LBTT, it does +Although =ltlcross= performs sanity checks similar to those of LBTT, it does not implement any of the interactive features of LBTT. In our almost 10-year usage of LBTT, we never had to use its interactive features to understand bugs in our translation. Therefore =ltlcross= will report problems, maybe with a counterexample, but you will be on your own to -investigate and fix them. +investigate and fix them (the =--grind= option may help you reduce the +problem to a shorter formula). The core of =ltlcross= is a loop that does the following steps: - Input a formula - Translate the formula and its negation using each configured translator. If there are 3 translators, the positive and negative translations - will be denoted =P0=, =N0=, =P1=, =N1=, =P2=, =N2=. Optionally - build complemented automata denoted =Comp(P0)=, =Comp(N0)=, etc. + will be denoted =P0=, =N0=, =P1=, =N1=, =P2=, =N2=. - Optionally build complemented automata denoted =Comp(P0)=, =Comp(N0)=, etc. + (By default, this is done only for small automata, but see options =-D=, + =--determinize-max-states= and =--determinize-max-edges=.) - Perform sanity checks between all these automata to detect any problem. - - Build the products of these automata with a random state-space (the same + - Optionally build the products of these automata with a random state-space (the same state-space for all translations). (If the =--products=N= option is given, =N= products are performed instead.) - Gather statistics if requested. @@ -282,29 +285,34 @@ positive and negative formulas by the ith translator). The cycle part is denoted by =cycle{...}=. - Complemented intersection check. If $P_i$ and $N_i$ are - deterministic, =ltlcross= builds their complements, $Comp(P_i)$ - and $Comp(N_i)$, and then ensures that $Comp(P_i)\otimes - Comp(N_i)$ is empty.
If only one of them is deterministic, for - instance $P_i$, we check that $P_j\otimes Comp(P_i)$ for all $j - \ne i$; likewise if it's $N_i$ that is deterministic. + deterministic or if they are small enough, =ltlcross= attempts to + build their complements, $Comp(P_i)$ and $Comp(N_i)$. - By default this check is only done for deterministic automata, - because complementation is relatively cheap is that case (at least - it is cheap for simple acceptance conditions). Using option - =--determinize=, =ltlcross= can be instructed to perform - complementation of non-deterministic automata as well, ensuring - precise equivalence checks between all automata. However be aware - that this determinization + complementation may generate large - automata. + Complementation is not always attempted, especially when it + requires a determinization-based construction. The conditions + specifying when the complement automata are constructed can be + modified with the =--determinize-max-states=N= and + =--determinize-max-edges=M= options, which abort the + complementation if it would produce an automaton with more than + =N= states (500 by default) or more than =M= edges (5000 by + default). Alternatively, use =--determinize= (a.k.a. =-D=) to + force the complementation of all automata. + + If both complement automata could be computed, =ltlcross= ensures that + $Comp(P_i)\otimes Comp(N_i)$ is empty. + + If only one automaton has been complemented, for instance $P_i$, + =ltlcross= checks that $P_j\otimes Comp(P_i)$ is empty for all $j \ne i$; + likewise if it is $N_i$ that has been complemented. When validating a translator with =ltlcross= without using the =--determinize= option we highly recommend to include a translator with good deterministic output to augment test coverage. Using - '=ltl2tgba -lD %f >%O=' will produce deterministic automata for - all obligation properties and many recurrence properties. Using - '=ltl2dstar --ltl2nba=spin:pathto/ltl2tgba@-Ds %L %O=' will - systematically produce a deterministic Rabin automaton (that - =ltlcross= can complement easily). + '=ltl2tgba -D %f >%O=' will produce deterministic automata for all + obligation properties and many recurrence properties. Using + '=ltl2tgba -PD %f >%O=' will systematically produce a + deterministic parity automaton (that =ltlcross= can complement + easily). - Cross-comparison checks: for some state-space $S$, all $P_i\otimes S$ are either all empty, or all non-empty. @@ -325,7 +333,7 @@ positive and negative formulas by the ith translator). These products tests may sometime catch errors that were not captured by the first two tests if one non-deterministic automaton recognize less words than what it should. If the input automata - are deterministic or the =--determinize= option is used, this test + are all deterministic or the =--determinize= option is used, this test is redundant and can be disabled. (In fact, the =--determinize= option implies option =--product=0= to do so.) @@ -1111,16 +1119,16 @@ produce transition-based generalized Büchi automata, while =ltl3ba -H1= produces co-Büchi alternating automata.
#+BEGIN_SRC sh :prologue "export SPOT_HOA_TOLERANT=1; exec 2>&1" -ltlcross -f 'FGa' ltl2tgba 'ltl3ba -H1' --determinize --verbose +ltlcross -f 'FGa' ltl2tgba 'ltl3ba -H1' --verbose #+END_SRC #+RESULTS: #+begin_example F(G(a)) -Running [P0]: ltl2tgba -H 'F(G(a))'>'lcr-o0-cNwEjy' -Running [P1]: ltl3ba -H1 -f '<>([](a))'>'lcr-o1-WQo28q' -Running [N0]: ltl2tgba -H '!(F(G(a)))'>'lcr-o0-KT96Zj' -Running [N1]: ltl3ba -H1 -f '!(<>([](a)))'>'lcr-o1-WE1RXc' +Running [P0]: ltl2tgba -H 'F(G(a))'>'lcr-o0-ltzvEc' +Running [P1]: ltl3ba -H1 -f '<>([](a))'>'lcr-o1-dqnX28' +Running [N0]: ltl2tgba -H '!(F(G(a)))'>'lcr-o0-wmIXr5' +Running [N1]: ltl3ba -H1 -f '!(<>([](a)))'>'lcr-o1-yJesT1' info: collected automata: info: P0 (2 st.,3 ed.,1 sets) info: N0 (1 st.,2 ed.,1 sets) deterministic complete @@ -1129,14 +1137,14 @@ info: N1 (3 st.,5 ed.,1 sets) univ-edges complete Performing sanity checks and gathering statistics... info: getting rid of universal edges... info: N1 (3 st.,5 ed.,1 sets) -> (2 st.,4 ed.,1 sets) -info: complementing non-deterministic automata via determinization... -info: P0 (2 st.,3 ed.,1 sets) -> (2 st.,4 ed.,2 sets) Comp(P0) -info: P1 (2 st.,3 ed.,1 sets) -> (2 st.,4 ed.,2 sets) Comp(P1) +info: complementing automata... +info: P0 (2 st.,3 ed.,1 sets) -> (2 st.,4 ed.,1 sets) Comp(P0) +info: N0 (1 st.,2 ed.,1 sets) -> (1 st.,2 ed.,1 sets) Comp(N0) +info: P1 (2 st.,3 ed.,1 sets) -> (2 st.,4 ed.,1 sets) Comp(P1) +info: N1 (2 st.,4 ed.,1 sets) -> (2 st.,4 ed.,1 sets) Comp(N1) info: getting rid of any Fin acceptance... -info: Comp(P0) (2 st.,4 ed.,2 sets) -> (2 st.,4 ed.,1 sets) info: Comp(N0) (1 st.,2 ed.,1 sets) -> (2 st.,3 ed.,1 sets) info: P1 (2 st.,3 ed.,1 sets) -> (2 st.,3 ed.,1 sets) -info: Comp(P1) (2 st.,4 ed.,2 sets) -> (2 st.,4 ed.,1 sets) info: Comp(N1) (2 st.,4 ed.,1 sets) -> (3 st.,6 ed.,1 sets) info: check_empty P0*N0 info: check_empty Comp(N0)*Comp(P0) @@ -1144,6 +1152,7 @@ info: check_empty P0*N1 info: check_empty P1*N0 info: check_empty P1*N1 info: check_empty Comp(N1)*Comp(P1) +info: cross_checks and consistency_checks unnecessary No problem detected. #+end_example @@ -1162,41 +1171,35 @@ alternating automata are supported) into non-alternating automata. Here only =N1= needs this transformation. Then =ltlcross= computes the complement of these four -automata. Since =P0= and =P1= are nondeterministic and the -=--determinize= option was given, a first pass determinize and -complete these two automata, creating =Comp(P0)= and =Comp(P1)=. - -Apparently =N0= and =N1= are already deterministic, so their -complement could be obtained by just complementing their acceptance -condition (this is not written -- we only deduce so because they do -not appear in the list of automata that had to be determinized). +automata. Now that =ltlcross= has four complemented automata, it has to make sure they use only =Inf= acceptance because that is what our emptiness check procedure can handle. So there is a new pass over all automata, rewriting them to get rid of any =Fin= acceptance. -After this preparatory work, it is time for actually comparing these +After this preparatory work, it is time to actually compare these automata. Together, the tests =P0*N0= and =Comp(N0)*Comp(P0)= ensure that the automaton =N0= is really the complement of =P0=. Similarly =P1*N1= and =Comp(N1)*Comp(P1)= ensure that =N1= is the complement of =P1=. Finally =P0*N1= and =P1*N0= ensure that =P1= is equivalent to =P0= and =N1= is equivalent to =N0=. 
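As a side note, the pair of emptiness checks described in the previous paragraph can also be reproduced directly with the library API. This is only a hedged sketch of the underlying idea: the helper name is made up, and spot::generic_emptiness_check() is used here for brevity, whereas ltlcross removes Fin acceptance first and runs its own emptiness checks as shown in the trace above.

    // Sketch: the two emptiness checks that, together, establish that
    // automaton n is the complement of automaton p.
    #include <spot/twaalgos/product.hh>
    #include <spot/twaalgos/genem.hh>   // spot::generic_emptiness_check()

    bool complementary(const spot::const_twa_graph_ptr& p,
                       const spot::const_twa_graph_ptr& n,
                       const spot::const_twa_graph_ptr& comp_p,
                       const spot::const_twa_graph_ptr& comp_n)
    {
      // L(p) and L(n) must be disjoint...
      bool ok = spot::generic_emptiness_check(spot::product(p, n));
      // ...and so must their complements, i.e., every word belongs to
      // either L(p) or L(n).
      if (ok && comp_p && comp_n)
        ok = spot::generic_emptiness_check(spot::product(comp_n, comp_p));
      return ok;
    }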
-Note that if we had not used the =--determinize= option, the procedure -would look slightly more complex: +Note that if we reduce =ltlcross='s ability to determinize +automata for complementation, the procedure +can look slightly more complex: #+BEGIN_SRC sh :prologue "export SPOT_HOA_TOLERANT=1; exec 2>&1" -ltlcross -f 'FGa' ltl2tgba 'ltl3ba -H1' --verbose +ltlcross -f 'FGa' ltl2tgba 'ltl3ba -H1' --determinize-max-states=1 --verbose #+END_SRC #+RESULTS: #+begin_example F(G(a)) -Running [P0]: ltl2tgba -H 'F(G(a))'>'lcr-o0-Ot1KDa' -Running [P1]: ltl3ba -H1 -f '<>([](a))'>'lcr-o1-Kvzdfm' -Running [N0]: ltl2tgba -H '!(F(G(a)))'>'lcr-o0-X2dURx' -Running [N1]: ltl3ba -H1 -f '!(<>([](a)))'>'lcr-o1-wuLpzJ' +Running [P0]: ltl2tgba -H 'F(G(a))'>'lcr-o0-HHyVWR' +Running [P1]: ltl3ba -H1 -f '<>([](a))'>'lcr-o1-scKnIH' +Running [N0]: ltl2tgba -H '!(F(G(a)))'>'lcr-o0-6Wloux' +Running [N1]: ltl3ba -H1 -f '!(<>([](a)))'>'lcr-o1-MQ7Rin' info: collected automata: info: P0 (2 st.,3 ed.,1 sets) info: N0 (1 st.,2 ed.,1 sets) deterministic complete @@ -1205,6 +1208,11 @@ info: N1 (3 st.,5 ed.,1 sets) univ-edges complete Performing sanity checks and gathering statistics... info: getting rid of universal edges... info: N1 (3 st.,5 ed.,1 sets) -> (2 st.,4 ed.,1 sets) +info: complementing automata... +info: P0 not complemented (more than 1 states required) +info: N0 (1 st.,2 ed.,1 sets) -> (1 st.,2 ed.,1 sets) Comp(N0) +info: P1 not complemented (more than 1 states required) +info: N1 (2 st.,4 ed.,1 sets) -> (2 st.,4 ed.,1 sets) Comp(N1) info: getting rid of any Fin acceptance... info: Comp(N0) (1 st.,2 ed.,1 sets) -> (2 st.,3 ed.,1 sets) info: P1 (2 st.,3 ed.,1 sets) -> (2 st.,3 ed.,1 sets) @@ -1215,7 +1223,9 @@ info: check_empty Comp(N0)*N1 info: check_empty P1*N0 info: check_empty Comp(N1)*N0 info: check_empty P1*N1 -info: building state-space #0/1 of 200 states with seed 0 +info: complements were not computed for some automata +info: continuing with cross_checks and consistency_checks +info: building state-space #1/1 of 200 states with seed 0 info: state-space has 4136 edges info: building product between state-space and P0 (2 st., 3 ed.) info: product has 400 st., 8298 ed. @@ -1263,10 +1273,10 @@ ltlcross -f 'FGa' ltl2tgba --reference 'ltl3ba -H1' --verbose #+RESULTS: #+begin_example F(G(a)) -Running [P0]: ltl3ba -H1 -f '<>([](a))'>'lcr-o0-hsnlkV' -Running [P1]: ltl2tgba -H 'F(G(a))'>'lcr-o1-R0jOmP' -Running [N0]: ltl3ba -H1 -f '!(<>([](a)))'>'lcr-o0-7GwxvJ' -Running [N1]: ltl2tgba -H '!(F(G(a)))'>'lcr-o1-5sgPFD' +Running [P0]: ltl3ba -H1 -f '<>([](a))'>'lcr-o0-bh9PHg' +Running [P1]: ltl2tgba -H 'F(G(a))'>'lcr-o1-LvvYEm' +Running [N0]: ltl3ba -H1 -f '!(<>([](a)))'>'lcr-o0-bcUDEs' +Running [N1]: ltl2tgba -H '!(F(G(a)))'>'lcr-o1-Pw1REy' info: collected automata: info: P0 (2 st.,3 ed.,1 sets) info: N0 (3 st.,5 ed.,1 sets) univ-edges complete @@ -1275,31 +1285,18 @@ info: N1 (1 st.,2 ed.,1 sets) deterministic complete Performing sanity checks and gathering statistics... info: getting rid of universal edges... info: N0 (3 st.,5 ed.,1 sets) -> (2 st.,4 ed.,1 sets) +info: complementing automata... +info: P1 (2 st.,3 ed.,1 sets) -> (2 st.,4 ed.,1 sets) Comp(P1) +info: N1 (1 st.,2 ed.,1 sets) -> (1 st.,2 ed.,1 sets) Comp(N1) info: getting rid of any Fin acceptance... 
info: P0 (2 st.,3 ed.,1 sets) -> (2 st.,3 ed.,1 sets) info: Comp(N1) (1 st.,2 ed.,1 sets) -> (2 st.,3 ed.,1 sets) info: P0 and N0 assumed correct and used as references info: check_empty P0*N1 info: check_empty P1*N0 -info: check_empty Comp(N1)*N0 info: check_empty P1*N1 -info: building state-space #0/1 of 200 states with seed 0 -info: state-space has 4136 edges -info: building product between state-space and P0 (2 st., 3 ed.) -info: product has 400 st., 8298 ed. -info: 2 SCCs -info: building product between state-space and P1 (2 st., 3 ed.) -info: product has 400 st., 8298 ed. -info: 2 SCCs -info: building product between state-space and N0 (2 st., 4 ed.) -info: product has 400 st., 8272 ed. -info: 1 SCCs -info: building product between state-space and N1 (1 st., 2 ed.) -info: product has 200 st., 4136 ed. -info: 1 SCCs -info: cross_check {P0,P1}, state-space #0/1 -info: cross_check {N0,N1}, state-space #0/1 -info: consistency_check (P1,N1), state-space #0/1 +info: check_empty Comp(N1)*Comp(P1) +info: cross_checks and consistency_checks unnecessary No problem detected. #+end_example diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index 9984ec620..25d91f473 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016-2018 Laboratoire de Recherche et Développement +// Copyright (C) 2016-2019 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -349,7 +349,7 @@ namespace spot } - twa_graph_ptr run(bool named_states) + twa_graph_ptr run(bool named_states, const output_aborter* aborter) { // First, we classify each SCC into three possible classes: // @@ -442,6 +442,9 @@ namespace spot state_set v; while (!todo.empty()) { + if (aborter && aborter->too_large(res)) + return nullptr; + unsigned s = todo.top(); todo.pop(); @@ -505,14 +508,15 @@ namespace spot twa_graph_ptr remove_alternation(const const_twa_graph_ptr& aut, - bool named_states) + bool named_states, + const output_aborter* aborter) { if (aut->is_existential()) // Nothing to do, why was this function called at all? return std::const_pointer_cast(aut); alternation_remover ar(aut); - return ar.run(named_states); + return ar.run(named_states, aborter); } diff --git a/spot/twaalgos/alternation.hh b/spot/twaalgos/alternation.hh index efc320dab..a4665aacf 100644 --- a/spot/twaalgos/alternation.hh +++ b/spot/twaalgos/alternation.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018 Laboratoire de Recherche et Développement +// Copyright (C) 2016, 2018, 2019 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -20,6 +20,7 @@ #pragma once #include +#include #include namespace spot @@ -98,10 +99,14 @@ namespace spot /// acceptance is only used in presence of size-1 rejecting-SCCs.) /// /// \param named_states name each state for easier debugging + /// + /// \param aborter Return nullptr if the built automaton would + /// be larger than the size specified by the \a aborter. /// @} SPOT_API twa_graph_ptr remove_alternation(const const_twa_graph_ptr& aut, - bool named_states = false); + bool named_states = false, + const output_aborter* aborter = nullptr); // Remove universal edges on the fly. 
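To illustrate the new parameter documented above, here is a minimal sketch of calling remove_alternation() with an output_aborter; the 1000-state bound and the wrapper name are arbitrary choices for the example.

    // Sketch: give up on alternation removal when the resulting
    // non-alternating automaton would exceed 1000 states.
    #include <spot/twaalgos/alternation.hh>

    spot::twa_graph_ptr
    bounded_remove_alternation(const spot::const_twa_graph_ptr& aut)
    {
      spot::output_aborter aborter(1000);  // max_edges stays unlimited
      // Returns nullptr if the construction hits the bound.
      return spot::remove_alternation(aut, /*named_states=*/false, &aborter);
    }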
diff --git a/spot/twaalgos/complement.cc b/spot/twaalgos/complement.cc index 947501250..a6945b5a3 100644 --- a/spot/twaalgos/complement.cc +++ b/spot/twaalgos/complement.cc @@ -511,17 +511,31 @@ namespace spot } twa_graph_ptr - complement(const const_twa_graph_ptr& aut) + complement(const const_twa_graph_ptr& aut, const output_aborter* aborter) { if (!aut->is_existential() || is_universal(aut)) return dualize(aut); if (is_very_weak_automaton(aut)) - return remove_alternation(dualize(aut)); + return remove_alternation(dualize(aut), aborter); // Determinize - spot::postprocessor p; + spot::option_map m; + if (aborter) + { + m.set("det-max-states", aborter->max_states()); + m.set("det-max-edges", aborter->max_edges()); + } + if (aut->num_states() > 32) + { + m.set("ba-simul", 0); + m.set("simul", 0); + } + spot::postprocessor p(&m); p.set_type(spot::postprocessor::Generic); p.set_pref(spot::postprocessor::Deterministic); p.set_level(spot::postprocessor::Low); - return dualize(p.run(std::const_pointer_cast(aut))); + auto det = p.run(std::const_pointer_cast(aut)); + if (!det) + return nullptr; + return dualize(det); } } diff --git a/spot/twaalgos/complement.hh b/spot/twaalgos/complement.hh index fef481df7..330bff664 100644 --- a/spot/twaalgos/complement.hh +++ b/spot/twaalgos/complement.hh @@ -20,6 +20,7 @@ #pragma once #include +#include namespace spot { @@ -70,8 +71,12 @@ namespace spot /// - any other type of input is determized before /// complementation. /// + /// If an output_aborter is supplied, it is used to + /// abort the construction of larger automata. + /// /// complement_semidet() is not yet used. SPOT_API twa_graph_ptr - complement(const const_twa_graph_ptr& aut); + complement(const const_twa_graph_ptr& aut, + const output_aborter* aborter = nullptr); } diff --git a/spot/twaalgos/determinize.cc b/spot/twaalgos/determinize.cc index 92cfc65e0..20f4a54ba 100644 --- a/spot/twaalgos/determinize.cc +++ b/spot/twaalgos/determinize.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018 Laboratoire de Recherche et +// Copyright (C) 2015-2019 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -804,7 +804,8 @@ namespace spot twa_graph_ptr tgba_determinize(const const_twa_graph_ptr& a, bool pretty_print, bool use_scc, - bool use_simulation, bool use_stutter) + bool use_simulation, bool use_stutter, + const output_aborter* aborter) { if (!a->is_existential()) throw std::runtime_error @@ -938,6 +939,8 @@ namespace spot // The main loop while (!todo.empty()) { + if (aborter && aborter->too_large(res)) + return nullptr; const safra_state& curr = todo.front().get().first; unsigned src_num = todo.front().get().second; todo.pop_front(); diff --git a/spot/twaalgos/determinize.hh b/spot/twaalgos/determinize.hh index 1b426f5dc..ab0c993a5 100644 --- a/spot/twaalgos/determinize.hh +++ b/spot/twaalgos/determinize.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) 2015, 2016, 2019 Laboratoire de Recherche et +// Développement de l'Epita. // // This file is part of Spot, a model checking library. // @@ -19,6 +19,7 @@ #pragma once +#include #include namespace spot @@ -72,10 +73,15 @@ namespace spot /// might be worth to call /// spot::check_stutter_invariance() first if /// possible.) + /// + /// \param aborter abort the construction if the constructed + /// automaton would be too large. Return nullptr + /// in this case. 
SPOT_API twa_graph_ptr tgba_determinize(const const_twa_graph_ptr& aut, bool pretty_print = false, bool use_scc = true, bool use_simulation = true, - bool use_stutter = true); + bool use_stutter = true, + const output_aborter* aborter = nullptr); } diff --git a/spot/twaalgos/minimize.cc b/spot/twaalgos/minimize.cc index c024d204b..e637e3fe8 100644 --- a/spot/twaalgos/minimize.cc +++ b/spot/twaalgos/minimize.cc @@ -36,7 +36,6 @@ #include #include #include -#include #include #include #include @@ -374,7 +373,8 @@ namespace spot return res; } - twa_graph_ptr minimize_wdba(const const_twa_graph_ptr& a) + twa_graph_ptr minimize_wdba(const const_twa_graph_ptr& a, + const output_aborter* aborter) { if (!a->is_existential()) throw std::runtime_error @@ -389,9 +389,15 @@ namespace spot power_map pm; bool input_is_det = is_deterministic(a); if (input_is_det) - det_a = std::const_pointer_cast(a); + { + det_a = std::const_pointer_cast(a); + } else - det_a = tgba_powerset(a, pm); + { + det_a = tgba_powerset(a, pm, aborter); + if (!det_a) + return nullptr; + } // For each SCC of the deterministic automaton, determine if it // is accepting or not. @@ -420,8 +426,10 @@ namespace spot } else { - twa_graph_ptr prod = spot::product(a, det_a); - //print_dot(std::cerr, prod, "s"); + twa_graph_ptr prod = spot::product(a, det_a, aborter); + if (!prod) + return nullptr; + const product_states* pmap = prod->get_named_prop("product-states"); assert(pmap); @@ -564,21 +572,38 @@ namespace spot minimize_obligation(const const_twa_graph_ptr& aut_f, formula f, const_twa_graph_ptr aut_neg_f, - bool reject_bigger) + bool reject_bigger, + const output_aborter* aborter) { if (!aut_f->is_existential()) throw std::runtime_error ("minimize_obligation() does not support alternation"); - // FIXME: We should build scc_info once, pass it to minimize_wdba - // and reuse it for is_terminal_automaton(), - // is_weak_automaton(), and is_very_weak_automaton(). - // FIXME: we should postpone the call to minimize_wdba() until - // we know for sure that we can verify (or that we do not need - // to verify) its output, rather than computing in cases where - // we may discard it. - auto min_aut_f = minimize_wdba(aut_f); + bool minimization_will_be_correct = false; + // WDBA-minimization necessarily work for obligations + if ((f && f.is_syntactic_obligation()) + // Weak deterministic automata are obligations + || (aut_f->prop_weak() && is_deterministic(aut_f)) + // Guarantee automata are obligations as well. + || is_terminal_automaton(aut_f)) + { + minimization_will_be_correct = true; + } + else if (!aut_neg_f) + { + // The minimization might not be correct and will need to + // be checked. Are we able to build aut_neg_f? + if (!(is_deterministic(aut_f) || f || is_very_weak_automaton(aut_f))) + return nullptr; + } + // FIXME: We should build scc_info once, and reuse it between + // minimize_wdba is_terminal_automaton(), is_weak_automaton(), + // and is_very_weak_automaton(). + auto min_aut_f = minimize_wdba(aut_f, aborter); + + if (!min_aut_f) + return std::const_pointer_cast(aut_f); if (reject_bigger) { // Abort if min_aut_f has more states than aut_f. @@ -587,29 +612,18 @@ namespace spot return std::const_pointer_cast(aut_f); } - // if f is a syntactic obligation formula, the WDBA minimization - // must be correct. - if (f && f.is_syntactic_obligation()) + if (minimization_will_be_correct) return min_aut_f; - // If the input automaton was already weak and deterministic, the - // output is necessary correct. 
- if (aut_f->prop_weak() && is_deterministic(aut_f)) - return min_aut_f; - - // If aut_f is a guarantee automaton, the WDBA minimization must be - // correct. - if (is_terminal_automaton(aut_f)) - return min_aut_f; - - // Build negation automaton if not supplied. + // The minimization might not be correct and will need to + // be checked. Build negation automaton if not supplied. if (!aut_neg_f) { if (is_deterministic(aut_f)) { // If the automaton is deterministic, complementing is // easy. - aut_neg_f = remove_fin(dualize(aut_f)); + aut_neg_f = dualize(aut_f); } else if (f) { @@ -631,15 +645,10 @@ namespace spot return nullptr; } } - - // If the negation is a guarantee automaton, then the - // minimization is correct. - if (is_terminal_automaton(aut_neg_f)) - return min_aut_f; - // Make sure the minimized WDBA does not accept more words than // the input. - if (product(min_aut_f, aut_neg_f)->is_empty()) + auto prod = product(min_aut_f, aut_neg_f, aborter); + if (prod && prod->is_empty()) { assert((bool)min_aut_f->prop_weak()); return min_aut_f; diff --git a/spot/twaalgos/minimize.hh b/spot/twaalgos/minimize.hh index d7c188146..98a161ab1 100644 --- a/spot/twaalgos/minimize.hh +++ b/spot/twaalgos/minimize.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2018 +// Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2018, 2019 // Laboratoire de Recherche et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -20,6 +20,7 @@ #pragma once #include +#include #include namespace spot @@ -92,7 +93,12 @@ namespace spot month = oct } \endverbatim */ - SPOT_API twa_graph_ptr minimize_wdba(const const_twa_graph_ptr& a); + /// + /// If an \a output_aborter is given, the determinization is aborted + /// whenever it would produce an automaton that is too large. In + /// that case, a nullptr is returned. + SPOT_API twa_graph_ptr minimize_wdba(const const_twa_graph_ptr& a, + const output_aborter* aborter = nullptr); /// \brief Minimize an automaton if it represents an obligation property. /// @@ -149,10 +155,15 @@ namespace spot /// determinization step during minimize_wdba().) Note that /// checking the size of the minimized WDBA occurs before ensuring /// that the minimized WDBA is correct. + /// + /// If an \a output_aborter is given, the determinization is aborted + /// whenever it would produce an automaton that is too large. In + /// this case, aut_f is returned unchanged. SPOT_API twa_graph_ptr minimize_obligation(const const_twa_graph_ptr& aut_f, formula f = nullptr, const_twa_graph_ptr aut_neg_f = nullptr, - bool reject_bigger = false); + bool reject_bigger = false, + const output_aborter* aborter = nullptr); /// @} } diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc index 0c14af47f..76749fdba 100644 --- a/spot/twaalgos/postproc.cc +++ b/spot/twaalgos/postproc.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2018 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -71,6 +71,8 @@ namespace spot det_scc_ = opt->get("det-scc", 1); det_simul_ = opt->get("det-simul", 1); det_stutter_ = opt->get("det-stutter", 1); + det_max_states_ = opt->get("det-max-states", -1); + det_max_edges_ = opt->get("det-max-edges", -1); simul_ = opt->get("simul", -1); scc_filter_ = opt->get("scc-filter", -1); ba_simul_ = opt->get("ba-simul", -1); @@ -337,12 +339,19 @@ namespace spot twa_graph_ptr dba = nullptr; twa_graph_ptr sim = nullptr; + output_aborter aborter_ + (det_max_states_ >= 0 ? static_cast(det_max_states_) : -1U, + det_max_edges_ >= 0 ? static_cast(det_max_edges_) : -1U); + output_aborter* aborter = + (det_max_states_ >= 0 || det_max_edges_ >= 0) ? &aborter_ : nullptr; + // (Small,Low) is the only configuration where we do not run // WDBA-minimization. if ((PREF_ != Small || level_ != Low) && wdba_minimize_) { + // FIXME: This should be level_ <= Medium I believe. bool reject_bigger = (PREF_ == Small) && (level_ == Medium); - dba = minimize_obligation(a, f, nullptr, reject_bigger); + dba = minimize_obligation(a, f, nullptr, reject_bigger, aborter); if (dba && dba->prop_inherently_weak().is_true() && dba->prop_universal().is_true()) @@ -459,11 +468,17 @@ namespace spot if ((PREF_ == Deterministic && (type_ == Generic || want_parity)) && !dba) { dba = tgba_determinize(to_generalized_buchi(sim), - false, det_scc_, det_simul_, det_stutter_); - dba = simplify_acc(dba); - if (level_ != Low) - dba = simulation(dba); - sim = nullptr; + false, det_scc_, det_simul_, det_stutter_, + aborter); + // Setting det-max-states or det-max-edges may cause tgba_determinize + // to fail. + if (dba) + { + dba = simplify_acc(dba); + if (level_ != Low) + dba = simulation(dba); + sim = nullptr; + } } // Now dba contains either the result of WDBA-minimization (in diff --git a/spot/twaalgos/postproc.hh b/spot/twaalgos/postproc.hh index 948b49b96..8a75dfe32 100644 --- a/spot/twaalgos/postproc.hh +++ b/spot/twaalgos/postproc.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2018 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
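The postprocessor picks those two bounds up from its option_map, which is also how the new complement() and the spot-x options shown earlier hook into it. A hedged sketch of setting them by hand (the 500/5000 values merely mirror ltlcross's defaults, and the function name is hypothetical):

    // Sketch: bound the determinization performed by spot::postprocessor
    // through the new "det-max-states"/"det-max-edges" options.
    #include <spot/misc/optionmap.hh>
    #include <spot/twaalgos/postproc.hh>

    spot::twa_graph_ptr
    bounded_determinization(spot::twa_graph_ptr aut)
    {
      spot::option_map opts;
      opts.set("det-max-states", 500);
      opts.set("det-max-edges", 5000);
      spot::postprocessor post(&opts);
      post.set_type(spot::postprocessor::Generic);
      post.set_pref(spot::postprocessor::Deterministic);
      post.set_level(spot::postprocessor::Low);
      // When the bound is hit, the determinization is abandoned and the
      // result may stay non-deterministic (see the spot-x documentation
      // of det-max-states/det-max-edges above).
      return post.run(aut);
    }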
@@ -240,6 +240,8 @@ namespace spot bool det_scc_ = true; bool det_simul_ = true; bool det_stutter_ = true; + int det_max_states_ = -1; + int det_max_edges_ = -1; int simul_ = -1; int scc_filter_ = -1; int ba_simul_ = -1; diff --git a/spot/twaalgos/powerset.cc b/spot/twaalgos/powerset.cc index b80d69253..04c334b4d 100644 --- a/spot/twaalgos/powerset.cc +++ b/spot/twaalgos/powerset.cc @@ -70,8 +70,19 @@ namespace spot }; } + std::ostream& output_aborter::print_reason(std::ostream& os) const + { + os << "more than "; + if (reason_is_states_) + os << max_states_ << " states required"; + else + os << max_edges_ << " edges required"; + return os; + } + twa_graph_ptr - tgba_powerset(const const_twa_graph_ptr& aut, power_map& pm, bool merge) + tgba_powerset(const const_twa_graph_ptr& aut, power_map& pm, bool merge, + const output_aborter* aborter) { unsigned ns = aut->num_states(); unsigned nap = aut->ap().size(); @@ -245,6 +256,12 @@ namespace spot pm.map_.emplace_back(std::move(ps)); } res->new_edge(src_num, dst_num, num2bdd[c]); + if (aborter && aborter->too_large(res)) + { + for (auto v: toclean) + delete v; + return nullptr; + } } } @@ -256,10 +273,11 @@ namespace spot } twa_graph_ptr - tgba_powerset(const const_twa_graph_ptr& aut) + tgba_powerset(const const_twa_graph_ptr& aut, + const output_aborter* aborter) { power_map pm; - return tgba_powerset(aut, pm); + return tgba_powerset(aut, pm, true, aborter); } @@ -428,10 +446,13 @@ namespace spot // Do not merge edges in the deterministic automaton. If we // add two self-loops labeled by "a" and "!a", we do not want // these to be merged as "1" before the acceptance has been fixed. - auto det = tgba_powerset(aut, pm, false); - if ((threshold_states > 0) - && (pm.map_.size() > aut->num_states() * threshold_states)) + unsigned max_states = aut->num_states() * threshold_states; + if (max_states == 0) + max_states = ~0U; + output_aborter aborter(max_states); + auto det = tgba_powerset(aut, pm, false, &aborter); + if (!det) return nullptr; if (fix_dba_acceptance(det, aut, pm, threshold_cycles)) return nullptr; diff --git a/spot/twaalgos/powerset.hh b/spot/twaalgos/powerset.hh index 5661109a9..d1d601c05 100644 --- a/spot/twaalgos/powerset.hh +++ b/spot/twaalgos/powerset.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013, 2014, 2015 Laboratoire de Recherche et +// Copyright (C) 2011, 2013-2015, 2019 Laboratoire de Recherche et // Développement de l'Epita. // Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -24,6 +24,7 @@ #include #include +#include #include namespace spot @@ -41,6 +42,43 @@ namespace spot } }; + /// \brief Helper object to specify when an algorithm + /// should abort its construction. 
+ class SPOT_API output_aborter + { + unsigned max_states_; + unsigned max_edges_; + mutable bool reason_is_states_; + public: + output_aborter(unsigned max_states, + unsigned max_edges = ~0U) + : max_states_(max_states), max_edges_(max_edges) + { + } + + unsigned max_states() const + { + return max_states_; + } + + unsigned max_edges() const + { + return max_edges_; + } + + bool too_large(const const_twa_graph_ptr& aut) const + { + bool too_many_states = aut->num_states() > max_states_; + if (!too_many_states && (aut->num_edges() <= max_edges_)) + return false; + // Only update the reason if we return true; + reason_is_states_ = too_many_states; + return true; + } + + std::ostream& print_reason(std::ostream&) const; + }; + /// \ingroup twa_misc /// \brief Build a deterministic automaton, ignoring acceptance conditions. @@ -53,12 +91,17 @@ namespace spot /// associated to each state of the deterministic automaton. /// The \a merge argument can be set to false to prevent merging of /// transitions. + /// + /// If ab \a aborter is given, abort the construction whenever it + /// would build an automaton that is too large, and return nullptr. //@{ SPOT_API twa_graph_ptr tgba_powerset(const const_twa_graph_ptr& aut, - power_map& pm, bool merge = true); + power_map& pm, bool merge = true, + const output_aborter* aborter = nullptr); SPOT_API twa_graph_ptr - tgba_powerset(const const_twa_graph_ptr& aut); + tgba_powerset(const const_twa_graph_ptr& aut, + const output_aborter* aborter = nullptr); //@} diff --git a/spot/twaalgos/product.cc b/spot/twaalgos/product.cc index 5579e706e..756213e1e 100644 --- a/spot/twaalgos/product.cc +++ b/spot/twaalgos/product.cc @@ -48,7 +48,8 @@ namespace spot const const_twa_graph_ptr& right, unsigned left_state, unsigned right_state, - twa_graph_ptr res, T merge_acc) + twa_graph_ptr res, T merge_acc, + const output_aborter* aborter) { std::unordered_map s2n; std::deque> todo; @@ -78,6 +79,11 @@ namespace spot return; while (!todo.empty()) { + if (aborter && aborter->too_large(res)) + { + res = nullptr; + return; + } auto top = todo.front(); todo.pop_front(); for (auto& l: left->out(top.first.first)) @@ -99,7 +105,8 @@ namespace spot const const_twa_graph_ptr& right, unsigned left_state, unsigned right_state, - bool and_acc) + bool and_acc, + const output_aborter* aborter) { if (SPOT_UNLIKELY(!(left->is_existential() && right->is_existential()))) throw std::runtime_error @@ -148,7 +155,7 @@ namespace spot return accmark; else return rejmark; - }); + }, aborter); else product_main(left, right, left_state, right_state, res, [&] (acc_cond::mark_t ml, acc_cond::mark_t mr) @@ -157,7 +164,7 @@ namespace spot return accmark; else return rejmark; - }); + }, aborter); } else if (!rightweak) { @@ -181,7 +188,7 @@ namespace spot return mr; else return rejmark; - }); + }, aborter); } else { @@ -203,7 +210,7 @@ namespace spot return mr; else return accmark; - }); + }, aborter); } } @@ -230,7 +237,7 @@ namespace spot return ml; else return rejmark; - }); + }, aborter); } else { @@ -252,7 +259,7 @@ namespace spot return ml; else return accmark; - }); + }, aborter); } } @@ -271,10 +278,13 @@ namespace spot [&] (acc_cond::mark_t ml, acc_cond::mark_t mr) { return ml | (mr << left_num); - }); + }, aborter); } + if (!res) // aborted + return nullptr; + // The product of two non-deterministic automata could be // deterministic. Likewise for non-complete automata. 
if (left->prop_universal() && right->prop_universal()) @@ -297,17 +307,19 @@ namespace spot twa_graph_ptr product(const const_twa_graph_ptr& left, const const_twa_graph_ptr& right, unsigned left_state, - unsigned right_state) + unsigned right_state, + const output_aborter* aborter) { - return product_aux(left, right, left_state, right_state, true); + return product_aux(left, right, left_state, right_state, true, aborter); } twa_graph_ptr product(const const_twa_graph_ptr& left, - const const_twa_graph_ptr& right) + const const_twa_graph_ptr& right, + const output_aborter* aborter) { return product(left, right, left->get_init_state_number(), - right->get_init_state_number()); + right->get_init_state_number(), aborter); } twa_graph_ptr product_or(const const_twa_graph_ptr& left, @@ -316,7 +328,7 @@ namespace spot unsigned right_state) { return product_aux(complete(left), complete(right), - left_state, right_state, false); + left_state, right_state, false, nullptr); } twa_graph_ptr product_or(const const_twa_graph_ptr& left, diff --git a/spot/twaalgos/product.hh b/spot/twaalgos/product.hh index b38f54e8e..e78b18f7c 100644 --- a/spot/twaalgos/product.hh +++ b/spot/twaalgos/product.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2018 Laboratoire de Recherche et +// Copyright (C) 2014, 2015, 2018, 2019 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -21,6 +21,7 @@ #include #include +#include #include #include @@ -45,9 +46,13 @@ namespace spot /// "product-states" with type spot::product_states. This stores /// the pair of original state numbers associated to each state of /// the product. + /// + /// If an \a aborter is given, the function will return nullptr + /// whenever the resulting product would be too large. SPOT_API twa_graph_ptr product(const const_twa_graph_ptr& left, - const const_twa_graph_ptr& right); + const const_twa_graph_ptr& right, + const output_aborter* aborter = nullptr); /// \ingroup twa_algorithms /// \brief Intersect two automata using a synchronous product @@ -68,11 +73,15 @@ namespace spot /// "product-states" with type spot::product_states. This stores /// the pair of original state numbers associated to each state of /// the product. + /// + /// If an \a aborter is given, the function will return nullptr + /// whenever the resulting product would be too large. SPOT_API twa_graph_ptr product(const const_twa_graph_ptr& left, const const_twa_graph_ptr& right, unsigned left_state, - unsigned right_state); + unsigned right_state, + const output_aborter* aborter = nullptr); /// \ingroup twa_algorithms /// \brief Sum two automata using a synchronous product diff --git a/tests/core/ltlcross3.test b/tests/core/ltlcross3.test index 749e38077..3b8019647 100755 --- a/tests/core/ltlcross3.test +++ b/tests/core/ltlcross3.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012-2018 Laboratoire de Recherche et Développement de +# Copyright (C) 2012-2019 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
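The product() overloads above gain the same optional aborter; a minimal sketch, with an arbitrary one-million bound and a hypothetical wrapper name:

    // Sketch: abandon a synchronous product that would grow beyond
    // one million states or edges.
    #include <spot/twaalgos/product.hh>

    spot::twa_graph_ptr
    bounded_product(const spot::const_twa_graph_ptr& left,
                    const spot::const_twa_graph_ptr& right)
    {
      spot::output_aborter aborter(1000000, 1000000);
      // Returns nullptr as soon as the partial product exceeds the bounds.
      return spot::product(left, right, &aborter);
    }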
@@ -34,7 +34,7 @@ check_csv() # Make sure ltlcross quotes formulas correctly cat >formula <<\EOF G"a'-'>'b" -FGa +FGa & GFb EOF run 0 ltlcross -F formula --csv=out.csv \ "ltl2tgba -s %f >%N" \ @@ -48,11 +48,11 @@ info: check_empty P1*N0 info: check_empty P1*N1 info: check_empty Comp(N1)*Comp(P1) info: check_empty P0*N0 +info: check_empty Comp(N0)*Comp(P0) info: check_empty P0*N1 -info: check_empty Comp(N0)*N1 info: check_empty P1*N0 -info: check_empty Comp(N1)*N0 info: check_empty P1*N1 +info: check_empty Comp(N1)*Comp(P1) EOF grep 'info: check_empty' error > ce diff -u ce ceplan @@ -60,6 +60,7 @@ diff -u ce ceplan ltlcross -F formula --csv=out.csv \ --ref "ltl2tgba -s %f >%N" \ "ltl2tgba --lenient -s %s >%N" \ + --determinize-max-states=0 \ --verbose 2> error cat >ceplan < ce @@ -318,3 +318,18 @@ ltlcross --color --products=0 ltl2tgba -f GFa -f FGa --csv=out.csv grep product out.csv && exit 1 check_csv out.csv +# --determinize and --determinize-max-states are incompatible. +ltlcross -f a ltl2tgba --determinize --determinize-max-states=10 2>stderr && + exit 1 +grep 'max-states.*incompatible' stderr +ltlcross -f a ltl2tgba --determinize-max-states=10 --determinize 2>stderr && + exit 1 +grep 'max-states.*incompatible' stderr + +# --determinize and --determinize-max-edges are incompatible. +ltlcross -f a ltl2tgba --determinize-max-edges=10 --determinize 2>stderr && + exit 1 +grep 'max-edges.*incompatible' stderr +ltlcross -f a ltl2tgba --determinize-max-edges=10 --determinize 2>stderr && + exit 1 +grep 'max-edges.*incompatible' stderr diff --git a/tests/core/ltlcrossce2.test b/tests/core/ltlcrossce2.test index 3e8861fbc..60233fa59 100755 --- a/tests/core/ltlcrossce2.test +++ b/tests/core/ltlcrossce2.test @@ -97,7 +97,9 @@ esac EOF chmod +x fake -genltl --eh=9 | ltlcross 'ltl2tgba' './fake %f >%O' 2>errors && exit 1 +genltl --eh=9 | + ltlcross --determinize-max-states=5 'ltl2tgba' './fake %f >%O' 2>errors && + exit 1 cat errors grep 'error: P0\*Comp(P1) is nonempty' errors grep 'error: {P0} disagree with {P1}' errors diff --git a/tests/core/sccsimpl.test b/tests/core/sccsimpl.test index c62989b13..ae9f456a1 100755 --- a/tests/core/sccsimpl.test +++ b/tests/core/sccsimpl.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2013, 2015, 2018 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) 2011, 2013, 2015, 2018, 2019 Laboratoire de Recherche +# et Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -316,7 +316,7 @@ AP: 1 "p0" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels explicit-labels state-acc complete -properties: deterministic very-weak +properties: deterministic terminal --BODY-- State: 0 {0} [t] 0 @@ -331,7 +331,7 @@ AP: 1 "p0" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels explicit-labels state-acc complete -properties: deterministic very-weak +properties: deterministic terminal --BODY-- State: 0 {0} [t] 0 diff --git a/tests/core/wdba2.test b/tests/core/wdba2.test index fb20d0a25..ca49bad94 100755 --- a/tests/core/wdba2.test +++ b/tests/core/wdba2.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015, 2018 Laboratoire de Recherche et +# Copyright (C) 2012, 2014, 2015, 2018, 2019 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
@@ -67,7 +67,7 @@ Start: 2 AP: 1 "p1" acc-name: Buchi Acceptance: 1 Inf(0) -properties: implicit-labels state-acc complete deterministic very-weak +properties: implicit-labels state-acc complete deterministic terminal --BODY-- State: 0 {0} 0 0 diff --git a/tests/python/stutter-inv.ipynb b/tests/python/stutter-inv.ipynb index 7d92119e5..d283decf5 100644 --- a/tests/python/stutter-inv.ipynb +++ b/tests/python/stutter-inv.ipynb @@ -302,7 +302,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fbbb85b6780> >" + " *' at 0x7f691c29f3f0> >" ] }, "metadata": {}, @@ -603,7 +603,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fbbb8570cf0> >" + " *' at 0x7f691c31c360> >" ] }, "metadata": {}, @@ -812,7 +812,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fbbb8570cf0> >" + " *' at 0x7f691c31c360> >" ] }, "metadata": {}, @@ -964,7 +964,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fbbb85b6750> >" + " *' at 0x7f691c29f840> >" ] }, "metadata": {}, @@ -1062,7 +1062,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fbbb8570450> >" + " *' at 0x7f691c29f690> >" ] }, "metadata": {}, @@ -1272,7 +1272,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fbbb857f120> >" + " *' at 0x7f691c29f7e0> >" ] }, "metadata": {}, @@ -1401,7 +1401,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fbbb8570510> >" + " *' at 0x7f691c29f780> >" ] }, "metadata": {}, @@ -1452,87 +1452,87 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", + "\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "b\n", "\n", "\n", - "\n", + "\n", "1\n", - "\n", - "1\n", - "\n", + "\n", + "1\n", "\n", "\n", "\n", "3->1\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "b\n", "\n", "\n", - "\n", + "\n", "2\n", - "\n", - "2\n", - "\n", + "\n", + "2\n", + "\n", "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fbbb850fae0> >" + " *' at 0x7f691c29f810> >" ] }, "metadata": {}, @@ -1792,7 +1792,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fbbb850fb10> >" + " *' at 0x7f691c23bb10> >" ] }, "metadata": {}, @@ -2106,7 +2106,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fbbb850fb10> >" + " *' at 0x7f691c23bb10> >" ] }, "metadata": {},